diff --git a/config.toml b/config.toml index 1632b7b..1a9d191 100644 --- a/config.toml +++ b/config.toml @@ -49,7 +49,7 @@ enabled = true generate_models = ["llama-3.2-90b-vision-preview","llama-3.3-70b-versatile","llama-3.3-70b-specdec","llama-3.2-11b-vision-preview","llama3-70b-8192","llama-3.1-8b-instant","llama3-8b-8192","llama-3.2-3b-preview","llama-3.2-1b-preview"] # for home usage gemma3:1b recommended, for Groq llama-3.1-8b-instant generate_endpoint = "http://IP:PORT/api/generate" # Can be empty if using groq use_groq = true -groq_api_token = "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" # Get one at https://console.groq.com/keys +groq_api_token = "" # Get one at https://console.groq.com/keys # Example API key @@ -58,6 +58,6 @@ prompt = "Analyze the given code and return an abuse score (0-10) with a brief r [INTEGRATION.DISCORD] enabled = true -webhook_url = "https://discord.com/api/webhooks/1353420411018416128/2lK_3tB4DGYfO6bUMfghNuf6N1tr4A8mPM-S3Lct0qwnibuGbzsDfSoK62231Qi3QQsM" +webhook_url = "" # Example webhook truncate_text = true # Used only if AI INTEGRATION is enabled, truncates text if true to the maximum allowed characters, or when false splits into multiple webhook messages.