#region Imports
import tomllib

import requests

from utils.Logger import Log
#endregion
#region Variables
# Load configuration from the config.toml file
with open("./config.toml", "rb") as f:
    data = tomllib.load(f)
enabled = data["INTEGRATION"]["AI"]["enabled"]
generate_endpoint = data["INTEGRATION"]["AI"]["generate_endpoint"]
model_list = data["INTEGRATION"]["AI"]["generate_models"]
use_groq = data["INTEGRATION"]["AI"]["use_groq"]
groq_api = data["INTEGRATION"]["AI"]["groq_api_token"]
prompt = data["INTEGRATION"]["AI"]["prompt"]
# If Groq is enabled, update the generate endpoint
if use_groq:
    generate_endpoint = "https://api.groq.com/openai/v1/chat/completions"
#endregion
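
# For reference, a minimal sketch of the [INTEGRATION.AI] table this module
# expects in config.toml; the keys mirror the lookups above, while the values
# are illustrative assumptions only:
#
#   [INTEGRATION.AI]
#   enabled = true
#   generate_endpoint = "http://localhost:11434/api/generate"  # Ollama default
#   generate_models = ["llama-3.1-8b-instant"]
#   use_groq = true
#   groq_api_token = "gsk_..."
#   prompt = "Describe what this code change does."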
def generate_response(src):
    """Generate a response using the Groq or Ollama API, trying each configured model in turn."""
    error_messages = []
    for generate_model in model_list:
        try:
            headers = {
                "Content-Type": "application/json",
            }

            # Add the authorization header when routing through Groq
            if use_groq:
                headers["Authorization"] = f"Bearer {groq_api}"

            # Build the request payload; the sampling parameters follow the
            # OpenAI-compatible schema that Groq accepts
            payload = {
                "model": generate_model,
                "temperature": 1,
                "max_completion_tokens": 1024,
                "top_p": 1,
                "stream": False,
                "stop": None,
            }

            # Groq expects OpenAI-style chat messages; Ollama's /api/generate
            # takes a single prompt string instead
            if use_groq:
                payload["messages"] = [
                    {
                        "role": "system",
                        "content": prompt,
                    },
                    {
                        "role": "user",
                        "content": f"```code\n{src}\n```",
                    },
                ]
            else:
                payload["prompt"] = f"Using this data: {src}. Respond to this prompt: {prompt}\n"

            response = requests.post(generate_endpoint, json=payload, headers=headers, timeout=60)
            response.raise_for_status()
            if use_groq:
                return response.json()["choices"][0]["message"]["content"] + f"\n\n> AI Model: {generate_model}"
            # Ollama's non-streaming generate API returns the text in the "response" field
            return response.json().get("response", "")

        except requests.exceptions.RequestException as e:
            Log.e(f"Failed to generate response: {e}")
            Log.e(f"Using model: {generate_model}")
            error_messages.append(f"Model {generate_model} failed: {e}")
            # Fall through and try the next configured model instead of
            # returning None here, which would defeat the fallback loop

    return f"All models failed to generate a response. Errors: {error_messages}"


def ai_analyse(src):
    """Analyze a source file and return an AI-generated description of it."""
    if enabled:
        try:
            # Generate a response from the file contents
            response = generate_response(src)
            if response:
                # Log.s(f"Generated Response: {response}")
                return response
            else:
                return "No AI Description provided for this action; check config.toml maybe?"
        except Exception as e:
            Log.e(f"Unexpected error: {e}")
    else:
        return "No AI Description provided for this action; check config.toml maybe?"
    return None
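

# A minimal manual test, assuming config.toml sits in the working directory
# and utils.Logger is importable; the sample file path is purely illustrative.
if __name__ == "__main__":
    with open("example.py", "r", encoding="utf-8") as f:
        print(ai_analyse(f.read()))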