#region Imports
import tomllib  # Stdlib TOML parser; requires Python 3.11+

import requests

from utils.logger import Log
#endregion

#region Variables
# Load configuration from the config.toml file
with open("./config.toml", "rb") as f:
    data = tomllib.load(f)

enabled = data["INTEGRATION"]["AI"]["enabled"]
generate_endpoint = data["INTEGRATION"]["AI"]["generate_endpoint"]
generate_model = data["INTEGRATION"]["AI"]["generate_model"]
use_groq = data["INTEGRATION"]["AI"]["use_groq"]
groq_api = data["INTEGRATION"]["AI"]["groq_api_token"]
prompt = data["INTEGRATION"]["AI"]["prompt"]

# If Groq is enabled, point the generate endpoint at Groq's
# OpenAI-compatible chat completions API
if use_groq:
    generate_endpoint = "https://api.groq.com/openai/v1/chat/completions"
#endregion
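
# For reference, the [INTEGRATION.AI] table this module reads might look
# like the sketch below. These are illustrative values, not the project's
# shipped defaults; the Ollama-style endpoint in particular is only an
# assumption about what the non-Groq backend could be:
#
#   [INTEGRATION.AI]
#   enabled = true
#   use_groq = false
#   generate_endpoint = "http://localhost:11434/api/generate"
#   generate_model = "llama3"
#   groq_api_token = ""
#   prompt = "Summarise this file."
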
def generate_response(data):
    """Generate a response from the configured endpoint (Groq or a local backend)."""
    try:
        # Create headers
        headers = {
            "Content-Type": "application/json",
        }

        # Add authorization header if using Groq
        if use_groq:
            headers["Authorization"] = f"Bearer {groq_api}"

        # Create payload
        payload = {
            "model": generate_model,
            "temperature": 1,
            "max_completion_tokens": 1024,
            "top_p": 1,
            "stream": False,
            "stop": None,
        }

        # Groq's OpenAI-compatible API expects a "messages" list; other
        # backends are assumed to take a plain "prompt" string
        if use_groq:
            payload["messages"] = [
                {
                    "role": "user",
                    "content": f"Using this data: {data}. Respond to this prompt: {prompt}",
                }
            ]
        else:
            payload["prompt"] = f"Using this data: {data}. Respond to this prompt: {prompt}"

        # Time out rather than hang indefinitely on an unresponsive endpoint
        response = requests.post(generate_endpoint, json=payload, headers=headers, timeout=60)
        response.raise_for_status()

        if use_groq:
            return response.json()["choices"][0]["message"]["content"]
        return response.json()

    except requests.exceptions.RequestException as e:
        Log.e(f"Failed to generate response: {e}")
        return None

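
# For reference, a successful Groq chat-completion response is shaped
# roughly like the abridged sketch below, which is why the Groq branch
# indexes ["choices"][0]["message"]["content"]:
#
#   {
#     "choices": [
#       {"message": {"role": "assistant", "content": "..."}}
#     ]
#   }
#
# The non-Groq branch returns the raw JSON unchanged, because the schema
# depends on whichever backend generate_endpoint points at.
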
def ai_analyse(file_path):
    """Analyse a file and generate a response based on the configured prompt."""
    if enabled:
        try:
            # Open and read file data
            with open(file_path, "r", encoding="utf-8") as file:
                file_data = file.read()

            # Generate response using the file data
            response = generate_response(file_data)
            if response:
                # Log.s(f"Generated Response: {response}")
                return response
            else:
                Log.e("AI did not respond.")
        except FileNotFoundError:
            Log.e(f"File not found: {file_path}")
        except Exception as e:
            Log.e(f"Unexpected error: {e}")
    else:
        return "AI integration is disabled in the configuration; enable it to use AI file analysis."
    return None

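
# Note: ai_analyse sends the whole file to the model, so very large files
# can exceed the model's context window. A minimal guard, with a purely
# hypothetical character budget (tune per model), could trim the input
# before calling generate_response:
#
#   MAX_CHARS = 16_000  # assumption, not a measured limit
#   file_data = file_data[:MAX_CHARS]
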
# Example usage
if __name__ == "__main__":
    file_path = "example.txt"  # Replace with your input file path
    result = ai_analyse(file_path)
    if result:
        print("[INFO] Analysis Result:")
        print(result)