Merged
16 changes: 16 additions & 0 deletions .idea/csv-editor.xml

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions CODE/Logicytics.py
@@ -441,6 +441,7 @@ def __and_log(directory: str, name: str):
    log.debug(
        f"Zipping directory '{directory}' with name '{name}' under action '{ACTION}'"
    )
    # noinspection PyUnreachableCode
    zip_values = file_management.Zip.and_hash(
        directory,
        name,
98 changes: 10 additions & 88 deletions CODE/config.ini
@@ -26,8 +26,8 @@ save_preferences = true
[System Settings]
# Do not play with these settings unless you know what you are doing
# Dev Mode allows a safe way to modify these settings!!
version = 3.5.1
files = "bluetooth_details.py, bluetooth_logger.py, browser_miner.ps1, cmd_commands.py, config.ini, dir_list.py, dump_memory.py, event_log.py, Logicytics.py, log_miner.py, media_backup.py, netadapter.ps1, network_psutil.py, packet_sniffer.py, property_scraper.ps1, registry.py, sensitive_data_miner.py, ssh_miner.py, sys_internal.py, tasklist.py, tree.ps1, vulnscan.py, wifi_stealer.py, window_feature_miner.ps1, wmic.py, logicytics\Checks.py, logicytics\Config.py, logicytics\Execute.py, logicytics\FileManagement.py, logicytics\Flag.py, logicytics\Get.py, logicytics\Logger.py, logicytics\User_History.json.gz, vulnscan\SenseMini.3n3.pth, vulnscan\vectorizer.3n3.pkl"
version = 3.6.0
files = "bluetooth_details.py, bluetooth_logger.py, browser_miner.ps1, cmd_commands.py, config.ini, dir_list.py, dump_memory.py, encrypted_drive_audit.py, event_log.py, Logicytics.py, log_miner.py, media_backup.py, netadapter.ps1, network_psutil.py, packet_sniffer.py, property_scraper.ps1, registry.py, sensitive_data_miner.py, ssh_miner.py, sys_internal.py, tasklist.py, tree.ps1, usb_history.py, vulnscan.py, wifi_stealer.py, window_feature_miner.ps1, wmic.py, logicytics\Checks.py, logicytics\Config.py, logicytics\Execute.py, logicytics\FileManagement.py, logicytics\Flag.py, logicytics\Get.py, logicytics\Logger.py, logicytics\User_History.json.gz, vulnscan\Model_SenseMacro.4n1.pth"
# If you forked the project, change the USERNAME to your own to use your own fork as update material,
# I don't advise doing this, however
config_url = https://raw.githubusercontent.com/DefinetlyNotAI/Logicytics/main/CODE/config.ini
@@ -100,93 +100,15 @@ timeout = 10
max_retry_time = 30

###################################################

[VulnScan Settings]
# The following extensions will be skipped by the model
# Format: comma-separated list with dots (e.g., .exe, .dll)
unreadable_extensions = .exe, .dll, .so, .zip, .tar, .gz, .7z, .rar, .jpg, .jpeg, .png, .gif, .bmp, .tiff, .webp, .mp3, .wav, .flac, .aac, .ogg, .mp4, .mkv, .avi, .mov, .wmv, .flv, .pdf, .doc, .docx, .xls, .xlsx, .ppt, .pptx, .odt, .ods, .odp, .bin, .dat, .iso, .class, .pyc, .o, .obj, .sqlite, .db, .ttf, .otf, .woff, .woff2, .lnk, .url
# Max file size (in MB) that the model is allowed to scan; commenting this out or setting it to None disables the limit
max_file_size_mb = None
# Max workers to be used, either integer or use auto to make it decide the best value
# Max characters of text from each file to analyze. Set an integer or None to disable truncation.
text_char_limit = None
# Max workers to be used, either integer or use "auto" to make it decide the best value
max_workers = auto

[VulnScan.generate Settings]
# The following settings are for the Generate module for fake training data
extensions = .txt, .log, .md, .csv, .json, .xml, .html, .yaml, .ini, .pdf, .docx, .xlsx, .pptx
save_path = PATH

# Options include:
# 'Sense' - Generates 50k files, each 25KB in size.
# 'SenseNano' - Generates 5 files, each 5KB in size.
# 'SenseMacro' - Generates 1m files, each 10KB in size.
# 'SenseMini' - Generates 10k files, each 10KB in size.
# 'SenseCustom' - Uses custom size settings from the configuration file.
code_name = SenseMini

# This allows more randomness in the file sizes, use 0 to disable
# this is applied randomly every time a file is generated
# Variation is applied in the following way:
# size +/- (size * variation) or size +/- (size / variation), where whether to add or subtract
# and whether to multiply or divide are both chosen at random
size_variation = 0.1

# Set to SenseCustom to use below size settings
min_file_size = 5KB
max_file_size = 50KB

# Chances for the following data types in files:
# 0.0 - 1.0, the rest will be for pure data
full_sensitive_chance = 0.07
partial_sensitive_chance = 0.2

[VulnScan.vectorizer Settings]
# The following settings are for the Vectorizer module for vectorizing data
# Usually it automatically vectorizes data, but this is for manual vectorization

# We advise using the supplied vectorizer; working with an unknown vectorizer is not advised,
# as it may lead to ValueErrors due to mismatched inputs
# Use the vectorizer supplied for any v3 model on SenseMini

# The path to the data to vectorize, either a file or a directory
data_path = PATH
# The path to save the vectorized data - '\Vectorizer.pkl' will be appended automatically
# Make sure the path is a directory, and it exists
output_path = PATH

# Vectorizer to use, options include:
# tfidf or count - the training code only supports tfidf, so we advise using tfidf
vectorizer_type = tfidf

[VulnScan.train Settings]
# The following settings are for the Train module for training models
# NeuralNetwork seems to be the best choice for this task
# Options: "NeuralNetwork", "LogReg",
# "RandomForest", "ExtraTrees", "GBM",
# "XGBoost", "DecisionTree", "NaiveBayes"
model_name = NeuralNetwork

# General Training Parameters
epochs = 10
batch_size = 32
learning_rate = 0.001
use_cuda = true

# Paths to train and save data
train_data_path = PATH
# If all models are to be trained, this is the path to save all models,
# and will be appended with the model codename and follow naming convention
save_model_path = PATH

[VulnScan.study Settings]
# Here are the basics of the study module
# It is useful for generating graphs and data that may help in understanding the model
# Everything is available online pre-studied, so this is not necessary,
# but it is useful for understanding the model locally
# All files will be saved here, and this can't be changed; the PATH is "NN features/"

# This is the path to the model, and the vectorizer
model_path = PATH
vectorizer_path = PATH
# Number of features to visualise in the SVG Bar graph, maximum is 3000 due to limitations
# Placing -1 will visualise the first 3000 features. The bar will be a color gradient heatmap.
number_of_features = -1
# Sensitivity threshold (0.0–1.0) for the model to flag content as sensitive
threshold = 0.6
# Paths for required files
model = vulnscan/Model_SenseMacro.4n1.pth

##################################################
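
For reference, here is a minimal sketch of how the updated [VulnScan Settings] values might be read and normalised with Python's standard configparser. The file path, variable names, and fallbacks are assumptions for illustration; the project's own logicytics Config module may handle this differently.

import configparser

config = configparser.ConfigParser()
config.read("CODE/config.ini")
vuln = config["VulnScan Settings"]

# "None" disables the size/character limits; "auto" lets the tool pick the worker count.
raw_size = vuln.get("max_file_size_mb", "None")
max_file_size_mb = None if raw_size.strip().lower() == "none" else float(raw_size)

raw_chars = vuln.get("text_char_limit", "None")
text_char_limit = None if raw_chars.strip().lower() == "none" else int(raw_chars)

raw_workers = vuln.get("max_workers", "auto")
max_workers = None if raw_workers.strip().lower() == "auto" else int(raw_workers)

threshold = vuln.getfloat("threshold", fallback=0.6)  # 0.0-1.0 sensitivity cut-off
model_path = vuln.get("model", "vulnscan/Model_SenseMacro.4n1.pth")
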
106 changes: 106 additions & 0 deletions CODE/encrypted_drive_audit.py
@@ -0,0 +1,106 @@
import datetime
import getpass
import os
import platform
import shutil
import subprocess
from pathlib import Path

from logicytics import check, log


def now_iso():
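    """Return the current local time as an ISO 8601 string with timezone offset."""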
    return datetime.datetime.now().astimezone().isoformat()


def run_cmd(cmd):
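    """Run a command with a 30-second timeout and return (stdout, stderr, returncode)."""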
log.debug(f"Running command: {cmd}")
try:
proc = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
if proc.returncode == 0:
log.debug(f"Command succeeded: {cmd}")
else:
log.warning(f"Command returned {proc.returncode}: {cmd}")
return proc.stdout.strip(), proc.stderr.strip(), proc.returncode
except FileNotFoundError:
log.error(f"Command not found: {cmd[0]}")
return "", "not found", 127
except subprocess.TimeoutExpired:
log.error(f"Command timed out: {cmd}")
return "", "timeout", 124


def have(cmd_name):
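    """Return True if cmd_name is found on PATH."""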
    exists = shutil.which(cmd_name) is not None
    log.debug(f"Check if '{cmd_name}' exists: {exists}")
    return exists


def get_mountvol_output():
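    """Return mountvol output trimmed to the volume listing (from the first \\?\Volume line onward)."""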
log.info("Gathering mounted volumes via mountvol")
out, err, _ = run_cmd(["mountvol"])
if not out:
return err
lines = out.splitlines()
filtered = []
keep = False
for line in lines:
if line.strip().startswith("\\\\?\\Volume"):
keep = True
if keep:
filtered.append(line)
return "\n".join(filtered)


def main():
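    """Collect volume, mount point, and BitLocker information and write it to a text report beside this script."""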
    script_dir = Path(__file__).resolve().parent
    report_path = script_dir / "win_encrypted_volume_report.txt"
    log.info(f"Starting encrypted volume analysis, report will be saved to {report_path}")

    with report_path.open("w", encoding="utf-8") as f:
        f.write("=" * 80 + "\n")
        f.write("Windows Encrypted Volume Report\n")
        f.write("=" * 80 + "\n")
        f.write(f"Generated at: {now_iso()}\n")
        f.write(f"User: {getpass.getuser()}\n")
        f.write(f"IsAdmin: {check.admin()}\n")
        f.write(f"Hostname: {platform.node()}\n")
        f.write(f"Version: {platform.platform()}\n\n")

        # Logical drives
        log.info("Gathering logical volumes via wmic")
        f.write("Logical Volumes (wmic):\n")
        out, err, _ = run_cmd(["wmic", "logicaldisk", "get",
                               "DeviceID,DriveType,FileSystem,FreeSpace,Size,VolumeName"])
        f.write(out + "\n" + err + "\n\n")

        # Mounted volumes
        f.write("Mounted Volumes (mountvol):\n")
        f.write(get_mountvol_output() + "\n\n")

        # BitLocker status
        f.write("=" * 80 + "\nBitLocker Status\n" + "=" * 80 + "\n")
        if have("manage-bde"):
            log.info("Checking BitLocker status with manage-bde")
            for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
                path = f"{letter}:"
                if os.path.exists(f"{path}\\"):
                    out, err, _ = run_cmd(["manage-bde", "-status", path])
                    f.write(f"Drive {path}:\n{out}\n{err}\n\n")
        else:
            log.warning("manage-bde not found")

        if have("powershell"):
            log.info("Checking BitLocker status with PowerShell")
            f.write("PowerShell Get-BitLockerVolume:\n")
            ps_cmd = r"Get-BitLockerVolume | Format-List *"
            out, err, _ = run_cmd(["powershell", "-NoProfile", "-Command", ps_cmd])
            f.write(out + "\n" + err + "\n\n")
        else:
            log.warning("PowerShell not available")

    log.info(f"Report successfully saved to {report_path}")


if __name__ == "__main__":
    main()
2 changes: 1 addition & 1 deletion CODE/logicytics/Flag.py
@@ -498,7 +498,7 @@ def __available_arguments(
"and not the best, use only if the device doesnt have python installed.",
)

    # TODO v3.6.0 -> Out of beta
    # TODO v3.6.1 -> Out of beta
    parser.add_argument(
        "--vulnscan-ai",
        action="store_true",
1 change: 1 addition & 0 deletions CODE/logicytics/__init__.py
@@ -122,4 +122,5 @@ def wrapper(*args, **kwargs) -> callable:
"ObjectLoadError",
"log",
"Log",
"config",
]
89 changes: 89 additions & 0 deletions CODE/usb_history.py
@@ -0,0 +1,89 @@
import ctypes
import ctypes.wintypes
import os
import winreg
from datetime import datetime, timedelta

from logicytics import log


class USBHistory:
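    """Collects USB storage device history from the Windows registry (USBSTOR) into a text file."""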
    def __init__(self):
        self.history_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "usb_history.txt")

    def _save_history(self, message: str):
        """Append a timestamped message to the history file and log it."""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        entry = f"{timestamp} - {message}\n"
        try:
            with open(self.history_path, "a", encoding="utf-8") as f:
                f.write(entry)
            log.debug(f"Saved entry: {message}")
        except Exception as e:
            log.error(f"Failed to write history: {e}")

    # noinspection PyUnresolvedReferences
    @staticmethod
    def _get_last_write_time(root_key, sub_key_path):
        """Return the precise last write time of a registry key, or None on failure."""
        handle = ctypes.wintypes.HANDLE()
        try:
            advapi32 = ctypes.windll.advapi32
            if advapi32.RegOpenKeyExW(root_key, sub_key_path, 0, winreg.KEY_READ, ctypes.byref(handle)) != 0:
                return None
            ft = ctypes.wintypes.FILETIME()
            if advapi32.RegQueryInfoKeyW(handle, None, None, None, None, None, None, None, None, None, None,
                                         ctypes.byref(ft)) != 0:
                return None
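            # FILETIME counts 100-nanosecond intervals since 1601-01-01 UTC; "// 10" converts them to microseconds.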
            t = ((ft.dwHighDateTime << 32) + ft.dwLowDateTime) // 10
            return datetime(1601, 1, 1) + timedelta(microseconds=t)
        finally:
            if handle:
                ctypes.windll.advapi32.RegCloseKey(handle)

    @staticmethod
    def _enum_subkeys(root, path, warn_func):
        """Yield all subkeys of a registry key, logging warnings on errors."""
        try:
            with winreg.OpenKey(root, path) as key:
                subkey_count, _, _ = winreg.QueryInfoKey(key)
                for i in range(subkey_count):
                    try:
                        yield winreg.EnumKey(key, i)
                    except OSError as e:
                        if getattr(e, "winerror", None) == 259:  # ERROR_NO_MORE_ITEMS
                            break
                        warn_func(f"Error enumerating {path} index {i}: {e}")
        except OSError as e:
            warn_func(f"Failed to open registry key {path}: {e}")

    @staticmethod
    def _get_friendly_name(dev_info_path, device_id):
        """Return the friendly name of a device if available, else the device ID."""
        try:
            with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, dev_info_path) as dev_key:
                return winreg.QueryValueEx(dev_key, "FriendlyName")[0]
        except FileNotFoundError:
            return device_id
        except Exception as e:
            log.warning(f"Failed to read friendly name for {dev_info_path}: {e}")
            return device_id

    def read(self):
        """Read all USB devices from USBSTOR and log their info."""
        log.info("Starting USB history extraction...")
        reg_path = r"SYSTEM\CurrentControlSet\Enum\USBSTOR"
        try:
            for device_class in self._enum_subkeys(winreg.HKEY_LOCAL_MACHINE, reg_path, log.warning):
                dev_class_path = f"{reg_path}\\{device_class}"
                for device_id in self._enum_subkeys(winreg.HKEY_LOCAL_MACHINE, dev_class_path, log.warning):
                    dev_info_path = f"{dev_class_path}\\{device_id}"
                    friendly_name = self._get_friendly_name(dev_info_path, device_id)
                    last_write = self._get_last_write_time(winreg.HKEY_LOCAL_MACHINE, dev_info_path) or "Unknown"
                    self._save_history(f"USB Device Found: {friendly_name} | LastWriteTime: {last_write}")
            log.info(f"USB history extraction complete, saved to {self.history_path}")
        except Exception as e:
            log.error(f"Error during USB history extraction: {e}")


if __name__ == "__main__":
    USBHistory().read()