# plugins/calculate_example_plugin.py
"""Calculator plugin: contributes a `calculate` tool for LLM function calling.

Plugins in this project expose two module-level hooks that the loader
(`LLMService._load_plugins`) discovers by name:
  - get_functions()            -> list of OpenAI-style tool schemas
  - handle_function_call(...)  -> executes a tool call by name
"""


def get_functions():
    """Return the OpenAI-style tool schema(s) this plugin provides."""
    return [{
        "type": "function",
        "function": {
            "name": "calculate",
            "description": "Perform math calculations",
            "parameters": {
                "type": "object",
                "properties": {
                    "expression": {"type": "string"}
                },
                "required": ["expression"]
            }
        }
    }]


def handle_function_call(name, args):
    """Execute a tool call dispatched by the plugin loader.

    Args:
        name: tool name selected by the model.
        args: parsed JSON arguments; expects args["expression"] for `calculate`.

    Returns:
        The evaluated result, the string "Invalid expression" on failure,
        or None when `name` is not a tool this plugin owns.
    """
    if name == "calculate":
        # SECURITY: `expression` comes from model output, i.e. untrusted input.
        # eval() can execute arbitrary code; stripping __builtins__ blocks the
        # obvious escapes, but a real fix is ast.literal_eval or a dedicated
        # expression parser.  Flagged for review.
        try:
            return eval(args["expression"], {"__builtins__": {}}, {})
        except Exception:  # was bare `except:` — that also traps KeyboardInterrupt/SystemExit
            return "Invalid expression"
    # Explicit None (was implicit) so the loader's "not my tool" contract is visible.
    return None
"function": { + "name": "get_current_time", + "description": "Get current time in specified timezone", + "parameters": { + "type": "object", + "properties": { + "timezone": {"type": "string", "enum": ["UTC", "EST", "PST"]} + }, + "required": ["timezone"] + } + } + }] + +def handle_function_call(name, arguments): + if name == "get_current_time": + tz = arguments.get('timezone', 'UTC') + return "Time: " + datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%d %H:%M:%S') + " timezone " + tz + return None \ No newline at end of file diff --git a/services/llm_service.py b/services/llm_service.py index e6928d2..63f9e83 100644 --- a/services/llm_service.py +++ b/services/llm_service.py @@ -1,19 +1,43 @@ import threading from llama_cpp import Llama from typing import Dict, List +from pathlib import Path +import importlib.util class LLMService: def __init__(self, config): self.config = config self.model = None + self.functions = [] + self.function_handlers = {} + self._load_plugins() self.conversations: Dict[str, List[dict]] = {} self.lock = threading.Lock() + print(self.functions) + print(self.function_handlers) + def _load_plugins(self): + plugins_dir = Path(__file__).parent.parent / Path(self.config.plugins_dir) + for plugin_file in plugins_dir.glob('*.py'): + spec = importlib.util.spec_from_file_location(plugin_file.stem, plugin_file) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + if hasattr(module, 'get_functions'): + self.functions.extend(module.get_functions()) + + if hasattr(module, 'handle_function_call'): + self.function_handlers.update({ + func['function']['name']: module.handle_function_call + for func in module.get_functions() + }) + def initialize_model(self): with self.lock: if not self.model: self.model = Llama( model_path=self.config.model_path, + chat_format=self.config.chat_format, n_ctx=self.config.model_params.get('n_ctx', 1024), n_gpu_layers=self.config.model_params.get('n_gpu_layers', 0), 
verbose=self.config.full_log @@ -36,7 +60,9 @@ def get_response(self, user_id, message, username=None): completion_params = { 'messages': messages, - 'stream': True, + 'tools': self.functions, + 'tool_choice': "auto", + 'stream': self.config.stream_mode, 'max_tokens': self.config.model_params.get('max_tokens'), 'temperature': self.config.model_params.get('temperature'), 'top_k': self.config.model_params.get('top_k'),