From b8fdcae5ecd76702070752bc46b59e3de35d4549 Mon Sep 17 00:00:00 2001
From: Luciano Gervasoni
Date: Wed, 23 Apr 2025 17:46:47 +0200
Subject: [PATCH] Temperature and seed LLM

---
 app_urls/fetcher/src/llm.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/app_urls/fetcher/src/llm.py b/app_urls/fetcher/src/llm.py
index 8107b31..014b4aa 100644
--- a/app_urls/fetcher/src/llm.py
+++ b/app_urls/fetcher/src/llm.py
@@ -9,6 +9,7 @@ class OllamaClient():
     def __init__(self):
         self.host = os.getenv("ENDPOINT_OLLAMA", "https://ollamamodel.matitos.org")
         self.client = ollama.Client(host=self.host)
+        self.options = {"temperature": 0, "seed": 13579}
 
     def _get_default_model(self):
         return os.getenv("OLLAMA_MODEL_DEFAULT", "llama3.2:3b")
@@ -44,7 +45,7 @@ class OllamaClient():
     def generate(self, model, prompt, format=None):
         try:
             # Generate response
-            response = self.client.generate(model=model, prompt=prompt, format=format)
+            response = self.client.generate(model=model, prompt=prompt, format=format, options=self.options)
             # Extract response
             response = response.response
             # Json? -> Dict
@@ -65,7 +66,7 @@ class OllamaClient():
     def generate_stream(self, model, prompt):
         try:
             # Generate response
-            response = self.client.generate(model=model, prompt=prompt, format="json", stream=True)
+            response = self.client.generate(model=model, prompt=prompt, format="json", stream=True, options=self.options)
             # Streamed chunks
             for chunk in response:
                 yield chunk.response
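
Note: the patch pins sampling options (temperature 0, fixed seed 13579) and passes them to every generate call, which is intended to make repeated calls with the same model and prompt reproducible. The following standalone sketch, not part of the patch, illustrates that behavior against the ollama Python client directly; the host URL, model name, and prompt below are taken from the diff's defaults or are illustrative assumptions.

    import ollama

    # Sketch only: verify that a fixed temperature and seed yield the same
    # completion for two identical generate() calls.
    client = ollama.Client(host="https://ollamamodel.matitos.org")  # default host from the diff
    options = {"temperature": 0, "seed": 13579}                     # options added by this patch

    prompt = "Name one prime number."                               # illustrative prompt
    first = client.generate(model="llama3.2:3b", prompt=prompt, options=options)
    second = client.generate(model="llama3.2:3b", prompt=prompt, options=options)

    # With greedy sampling (temperature 0) and a fixed seed, the two
    # responses are expected to match.
    print(first.response)
    print(first.response == second.response)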