LLM view refactor

This commit is contained in:
Luciano Gervasoni
2025-04-14 13:49:06 +02:00
parent 43c6c3aabf
commit b876f6d720
4 changed files with 39 additions and 21 deletions

View File

@@ -94,6 +94,12 @@
});
});
// CSRF token helper (required if CSRF protection is enabled)
// Read a cookie value by name (used to obtain Django's CSRF token).
// Returns the decoded cookie value, or null when the cookie is absent.
// Fixes over the regex version: the value is percent-decoded (Django
// URL-encodes cookie values), and the name is compared literally, so
// names containing regex metacharacters cannot break the lookup.
// This is the lookup recommended by the Django CSRF documentation.
function getCookie(name) {
    let cookieValue = null;
    if (document.cookie && document.cookie !== '') {
        const cookies = document.cookie.split(';');
        for (const raw of cookies) {
            const cookie = raw.trim();
            if (cookie.startsWith(name + '=')) {
                cookieValue = decodeURIComponent(cookie.substring(name.length + 1));
                break;
            }
        }
    }
    return cookieValue;
}
function fetchDetails(urlId, url) {
// Show the loading spinner
document.getElementById("loading-spinner").style.display = "block";
@@ -109,22 +115,24 @@
}
// Fetch URL
let fetchUrl = `/urls/${urlId}/fetch/?url=${encodeURIComponent(url)}&model=${encodeURIComponent(selectedModel)}&text=${encodeURIComponent(inputText)}`;
let fetchUrl = `/urls/llm/`;
let resultContainer = $("#chat-output");
resultContainer.html(""); // Clear previous content before fetching
let fetchButton = $("button[onclick^='fetchDetails']"); // Select the button
fetchButton.prop("disabled", true); // Disable button
fetch(fetchUrl/*, {
fetch(fetchUrl, {
method: "POST",
body: JSON.stringify({
text: inputText
}),
headers: {
"Content-type": "application/json; charset=UTF-8"
}
}*/).then(response => {
"Content-type": "application/json; charset=UTF-8",
'X-CSRFToken': getCookie('csrftoken') // required if CSRF middleware is active
},
body: JSON.stringify({
model: selectedModel,
message: inputText
})
}).then(response => {
if (!response.ok) {
throw new Error("Error on network response");
}

View File

@@ -17,6 +17,6 @@ urlpatterns = [
#
path('urls/', views.filtered_urls, name='filtered_urls'),
path('urls/<int:id>/', views.url_detail_view, name='url_detail'),
path('urls/<int:id>/fetch/', views.fetch_details, name='fetch_details'),
path('urls/llm/', views.llm, name='llm'),
path('urls/content_generation', views.content_generation, name='content_generation'),
]

View File

@@ -9,7 +9,7 @@ from django.utils.timezone import now, timedelta
from .models import Urls, Source, Search, UrlContent, UrlsSourceSearch, UrlsDuplicate
import ollama
import os
#from datetime import timedelta
import json
####################################################################################################
@@ -30,14 +30,10 @@ class OllamaClient():
def get_prompt(self):
    """Return the fixed instruction prompt prepended to text sent to the LLM.

    The prompt asks the model for a standalone, news-style, one-paragraph
    summary that never references the source text.
    """
    prompt = "Rewrite the text below into a clear and concise summary, presenting the key points as if they are newly written insights. Do not mention or reference the original text, its source, or any phrases like 'According to' or 'The text states'. Instead, write in a natural, standalone format that feels like an original explanation. Keep it brief, engaging, informative, in the style of a news article, and no longer than a paragraph:"
    return prompt
def fetch_details(request, id):
url_item = get_object_or_404(Urls, id=id)
url_param = request.GET.get("url", "") # Get URL
model = request.GET.get("model", "") # Get LLM model
# TODO: post with body
text = request.GET.get("text", "") # Get LLM prompt
def stream_response():
def llm(request):
    """Stream an LLM chat completion for a POSTed message.

    Expects POST with a JSON body of the form:
        {"model": <ollama model name>, "message": <prompt text>}

    Returns:
        StreamingHttpResponse (text/plain) yielding the model's reply in
        chunks as they arrive, or a JsonResponse error:
        400 when the body is not valid JSON or has no 'message',
        405 for any non-POST method.
    """
    def stream_response(model, text):
        # Generator: forward each chunk of the Ollama chat stream as it arrives.
        msg_content = {
            "role": "user",
            "content": text,
        }
        response = OllamaClient().client.chat(model=model, messages=[msg_content], stream=True)
        for chunk in response:
            yield chunk["message"]["content"]  # Stream each chunk of text
    if request.method == 'POST':
        try:
            body_data = json.loads(request.body)
            message = body_data.get('message')
            # NOTE(review): 'model' is not validated here — if the client omits
            # it, the failure surfaces inside the stream; confirm callers
            # always send it (the front-end does).
            model = body_data.get('model')
            if message is None:
                return JsonResponse({'error': 'No message found in request'}, status=400)
            return StreamingHttpResponse(stream_response(model, message), content_type="text/plain")
        except json.JSONDecodeError:
            return JsonResponse({'error': 'Invalid JSON'}, status=400)
    return JsonResponse({'error': 'Only POST method allowed'}, status=405)
def url_detail_view(request, id):
url_item = get_object_or_404(Urls, id=id)

View File

@@ -1,7 +1,8 @@
#!/bin/bash
# Entrypoint: run gunicorn (web server) and an RQ worker side by side.
# DEBUG mode adds auto-reload and debug logging; both modes use a 600s timeout.
# BUGFIX: the condition must use "||" (logical OR). A single "|" pipes one
# [ ... ] test into the other, so the branch was decided solely by the exit
# status of the second comparison. Also use POSIX "=" instead of the
# bash-only "==" inside [ ... ].
if [ "${DJANGO_DEBUG}" = true ] || [ "${DJANGO_DEBUG}" = "True" ]; then
    echo "Running in DEBUG mode"
    gunicorn core.wsgi:application --reload --log-level debug --bind 0.0.0.0:8000 --timeout 600 & python manage.py rqworker high default low
else
    gunicorn core.wsgi:application --bind 0.0.0.0:8000 --timeout 600 & python manage.py rqworker high default low
fi