from django.http import StreamingHttpResponse, HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator

import requests
import json
import time
import ollama

from .models import Urls, Source, UrlsSource, UrlContent


# Create your views here.

def index(request):
    return HttpResponse("Hello, world. You're at the news index.")

def news(request):
    # URLs
    urls = Urls.objects.all()
    # Sources
    sources = Source.objects.all()

    # Parameters
    page_number = request.GET.get("page", 1)
    num_items = request.GET.get("items", 15)
    source_ids = request.GET.get("sources", ','.join([str(s.id) for s in sources]))
    status_filters = request.GET.get("status", None)

    # Filters
    if status_filters and status_filters != "all":
        urls = urls.filter(status__in=status_filters.split(","))
    if source_ids and source_ids != "all":
        # TODO: Distinct needed?
        urls = urls.filter(urlssource__id_source__in=source_ids.split(",")).distinct()

    # Pagination
    paginator = Paginator(urls, num_items)
    page_obj = paginator.get_page(page_number)

    # Map URL IDs to their sources, only for the subset of URLs on the requested page
    sources_map = {
        url.id: list(Source.objects.filter(urlssource__id_url=url).values_list('source', flat=True))
        for url in page_obj.object_list
    }

    context = {
        "page_obj": page_obj,
        "sources": sources,
        "sources_map": sources_map,
        "list_status": Urls.STATUS_ENUM.values,
        "list_urls_per_page": [15, 50, 100],
    }

    # If request is AJAX, return JSON response
    if request.headers.get("X-Requested-With") == "XMLHttpRequest":
        return JsonResponse({'items_html': render(request, 'item_list_partial.html', context).content.decode('utf-8')})

    return render(request, "item_list.html", context)
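# A minimal sketch of exercising news() above with Django's test client. The
# '/news/' path and the id/status values are placeholders (the real route and
# enum members come from urls.py and Urls.STATUS_ENUM), so this stays commented:
#
#   from django.test import Client
#
#   c = Client()
#   # Second page, 50 items per page, restricted to sources 1 and 3
#   page = c.get("/news/", {"page": 2, "items": 50, "sources": "1,3"})
#   # Same view via AJAX: returns {'items_html': ...} built from item_list_partial.html
#   partial = c.get("/news/", {"status": "all"}, HTTP_X_REQUESTED_WITH="XMLHttpRequest")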


def url_detail_view(request, id):
    url_item = get_object_or_404(Urls, id=id)
    url_sources = list(Source.objects.filter(urlssource__id_url=url_item).values_list('source', flat=True))
    try:
        url_content = UrlContent.objects.get(pk=id)
    except UrlContent.DoesNotExist:
        url_content = {}

    # print(url_content.__dict__)

    # TODO: move to the async client (see the commented sketch after this view):
    # https://github.com/ollama/ollama-python?tab=readme-ov-file#async-client
    # LLM models available
    client = ollama.Client(host='https://ollamamodel.matitos.org')
    models = sorted([m.model for m in client.list().models])
    print(models)

    context = {
        'url_item': url_item,
        'sources': url_sources,
        'models': models,
        'prompt': "Provide in one paragraph the what, why, when, where, who, and how of the content below. Also provide a one paragraph summary of the content:",
        # 'prompt': "Imagine you are a journalist, TLDR in a paragraph:",
        # 'prompt': "Below you will find the whole content of a news article:\n{}\nProvide a concise summary of one paragraph maximum of the content.".format(content),
        'url_content': url_content,
    }
    return render(request, 'url_detail.html', context)
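# Rough sketch for the async-client TODO above: the blocking model listing could
# be moved to ollama.AsyncClient, e.g. from an async view or background task.
# The helper name below is illustrative and not wired into any URL:
#
#   from ollama import AsyncClient
#
#   async def list_models():
#       client = AsyncClient(host='https://ollamamodel.matitos.org')
#       return sorted(m.model for m in (await client.list()).models)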


def fetch_details(request, id):
    url_item = get_object_or_404(Urls, id=id)
    url_param = request.GET.get("url", "")  # Get URL
    model = request.GET.get("model", "")    # Get LLM model
    text = request.GET.get("text", "")      # Get LLM prompt

    # LLM
    client = ollama.Client(host='https://ollamamodel.matitos.org')

    def stream_response():
        msg_content = {
            "role": "user",
            "content": text,
        }
        response = client.chat(model=model, messages=[msg_content], stream=True)
        for chunk in response:
            yield chunk["message"]["content"]  # Stream each chunk of text

    return StreamingHttpResponse(stream_response(), content_type="text/plain")
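# Illustrative consumer for the streaming endpoint above, using the requests
# library already imported in this module. The URL, model name, and prompt are
# placeholders; the real route comes from urls.py:
#
#   import requests
#
#   params = {"model": "llama3.2", "text": "Summarize: ..."}
#   with requests.get("http://localhost:8000/news/1/fetch_details",
#                     params=params, stream=True) as resp:
#       for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
#           print(chunk, end="", flush=True)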