# matitos_news/app_urls/api/views.py
# import django_rq
from .tasks import background_task
from django.http import JsonResponse


def trigger_task(request, task):
    # View that enqueues a task in the "default" queue
    background_task.delay(task)
    return JsonResponse({"message": "Task has been enqueued!", "task": task})
    # Alternative: enqueue explicitly via django_rq
    # queue = django_rq.get_queue('default')  # Get the default queue
    # job = queue.enqueue(background_task, task, job_timeout="30m")
    # return JsonResponse({"message": "Task has been enqueued!", "job_id": job.id})
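# Note: a worker must be running to consume the queue. With django_rq this is
# typically started as (assuming RQ_QUEUES is configured in settings):
#   python manage.py rqworker default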

def link_list(request):
    prefix = "http://localhost:8000/api/task"
    links = [
        "fetch_feeds",
        "fetch_parser",
        "fetch_search",
        "process_raw_urls_50",
        "process_error_urls_50",
        "process_missing_kids_urls_50",
        "process_missing_kids_urls_500000",
    ]
    db_links = ["http://localhost:8080/?pgsql=matitos_db&username=supermatitos&db=matitos&ns=public&select=urls&order%5B0%5D=id&limit=500"]
    # Join with '/' explicitly; os.path.join is platform-dependent and not meant for URLs
    return JsonResponse({"links": ["http://localhost:8000/api/url"] + db_links + [f"{prefix}/{l}" for l in links]})

from django.http import StreamingHttpResponse, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
import requests
import json
import time
import ollama
from .models import Urls, Source, Search, UrlsSourceSearch, UrlContent

# Create your views here.
def news(request):
    # URLs
    urls = Urls.objects.all()
    # Sources
    sources = Source.objects.all()
    searches = Search.objects.all()
    # Parameters
    page_number = request.GET.get("page", 1)
    num_items = request.GET.get("items", 15)
    source_ids = request.GET.get("sources", ','.join([str(s.id) for s in sources]))
    status_filters = request.GET.get("status", None)
    # Filters
    if status_filters and status_filters != "all":
        urls = urls.filter(status__in=status_filters.split(","))
    if source_ids and source_ids != "all":
        # TODO: Distinct needed?
        # urls = urls.filter(urlssource__id_source__in=source_ids.split(",")).distinct()
        pass
    # Pagination
    paginator = Paginator(urls, num_items)
    page_obj = paginator.get_page(page_number)
    # Map URL IDs to their sources, only for the subset of URLs on the current
    # page (currently disabled; see the batched sketch below)
    sources_map = {}
    """
    sources_map = {
        url.id: list(Source.objects.filter(urlssource__id_url=url).values_list('source', flat=True))
        for url in page_obj.object_list
    }
    """
    context = {
        "page_obj": page_obj,
        "sources": sources,
        "sources_map": sources_map,
        "list_status": Urls.STATUS_ENUM.values,
        "list_urls_per_page": [15, 50, 100],
    }
    # If the request is AJAX, return the rendered partial as JSON
    if request.headers.get("X-Requested-With") == "XMLHttpRequest":
        return JsonResponse({'items_html': render(request, 'item_list_partial.html', context).content.decode('utf-8')})
    return render(request, "item_list.html", context)
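
# Alternative for the AJAX branch above (hedged sketch): render the partial
# directly to a string with django.template.loader.render_to_string instead of
# decoding response.content:
# from django.template.loader import render_to_string
# items_html = render_to_string('item_list_partial.html', context, request=request)
# return JsonResponse({'items_html': items_html})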

def url_detail_view(request, id):
    url_item = get_object_or_404(Urls, id=id)
    url_sources = list(Source.objects.filter(urlssource__id_url=url_item).values_list('source', flat=True))
    try:
        url_content = UrlContent.objects.get(pk=id)
    except UrlContent.DoesNotExist:
        url_content = {}
    # TODO: https://github.com/ollama/ollama-python?tab=readme-ov-file#async-client
    # LLM models available on the Ollama server
    client = ollama.Client(host='https://ollamamodel.matitos.org')
    models = sorted([m.model for m in client.list().models])
    # default_model = "llama3.2:3b"
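    # Hedged sketch of the async variant from the TODO above (assumes an async
    # Django view; AsyncClient mirrors Client per the ollama-python README):
    # from ollama import AsyncClient
    # async def list_models():
    #     aclient = AsyncClient(host='https://ollamamodel.matitos.org')
    #     return sorted(m.model for m in (await aclient.list()).models)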
    context = {
        'url_item': url_item,
        'sources': url_sources,
        'models': models,
        # 'default_model': default_model,
        'prompt': "Provide in one paragraph the what, why, when, where, who, and how of the content below. Also provide a one paragraph summary of the content:",
        # "prompt": "Imagine you are a journalist, TLDR in a paragraph:",
        # "prompt": "Below you will find the whole content of a news article:\n{}\nProvide a concise summary of one paragraph maximum of the content.".format(content)
        'url_content': url_content,
    }
    return render(request, 'url_detail.html', context)

def fetch_details(request, id):
    url_item = get_object_or_404(Urls, id=id)
    url_param = request.GET.get("url", "")  # Target URL (currently unused below)
    model = request.GET.get("model", "")    # LLM model name
    text = request.GET.get("text", "")      # LLM prompt
    # LLM client
    client = ollama.Client(host='https://ollamamodel.matitos.org')

    def stream_response():
        msg_content = {
            "role": "user",
            "content": text,
        }
        response = client.chat(model=model, messages=[msg_content], stream=True)
        for chunk in response:
            yield chunk["message"]["content"]  # Stream each chunk of text

    return StreamingHttpResponse(stream_response(), content_type="text/plain")
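
# Example of consuming the streaming endpoint from Python (hedged sketch; the
# exact route and the dev host are assumptions based on link_list above):
# import requests
# params = {"url": "...", "model": "llama3.2:3b", "text": "TLDR:"}
# with requests.get("http://localhost:8000/api/fetch_details/1", params=params, stream=True) as r:
#     for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
#         print(chunk, end="", flush=True)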