Add AI functionality; fuck up UI royally, still a piece of shit.
+302
-1
@@ -3,10 +3,13 @@
 from __future__ import annotations

 import asyncio
+import json
+from collections.abc import AsyncIterator
 from datetime import UTC, datetime

 from fastapi import FastAPI, HTTPException, Query, BackgroundTasks
 from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse

 from crawler import sFetchBot
 from config import TOP_SITE_SEED_LIMIT, TOP_SITE_SEED_META_KEY
@@ -19,7 +22,16 @@ from database import (
     init_db,
     set_meta_value,
 )
-from models import CrawlRequest, SearchResponse
+from models import AIAnswerResponse, AIChatRequest, AISearchRequest, AISource, CrawlRequest, SearchResponse
+from ollama_cloud import (
+    OllamaCloudError,
+    chat as ollama_chat,
+    default_model,
+    is_ollama_configured,
+    list_models as list_ollama_models,
+    stream_chat as ollama_stream_chat,
+    web_search as ollama_web_search,
+)
 from searcher import search, search_images_api, search_videos_api
 from top_sites import load_top_site_seed_urls

@@ -205,3 +217,292 @@ async def crawl_top_sites_status_endpoint() -> dict[str, object]:
 async def stats_endpoint() -> dict[str, object]:
     stats = await get_stats()
     return stats
+
+
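+# --- AI endpoints (Ollama Cloud) ---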
+@app.get("/ai/config")
+async def ai_config_endpoint() -> dict[str, object]:
+    return {
+        "configured": is_ollama_configured(),
+        "default_model": default_model(),
+        "provider": "Ollama Cloud",
+    }
+
+
+@app.get("/ai/models")
+async def ai_models_endpoint() -> dict[str, object]:
+    try:
+        models = await list_ollama_models()
+    except OllamaCloudError as exc:
+        raise HTTPException(status_code=exc.status_code, detail=str(exc)) from exc
+    return {
+        "default_model": default_model(),
+        "models": models,
+    }
+
+
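+# Non-streaming chat: optional web-search context is prepended as a system message.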
+@app.post("/ai/chat", response_model=AIAnswerResponse)
+async def ai_chat_endpoint(request: AIChatRequest) -> AIAnswerResponse:
+    model = (request.model or default_model()).strip()
+    if not model:
+        raise HTTPException(status_code=400, detail="Model is required.")
+    if not request.messages:
+        raise HTTPException(status_code=400, detail="At least one message is required.")
+
+    try:
+        messages, sources = await _build_chat_messages_and_sources(request)
+        response = await ollama_chat(model=model, messages=messages, think=request.think)
+    except OllamaCloudError as exc:
+        raise HTTPException(status_code=exc.status_code, detail=str(exc)) from exc
+
+    message = response.get("message") or {}
+    return AIAnswerResponse(
+        model=response.get("model") or model,
+        content=message.get("content") or "",
+        thinking=message.get("thinking"),
+        sources=sources,
+        configured=is_ollama_configured(),
+    )
+
+
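+# Format one server-sent-events frame: an "event:" line plus a JSON "data:" line.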
+def _sse(event: str, data: object) -> str:
+    return f"event: {event}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"
+
+
+async def _build_chat_messages_and_sources(request: AIChatRequest) -> tuple[list[dict[str, object]], list[AISource]]:
+    messages = [
+        message.model_dump(exclude_none=True)
+        for message in request.messages
+        if message.content.strip() or message.tool_calls
+    ]
+    if not messages:
+        raise OllamaCloudError("At least one message is required.", status_code=400)
+
+    sources: list[AISource] = []
+    if request.use_web_search:
+        latest_user_message = next(
+            (message.content for message in reversed(request.messages) if message.role == "user" and message.content.strip()),
+            "",
+        )
+        if latest_user_message:
+            web_results = await ollama_web_search(latest_user_message, max_results=request.web_result_limit)
+            sources = [
+                AISource(
+                    title=result.get("title") or result.get("url") or "Web result",
+                    url=result.get("url") or "",
+                    source_type="web",
+                    content=result.get("content") or "",
+                )
+                for result in web_results
+                if result.get("url")
+            ]
+    if sources:
+        context = "\n".join(_source_text(source, index) for index, source in enumerate(sources, start=1))
+        messages.insert(
+            0,
+            {
+                "role": "system",
+                "content": (
+                    "Use the following web search context when it is relevant. "
+                    "Cite sources inline using bracket numbers such as [1].\n\n"
+                    f"{context}"
+                ),
+            },
+        )
+
+    return messages, sources
+
+
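+# Relay Ollama's streamed chunks as SSE events: "meta" first, then incremental
+# "thinking"/"content" deltas, and a final "done" (or "error") frame.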
+async def _stream_ollama_events(
+    model: str,
+    messages: list[dict[str, object]],
+    think: bool | str | None,
+    sources: list[AISource],
+) -> AsyncIterator[str]:
+    content = ""
+    thinking = ""
+    yield _sse(
+        "meta",
+        {
+            "model": model,
+            "configured": is_ollama_configured(),
+            "sources": [source.model_dump() for source in sources],
+        },
+    )
+
+    try:
+        async for chunk in ollama_stream_chat(model=model, messages=messages, think=think):
+            message = chunk.get("message") or {}
+            thinking_delta = message.get("thinking") or ""
+            content_delta = message.get("content") or ""
+
+            if thinking_delta:
+                thinking += thinking_delta
+                yield _sse("thinking", {"delta": thinking_delta})
+
+            if content_delta:
+                content += content_delta
+                yield _sse("content", {"delta": content_delta})
+
+            if chunk.get("done"):
+                yield _sse(
+                    "done",
+                    {
+                        "model": chunk.get("model") or model,
+                        "content": content,
+                        "thinking": thinking,
+                        "sources": [source.model_dump() for source in sources],
+                    },
+                )
+                return
+    except OllamaCloudError as exc:
+        yield _sse("error", {"detail": str(exc), "status_code": exc.status_code})
+    except Exception as exc:
+        yield _sse("error", {"detail": f"Streaming failed: {exc}", "status_code": 502})
+
+
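+# Streaming variant of /ai/chat; requires OLLAMA_API_KEY to be configured up front.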
+@app.post("/ai/chat/stream")
+async def ai_chat_stream_endpoint(request: AIChatRequest) -> StreamingResponse:
+    model = (request.model or default_model()).strip()
+    if not model:
+        raise HTTPException(status_code=400, detail="Model is required.")
+    if not is_ollama_configured():
+        raise HTTPException(status_code=503, detail="Ollama Cloud is not configured. Set OLLAMA_API_KEY.")
+
+    try:
+        messages, sources = await _build_chat_messages_and_sources(request)
+    except OllamaCloudError as exc:
+        raise HTTPException(status_code=exc.status_code, detail=str(exc)) from exc
+
+    return StreamingResponse(
+        _stream_ollama_events(model=model, messages=messages, think=request.think, sources=sources),
+        media_type="text/event-stream",
+        headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"},
+    )
+
+
+def _source_text(source: AISource, index: int) -> str:
+    return (
+        f"[{index}] {source.title}\n"
+        f"Type: {source.source_type}\n"
+        f"URL: {source.url}\n"
+        f"Excerpt: {source.content[:1200]}\n"
+    )
+
+
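+# Gather answer sources: local index hits first, then optional Ollama web results.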
+async def _build_ai_search_sources(request: AISearchRequest) -> list[AISource]:
+    local_results = await search(query=request.query, limit=request.local_result_limit, offset=0)
+    sources = [
+        AISource(
+            title=result["title"],
+            url=result["url"],
+            source_type="local",
+            content=result["snippet"],
+        )
+        for result in local_results
+    ]
+
+    if request.include_web:
+        web_results = await ollama_web_search(request.query, max_results=request.web_result_limit)
+        sources.extend(
+            AISource(
+                title=result.get("title") or result.get("url") or "Web result",
+                url=result.get("url") or "",
+                source_type="web",
+                content=result.get("content") or "",
+            )
+            for result in web_results
+            if result.get("url")
+        )
+
+    return sources
+
+
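+# Non-streaming AI search: grounds the model's answer in the collected sources.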
+@app.post("/ai/search", response_model=AIAnswerResponse)
+async def ai_search_endpoint(request: AISearchRequest) -> AIAnswerResponse:
+    model = (request.model or default_model()).strip()
+    query = request.query.strip()
+    if not model:
+        raise HTTPException(status_code=400, detail="Model is required.")
+    if not query:
+        raise HTTPException(status_code=400, detail="Query is required.")
+
+    try:
+        sources = await _build_ai_search_sources(request)
+        source_context = "\n".join(_source_text(source, index) for index, source in enumerate(sources, start=1))
+        if not source_context:
+            source_context = "No search sources were found for this query."
+
+        messages = [
+            {
+                "role": "system",
+                "content": (
+                    "You are sFetch AI, a precise search assistant. Answer only from the provided sources. "
+                    "Write in a neutral, professional tone. Keep the response concise. "
+                    "Cite sources inline using bracket numbers such as [1]. "
+                    "If the sources are insufficient, say what is missing rather than guessing."
+                ),
+            },
+            {
+                "role": "user",
+                "content": f"Search query: {query}\n\nSources:\n{source_context}",
+            },
+        ]
+        response = await ollama_chat(model=model, messages=messages, think=request.think)
+    except OllamaCloudError as exc:
+        raise HTTPException(status_code=exc.status_code, detail=str(exc)) from exc
+
+    message = response.get("message") or {}
+    return AIAnswerResponse(
+        model=response.get("model") or model,
+        content=message.get("content") or "",
+        thinking=message.get("thinking"),
+        sources=sources,
+        configured=is_ollama_configured(),
+    )
+
+
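+# Build the grounded prompt for the streaming search endpoint.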
+async def _build_ai_search_messages(request: AISearchRequest) -> tuple[list[dict[str, str]], list[AISource]]:
+    sources = await _build_ai_search_sources(request)
+    source_context = "\n".join(_source_text(source, index) for index, source in enumerate(sources, start=1))
+    if not source_context:
+        source_context = "No search sources were found for this query."
+
+    messages = [
+        {
+            "role": "system",
+            "content": (
+                "You are sFetch AI, a precise search assistant. Answer only from the provided sources. "
+                "Write in a neutral, useful tone with direct synthesis. "
+                "Cite sources inline using bracket numbers such as [1]. "
+                "If sources are insufficient, say what is missing rather than guessing."
+            ),
+        },
+        {
+            "role": "user",
+            "content": f"Search query: {request.query.strip()}\n\nSources:\n{source_context}",
+        },
+    ]
+    return messages, sources
+
+
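+# Streaming variant of /ai/search using the shared SSE generator.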
+@app.post("/ai/search/stream")
+async def ai_search_stream_endpoint(request: AISearchRequest) -> StreamingResponse:
+    model = (request.model or default_model()).strip()
+    query = request.query.strip()
+    if not model:
+        raise HTTPException(status_code=400, detail="Model is required.")
+    if not query:
+        raise HTTPException(status_code=400, detail="Query is required.")
+    if not is_ollama_configured():
+        raise HTTPException(status_code=503, detail="Ollama Cloud is not configured. Set OLLAMA_API_KEY.")
+
+    try:
+        messages, sources = await _build_ai_search_messages(request)
+    except OllamaCloudError as exc:
+        raise HTTPException(status_code=exc.status_code, detail=str(exc)) from exc
+
+    return StreamingResponse(
+        _stream_ollama_events(model=model, messages=messages, think=request.think, sources=sources),
+        media_type="text/event-stream",
+        headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"},
+    )
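
A quick way to smoke-test the new streaming endpoints: a minimal Python client sketch, assuming the API is served on localhost:8000 with OLLAMA_API_KEY set (the host, port, and example query are illustrative, not part of this commit):

    import requests

    resp = requests.post(
        "http://localhost:8000/ai/search/stream",
        json={"query": "fastapi streaming", "include_web": True},
        stream=True,
    )
    for line in resp.iter_lines(decode_unicode=True):
        if line:
            print(line)  # SSE frames: "event: meta|thinking|content|done|error" then "data: {...}"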