Private
Public Access
1
0

eventstream and longer timeout
All checks were successful
Redeploy landing on Push / Explore-Gitea-Actions (push) Successful in 7s

This commit is contained in:
2025-12-22 21:38:12 +01:00
parent daaaa9c545
commit 3dfa3da397
2 changed files with 16 additions and 18 deletions

26
app.py
View File

@@ -27,32 +27,34 @@ def about():
def proxy_chat():
    """Proxy a chat-completion request to the local LLM backend and stream
    the reply back to the client as it arrives.

    Forwards the incoming JSON body to the backend's OpenAI-compatible
    ``/v1/chat/completions`` endpoint with ``stream`` forced on, and relays
    the chunked response as ``text/event-stream`` so the browser can render
    tokens incrementally.

    Returns:
        A streaming ``Response`` mirroring the backend's status code, or a
        JSON error tuple: 400 (non-JSON body), 504 (backend timeout),
        500 (any other proxy failure).
    """
    # NOTE(review): backend address is hard-coded; presumably a LAN-only
    # deployment — consider moving to config if this ever changes.
    target_url = "http://192.168.0.37:5002/v1/chat/completions"

    # request.json raises/returns None for a non-JSON body, which would make
    # payload['stream'] blow up with a TypeError (-> opaque 500). Validate
    # explicitly and answer with a clear 400 instead.
    payload = request.get_json(silent=True)
    if payload is None:
        return {"error": "Request body must be JSON"}, 400

    # Force streaming on the backend regardless of what the client sent.
    payload['stream'] = True

    try:
        # stream=True so requests doesn't buffer the whole response in RAM.
        response = requests.post(
            target_url,
            json=payload,
            timeout=300,
            stream=True
        )

        def generate():
            # Yield chunks to the browser as they arrive from the backend.
            # chunk_size=None forwards each chunk as soon as it is received
            # instead of re-buffering into fixed-size pieces.
            for chunk in response.iter_content(chunk_size=None):
                if chunk:
                    yield chunk

        return Response(
            generate(),
            status=response.status_code,
            content_type='text/event-stream'  # Standard for streaming AI responses
        )
    except requests.exceptions.Timeout:
        return {"error": "Backend timed out"}, 504
    except Exception as e:
        app.logger.error(f"Proxy error: {str(e)}")
        return {"error": str(e)}, 500
@app.route('/post/<path:path>/')
def post(path):
page = pages.get_or_404(path)