# Flask blog application: Flask-FlatPages markdown blog plus reverse proxies
# for Umami analytics (/stats/*) and a streaming LLM chat backend (/proxy-chat).
from flask import Flask, render_template, Response, request
from flask_flatpages import FlatPages
from werkzeug.middleware.proxy_fix import ProxyFix
import requests

# --- Application setup ------------------------------------------------------
# NOTE: the original file created a *second* Flask(__name__) further down and
# rebound `app` to it, so all routes registered on an app without the ProxyFix
# middleware or the FLATPAGES config. There must be exactly one app instance.
app = Flask(__name__)

# Running behind a reverse proxy: trust X-Forwarded-Proto / X-Forwarded-Host
# so url_for() and redirects build correct external URLs.
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)

app.config.update(
    FLATPAGES_AUTO_RELOAD=True,   # re-read .md files on each request (dev-friendly)
    FLATPAGES_EXTENSION='.md',
    FLATPAGES_MARKDOWN_EXTENSIONS=['fenced_code', 'tables'],
)

pages = FlatPages(app)

# The local address where Umami is running
UMAMI_LOCAL_URL = "http://192.168.0.37:3007"
@app.route('/stats/<path:path>', methods=['GET', 'POST'])
def umami_proxy(path):
    """Reverse-proxy /stats/* requests to the local Umami analytics server.

    Forwards the incoming GET/POST to UMAMI_LOCAL_URL/<path> and relays
    Umami's response back, stripping headers that would conflict with the
    body Flask re-encodes.
    """
    url = f"{UMAMI_LOCAL_URL}/{path}"

    # Drop the client's Host header so Umami sees its own host, not ours;
    # forwarding it verbatim can break upstream virtual-host routing.
    fwd_headers = {k: v for k, v in request.headers if k.lower() != 'host'}

    # Forward the request to Umami (including headers and data).
    if request.method == 'POST':
        # get_json(silent=True) returns None instead of aborting with
        # 400/415 when the body is missing or not valid JSON.
        resp = requests.post(url, json=request.get_json(silent=True),
                             headers=fwd_headers)
    else:
        resp = requests.get(url, params=request.args, headers=fwd_headers)

    # Clean up headers to avoid encoding issues: Flask serves resp.content
    # decoded, so the upstream encoding/length/hop-by-hop headers are wrong.
    excluded_headers = ['content-encoding', 'content-length',
                        'transfer-encoding', 'connection']
    headers = [(name, value) for (name, value) in resp.raw.headers.items()
               if name.lower() not in excluded_headers]

    return Response(resp.content, resp.status_code, headers)
@app.route('/')
def index():
    """Landing page: all flatpages sorted newest-first by 'date' metadata."""
    from datetime import date

    # Pages with no 'date' entry previously made sorted() raise TypeError
    # (None is not orderable); treat them as date.min so they sort last
    # after reverse=True. Assumes dates are YAML date objects — TODO confirm.
    posts = sorted(pages, key=lambda p: p.meta.get('date') or date.min,
                   reverse=True)
    return render_template('index.html', posts=posts)
@app.route('/about')
def about():
    """Render the static about page."""
    rendered = render_template('about.html')
    return rendered
@app.route('/proxy-chat', methods=['POST'])
def proxy_chat():
    """Stream a chat-completion request through to the local LLM backend.

    Forces 'stream': True on the forwarded payload and relays the backend's
    chunks to the client as they arrive (text/event-stream). Returns 400 on
    a missing/invalid JSON body, 504 on backend timeout, 500 otherwise.
    """
    target_url = "http://192.168.0.37:3911/v1/chat/completions"

    # request.json aborts with 400/415 on a bad body, and a None payload
    # would make payload['stream'] raise TypeError — guard explicitly.
    payload = request.get_json(silent=True)
    if payload is None:
        return {"error": "Request body must be JSON"}, 400

    # Ensure 'stream' is set to True for the backend
    payload['stream'] = True

    try:
        # We use stream=True so requests doesn't buffer the whole response
        response = requests.post(
            target_url,
            json=payload,
            timeout=300,
            stream=True,
        )

        def generate():
            # This yields chunks of data to the browser as they arrive
            for chunk in response.iter_content(chunk_size=None):
                if chunk:
                    yield chunk

        return Response(
            generate(),
            content_type='text/event-stream'  # Standard for streaming AI responses
        )

    except requests.exceptions.Timeout:
        return {"error": "Backend timed out"}, 504
    except Exception as e:
        # Broad catch is deliberate at this HTTP boundary: report the
        # failure as a 500 rather than crashing the worker.
        return {"error": str(e)}, 500
@app.route('/post/<path:path>/')
def post(path):
    """Render a single blog post; 404 when no flatpage matches the path."""
    entry = pages.get_or_404(path)
    return render_template('post.html', page=entry)
@app.route('/tag/<string:tag_name>/')
def tag(tag_name):
    """List every flatpage whose 'tags' metadata contains tag_name."""
    matching = []
    for candidate in pages:
        if tag_name in candidate.meta.get('tags', []):
            matching.append(candidate)
    return render_template('tag.html', pages=matching, tag_name=tag_name)
if __name__ == "__main__":
    # Dev entry point. NOTE(review): debug=True enables the Werkzeug debugger
    # and reloader — do not expose this on 0.0.0.0 in production; the
    # production path is presumably the WSGI app behind the proxy configured
    # above. Confirm before deploying.
    app.run(host='0.0.0.0', port=5001, debug=True)