"""Flat-file Markdown blog with a streaming chat proxy.

Serves rendered FlatPages posts and relays chat-completion requests to a
local LLM backend, streaming the response through to the browser.
"""
import datetime

from flask import Flask, render_template, Response, request
from flask_flatpages import FlatPages
from werkzeug.middleware.proxy_fix import ProxyFix
import requests

app = Flask(__name__)
# Trust X-Forwarded-Proto / X-Forwarded-Host from the reverse proxy in front.
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)

app.config.update(
    FLATPAGES_AUTO_RELOAD=True,
    FLATPAGES_EXTENSION='.md',
    FLATPAGES_MARKDOWN_EXTENSIONS=['fenced_code', 'tables'],
)
pages = FlatPages(app)


@app.route('/')
def index():
    """Render the index with all posts, newest first by 'date' metadata."""
    # Pages without a 'date' sort last (date.min) instead of raising a
    # TypeError from comparing None against real dates.
    posts = sorted(
        pages,
        key=lambda p: p.meta.get('date') or datetime.date.min,
        reverse=True,
    )
    return render_template('index.html', posts=posts)


@app.route('/about')
def about():
    """Render the static about page."""
    return render_template('about.html')


@app.route('/proxy-chat', methods=['POST'])
def proxy_chat():
    """Relay a chat-completions request to the backend, streaming the reply.

    Forces 'stream': True on the forwarded payload and yields the backend's
    chunks to the client as they arrive (Server-Sent Events content type).

    Returns 504 if the backend times out, 500 on any other failure.
    """
    target_url = "http://192.168.0.37:5002/v1/chat/completions"

    # get_json(silent=True) returns None instead of raising on a missing or
    # malformed body; fall back to an empty dict so 'stream' can be set.
    payload = request.get_json(silent=True) or {}
    payload['stream'] = True

    try:
        # stream=True prevents requests from buffering the whole response.
        response = requests.post(
            target_url,
            json=payload,
            timeout=300,
            stream=True,
        )

        def generate():
            # Yield chunks of data to the browser as they arrive.
            for chunk in response.iter_content(chunk_size=None):
                if chunk:
                    yield chunk

        return Response(
            generate(),
            content_type='text/event-stream'  # Standard for streaming AI responses
        )
    except requests.exceptions.Timeout:
        return {"error": "Backend timed out"}, 504
    except Exception as e:
        return {"error": str(e)}, 500


@app.route('/post/<path:path>/')
def post(path):
    """Render a single post located by its flatpages path (404 if missing)."""
    page = pages.get_or_404(path)
    return render_template('post.html', page=page)


@app.route('/tag/<tag_name>/')
def tag(tag_name):
    """Render the list of posts whose 'tags' metadata contains tag_name."""
    tagged_pages = [p for p in pages if tag_name in p.meta.get('tags', [])]
    return render_template('tag.html', pages=tagged_pages, tag_name=tag_name)


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5001, debug=True)