# Hugging Face Space: Medical Consultation Chatbot (Space status: Paused)
| import gradio as gr | |
| import requests | |
| import os | |
| # Hugging Face API URL and token for the model | |
| API_URL = "/static-proxy?url=https%3A%2F%2Fapi-inference.huggingface.co%2Fmodels%2Fgoogle%2Fbigbird-pegasus-large-pubmed%26quot%3B%3C%2Fspan%3E%3C!-- HTML_TAG_END --> | |
| HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY") | |
| # Define a function to send user input to the model | |
| def get_bot_response(user_input): | |
| headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"} | |
| response = requests.post(API_URL, headers=headers, json={"inputs": user_input}) | |
| # Debugging: print status and response | |
| print("Status Code:", response.status_code) | |
| print("Response:", response.text) | |
| if response.status_code == 200: | |
| result = response.json() | |
| bot_response = result[0].get("generated_text", "Sorry, I couldn't generate a response.") | |
| else: | |
| bot_response = "Sorry, the model is currently unavailable." | |
| return bot_response | |
| # Set up Gradio interface | |
| with gr.Blocks() as demo: | |
| gr.Markdown("# Medical Consultation Chatbot") | |
| user_input = gr.Textbox(label="Enter your question:") | |
| output = gr.Textbox(label="Bot Response") | |
| # On submit, call the get_bot_response function | |
| user_input.submit(get_bot_response, user_input, output) | |
| demo.launch() | |