import gradio as gr
import requests, json

public_ip = '71.202.66.108'
model = 'llama3.1:latest'  # You can replace the model name if needed
context = []

# ollama_serve = f"http://{mac_pro_ip}:11434/api/generate"
ollama_serve = f"http://{public_ip}:11434/api/generate"

# Call the Ollama API and stream the response back line by line
def generate(prompt, context, top_k, top_p, temp):
    r = requests.post(ollama_serve,
                      json={
                          'model': model,
                          'prompt': prompt,
                          'context': context,
                          'options': {
                              'top_k': top_k,
                              'temperature': temp,
                              'top_p': top_p
                          }
                      },
                      stream=True)
    r.raise_for_status()

    response = ""
    for line in r.iter_lines():
        body = json.loads(line)
        response_part = body.get('response', '')
        print(response_part)
        if 'error' in body:
            raise Exception(body['error'])
        response += response_part
        if body.get('done', False):
            context = body.get('context', [])
            return response, context

def chat(input, chat_history, top_k, top_p, temp):
    chat_history = chat_history or []
    global context
    output, context = generate(input, context, top_k, top_p, temp)
    chat_history.append((input, output))
    # The first history in `return chat_history, chat_history` updates the
    # chatbot widget; the second updates the state (which maintains the
    # conversation history across interactions).
    return chat_history, chat_history

######################### Gradio Code ##########################

block = gr.Blocks()

with block:
    gr.Markdown("""