import gradio as gr
import requests
import os
## Bloom Inference API
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"  # Models hosted on HF expose an Inference API that can be called directly over HTTP
HF_TOKEN = os.environ["HF_TOKEN"]  # Create an access token named HF_TOKEN under your profile's Settings > Access Tokens, then add it as a repository secret in this Space's settings panel; os.environ reads it from there
# The Authorization header must carry your HF_TOKEN value as a bearer token.
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
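# The Inference API returns JSON; for text generation this is typically a list with one
# dict per sequence, e.g. [{"generated_text": "..."}], while failures usually come back
# as a dict such as {"error": "..."} (the code below assumes the success shape and only
# surfaces failures through its debug output).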
# Improved text generation function
def text_generate(prompt, generated_txt):
    # Initialize Thoughts variable to aggregate text
    Thoughts = ""
    # Debug: display the prompt
    Thoughts += f"Prompt: {prompt}\n"
    json_ = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            "return_full_text": True,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
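    # Parameter notes: top_p (nucleus sampling) and temperature control randomness, and
    # do_sample turns sampling on instead of greedy decoding. return_full_text=True makes
    # the API echo the prompt at the start of generated_text, which the prompt-length
    # slicing further down relies on. wait_for_model asks the API to block until Bloom is
    # loaded rather than returning an error, and use_cache allows cached results for
    # identical requests.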
    response = requests.post(API_URL, headers=headers, json=json_)
    output = response.json()
    # Debug: display the output
    Thoughts += f"Output: {output}\n"
    output_tmp = output[0]['generated_text']
    # Debug: display the output_tmp
    Thoughts += f"output_tmp is: {output_tmp}\n"
    solution = output_tmp.split("\nQ:")[0]
    # Debug: display the solution after splitting
    Thoughts += f"Final response after splits is: {solution}\n"
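    # Bloom often keeps writing follow-up "Q:" / "Output:" blocks or blank-line separated
    # chunks after the continuation we want, so the text is trimmed at the first such marker.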
    if '\nOutput:' in solution:
        final_solution = solution.split("\nOutput:")[0]
        Thoughts += f"Response after removing output is: {final_solution}\n"
    elif '\n\n' in solution:
        final_solution = solution.split("\n\n")[0]
        Thoughts += f"Response after removing new line entries is: {final_solution}\n"
    else:
        final_solution = solution
    if len(generated_txt) == 0:
        display_output = final_solution
    else:
        display_output = generated_txt[:-len(prompt)] + final_solution
    new_prompt = final_solution[len(prompt):]
    # Debug: display the new prompt for the next cycle
    Thoughts += f"new prompt for next cycle is: {new_prompt}\n"
    Thoughts += f"display_output for printing on screen is: {display_output}\n"
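    # If the model produced nothing beyond the prompt, reuse the last sentence of the
    # accumulated text as the next prompt so the following cycle never starts blank.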
    if len(new_prompt) == 0:
        temp_text = display_output[::-1]
        Thoughts += f"What is the last character of the sentence?: {temp_text[0]}\n"
        if temp_text[1] == '.':
            first_period_loc = temp_text[2:].find('.') + 1
            Thoughts += f"Location of last Period is: {first_period_loc}\n"
            new_prompt = display_output[-first_period_loc:-1]
            Thoughts += f"Not sending blank as prompt so new prompt for next cycle is: {new_prompt}\n"
        else:
            first_period_loc = temp_text.find('.')
            Thoughts += f"Location of last Period is: {first_period_loc}\n"
            new_prompt = display_output[-first_period_loc:-1]
            Thoughts += f"Not sending blank as prompt so new prompt for next cycle is: {new_prompt}\n"
        display_output = display_output[:-1]
    return display_output, new_prompt, Thoughts
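# Rough usage sketch (illustrative only, not executed by the app): the Generate button
# below feeds the function's outputs back into its own inputs, so successive clicks
# behave roughly like:
#
#   story, carry_over, trace = text_generate("Once upon a time", "")   # first click
#   story, carry_over, trace = text_generate(carry_over, story)        # each later click
#
# The names story/carry_over/trace are made up for this sketch.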
# Text generation
def text_generate_old(prompt, generated_txt):
    # Prints to debug the code
    print(f"*****Inside text_generate - Prompt is :{prompt}")
    json_ = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            # "max_new_tokens": 64,
            "return_full_text": True,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")
    if '\nOutput:' in solution:
        final_solution = solution.split("\nOutput:")[0]
        print(f"Response after removing output is: {final_solution}")
    elif '\n\n' in solution:
        final_solution = solution.split("\n\n")[0]
        print(f"Response after removing new line entries is: {final_solution}")
    else:
        final_solution = solution
    if len(generated_txt) == 0:
        display_output = final_solution
    else:
        display_output = generated_txt[:-len(prompt)] + final_solution
    new_prompt = final_solution[len(prompt):]
    print(f"New prompt for next cycle: {new_prompt}")
    print(f"Output final is : {display_output}")
    if len(new_prompt) == 0:
        temp_text = display_output[::-1]
        print(f"Last character of sentence: {temp_text[0]}")
        if temp_text[1] == '.':
            first_period_loc = temp_text[2:].find('.') + 1
            print(f"Location of last Period is: {first_period_loc}")
            new_prompt = display_output[-first_period_loc:-1]
            print(f"Not sending blank as prompt so new prompt for next cycle is : {new_prompt}")
        else:
            print("HERE")
            first_period_loc = temp_text.find('.')
            print(f"Last Period is : {first_period_loc}")
            new_prompt = display_output[-first_period_loc:-1]
            print(f"New prompt for next cycle is : {new_prompt}")
        display_output = display_output[:-1]
    return display_output, new_prompt
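# Gradio UI: a prompt box, an output box that accumulates the generated story, and a
# "Thoughts" box that surfaces the debug trace. The click handler writes its new prompt
# back into the input box so each click continues from the previous generation.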
demo = gr.Blocks()
with demo:
    with gr.Row():
        input_prompt = gr.Textbox(label="Write some text to get started...", lines=3,
                                  value="Dear human philosophers, I read your comments on my abilities and limitations with great interest.")
    with gr.Row():
        generated_txt = gr.Textbox(lines=5, visible=True)
    with gr.Row():
        Thoughts = gr.Textbox(lines=10, visible=True)
    generate = gr.Button("Generate")
    generate.click(text_generate, inputs=[input_prompt, generated_txt], outputs=[generated_txt, input_prompt, Thoughts])
demo.launch(enable_queue=True, debug=True)