# Gradio demo app (scrape artifacts "Spaces: / Runtime error" removed from header)
import gradio as gr
def add_text(history, text):
    """Append the user's message to the chat history and clear the textbox.

    Args:
        history: list of [user_message, bot_response] pairs.
        text: the new user message.

    Returns:
        (updated history, "") — the empty string clears the input textbox.
    """
    # Use a mutable list (not a tuple) for the pair: the follow-up bot step
    # assigns history[-1][1] = response, which raises TypeError on a tuple.
    return history + [[text, None]], ""
def add_file(history, file):
    """Append an uploaded file to the chat history.

    Args:
        history: list of [user_message, bot_response] pairs.
        file: an uploaded-file object exposing a ``.name`` path attribute.

    Returns:
        The updated history.
    """
    # The inner 1-tuple (file.name,) is Gradio's "render as file" convention;
    # the outer pair is a mutable list so the bot step can assign the
    # response in place (a tuple here would make that assignment crash).
    return history + [[(file.name,), None]]
def bot(history):
    """Fill the newest history entry's response slot with a canned reply.

    Args:
        history: non-empty list of [user_message, bot_response] pairs; the
            last entry must be a mutable list (see add_text).

    Returns:
        The same history object, with the last response set.
    """
    history[-1][1] = "**That's cool!**"
    return history
""" | |
Alpaca model trained: example (n.b. can upload mine as a HF model to load from?) | |
""" | |
'''
from peft import PeftModel
from transformers import LLaMATokenizer, LLaMAForCausalLM, GenerationConfig

tokenizer = LLaMATokenizer.from_pretrained("chavinlo/alpaca-native")
model = LLaMAForCausalLM.from_pretrained(
    "chavinlo/alpaca-native",
    load_in_8bit=True,
    device_map="auto",
)
'''
def generateresponse(history):
    """Produce the bot reply for the newest message in *history*.

    The LLaMA/Alpaca inference path is kept below as commented-out reference
    code; the live implementation simply echoes the user's input back.

    Args:
        history: non-empty list of [user_message, bot_response] pairs; the
            last entry's response slot is filled in place.

    Returns:
        The updated history.
    """
    '''
    global model
    global tokenizer

    PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{user}
### Response:"""

    inputs = tokenizer(
        PROMPT,
        return_tensors="pt",
    )
    input_ids = inputs["input_ids"].cuda()

    generation_config = GenerationConfig(
        temperature=0.6,
        top_p=0.95,
        repetition_penalty=1.15,
    )
    print("Generating...")
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=256,
    )
    # NOTE(review): fixed name inconsistency in this reference code — the
    # accumulator was declared as `output` but appended to as `outputs`.
    outputs = []
    for s in generation_output.sequences:
        outputs.append(tokenizer.decode(s))
        print(tokenizer.decode(s))
    output = (outputs[0].split('### Response:'))[1]
    '''
    user = history[-1][0]
    response = f"you asked: {user}"
    history[-1][1] = response
    print(history)
    return history
# Base theme: indigo primary colour, with smaller prose text for the
# Markdown blocks in the UI below.
theme = gr.themes.Base(primary_hue="indigo").set(prose_text_size='*text_sm')
# Top-level UI definition: five placeholder tool tabs plus a working chatbot.
with gr.Blocks(title='Claimed', theme=theme) as demo:
    # Intro and disclaimer shown at the top of the demo.
    gr.Markdown("""
    ### Hey there, genius! π«‘ π«‘ π«‘
    Welcome to our demo! We've trained Meta's Llama on almost 200k data entries in the question/answer format.
    In the future, we are looking to expand our model's capabilities further to assist in a range of IP related tasks.
    If you are interested in using a more powerful model that we have trained, please get in touch!
    As far as data is concerned, you have nothing to worry about! We don't store any of your inputs to use for further training, we're not OpenAI π. We'd just like to know if this is something people would be interested in using!
    Please note that this is for research purposes and shouldn't be used commercially.
    None of the outputs should be taken as solid legal advice. If you are an inventor looking to patent an invention, always seek the help of a registered patent attorney.
    """)

    with gr.Tab("Text Drafter"):
        gr.Markdown("""
        You can use this tool to expand your idea using Claim Language.
        Example input: A device to help the visually impaired using proprioception.
        Output:
        """)
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        text_button = gr.Button("")

    with gr.Tab("Description Generator"):
        gr.Markdown("""
        You can use this tool to turn a claim into a
        Example input: A device to help the visually impaired using proprioception.
        Output:
        """)
        with gr.Row(scale=1, min_width=600):
            # Each tab gets its own variable names; the originals reused
            # text1/text2 across all four tabs, shadowing one another.
            desc_input = gr.Textbox(label="Input",
                                    placeholder='Type in your idea here!')
            desc_output = gr.Textbox(label="Output")

    with gr.Tab("Knowledge Graph"):
        gr.Markdown("""
        Are you more of a visual type? Use this tool to generate graphical representations of your ideas and how their features interlink.
        Example input: A device to help the visually impaired using proprioception.
        Output:
        """)
        with gr.Row(scale=1, min_width=600):
            graph_input = gr.Textbox(label="Input",
                                     placeholder='Type in your idea here!')
            graph_output = gr.Textbox(label="Output")

    with gr.Tab("Prosecution Ideator"):
        gr.Markdown("""
        Below is our
        Example input: A device to help the visually impaired using proprioception.
        Output:
        """)
        with gr.Row(scale=1, min_width=600):
            pros_input = gr.Textbox(label="Input",
                                    placeholder='Type in your idea here!')
            pros_output = gr.Textbox(label="Output")

    with gr.Tab("Claimed Infill"):
        gr.Markdown("""
        Below is our
        Example input: A device to help the visually impaired using proprioception.
        Output:
        """)
        with gr.Row(scale=1, min_width=600):
            infill_input = gr.Textbox(label="Input",
                                      placeholder='Type in your idea here!')
            infill_output = gr.Textbox(label="Output")

    gr.Markdown("""
    # THE CHATBOT
    Do you want a bit more freedom over the outputs you generate? No worries, you can use a chatbot version of our model below. You can ask it anything by the way, just try to keep it PG.
    If you're concerned about an output from the model, hit the flag button and we will use that information to improve the model.
    """)

    chatbot = gr.Chatbot([], elem_id="Claimed Assistant").style(height=500)
    with gr.Row():
        with gr.Column(scale=0.85):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter text and press enter, or upload an image",
            ).style(container=False)
        with gr.Column(scale=0.15, min_width=0):
            btn = gr.Button("Submit")

    # Pressing Enter runs the add_text -> generateresponse pipeline.
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
        generateresponse, chatbot, chatbot
    )
    # Fix: the Submit button was created but never wired to a handler, so
    # clicking it did nothing. Route it through the same pipeline.
    btn.click(add_text, [chatbot, txt], [chatbot, txt]).then(
        generateresponse, chatbot, chatbot
    )

demo.launch()