thepolymerguy committed on
Commit
7f2ec6d
1 Parent(s): 3fbe817

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -4
app.py CHANGED
@@ -13,10 +13,83 @@ def bot(history):
13
  history[-1][1] = response
14
  return history
15
 
16
- with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  gr.Markdown("""
19
- #CLAIMED - A GENERATIVE TOOLKIT FOR PATENT ATTORNEYS
20
 
21
  Hey there, genius!
22
 
@@ -133,10 +206,10 @@ with gr.Blocks() as demo:
133
  placeholder="Enter text and press enter, or upload an image",
134
  ).style(container=False)
135
  with gr.Column(scale=0.15, min_width=0):
136
- btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
137
 
138
  txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
139
- bot, chatbot, chatbot
140
  )
141
 
142
  demo.launch()
 
13
  history[-1][1] = response
14
  return history
15
 
16
+ """
17
+
18
+ Alpaca model trained: example (n.b. can upload mine as a HF model to load from?)
19
+
20
+ """
21
+ '''
22
+ from peft import PeftModel
23
+ from transformers import LLaMATokenizer, LLaMAForCausalLM, GenerationConfig
24
+
25
+ tokenizer = LLaMATokenizer.from_pretrained("chavinlo/alpaca-native")
26
+
27
+ model = LLaMAForCausalLM.from_pretrained(
28
+ "chavinlo/alpaca-native",
29
+ load_in_8bit=True,
30
+ device_map="auto",
31
+ )
32
+ '''
33
+
34
+
35
def generateresponse(history):
    """Generate the bot's reply for the latest user message.

    Parameters
    ----------
    history : list[list]
        Chat history as ``[user_message, bot_response]`` pairs; the last
        entry's response slot is filled in place.

    Returns
    -------
    list[list]
        The same ``history`` list with the final response populated.
    """
    # NOTE(review): the real model call is stubbed out below; the live code
    # simply echoes the user's message back.
    #
    # Commented-out Alpaca/LLaMA inference template, kept as comments rather
    # than discarded string literals. Fix applied: the accumulator was
    # declared `output = []` but appended to / read as `outputs`.
    #
    # global model
    # global tokenizer
    #
    # PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
    # ### Instruction:
    # {user}
    # ### Response:"""
    #
    # inputs = tokenizer(PROMPT, return_tensors="pt")
    # input_ids = inputs["input_ids"].cuda()
    #
    # generation_config = GenerationConfig(
    #     temperature=0.6,
    #     top_p=0.95,
    #     repetition_penalty=1.15,
    # )
    # print("Generating...")
    # generation_output = model.generate(
    #     input_ids=input_ids,
    #     generation_config=generation_config,
    #     return_dict_in_generate=True,
    #     output_scores=True,
    #     max_new_tokens=256,
    # )
    # outputs = [tokenizer.decode(s) for s in generation_output.sequences]
    # response = outputs[0].split('### Response:')[1]

    user = history[-1][0]  # latest user message

    response = f"you asked: {user}"  # placeholder echo until the model is wired in
    history[-1][1] = response
    print(history)  # debug trace of the full conversation
    return history
82
+
83
# Gradio theme for the app: indigo primary hue with small prose text.
theme = gr.themes.Base(primary_hue="indigo").set(prose_text_size='*text_sm')
88
+
89
+ with gr.Blocks(title='Claimed', theme=theme) as demo:
90
 
91
  gr.Markdown("""
92
+ ### CLAIMED - A GENERATIVE TOOLKIT FOR PATENT ATTORNEYS
93
 
94
  Hey there, genius!
95
 
 
206
  placeholder="Enter text and press enter, or upload an image",
207
  ).style(container=False)
208
  with gr.Column(scale=0.15, min_width=0):
209
+ btn = gr.Button("Submit")
210
 
211
  txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
212
+ generateresponse, chatbot, chatbot
213
  )
214
 
215
  demo.launch()