wormcode committed on
Commit
22ff099
·
1 Parent(s): e1c98da

add DebugInfo

Browse files
Files changed (1) hide show
  1. app.py +11 -1
app.py CHANGED
@@ -33,6 +33,8 @@ def main(
33
  base_model
34
  ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
35
 
 
 
36
  prompter = Prompter(prompt_template)
37
  tokenizer = LlamaTokenizer.from_pretrained(base_model)
38
  if device == "cuda":
@@ -91,6 +93,8 @@ def main(
91
  max_new_tokens=256,
92
  **kwargs,
93
  ):
 
 
94
  prompt = prompter.generate_prompt(instruction, input)
95
  inputs = tokenizer(prompt, return_tensors="pt")
96
  input_ids = inputs["input_ids"].to(device)
@@ -111,7 +115,8 @@ def main(
111
  )
112
  s = generation_output.sequences[0]
113
  output = tokenizer.decode(s)
114
- return prompter.get_response(output)
 
115
 
116
  gr.Interface(
117
  fn=evaluate,
@@ -143,6 +148,11 @@ def main(
143
  lines=5,
144
  label="Output",
145
  )
 
 
 
 
 
146
  ],
147
  title="🦙🌲 Alpaca-LoRA",
148
  description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).", # noqa: E501
 
33
  base_model
34
  ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
35
 
36
+ DebugInfo=[] #this is mainly for debug 2023.08.25
37
+
38
  prompter = Prompter(prompt_template)
39
  tokenizer = LlamaTokenizer.from_pretrained(base_model)
40
  if device == "cuda":
 
93
  max_new_tokens=256,
94
  **kwargs,
95
  ):
96
+ DebugInfo.append("1.Enter in evaluate.")#TBD
97
+
98
  prompt = prompter.generate_prompt(instruction, input)
99
  inputs = tokenizer(prompt, return_tensors="pt")
100
  input_ids = inputs["input_ids"].to(device)
 
115
  )
116
  s = generation_output.sequences[0]
117
  output = tokenizer.decode(s)
118
+ DebugInfo.append("2.Generate out decode completed.")#TBD
119
+ return prompter.get_response(output),DebugInfo
120
 
121
  gr.Interface(
122
  fn=evaluate,
 
148
  lines=5,
149
  label="Output",
150
  )
151
+ ,
152
+ gr.inputs.Textbox(
153
+ lines=5,
154
+ label="DebugInfo",
155
+ )
156
  ],
157
  title="🦙🌲 Alpaca-LoRA",
158
  description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).", # noqa: E501