vsrinivas committed on
Commit
9dead07
·
verified ·
1 Parent(s): e181201

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -69
app.py CHANGED
@@ -15,23 +15,7 @@ model = AutoModelForCausalLM.from_pretrained(
15
  checkpoint, device_map="auto", offload_folder="offload", trust_remote_code=True)
16
 
17
  # tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, torch_dtype="auto")
18
- tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
19
-
20
- # model = AutoModelForCausalLM.from_pretrained(
21
- # checkpoint, device_map="auto",
22
- # # offload_folder="off_load",
23
- # trust_remote_code=True,
24
- # # torch_dtype="auto",
25
- # )
26
- # tokenizer = AutoTokenizer.from_pretrained(checkpoint,
27
- # trust_remote_code=True,
28
- # torch_dtype="auto",
29
- # )
30
-
31
- # model = "tiiuae/FalconLite2"
32
- # tokenizer = AutoTokenizer.from_pretrained(model,
33
- # torch_dtype="auto"
34
- # )
35
 
36
  pipeline = transformers.pipeline(
37
  "text-generation",
@@ -42,58 +26,6 @@ pipeline = transformers.pipeline(
42
  device_map="auto",
43
  )
44
 
45
- # pipeline = transformers.pipeline(
46
- # "text-generation",
47
- # model=model,
48
- # tokenizer=tokenizer,
49
- # # use_safetensors=True,
50
- # # torch_dtype=torch.bfloat16,
51
- # trust_remote_code=True,
52
- # device_map="auto",
53
- # offload_folder="off_load",
54
- # # offload_state_dict = True,
55
- # )
56
-
57
- # def format_chat_prompt(message, chat_history):
58
- # prompt = ""
59
- # for turn in chat_history:
60
- # user_message, bot_message = turn
61
- # prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
62
- # prompt = f"{prompt}\nUser: {message}\nAssistant:"
63
- # return prompt
64
-
65
- # def respond(message, chat_history):
66
- # formatted_prompt = format_chat_prompt(message, chat_history)
67
- # # print(formatted_prompt)
68
- # bot_message = generate_seqs(prompt = formatted_prompt,
69
- # max_new_tokens=1024,
70
- # stop_sequence=["\nUser:", "<|endoftext|>"]).split('Assistant: ')[-1]
71
-
72
- # chat_history.append((message, bot_message))
73
- # return "", chat_history
74
-
75
- # def generate_seqs(prompt, max_new_tokens=None, stop_sequence=None):
76
- # output = pipeline(prompt,
77
- # max_length=200,
78
- # max_new_tokens = max_new_tokens,
79
- # stop_sequence = stop_sequence,
80
- # do_sample=True,
81
- # top_k=10,
82
- # num_return_sequences=1,
83
- # eos_token_id=tokenizer.eos_token_id)
84
- # return output[0]['generated_text']
85
-
86
- # with gr.Blocks() as demo:
87
- # chatbot = gr.Chatbot(height=240) #just to fit the notebook
88
- # msg = gr.Textbox(label="Prompt")
89
- # btn = gr.Button("Submit")
90
- # clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
91
-
92
- # btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
93
- # msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot]) #Press enter to submit
94
- # demo.launch()
95
-
96
-
97
  def format_chat_prompt(message, chat_history, instruction):
98
  prompt = f"System:{instruction}"
99
  for turn in chat_history:
 
15
  checkpoint, device_map="auto", offload_folder="offload", trust_remote_code=True)
16
 
17
  # tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, torch_dtype="auto")
18
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
  pipeline = transformers.pipeline(
21
  "text-generation",
 
26
  device_map="auto",
27
  )
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  def format_chat_prompt(message, chat_history, instruction):
30
  prompt = f"System:{instruction}"
31
  for turn in chat_history: