WICKED4950 committed on
Commit
61bd718
·
verified ·
1 Parent(s): 05c4dc9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -9,7 +9,6 @@ tf.keras.mixed_precision.set_global_policy(policy)
9
  strategy = tf.distribute.MultiWorkerMirroredStrategy()
10
 
11
  login(os.environ.get("hf_token"))
12
-
13
  name = "WICKED4950/GPT2mini-InstEsther0.3eV3.2"
14
  tokenizer = AutoTokenizer.from_pretrained(name)
15
  tokenizer.pad_token = tokenizer.eos_token
@@ -17,7 +16,7 @@ with strategy.scope():
17
  model = TFAutoModelForCausalLM.from_pretrained(name)
18
 
19
  def raw_pred(input, model, tokenizer, max_length=1024, temperature=0.2):
20
- input_ids = tokenizer.encode(input.capitalize(), return_tensors='tf')
21
 
22
  # Initialize variables
23
  generated_ids = input_ids
@@ -61,8 +60,8 @@ def respond(message, history):
61
  give_mod = ""
62
  history = history[-3:]
63
  for chunk in history:
64
- give_mod = give_mod + "<|SOH|>" + chunk[0] + "<|SOB|>" + chunk[1]
65
- give_mod = give_mod + "<|SOH|>" + message + "<|SOB|>"
66
  print(give_mod)
67
  response = ""
68
  for token in raw_pred(give_mod, model, tokenizer):
 
9
  strategy = tf.distribute.MultiWorkerMirroredStrategy()
10
 
11
  login(os.environ.get("hf_token"))
 
12
  name = "WICKED4950/GPT2mini-InstEsther0.3eV3.2"
13
  tokenizer = AutoTokenizer.from_pretrained(name)
14
  tokenizer.pad_token = tokenizer.eos_token
 
16
  model = TFAutoModelForCausalLM.from_pretrained(name)
17
 
18
  def raw_pred(input, model, tokenizer, max_length=1024, temperature=0.2):
19
+ input_ids = tokenizer.encode(input, return_tensors='tf')
20
 
21
  # Initialize variables
22
  generated_ids = input_ids
 
60
  give_mod = ""
61
  history = history[-3:]
62
  for chunk in history:
63
+ give_mod = give_mod + "<|SOH|>" + chunk[0].capitalize() + "<|SOB|>" + chunk[1]
64
+ give_mod = give_mod + "<|SOH|>" + message.capitalize() + "<|SOB|>"
65
  print(give_mod)
66
  response = ""
67
  for token in raw_pred(give_mod, model, tokenizer):