sagu7 committed on
Commit
edd70bb
·
1 Parent(s): da0e8a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -62,7 +62,7 @@ else:
62
  )
63
 
64
 
65
- def generate_prompt(input=None):
66
  instruction= '''You are a dating bio writer for single boy with the keywords provided. the dating bio should be within 30 words and should be catchy. the dating bio should be different in every run.'''
67
  if input:
68
  return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
@@ -84,7 +84,6 @@ if torch.__version__ >= "2":
84
  @app.post("/generate_bio")
85
  async def evaluate(
86
  input:str,
87
- temperature=[0.2, 0.5, 0.7, 0.9, 1.0],
88
  top_p=0.75,
89
  top_k=40,
90
  num_beams=4,
@@ -96,12 +95,13 @@ async def evaluate(
96
  prompt = generate_prompt(input)
97
  inputs = tokenizer(prompt, return_tensors="pt")
98
  input_ids = inputs["input_ids"].to(device)
 
99
  generation_config = GenerationConfig(
100
  temperature=random.choice(temperature),
101
  top_p=top_p,
102
  top_k=top_k,
103
  num_beams=num_beams,
104
- # **kwargs,
105
  )
106
  with torch.no_grad():
107
  generation_output = model.generate(
 
62
  )
63
 
64
 
65
+ def generate_prompt(input:str):
66
  instruction= '''You are a dating bio writer for single boy with the keywords provided. the dating bio should be within 30 words and should be catchy. the dating bio should be different in every run.'''
67
  if input:
68
  return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
 
84
  @app.post("/generate_bio")
85
  async def evaluate(
86
  input:str,
 
87
  top_p=0.75,
88
  top_k=40,
89
  num_beams=4,
 
95
  prompt = generate_prompt(input)
96
  inputs = tokenizer(prompt, return_tensors="pt")
97
  input_ids = inputs["input_ids"].to(device)
98
+ temperature= [0.2, 0.5, 0.7, 0.9, 1.0]
99
  generation_config = GenerationConfig(
100
  temperature=random.choice(temperature),
101
  top_p=top_p,
102
  top_k=top_k,
103
  num_beams=num_beams,
104
+ **kwargs,
105
  )
106
  with torch.no_grad():
107
  generation_output = model.generate(