bstraehle committed on
Commit
22a1dd5
·
1 Parent(s): aa69ed9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -12
app.py CHANGED
@@ -42,18 +42,21 @@ def wandb_log(prompt, completion):
42
  wandb.finish()
43
 
44
  def invoke(prompt):
45
- #completion = generation_model.predict(prompt = prompt,
46
- # max_output_tokens = config["max_output_tokens"],
47
- # temperature = config["temperature"],
48
- # top_k = config["top_k"],
49
- # top_p = config["top_p"],
50
- # ).text
51
- completion = generation_model.generate_content(prompt, generation_config = {
52
- "max_output_tokens": config["max_output_tokens"],
53
- "temperature": config["temperature"],
54
- "top_k": config["top_k"],
55
- "top_p": config["top_p"],
56
- }).text
 
 
 
57
  wandb_log(prompt, completion)
58
  return completion
59
  #return "🛑 Execution is commented out. To view the source code see https://huggingface.co/spaces/bstraehle/google-vertex-ai-llm/tree/main."
 
42
  wandb.finish()
43
 
44
  def invoke(prompt):
45
+ try:
46
+ #completion = generation_model.predict(prompt = prompt,
47
+ # max_output_tokens = config["max_output_tokens"],
48
+ # temperature = config["temperature"],
49
+ # top_k = config["top_k"],
50
+ # top_p = config["top_p"],
51
+ # ).text
52
+ completion = generation_model.generate_content(prompt, generation_config = {
53
+ "max_output_tokens": config["max_output_tokens"],
54
+ "temperature": config["temperature"],
55
+ "top_k": config["top_k"],
56
+ "top_p": config["top_p"],
57
+ }).text
58
+ except Exception as e:
59
+ raise gr.Error(e)
60
  wandb_log(prompt, completion)
61
  return completion
62
  #return "🛑 Execution is commented out. To view the source code see https://huggingface.co/spaces/bstraehle/google-vertex-ai-llm/tree/main."