bstraehle committed on
Commit
563878a
·
1 Parent(s): 0a17b5c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -19,7 +19,6 @@ config = {
19
 
20
  wandb.login(key = wandb_api_key)
21
  wandb.init(project = "vertex-ai-llm", config = config)
22
- config = wandb.config
23
 
24
  credentials = json.loads(credentials)
25
 
@@ -35,22 +34,22 @@ vertexai.init(project = project,
35
  )
36
 
37
  #from vertexai.language_models import TextGenerationModel
38
- #generation_model = TextGenerationModel.from_pretrained(config.model)
39
  from vertexai.preview.generative_models import GenerativeModel
40
- generation_model = GenerativeModel(config.model)
41
 
42
  def invoke(prompt):
43
  #completion = generation_model.predict(prompt = prompt,
44
- # max_output_tokens = config.max_output_tokens,
45
- # temperature = config.temperature,
46
- # top_k = config.top_k,
47
- # top_p = config.top_p,
48
  # ).text
49
  #completion = generation_model.generate_content(prompt, generation_config = {
50
- # "max_output_tokens": config.max_output_tokens,
51
- # "temperature": config.temperature,
52
- # "top_k": config.top_k,
53
- # "top_p": config.top_p,
54
  # }).text
55
  #wandb.log({"prompt": prompt, "completion": completion})
56
  #return completion
 
19
 
20
  wandb.login(key = wandb_api_key)
21
  wandb.init(project = "vertex-ai-llm", config = config)
 
22
 
23
  credentials = json.loads(credentials)
24
 
 
34
  )
35
 
36
  #from vertexai.language_models import TextGenerationModel
37
+ #generation_model = TextGenerationModel.from_pretrained(config["model"])
38
  from vertexai.preview.generative_models import GenerativeModel
39
+ generation_model = GenerativeModel(config["model"])
40
 
41
  def invoke(prompt):
42
  #completion = generation_model.predict(prompt = prompt,
43
+ # max_output_tokens = config["max_output_tokens"],
44
+ # temperature = config["temperature"],
45
+ # top_k = config["top_k"],
46
+ # top_p = config["top_p"],
47
  # ).text
48
  #completion = generation_model.generate_content(prompt, generation_config = {
49
+ # "max_output_tokens": config["max_output_tokens"],
50
+ # "temperature": config["temperature"],
51
+ # "top_k": config["top_k"],
52
+ # "top_p": config["top_p"],
53
  # }).text
54
  #wandb.log({"prompt": prompt, "completion": completion})
55
  #return completion