bstraehle committed on
Commit 241921c · 1 Parent(s): eeaec60

Update app.py

Files changed (1)
  1. app.py +15 -15
app.py CHANGED
@@ -10,8 +10,8 @@ wandb_api_key = os.environ["WANDB_API_KEY"]
 
 config = {
     "max_output_tokens": 800,
-    "model": "text-bison@001",
-    #"model": "gemini-pro",
+    #"model": "text-bison@001",
+    "model": "gemini-pro",
     "temperature": 0.1,
     "top_k": 40,
     "top_p": 1.0,
@@ -34,27 +34,27 @@ vertexai.init(project = project,
               credentials = credentials
              )
 
-from vertexai.language_models import TextGenerationModel
-generation_model = TextGenerationModel.from_pretrained(config.model)
-#from vertexai.preview.generative_models import GenerativeModel
-#generation_model = GenerativeModel(config.model)
+#from vertexai.language_models import TextGenerationModel
+#generation_model = TextGenerationModel.from_pretrained(config.model)
+from vertexai.preview.generative_models import GenerativeModel
+generation_model = GenerativeModel(config.model)
 
 def invoke(prompt):
-    completion = generation_model.predict(prompt = prompt,
-                                           max_output_tokens = config.max_output_tokens,
-                                           temperature = config.temperature,
-                                           top_k = config.top_k,
-                                           top_p = config.top_p,
-                                          ).text
+    #completion = generation_model.predict(prompt = prompt,
+    #                                       max_output_tokens = config.max_output_tokens,
+    #                                       temperature = config.temperature,
+    #                                       top_k = config.top_k,
+    #                                       top_p = config.top_p,
+    #                                      ).text
     #completion = generation_model.generate_content(prompt, generation_config = {
     #    "max_output_tokens": config.max_output_tokens,
     #    "temperature": config.temperature,
     #    "top_k": config.top_k,
     #    "top_p": config.top_p,
     #    }).text
-    wandb.log({"prompt": prompt, "completion": completion})
-    return completion
-    #return "Execution is commented out, to view the source code see https://huggingface.co/spaces/bstraehle/google-vertex-ai-llm/tree/main."
+    #wandb.log({"prompt": prompt, "completion": completion})
+    #return completion
+    return "🛑 Execution is commented out, to view the source code see https://huggingface.co/spaces/bstraehle/google-vertex-ai-llm/tree/main."
 
 description = """<a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://cloud.google.com/vertex-ai?hl=en/'>Google Vertex AI</a> API
 with gemini-pro foundation model. Model performance evaluation via <a href='https://wandb.ai/bstraehle'>Weights & Biases</a>."""
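For readers who want to see the switched-to code path in one piece, below is a minimal, self-contained sketch along the lines of the hunks above: GenerativeModel.generate_content on gemini-pro with the same sampling parameters, the prompt/completion pair logged to Weights & Biases, and a small Gradio interface as in the description. It is an illustration, not the repository's app.py: the environment variable names for the GCP project and service-account key, the Vertex AI location, and the wandb project name are assumptions, and because the visible hunks declare config as a plain dict while reading it with attribute syntax (config.model), the sketch uses dict indexing instead.

import os

import gradio as gr
import vertexai
import wandb
from google.oauth2 import service_account
from vertexai.preview.generative_models import GenerativeModel

# Sampling parameters copied from the config block in the diff above.
config = {
    "max_output_tokens": 800,
    "model": "gemini-pro",
    "temperature": 0.1,
    "top_k": 40,
    "top_p": 1.0,
}

# Assumed: a service-account JSON path and project id supplied via environment
# variables (how the real Space wires credentials is not visible in these hunks).
credentials = service_account.Credentials.from_service_account_file(
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
)
vertexai.init(project=os.environ["GCP_PROJECT_ID"],  # assumed variable name
              location="us-central1",                # assumed region
              credentials=credentials)

# WANDB_API_KEY comes from the hunk header; the project name here is illustrative.
wandb.login(key=os.environ["WANDB_API_KEY"])
wandb.init(project="google-vertex-ai-llm")

generation_model = GenerativeModel(config["model"])

def invoke(prompt):
    # Generate a completion from gemini-pro with the configured sampling settings.
    completion = generation_model.generate_content(
        prompt,
        generation_config={
            "max_output_tokens": config["max_output_tokens"],
            "temperature": config["temperature"],
            "top_k": config["top_k"],
            "top_p": config["top_p"],
        },
    ).text
    # Log the prompt/completion pair to Weights & Biases for later evaluation.
    wandb.log({"prompt": prompt, "completion": completion})
    return completion

description = "Gradio UI using the Google Vertex AI API with the gemini-pro foundation model."

demo = gr.Interface(fn=invoke,
                    inputs=gr.Textbox(label="Prompt", lines=5),
                    outputs=gr.Textbox(label="Completion", lines=5),
                    description=description)
demo.launch()

Running the sketch requires the google-cloud-aiplatform, wandb, and gradio packages plus valid GCP credentials; in the committed Space the model call is commented out, so invoke simply returns the 🛑 notice shown in the diff.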