miaohaiyuan committed
Commit 2b7833d · Parent: d0454ef

add llama3 model

Files changed (1): app.py (+2 -1)
app.py CHANGED
@@ -4,9 +4,9 @@ import streamlit as st
 from typing import Generator
 from groq import Groq
 
-_ = load_dotenv(find_dotenv())
 st.set_page_config(page_icon="💬", layout="wide", page_title="Groq Chat Bot...")
 
+_ = load_dotenv(find_dotenv())
 
 def icon(emoji: str):
     """Shows an emoji as a Notion-style page icon."""
@@ -40,6 +40,7 @@ models = {
     },
     "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
     "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
+    "llama3-70b-8192": {"name": "LLaMA3-70b-chat", "tokens": 8192, "developer": "Meta"},
 }
 
 # Layout for model selection and max_tokens slider
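For context, a minimal sketch of how a models dict like this is typically consumed further down in app.py. The selectbox, slider, and Groq chat call below are assumptions about the surrounding code (suggested by the "# Layout for model selection and max_tokens slider" comment), not part of this commit:

import os
import streamlit as st
from dotenv import load_dotenv, find_dotenv
from groq import Groq

_ = load_dotenv(find_dotenv())
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Trimmed to the entry added by this commit; the real dict lists all models.
models = {
    "llama3-70b-8192": {"name": "LLaMA3-70b-chat", "tokens": 8192, "developer": "Meta"},
}

# Assumed layout for model selection and max_tokens slider.
model_option = st.selectbox(
    "Choose a model:",
    options=list(models.keys()),
    format_func=lambda key: models[key]["name"],
)
max_tokens = st.slider(
    "Max tokens:",
    min_value=512,
    max_value=models[model_option]["tokens"],
    value=min(4096, models[model_option]["tokens"]),
    step=512,
)

# One-shot (non-streaming) call for illustration; since the app imports
# Generator, the real code most likely streams response chunks instead.
response = client.chat.completions.create(
    model=model_option,
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=max_tokens,
)
st.write(response.choices[0].message.content)

Under this sketch, picking "LLaMA3-70b-chat" in the UI passes "llama3-70b-8192" straight through as the model parameter, which is why adding a single dict entry is enough to expose the new model.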