jchen8000 committed
Commit 7190d80 · verified · 1 Parent(s): fc616b7

Update app.py

Files changed (1)
  1. app.py +11 -0
app.py CHANGED
@@ -108,6 +108,17 @@ def generate_response(query, history, model, temperature, max_tokens, top_p, see
     return response


+ # gr.Markdown("""
+ # ### 1. Attention Is All You Need (Vaswani et al., 2017)
+ # This groundbreaking paper introduced the **Transformer** architecture. It revolutionized natural language processing by enabling parallelization and significantly improving performance on tasks like translation, leading to models like *BERT* and *GPT*.
+
+ # ### 2. Generative Adversarial Nets (Goodfellow et al., 2014)
+ # This paper proposed **GANs**, a novel framework for generative modeling using two neural networks—a generator and a discriminator—that compete in a zero-sum game.
+
+ # ### 3. Parameter-Efficient Transfer Learning for NLP (Houlsby et al., 2019)
+ # This paper introduces **adapter modules**, a method for fine-tuning large pre-trained language models with significantly fewer parameters.
+ # """)
+

 additional_inputs = [
     gr.Dropdown(choices=["llama-3.3-70b-versatile", "llama-3.1-8b-instant", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma2-9b-it"], value="gemma2-9b-it", label="Model"),
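For context, a minimal sketch of how the pieces visible in this diff could fit together in a Gradio app. The truncated last parameter is assumed to be a seed, and the stub body of generate_response plus the slider/number controls after the Dropdown are illustrative placeholders, not taken from this commit.

import gradio as gr

# Placeholder for the app's real model call; the body and the final "seed"
# parameter name are assumptions (the hunk header truncates the signature).
def generate_response(query, history, model, temperature, max_tokens, top_p, seed):
    return f"[{model}] echo: {query}"

additional_inputs = [
    gr.Dropdown(
        choices=["llama-3.3-70b-versatile", "llama-3.1-8b-instant", "llama3-70b-8192",
                 "llama3-8b-8192", "mixtral-8x7b-32768", "gemma2-9b-it"],
        value="gemma2-9b-it",
        label="Model",
    ),
    # The remaining controls are illustrative stand-ins, not part of this commit.
    gr.Slider(0.0, 2.0, value=0.7, label="Temperature"),
    gr.Slider(64, 4096, value=1024, step=64, label="Max tokens"),
    gr.Slider(0.0, 1.0, value=0.95, label="Top-p"),
    gr.Number(value=42, precision=0, label="Seed"),
]

# gr.ChatInterface calls fn(message, history, *additional_inputs), which lines up
# with the generate_response signature shown in the hunk header above.
demo = gr.ChatInterface(fn=generate_response, additional_inputs=additional_inputs)
demo.launch()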