ranamhamoud committed on
Commit
b5bcfdd
·
verified ·
1 Parent(s): 3856850

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -14
app.py CHANGED
@@ -13,12 +13,11 @@ DEFAULT_MAX_NEW_TOKENS = 1024
13
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
14
 
15
  DESCRIPTION = """\
16
- # Llama-2 7B Chat
17
- This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
18
- 🔎 For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
19
- 🔨 Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
20
  """
21
 
 
22
  LICENSE = """
23
  <p/>
24
  ---
@@ -50,9 +49,9 @@ def generate(
50
  message: str,
51
  chat_history: list[tuple[str, str]],
52
  max_new_tokens: int = 1024,
53
- temperature: float = 0.6,
54
- top_p: float = 0.9,
55
- top_k: int = 50,
56
  repetition_penalty: float = 1.2,
57
  ) -> Iterator[str]:
58
  conversation = []
@@ -72,9 +71,9 @@ def generate(
72
  streamer=streamer,
73
  max_new_tokens=max_new_tokens,
74
  do_sample=True,
75
- top_p=top_p,
76
- top_k=top_k,
77
- temperature=temperature,
78
  num_beams=1,
79
  repetition_penalty=repetition_penalty,
80
  )
@@ -91,11 +90,7 @@ chat_interface = gr.ChatInterface(
91
  fn=generate,
92
  stop_btn=None,
93
  examples=[
94
- ["Hello there! How are you doing?"],
95
  ["Can you explain briefly to me what is the Python programming language?"],
96
- ["Explain the plot of Cinderella in a sentence."],
97
- ["How many hours does it take a man to eat a Helicopter?"],
98
- ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
99
  ],
100
  )
101
 
 
13
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
14
 
15
  DESCRIPTION = """\
16
+ # Storytell AI
17
+ Welcome to the Storytell AI space, crafted with care by Ranam & George. Dive into the world of educational storytelling with our [Storytell](https://huggingface.co/ranamhamoud/storytell) model. This iteration of the Llama 2 model with 7 billion parameters is fine-tuned to generate educational stories that engage and educate. Enjoy a journey of discovery and creativity—your storytelling lesson begins here!
 
 
18
  """
19
 
20
+
21
  LICENSE = """
22
  <p/>
23
  ---
 
49
  message: str,
50
  chat_history: list[tuple[str, str]],
51
  max_new_tokens: int = 1024,
52
+ # temperature: float = 0.6,
53
+ # top_p: float = 0.9,
54
+ # top_k: int = 50,
55
  repetition_penalty: float = 1.2,
56
  ) -> Iterator[str]:
57
  conversation = []
 
71
  streamer=streamer,
72
  max_new_tokens=max_new_tokens,
73
  do_sample=True,
74
+ # top_p=top_p,
75
+ # top_k=top_k,
76
+ # temperature=temperature,
77
  num_beams=1,
78
  repetition_penalty=repetition_penalty,
79
  )
 
90
  fn=generate,
91
  stop_btn=None,
92
  examples=[
 
93
  ["Can you explain briefly to me what is the Python programming language?"],
 
 
 
94
  ],
95
  )
96