youzarsiph committed
Commit 2981ba9
1 Parent(s): ed25b0e
Files changed (4)
  1. .gitignore +132 -0
  2. README.md +3 -3
  3. app.py +36 -31
  4. requirements.txt +1 -1
.gitignore ADDED
@@ -0,0 +1,132 @@
+# Static files css / js
+node_modules/
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 title: Mistral Small Instruct 2409 Demo
 emoji: 💬
-colorFrom: yellow
-colorTo: purple
+colorFrom: blue
+colorTo: pink
 sdk: gradio
 sdk_version: 4.36.1
 app_file: app.py
@@ -11,4 +11,4 @@ license: mit
 short_description: A demo space for mistralai/Mistral-Small-Instruct-2409
 ---
 
-An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
+Demo chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.25.0/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index) for mistralai/Mistral-Small-Instruct-2409.
app.py CHANGED
@@ -1,63 +1,68 @@
+"""Demo for mistralai/Mistral-Small-Instruct-2409"""
+
+from typing import List, Tuple, Union
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# HF InferenceClient
+client = InferenceClient("mistralai/Mistral-Small-Instruct-2409")
 
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
+def chat(
+    message: str,
+    history: List[Tuple[str, str]],
+    system_message: str,
+    max_tokens: Union[int, None],
+    temperature: Union[float, None],
+    top_p: Union[float, None],
 ):
+    """Chat demo for mistralai/Mistral-Small-Instruct-2409"""
+
+    # Chat history
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
+
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
+    # Add user message
     messages.append({"role": "user", "content": message})
 
-    response = ""
-
-    for message in client.chat_completion(
+    llm_message = client.chat_completion(
         messages,
         max_tokens=max_tokens,
-        stream=True,
         temperature=temperature,
         top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
+    )
+
+    # Add chatbot message
+    messages.append(
+        {
+            "role": "assistant",
+            "content": llm_message.choices[0].message.content,
+        }
+    )
+
+    yield llm_message.choices[0].message.content
 
-        response += token
-        yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# UI
 demo = gr.ChatInterface(
-    respond,
+    chat,
+    title="Mistral-Small-Instruct-2409",
+    description="A small version of Mistral AI, designed for instruction following tasks.",
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
     ],
 )
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
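
For readers who want to exercise the new non-streaming call path outside the Space, the following is a minimal sketch based on the updated app.py, not part of the commit itself. The HF_TOKEN environment variable and the prompt text are illustrative assumptions; the model name and sampling defaults come from the diff above.

# Sketch: call the same endpoint the Space uses, without Gradio.
# Assumes a valid Hugging Face token in the HF_TOKEN environment variable.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(
    "mistralai/Mistral-Small-Instruct-2409", token=os.getenv("HF_TOKEN")
)

# Same message structure app.py builds from the ChatInterface history
messages = [
    {"role": "system", "content": "You are a friendly Chatbot."},
    {"role": "user", "content": "Summarize what a Gradio ChatInterface does."},
]

# Non-streaming call, mirroring the updated chat() flow in app.py
output = client.chat_completion(messages, max_tokens=512, temperature=0.7, top_p=0.95)
print(output.choices[0].message.content)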
requirements.txt CHANGED
@@ -1 +1 @@
-huggingface_hub==0.22.2
+huggingface_hub==0.25.0
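
A quick way to confirm an environment picked up the new pin is to check the installed package metadata; a small sketch, assuming huggingface_hub is already installed from the updated requirements.txt:

# Sketch: verify the installed huggingface_hub matches the pinned version.
from importlib.metadata import version

installed = version("huggingface_hub")
assert installed == "0.25.0", f"Expected huggingface_hub==0.25.0, found {installed}"
print(f"huggingface_hub {installed} OK")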