TeleologyHI committed
Commit b19dc43 · 2 Parent(s): d3c2b0b d73def8

Update HIM implementation

Files changed (4):
  1. .gitattributes +38 -0
  2. README.md +14 -0
  3. app.py +67 -1
  4. requirements.txt +5 -1
.gitattributes CHANGED
@@ -1,3 +1,41 @@
+<<<<<<< HEAD
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
+=======
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+>>>>>>> origin/main
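Note that the conflict markers themselves are committed into .gitattributes, where Git will read `<<<<<<< HEAD` and `>>>>>>> origin/main` as literal, effectively never-matching pattern lines. Since the HEAD side (`*.bin`, `*.pt`, `*.pth`) is already a subset of the origin/main list, one plausible resolution is simply the origin/main side with the markers removed, sketched here as an excerpt:

```gitattributes
# Hypothetical resolved .gitattributes (excerpt): keep the full origin/main
# list, which already covers the three HEAD-side patterns.
*.bin filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
# ...remaining origin/main patterns as listed above...
```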
README.md ADDED
@@ -0,0 +1,14 @@
+---
+title: HIM Self
+emoji: 💬
+colorFrom: yellow
+colorTo: purple
+sdk: gradio
+sdk_version: 5.0.1
+app_file: app.py
+pinned: false
+license: mit
+short_description: The Hybrid Intelligence Model (HIM) is a consciousness-orien
+---
+
+An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
app.py CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+<<<<<<< HEAD
 from src.model.him_model import HIMModel
 from src.core.config import HIMConfig
 
@@ -33,4 +34,69 @@ interface = gr.Interface(
     ],
     title="Hybrid Intelligence Matrix (HIM)",
     description="Interact with the HIM system for advanced cognitive processing"
-)
+)
+=======
+from huggingface_hub import InferenceClient
+
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
+
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
+    ],
+)
+
+
+if __name__ == "__main__":
+    demo.launch()
+>>>>>>> origin/main
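As committed, app.py retains the `<<<<<<< HEAD` / `=======` / `>>>>>>> origin/main` markers, so the file is not valid Python and the Space will fail to start. Below is a minimal sketch of one possible resolution that keeps the origin/main `ChatInterface` path; the HEAD-side `HIMModel` code is dropped and the slider inputs are folded into default arguments for brevity, so treat it as illustrative rather than as the intended merge:

```python
import gradio as gr
from huggingface_hub import InferenceClient

# Hosted model taken from the origin/main side of the diff.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(message, history, system_message="You are a friendly Chatbot.",
            max_tokens=512, temperature=0.7, top_p=0.95):
    # Rebuild the full conversation, then stream the reply token by token.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(
        messages, max_tokens=max_tokens, stream=True,
        temperature=temperature, top_p=top_p,
    ):
        # The last stream chunk can carry a None delta, hence the `or ""`.
        response += chunk.choices[0].delta.content or ""
        yield response


demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()
```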
requirements.txt CHANGED
@@ -1,3 +1,4 @@
+<<<<<<< HEAD
 torch>=2.0.0
 transformers>=4.30.0
 gradio>=3.35.2
@@ -5,4 +6,7 @@ numpy>=1.24.0
 networkx>=3.1
 scipy>=1.10.0
 pandas>=2.0.0
-plotly>=5.15.0
+plotly>=5.15.0
+=======
+huggingface_hub==0.25.2
+>>>>>>> origin/main
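The markers land in requirements.txt as well, and pip will reject `<<<<<<< HEAD` as a malformed requirement. A plausible resolved file keeps both sides' dependencies (assuming every HEAD package is still needed):

```text
torch>=2.0.0
transformers>=4.30.0
gradio>=3.35.2
numpy>=1.24.0
networkx>=3.1
scipy>=1.10.0
pandas>=2.0.0
plotly>=5.15.0
huggingface_hub==0.25.2
```

Note that the README pins `sdk_version: 5.0.1`, so the Space runtime is Gradio 5.x and the `gradio>=3.35.2` floor is loose but compatible.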