Deepakraj2006 committed (verified)
Commit 1d72475 · Parent: dd9aa05

Update CODE_LLAMA.py

Files changed (1): CODE_LLAMA.py (+64 -64)
CODE_LLAMA.py CHANGED
@@ -1,64 +1,64 @@
- # Import necessary packages
- from ibm_watsonx_ai import Credentials
- from ibm_watsonx_ai import APIClient
- from ibm_watsonx_ai.foundation_models import Model, ModelInference
- from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
- from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
- import gradio as gr
-
- watsonx_API = "L0sx3BXcQRWNmz45mbBLxL1UiZGnftHFQTwITAci-523"
- project_id = "ed8f7a2c-e597-4a09-a98f-dbdcef57a0d0"
-
- # Set credentials to use the model
- credentials = {
-     "url": "https://au-syd.ml.cloud.ibm.com",
-     "apikey": watsonx_API
- }
-
- # Model and project settings
- model_id = "codellama/CodeLlama-7b-Instruct-hf"  # Directly specifying the LLAMA3 model
- project_id = project_id  # Specifying project_id as provided
-
- params = TextChatParameters(
-     temperature=0.1,
-     max_tokens=1024
- )
-
- # Initialize the model
- model = ModelInference(
-     model_id=model_id,
-     credentials=credentials,
-     project_id=project_id,
-     params=params
- )
-
- # Function to generate a response from the model
- def generate_response(prompt_txt):
-     messages = [
-         {
-             "role": "user",
-             "content": [
-                 {
-                     "type": "text",
-                     "text": prompt_txt
-                 },
-             ]
-         }
-     ]
-     generated_response = model.chat(messages=messages)
-     generated_text = generated_response['choices'][0]['message']['content']
-
-     return generated_text
-
- # Create Gradio interface
- chat_application = gr.Interface(
-     fn=generate_response,
-     flagging_mode="never",
-     inputs=gr.Textbox(label="Input", lines=2, placeholder="Type your question here..."),
-     outputs=gr.Textbox(label="Output"),
-     title="Watsonx.ai Chatbot",
-     description="Ask any question and the chatbot will try to answer."
- )
-
- # Launch the app
- chat_application.launch(share=True)
+ # Import necessary packages
+ from ibm_watsonx_ai import Credentials
+ from ibm_watsonx_ai import APIClient
+ from ibm_watsonx_ai.foundation_models import Model, ModelInference
+ from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
+ from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
+ import gradio as gr
+
+ watsonx_API = "L0sx3BXcQRWNmz45mbBLxL1UiZGnftHFQTwITAci-523"
+ project_id = "ed8f7a2c-e597-4a09-a98f-dbdcef57a0d0"
+
+ # Set credentials to use the model
+ credentials = {
+     "url": "https://au-syd.ml.cloud.ibm.com",
+     "apikey": watsonx_API
+ }
+
+ # Model and project settings
+ model_id = "codellama/CodeLlama-7b-Instruct-hf"  # Directly specifying the LLAMA3 model
+ project_id = project_id  # Specifying project_id as provided
+
+ params = TextChatParameters(
+     temperature=0.1,
+     max_tokens=1024
+ )
+
+ # Initialize the model
+ model = ModelInference(
+     model_id=model_id,
+     credentials=credentials,
+     project_id=project_id,
+     params=params
+ )
+
+ # Function to generate a response from the model
+ def generate_response(prompt_txt):
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": prompt_txt
+                 },
+             ]
+         }
+     ]
+     generated_response = model.chat(messages=messages)
+     generated_text = generated_response['choices'][0]['message']['content']
+
+     return generated_text
+
+ # Create Gradio interface
+ chat_application = gr.Interface(
+     fn=generate_response,
+     flagging_mode="never",
+     inputs=gr.Textbox(label="Input", lines=2, placeholder="Type your question here..."),
+     outputs=gr.Textbox(label="Output"),
+     title="CodeLLama Chatbot",
+     description="Ask coding related questions and the chatbot will try to answer."
+ )
+
+ # Launch the app
+ chat_application.launch(share=True)
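The committed script pins the CodeLlama model (codellama/CodeLlama-7b-Instruct-hf; the inline comment's mention of LLAMA3 is a leftover) and embeds a live watsonx API key and project ID directly in source. Below is a minimal sketch of the same ModelInference setup with the secrets drawn from environment variables instead; WATSONX_APIKEY and WATSONX_PROJECT_ID are illustrative names, not variables the script defines.

import os

from ibm_watsonx_ai.foundation_models import ModelInference
from ibm_watsonx_ai.foundation_models.schema import TextChatParameters

# Same region endpoint, model, and parameters as the script above;
# only the secret handling differs. Raises KeyError if a variable is unset.
credentials = {
    "url": "https://au-syd.ml.cloud.ibm.com",
    "apikey": os.environ["WATSONX_APIKEY"],       # assumed env var name
}

model = ModelInference(
    model_id="codellama/CodeLlama-7b-Instruct-hf",
    credentials=credentials,
    project_id=os.environ["WATSONX_PROJECT_ID"],  # assumed env var name
    params=TextChatParameters(temperature=0.1, max_tokens=1024),
)

Keeping the secrets in the environment also means the key never lands in version control alongside the code.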
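For a quick smoke test without launching the Gradio UI, generate_response can be called directly. A sketch assuming valid credentials, with an illustrative prompt; the commented dict shape mirrors what the script already assumes when it indexes generated_response['choices'][0]['message']['content'].

# Illustrative smoke test; assumes `model` above initialized with valid credentials.
reply = generate_response("Write a Python function that reverses a string.")
print(reply)

# model.chat(messages=...) is assumed to return a dict shaped roughly like:
# {"choices": [{"message": {"role": "assistant", "content": "..."}}]}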