ReyDev committed on
Commit
74dace5
·
unverified ·
1 Parent(s): 32ff0c9

🔧 refactor(ai.py, app.py): optimize imports and improve code readability

Browse files

🔧 refactor(ai.py): move instantiation of Anthropic clients into methods to ensure correct API key usage
🔧 refactor(app.py): add API key validation and improve error messages for better user experience
🔧 refactor(settings.py): load environment variables at the start of the file for better code organization
🔧 refactor(Dockerfile): add creation of 'flagged' and 'gradio_cached_examples' directories to Dockerfile
🔧 refactor(.gitignore): remove comment to actually ignore 'flagged' and 'gradio_cached_examples' directories

.gitignore CHANGED
@@ -13,7 +13,7 @@ __pycache__
13
  # secret
14
  .env
15
 
16
- # flagged
17
- # gradio_cached_examples
18
 
19
  .venv
 
13
  # secret
14
  .env
15
 
16
+ flagged
17
+ gradio_cached_examples
18
 
19
  .venv
Dockerfile CHANGED
@@ -38,6 +38,8 @@ RUN pip install --upgrade poetry
38
  RUN python -m venv /venv
39
  RUN /venv/bin/pip install --upgrade pip wheel setuptools setuptools_rust
40
  COPY . .
 
 
41
 
42
  RUN poetry install
43
 
 
38
  RUN python -m venv /venv
39
  RUN /venv/bin/pip install --upgrade pip wheel setuptools setuptools_rust
40
  COPY . .
41
+ RUN mkdir flagged
42
+ RUN mkdir gradio_cached_examples
43
 
44
  RUN poetry install
45
 
claude_space/ai.py CHANGED
@@ -1,29 +1,18 @@
1
- import os
2
-
3
- import anthropic
4
  from dotenv import load_dotenv
5
 
6
- from claude_space.settings import settings
7
-
8
  load_dotenv()
9
 
10
 
11
- syncClient = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY, timeout=5)
12
- asyncClient = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY, timeout=60)
13
-
14
-
15
  class AnthropicCustom:
16
  def __init__(self, api_key, model, max_tokens=1000, prompt=""):
17
  self.api_key = api_key
18
  self.model = model
19
  self.max_tokens = max_tokens
20
  self.prompt = prompt
21
- if os.environ.get("ANTHROPIC_API_KEY") is not None:
22
- api_key = os.environ.get("ANTHROPIC_API_KEY")
23
- else:
24
- os.environ["ANTHROPIC_API_KEY"] = api_key
25
 
26
  def get_anthropic_response(self):
 
27
  response = syncClient.completions.create(
28
  prompt=self.prompt,
29
  model=self.model,
@@ -32,12 +21,13 @@ class AnthropicCustom:
32
  return response.completion
33
 
34
  async def get_anthropic_response_async(self):
 
35
  async for line in await asyncClient.completions.create(
36
  prompt=self.prompt,
37
  model=self.model,
38
  max_tokens_to_sample=self.max_tokens,
39
  stop_sequences=[
40
- anthropic.HUMAN_PROMPT,
41
  ],
42
  stream=True,
43
  ):
 
1
+ from anthropic import HUMAN_PROMPT, Anthropic, AsyncAnthropic
 
 
2
  from dotenv import load_dotenv
3
 
 
 
4
  load_dotenv()
5
 
6
 
 
 
 
 
7
  class AnthropicCustom:
8
  def __init__(self, api_key, model, max_tokens=1000, prompt=""):
9
  self.api_key = api_key
10
  self.model = model
11
  self.max_tokens = max_tokens
12
  self.prompt = prompt
 
 
 
 
13
 
14
  def get_anthropic_response(self):
15
+ syncClient = Anthropic(api_key=self.api_key, timeout=5)
16
  response = syncClient.completions.create(
17
  prompt=self.prompt,
18
  model=self.model,
 
21
  return response.completion
22
 
23
  async def get_anthropic_response_async(self):
24
+ asyncClient = AsyncAnthropic(api_key=self.api_key, timeout=60)
25
  async for line in await asyncClient.completions.create(
26
  prompt=self.prompt,
27
  model=self.model,
28
  max_tokens_to_sample=self.max_tokens,
29
  stop_sequences=[
30
+ HUMAN_PROMPT,
31
  ],
32
  stream=True,
33
  ):
claude_space/app.py CHANGED
@@ -1,11 +1,12 @@
1
  import re
2
 
3
- import anthropic
4
  import gradio as gr
 
5
  from gradio.components import Checkbox, Dropdown, IOComponent, Textbox
6
 
7
  from claude_space.ai import AnthropicCustom
8
  from claude_space.const import ClaudeModels, ModelTokenLength, Prompts
 
9
 
10
  conversation_history = ""
11
 
@@ -14,6 +15,10 @@ async def interact_with_ai(
14
  user_question, token, model, token_length, prompt, prompt_input, memory
15
  ):
16
  global conversation_history
 
 
 
 
17
 
18
  if memory:
19
  prompt = Prompts[prompt].value.format(
@@ -36,7 +41,7 @@ async def interact_with_ai(
36
  response_accumulated = ""
37
  async for response in anth.get_anthropic_response_async():
38
  response_accumulated += response
39
- conversation_history = f"{conversation_history} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
40
  yield response_accumulated
41
 
42
 
@@ -51,10 +56,14 @@ async def chat_with_ai(
51
  memory,
52
  ):
53
  global conversation_history
 
 
 
 
54
  if memory:
55
  for conversation in history:
56
  user_question, response_accumulated = conversation
57
- conversation_history = f"{conversation_history} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
58
  prompt = Prompts[prompt].value.format(memory=history, question=message)
59
  else:
60
  prompt = Prompts[prompt].value.format(memory="", question=message)
@@ -98,7 +107,12 @@ iface = gr.Interface(
98
  title="Claude Space",
99
  inputs=[
100
  Textbox(label="Question", placeholder="Enter a question here"),
101
- Textbox(label="Token", placeholder="Enter a token here", type="password"),
 
 
 
 
 
102
  Dropdown(
103
  choices=[model.value for model in ClaudeModels],
104
  label="Model",
@@ -155,5 +169,10 @@ gd = gr.TabbedInterface(
155
  [iface, cface], tab_names=["Claude Space", "Claude Chat"], title="Claude Space"
156
  )
157
  gd.queue(concurrency_count=75, max_size=100).launch(
158
- debug=True, share=False, server_name="0.0.0.0", server_port=7864
 
 
 
 
 
159
  )
 
1
  import re
2
 
 
3
  import gradio as gr
4
+ from anthropic import AI_PROMPT, HUMAN_PROMPT
5
  from gradio.components import Checkbox, Dropdown, IOComponent, Textbox
6
 
7
  from claude_space.ai import AnthropicCustom
8
  from claude_space.const import ClaudeModels, ModelTokenLength, Prompts
9
+ from claude_space.settings import settings
10
 
11
  conversation_history = ""
12
 
 
15
  user_question, token, model, token_length, prompt, prompt_input, memory
16
  ):
17
  global conversation_history
18
+ if token is None or token == "" and settings.ANTHROPIC_API_KEY is None:
19
+ raise TypeError(
20
+ "You have not provided an API key. Please provide one in the textbox."
21
+ )
22
 
23
  if memory:
24
  prompt = Prompts[prompt].value.format(
 
41
  response_accumulated = ""
42
  async for response in anth.get_anthropic_response_async():
43
  response_accumulated += response
44
+ conversation_history = f"{conversation_history} {HUMAN_PROMPT} {user_question} {AI_PROMPT} {response_accumulated}"
45
  yield response_accumulated
46
 
47
 
 
56
  memory,
57
  ):
58
  global conversation_history
59
+ if token is None or token == "" and settings.ANTHROPIC_API_KEY is None:
60
+ raise TypeError(
61
+ "You have not provided an API key. Please provide one in the textbox."
62
+ )
63
  if memory:
64
  for conversation in history:
65
  user_question, response_accumulated = conversation
66
+ conversation_history = f"{conversation_history} {HUMAN_PROMPT} {user_question} {AI_PROMPT} {response_accumulated}"
67
  prompt = Prompts[prompt].value.format(memory=history, question=message)
68
  else:
69
  prompt = Prompts[prompt].value.format(memory="", question=message)
 
107
  title="Claude Space",
108
  inputs=[
109
  Textbox(label="Question", placeholder="Enter a question here"),
110
+ Textbox(
111
+ label="Token",
112
+ info="You'll get this token from Anthropic console and this is mandatory",
113
+ placeholder="Enter a token here",
114
+ type="password",
115
+ ),
116
  Dropdown(
117
  choices=[model.value for model in ClaudeModels],
118
  label="Model",
 
169
  [iface, cface], tab_names=["Claude Space", "Claude Chat"], title="Claude Space"
170
  )
171
  gd.queue(concurrency_count=75, max_size=100).launch(
172
+ debug=True,
173
+ share=False,
174
+ server_name="0.0.0.0",
175
+ server_port=7860,
176
+ show_error=True,
177
+ show_tips=True,
178
  )
claude_space/settings.py CHANGED
@@ -1,5 +1,9 @@
1
  import os
2
 
 
 
 
 
3
 
4
  class Settings:
5
 
 
1
  import os
2
 
3
+ from dotenv import load_dotenv
4
+
5
+ load_dotenv()
6
+
7
 
8
  class Settings:
9