ReyDev committed on
Commit
35cce96
β€’
0 Parent(s):

🎉 feat: add Docker support, AI interaction, and chat interface

Browse files

🐳 chore: add Dockerfile and .dockerignore for Docker support

🤖 feat(ai.py): add AI interaction with anthropic API

💬 feat(app.py): add chat interface with AI using gradio

🔧 chore(makefile): add makefile for easy start command

🔒 chore(.gitignore): add .gitignore to ignore unnecessary files

📚 chore(const.py): add constants for models, token lengths, and prompts

✨ feat: add pyproject.toml and settings.py for project configuration

📝 docs(pyproject.toml): define project metadata and dependencies using poetry

🔧 chore(settings.py): add settings file to manage environment variables

Files changed (10) hide show
  1. .dockerignore +19 -0
  2. .gitignore +19 -0
  3. Dockerfile +47 -0
  4. ai.py +38 -0
  5. app.py +79 -0
  6. const.py +28 -0
  7. makefile +2 -0
  8. poetry.lock +0 -0
  9. pyproject.toml +20 -0
  10. settings.py +9 -0
.dockerignore ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__
3
+ *.py[cod]
4
+ *.pyo
5
+ *.pyd
6
+
7
+ # Directories
8
+ __pycache__
9
+
10
+ # Files
11
+ .DS_Store
12
+
13
+ # secret
14
+ .env
15
+
16
+ flagged
17
+ gradio_cached_examples
18
+
19
+ .venv
.gitignore ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__
3
+ *.py[cod]
4
+ *.pyo
5
+ *.pyd
6
+
7
+ # Directories
8
+ __pycache__
9
+
10
+ # Files
11
+ .DS_Store
12
+
13
+ # secret
14
+ .env
15
+
16
+ flagged
17
+ gradio_cached_examples
18
+
19
+ .venv
Dockerfile ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
FROM python:3.10

# python
ENV PYTHONUNBUFFERED=1 \
    # prevents python creating .pyc files
    PYTHONDONTWRITEBYTECODE=1 \
    \
    # pip
    PIP_NO_CACHE_DIR=off \
    PIP_DISABLE_PIP_VERSION_CHECK=on \
    PIP_DEFAULT_TIMEOUT=100 \
    \
    # poetry
    # https://python-poetry.org/docs/configuration/#using-environment-variables
    POETRY_VERSION=1.3.2 \
    # make poetry install to this location
    POETRY_HOME="/opt/poetry" \
    # poetry is only used to build the wheel; the app is installed into the
    # explicit /venv below, so poetry must NOT create its own virtualenv
    POETRY_VIRTUALENVS_IN_PROJECT=false \
    # do not ask any interactive question
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false

# dedicated virtualenv the final CMD runs from (its bin/ goes first on PATH)
ENV VENV_PATH="/venv"
ENV PATH="$VENV_PATH/bin:$PATH"

WORKDIR /app

# rust toolchain — needed to compile wheels that ship Rust extensions
RUN apt-get update
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
RUN rustc --version

RUN pip install wheel
RUN apt-get update && apt-get upgrade -y && apt-get install netcat-traditional -y && apt-get install wkhtmltopdf -y
RUN pip install --upgrade poetry
RUN python -m venv /venv
RUN /venv/bin/pip install --upgrade pip wheel setuptools setuptools_rust

# project sources (single copy — the original had a second redundant
# `COPY . .` after the build step)
COPY . .

# build the project wheel with poetry and install it into /venv
RUN poetry build && \
    /venv/bin/pip install --upgrade pip wheel setuptools && \
    /venv/bin/pip install dist/*.whl

CMD ["python", "app.py"]
ai.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import anthropic
import os
from dotenv import load_dotenv

# Load variables from a local .env file BEFORE importing settings below,
# so settings.ANTHROPIC_API_KEY can pick them up from os.environ.
load_dotenv()

from settings import settings

# Module-level Anthropic clients shared by every AnthropicCustom instance.
# The blocking client uses a short timeout; the streaming one a longer one.
syncClient = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY,
                                 timeout=5)
asyncClient = anthropic.AsyncAnthropic(api_key=settings.ANTHROPIC_API_KEY,
                                       timeout=60)
class AnthropicCustom():
    """Per-request wrapper around the module-level Anthropic clients.

    Holds the request parameters (model, token budget, prompt) and exposes
    a blocking and a streaming completion call.
    """

    def __init__(self, api_key, model, max_tokens=1000, prompt=""):
        self.model = model
        self.max_tokens = max_tokens
        self.prompt = prompt
        # Prefer a key already present in the environment; otherwise export
        # the caller-supplied one. (Bug fix: the original assigned the env
        # value to the local `api_key` parameter — a dead store — leaving
        # self.api_key stale when the env var was set.)
        if os.environ.get('ANTHROPIC_API_KEY') is not None:
            self.api_key = os.environ.get('ANTHROPIC_API_KEY')
        else:
            self.api_key = api_key
            os.environ['ANTHROPIC_API_KEY'] = api_key

    def get_anthropic_response(self):
        """Return a complete (non-streaming) completion for self.prompt."""
        response = syncClient.completions.create(
            prompt=self.prompt,
            model=self.model,
            max_tokens_to_sample=self.max_tokens,
        )
        return response.completion

    async def get_anthropic_response_async(self):
        """Stream a completion, yielding each incremental text chunk."""
        async for line in await asyncClient.completions.create(
            prompt=self.prompt,
            model=self.model,
            max_tokens_to_sample=self.max_tokens,
            stop_sequences=[anthropic.HUMAN_PROMPT, ],
            stream=True,
        ):
            yield line.completion
app.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import anthropic
import gradio as gr
from gradio.components import Dropdown, Checkbox, Textbox, IOComponent
import re
from ai import AnthropicCustom
from const import ClaudeModels, ModelTokenLength, Prompts


# Define a global variable for the conversation history.
# Shared, process-wide state used by the handlers below when "Memory" is on.
conversation_history = ""
async def interact_with_ai(user_question, token, model, token_length, prompt, prompt_input, memory):
    """Gradio handler for the form-style tab: stream an answer as markdown.

    Yields the accumulated response text after each streamed chunk. When
    `memory` is truthy, prior turns are injected into the prompt template
    and the finished exchange is appended to the shared history.
    """
    global conversation_history

    # Fill the selected template; include history only when memory is on.
    history_for_prompt = conversation_history if memory else ""
    prompt = Prompts[prompt].value.format(memory=history_for_prompt, question=user_question)

    # If the user edited the persona text, splice it into the template.
    # (Guarded: re.search can return None if the template lacks the anchor.)
    persona = re.search(r'Human: (.*?) \n\nConversations:', prompt)
    if persona is not None and prompt_input != persona.group(1):
        prompt = re.sub(r'Human: (.*?) \n\nConversations:', f'Human: {prompt_input} \n\nConversations:', prompt)

    # Create an instance of the custom class
    anth = AnthropicCustom(api_key=token, model=model, max_tokens=token_length, prompt=prompt)

    # Stream the response, yielding the text accumulated so far.
    response_accumulated = ""
    async for response in anth.get_anthropic_response_async():
        response_accumulated += response
        yield response_accumulated

    # Record the completed exchange exactly once. (Bug fix: the original
    # appended inside the streaming loop, duplicating every partial answer
    # in the history on each chunk.)
    conversation_history = f"{conversation_history} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
async def chat_with_ai(message, history, token, model, token_length, prompt, prompt_input, memory,):
    """Gradio ChatInterface handler: stream an answer for `message`.

    `history` is Gradio's list of (user, assistant) pairs for this chat.
    """
    if memory:
        # Build the prompt history from this chat's `history` pairs.
        # (Bug fix: the original appended the pairs to the global
        # conversation_history — duplicating all old turns on every call —
        # and then formatted the template with the raw `history` list
        # instead of the string it had just built.)
        history_for_prompt = ""
        for user_question, response_accumulated in history:
            history_for_prompt = f"{history_for_prompt} {anthropic.HUMAN_PROMPT} {user_question} {anthropic.AI_PROMPT} {response_accumulated}"
        prompt = Prompts[prompt].value.format(memory=history_for_prompt, question=message)
    else:
        prompt = Prompts[prompt].value.format(memory="", question=message)

    # If the user edited the persona text, splice it into the template.
    # (Guarded: re.search can return None if the template lacks the anchor.)
    persona = re.search(r'Human: (.*?) \n\nConversations:', prompt)
    if persona is not None and prompt_input != persona.group(1):
        prompt = re.sub(r'Human: (.*?) \n\nConversations:', f'Human: {prompt_input} \n\nConversations:', prompt)

    # Create an instance of the custom class
    anth = AnthropicCustom(api_key=token, model=model, max_tokens=token_length, prompt=prompt)

    # Stream the response, yielding the text accumulated so far.
    response_accumulated = ""
    async for response in anth.get_anthropic_response_async():
        response_accumulated += response
        yield response_accumulated
# Prompt selector plus an editable copy of the selected prompt's persona text
# (extracted with the same 'Human: ... \n\nConversations:' anchor the handlers use).
promptDropdown:IOComponent = Dropdown(choices=list(Prompts.__members__.keys()),label="Prompt",value=list(Prompts.__members__.keys())[0])
prompt_input :IOComponent = Textbox(label="Custom Prompt", placeholder="Enter a custom prompt here", lines=3, value=re.search(r'Human: (.*?) \n\nConversations:', Prompts[promptDropdown.value].value).group(1), )


# Form-style tab: one-shot question box with streaming markdown output.
iface = gr.Interface(fn=interact_with_ai,
                     flagging_options=["Inappropriate", "Disrespectful", "Spam"],
                     allow_flagging='auto',
                     title="Claude Space",
                     inputs=[Textbox(label="Question", placeholder="Enter a question here"),Textbox(label="Token", placeholder="Enter a token here",type='password'),Dropdown(choices=[model.value for model in ClaudeModels],label="Model",value=[model.value for model in ClaudeModels][0]),Dropdown(choices=[token.value for token in ModelTokenLength],label="Token Length",value= [token.value for token in ModelTokenLength][0]),promptDropdown,prompt_input,Checkbox(label="Memory", value=False)],
                     outputs="markdown",
                     cache_examples=True,
                     )

# Fresh component instances for the chat tab.
# NOTE(review): presumably rebound because gradio components cannot be shared
# between two interfaces — confirm before deduplicating.
promptDropdown:IOComponent = Dropdown(choices=list(Prompts.__members__.keys()),label="Prompt",value=list(Prompts.__members__.keys())[0])
prompt_input :IOComponent = Textbox(label="Custom Prompt", placeholder="Enter a custom prompt here", lines=3, value=re.search(r'Human: (.*?) \n\nConversations:', Prompts[promptDropdown.value].value).group(1), )

# Chat-style tab; memory defaults to True here (False on the form tab).
cface = gr.ChatInterface(fn=chat_with_ai,additional_inputs=[Textbox(label="Token", placeholder="Enter a token here",type='password'),Dropdown(choices=[model.value for model in ClaudeModels],label="Model",value=[model.value for model in ClaudeModels][0]),Dropdown(choices=[token.value for token in ModelTokenLength],label="Token Length",value= [token.value for token in ModelTokenLength][0]),promptDropdown,prompt_input,Checkbox(label="Memory", value=True)])




if __name__ == "__main__":
    # Combine both tabs and serve with a request queue on all interfaces.
    gd = gr.TabbedInterface([iface, cface], tab_names=["Claude Space", "Claude Chat"],title="Claude Space")
    gd.queue(concurrency_count=75, max_size=100).launch(debug=True, share=False,server_name='0.0.0.0', server_port=7864)
const.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+ import anthropic
3
+
4
+
class ClaudeModels(str, enum.Enum):
    """Claude model identifiers accepted by the Anthropic completions API."""
    Instant1_1: str = "claude-instant-1"
    # Alias preserving the original misspelled name so existing references
    # (ClaudeModels.Inatant1_1) keep working; same value -> enum alias,
    # so iteration still yields four members.
    Inatant1_1: str = "claude-instant-1"
    Instant1_2: str = "claude-instant-1.2"
    Instant1_3: str = "claude-instant-1.3"
    Claude2: str = "claude-2"
class ModelTokenLength(int, enum.Enum):
    """Selectable `max_tokens_to_sample` budgets for the UI dropdown.

    Bug fix: the original subclassed `str`, which made the mixin coerce every
    value to a string (e.g. "10"), so the token budget handed to the API was
    not numeric. Subclassing `int` keeps the values as real ints.
    """
    ten: int = 10
    twenty_five: int = 25
    fifty: int = 50
    hundred: int = 100
    five_hundred: int = 500
    one_k: int = 1000
    five_k: int = 5000
    ten_k: int = 10000
    twenty_k: int = 20000
    fifty_k: int = 50000
    hundred_k: int = 100000
class Prompts(str, enum.Enum):
    """Prompt templates with {memory} and {question} placeholders.

    The 'Human: ... \\n\\nConversations:' framing is load-bearing: the app
    extracts and replaces the persona sentence with a regex anchored on it,
    so only the persona wording may change, never the anchors.
    """
    # Typos fixed ("a AI" -> "an AI", "Listin" -> "Listen to the"); the
    # regex anchors and format placeholders are unchanged.
    general: str = "\n\nHuman: You're an AI bot who loves to gossip. Listen to the user's query and answer in markdown using Conversations. \n\nConversations: {memory} \n\nQuery: {question} \n\nAssistant:"
makefile ADDED
@@ -0,0 +1,2 @@
 
 
 
# Convenience target: `make start` launches the Gradio app locally.
start:
	python app.py
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
[tool.poetry]
name = "claude-space"
version = "0.1.0"
description = ""
authors = ["Shreyam Maity <[email protected]>"]
license = "MIT"
readme = "README.md"
# NOTE(review): the committed modules (ai.py, app.py, const.py, settings.py)
# live at the repository root; `poetry build` will look for a claude_space/
# package directory — confirm it exists, otherwise the Docker `poetry build`
# step fails.
packages = [{include = "claude_space"}]

# Runtime dependencies used by ai.py (anthropic, python-dotenv) and app.py (gradio).
[tool.poetry.dependencies]
python = "^3.10"
requests = "^2.31.0"
gradio = "^3.41.2"
anthropic = "^0.3.10"
python-dotenv = "^1.0.0"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
settings.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
import os


class Settings:
    """Process-wide configuration read from environment variables."""

    # Resolved once at import time; None when the variable is unset,
    # so callers should handle a missing key.
    ANTHROPIC_API_KEY: str = os.getenv('ANTHROPIC_API_KEY')


# Shared singleton used elsewhere via `from settings import settings`.
settings = Settings()