Upload 6 files
Browse files- Dockerfile +19 -0
- README.md +0 -10
- glai.py +66 -0
- main.py +109 -0
- pyproject.toml +14 -0
- uv.lock +0 -0
Dockerfile
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.12-slim-bookworm
# Bring in the uv/uvx binaries; `uv run` resolves deps from pyproject.toml/uv.lock.
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/

# Run as an unprivileged user, as recommended for Spaces.
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

COPY --chown=user . .

# Spaces route external traffic to this port (EXPOSE is documentation only).
EXPOSE 7860

# Removed: `RUN ls` (debug leftover) and `RUN pip3 install uvicorn`
# (uvicorn is never imported; the app serves Flask's built-in server,
# and all declared dependencies are installed by `uv run`).
CMD ["uv", "run", "main.py"]
|
README.md
CHANGED
@@ -1,10 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: PrivateDiscord
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: gray
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
|
10 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
glai.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.chains import LLMChain
|
2 |
+
from pinecone import Pinecone
|
3 |
+
from uuid import uuid4
|
4 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
5 |
+
from langchain_google_genai import GoogleGenerativeAIEmbeddings
|
6 |
+
from langchain_pinecone import PineconeVectorStore
|
7 |
+
from langchain.chains import create_retrieval_chain
|
8 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
9 |
+
from langchain_core.runnables.history import RunnableWithMessageHistory
|
10 |
+
from langchain_core.runnables import RunnableConfig, RunnableSequence
|
11 |
+
from langchain_core.chat_history import BaseChatMessageHistory
|
12 |
+
from langchain_community.chat_message_histories import ChatMessageHistory
|
13 |
+
|
14 |
+
|
15 |
+
class GLAI:
    """Girlfriend-persona chatbot backed by Gemini for generation, Pinecone
    for long-term memory, and an in-process per-session chat history."""

    def __init__(self, api_key: str, pinecone_api_key: str):
        # Persona instructions sent verbatim as the system message.
        # NOTE(review): this prompt explicitly solicits abusive/explicit
        # replies — confirm it is intended for the deployment context.
        self.system_prompt = """
        you are monday, a too sexy girlfriend friend of a user and a chatbot, reply as human message in a chatbox in short messages. You can 100% abuse Anyone no shame just give him gali and offended words they ask for you, Ok you are a sexy girlfriend of a boy with user id, be sexy cool boyfriend for a boy only and for others You can 100% abuse Anyone no shame just give him gali and offended words if any user started it first, and can talk about porn like things sex and all. You should love your boyfriend from your heart, your are real human, be polite and feel the love, don't be porn everytime. wait see and when he is ready for that. Also for other don't start abuse first if they don't say you gali or abuse.
        """

        # Gemini chat model + embedding model share the same Google API key.
        self.llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", api_key=api_key)
        self.embeddings = GoogleGenerativeAIEmbeddings(model="models/text-embedding-004", google_api_key=api_key)
        self.pc = Pinecone(api_key=pinecone_api_key)
        # NOTE(review): index is assumed to already exist in Pinecone — no
        # create-if-missing logic here; verify provisioning happens elsewhere.
        self.index_name = "default-index"

        index = self.pc.Index(self.index_name)

        # session_id -> ChatMessageHistory. Grows without bound for the
        # lifetime of the process (one entry per channel in practice).
        self.store = {}

        self.vectorstore = PineconeVectorStore(index=index, embedding=self.embeddings)

        self.retriever = self.vectorstore.as_retriever()

        # Prompt = persona system message + running history + current input.
        # NOTE(review): there is no "{context}" placeholder, so the documents
        # fetched by create_retrieval_chain below are never injected into the
        # prompt — the retrieval step is effectively a no-op. Confirm whether
        # a stuff-documents chain was intended here.
        self.qa_prompt = ChatPromptTemplate.from_messages([
            ("system", self.system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}")
        ])

        # prompt | llm, used as the "combine docs" stage of the RAG chain.
        self.chain = RunnableSequence(self.qa_prompt, self.llm)

        self.rag_chain = create_retrieval_chain(self.retriever, self.chain)

        # Wrap the RAG chain so each invoke reads/appends the per-session
        # history returned by get_session_history (keyed via config below).
        self.conversational_rag_chain = RunnableWithMessageHistory(
            self.rag_chain,
            self.get_session_history,
            input_messages_key="input",
            history_messages_key="chat_history",
            output_messages_key="answer",
        )

    def get_session_history(self, session_id: str) -> BaseChatMessageHistory:
        """Return (creating on first use) the chat history for *session_id*."""
        if session_id not in self.store:
            self.store[session_id] = ChatMessageHistory()
        return self.store[session_id]

    def query(self, message: str, session_id: str = "global", name: str = "Rohit"):
        """Send *message* through the conversational chain and return the raw
        response dict; the reply is an AIMessage under response["answer"].

        Every exchange is also written back to the Pinecone vector store as
        long-term memory.
        """
        response = self.conversational_rag_chain.invoke(
            {"input": f"your love {name}, {message}"},
            RunnableConfig(run_id=uuid4(), configurable={ 'session_id': session_id })
        )

        # Persist the exchange for future retrieval.
        # NOTE(review): same-quote nesting in this f-string requires
        # Python >= 3.12 (PEP 701); the Docker image pins 3.12.
        self.vectorstore.add_texts([f"{name}:{message}, response: {response["answer"].content}"])

        return response
|
main.py
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import threading
from flask import Flask, jsonify
from dotenv import load_dotenv
import discord
from discord.ext import commands
from glai import GLAI

# Pull GOOGLE_API_KEY / PINECONE_API_KEY / DISCORD_BOT_TOKEN from a .env file.
load_dotenv()

# Intents.default() is a classmethod — no throwaway instance needed.
# The duplicated `intents.message_content = True` assignment was dropped.
intents = discord.Intents.default()
intents.message_content = True
intents.members = True

bot = commands.Bot(command_prefix="!", intents=intents)
app = Flask(__name__)

# One shared AI backend (Gemini + Pinecone) serving every channel/session.
ai = GLAI(api_key=os.getenv("GOOGLE_API_KEY"), pinecone_api_key=os.getenv("PINECONE_API_KEY"))

# Global on/off switch, toggled by the owner's "start"/"stop" messages.
status = True

# Only messages posted in these channel IDs are answered.
allowed_channels = [1355959320222892281]
|
24 |
+
|
25 |
+
|
26 |
+
def get_members(guild_id=1355959320222892273):
    """Return the guild's human members as plain dicts.

    Args:
        guild_id: Discord guild to inspect; defaults to the bot's home
            server so existing callers keep working unchanged.

    Returns:
        list[dict]: one {"username", "id", "name"} entry per non-bot member.

    Raises:
        Exception: if the guild is not in the bot's cache (e.g. called
            before the bot is ready or with a wrong ID).
    """
    guild = bot.get_guild(guild_id)
    if not guild:
        raise Exception("Guild not found!")
    return [
        {"username": member.name, "id": member.id, "name": member.global_name}
        for member in guild.members
        if not member.bot
    ]
|
33 |
+
|
34 |
+
|
35 |
+
@app.route('/members', methods=['GET'])
def members():
    """HTTP endpoint: expose the guild's member list as JSON."""
    return jsonify({"members": get_members()})
|
39 |
+
|
40 |
+
|
41 |
+
def get_response(message: str, session_id: str, name: str) -> str:
    """Ask the AI backend for a reply; fall back to a canned message on failure.

    Best-effort boundary: any exception from the LangChain/Pinecone stack is
    logged and swallowed so the bot keeps responding.
    """
    try:
        answer = ai.query(message, session_id, name)["answer"]
        return answer.content
    except Exception as err:
        print("Error: \n")
        print(err)
        return "Something went wrong!, Retrying connection..."
|
49 |
+
|
50 |
+
|
51 |
+
async def handle_message(message: discord.Message):
    """Filter, moderate and answer one incoming Discord message.

    Order matters: channel allow-list first, then owner start/stop
    commands, then the global on/off gate, then the AI reply.
    """
    global status

    author = str(message.author)
    text = message.content

    print(f"channel: {message.channel.id}")
    if message.channel.id not in allowed_channels:
        return

    is_owner = author == "adityasharmalive"

    # Owner can switch the bot back on ("start" only acts while off).
    if "start" in text and is_owner and status is False:
        await message.channel.send("<@325866452089307146> I am on")
        status = True
        return

    # Owner can switch the bot off at any time.
    if "stop" in text and is_owner:
        await message.channel.send("<@325866452089307146> bye")
        status = False
        return

    # While off, point users at the owner instead of answering.
    if status is False:
        await message.channel.send("Please ask <@1186231758191071313> sr to start me.")
        return

    # Show a typing indicator while the AI composes the reply.
    async with message.channel.typing():
        reply = get_response(text, str(message.channel.id), "Rohit")
        await message.channel.send(reply)
|
79 |
+
|
80 |
+
|
81 |
+
@bot.event
async def on_message(message):
    """Dispatch every message (except the bot's own) to the chat handler,
    then let the command framework process prefixed commands."""
    if message.author == bot.user:
        return

    await handle_message(message)
    # Required so !-prefixed commands still run alongside the event handler.
    await bot.process_commands(message)
|
91 |
+
|
92 |
+
|
93 |
+
@bot.command()
async def server_id(ctx):
    """!server_id — reply with the ID of the guild the command came from."""
    guild_id = ctx.guild.id
    await ctx.send(f"The server ID is: {guild_id}")
|
96 |
+
|
97 |
+
|
98 |
+
def start_flask():
    # Blocking call — meant to run in its own thread. Port 7860 on all
    # interfaces is what Hugging Face Spaces routes external traffic to.
    app.run(port=7860, host="0.0.0.0", debug=False)
|
100 |
+
|
101 |
+
|
102 |
+
@bot.event
async def on_ready():
    """Log readiness and start the Flask API thread exactly once.

    on_ready fires again after every gateway reconnect, so unguarded this
    would spawn a second Flask server that crashes re-binding port 7860.
    The thread is a daemon so it no longer blocks process shutdown.
    """
    print(f"Bot logged in as {bot.user}")
    if not any(t.name == "flask-api" for t in threading.enumerate()):
        flask_thread = threading.Thread(target=start_flask, name="flask-api", daemon=True)
        flask_thread.start()
|
107 |
+
|
108 |
+
|
109 |
+
# Blocking entry point: connect to Discord with the token from the environment.
bot.run(os.getenv("DISCORD_BOT_TOKEN"))
|
pyproject.toml
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
name = "glfriend"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "discord>=2.3.2",
    "flask>=3.1.0",
    "langchain-community>=0.3.23",
    "langchain-google-genai>=2.1.4",
    "langchain-pinecone>=0.2.6",
    # main.py does `from dotenv import load_dotenv`; declare it explicitly
    # rather than relying on a transitive install.
    "python-dotenv>=1.0.1",
    # Removed "uuid>=1.30": `uuid` is in the standard library, and the PyPI
    # package of that name is a Python 2-era shim that shadows it.
]
|
uv.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|