saq1b committed on
Commit
cde68dd
·
verified ·
1 Parent(s): b6c0814

Upload 7 files

Browse files
Files changed (7) hide show
  1. .dockerignore +4 -0
  2. Dockerfile +40 -0
  3. app.tsx +133 -0
  4. main.py +84 -0
  5. package.json +33 -0
  6. requirements.txt +4 -0
  7. start.sh +14 -0
.dockerignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ node_modules
2
+ .next
3
+ .git
4
+ .env
Dockerfile ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use an official Node.js runtime as a parent image
FROM node:21

# Set the working directory in the container
WORKDIR /app

# Install Python 3 and create an isolated virtualenv.
# node:21 is based on Debian bookworm, whose system Python is marked
# "externally managed" (PEP 668), so a bare `pip3 install` fails; a venv
# avoids that and keeps backend deps out of the system site-packages.
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3 \
    python3-venv \
    && rm -rf /var/lib/apt/lists/* \
    && python3 -m venv /opt/venv

# Put the venv first on PATH so `pip` and `uvicorn` resolve to it,
# both below and inside start.sh at runtime.
ENV PATH="/opt/venv/bin:$PATH"

# Copy package.json first so the npm layer is cached until it changes
COPY package.json ./

# Install frontend dependencies
RUN npm install

# Copy the rest of the application code
COPY . .

# Install backend dependencies (into the venv)
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Build the Next.js app
RUN npm run build

# Make port 8000 (FastAPI) available to the world outside this container
EXPOSE 8000

# Set execute permissions for the start script, then switch to a
# non-root user that owns the app tree (Next.js/uvicorn need to write
# runtime artifacts under /app).
RUN chmod +x start.sh \
    && useradd -m appuser \
    && chown -R appuser:appuser /app
USER appuser

# Run the script when the container launches
CMD ["./start.sh"]
app.tsx ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
'use client'

import React, { useState, useEffect, useRef } from 'react'
import { Button } from "@/components/ui/button"
import { Input } from "@/components/ui/input"
import { Textarea } from "@/components/ui/textarea"
import { Card, CardContent } from "@/components/ui/card"
import { ChevronDown, ChevronUp, Send } from 'lucide-react'

/**
 * Two-phase AI chat UI: streams a "thinking" pass from /think, then streams
 * the final answer from /chat, rendering both incrementally.
 */
export default function AIChat() {
  const [messages, setMessages] = useState([])
  const [input, setInput] = useState('')
  const [apiKey, setApiKey] = useState('')
  const [thinking, setThinking] = useState('')
  const [isThinkingVisible, setIsThinkingVisible] = useState(false)
  const chatEndRef = useRef(null)

  // Keep the newest message scrolled into view.
  useEffect(() => {
    chatEndRef.current?.scrollIntoView({ behavior: 'smooth' })
  }, [messages])

  /**
   * Drains a streaming fetch body, calling onContent with the full
   * accumulated text after each chunk. Returns the final text.
   */
  const readStream = async (body, onContent) => {
    const reader = body.getReader()
    // One decoder reused with { stream: true } so multi-byte characters
    // split across chunk boundaries decode correctly (a fresh
    // TextDecoder per chunk would corrupt them).
    const decoder = new TextDecoder()
    let content = ''
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      content += decoder.decode(value, { stream: true })
      onContent(content)
    }
    return content
  }

  const handleSubmit = async (e) => {
    e.preventDefault()
    if (!input.trim()) return

    const newMessage = { role: 'user', content: input }
    const history = [...messages, newMessage]
    setMessages(history)
    setInput('')

    try {
      // Phase 1: stream the model's thinking process.
      const thinkResponse = await fetch('/think', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ messages: history, api_key: apiKey }),
      })
      if (!thinkResponse.ok) throw new Error('Think request failed')
      const thinkingContent = await readStream(thinkResponse.body, setThinking)

      // Phase 2: stream the final answer, conditioned on the thinking.
      const chatResponse = await fetch('/chat', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          messages: [...history, { role: 'assistant', content: thinkingContent }],
          api_key: apiKey,
        }),
      })
      if (!chatResponse.ok) throw new Error('Chat request failed')

      // Append an empty assistant placeholder first, then replace it as
      // chunks arrive. Without the placeholder, `slice(0, -1)` on the
      // first chunk would delete the user's own message instead of
      // updating an assistant one.
      setMessages(prev => [...prev, { role: 'assistant', content: '' }])
      await readStream(chatResponse.body, (content) => {
        setMessages(prev => [...prev.slice(0, -1), { role: 'assistant', content }])
      })
    } catch (error) {
      console.error('Error:', error)
      setMessages(prev => [...prev, { role: 'assistant', content: 'An error occurred. Please try again.' }])
    }
  }

  return (
    <div className="min-h-screen bg-gray-900 text-white p-4 flex flex-col">
      <Card className="flex-grow overflow-auto mb-4 bg-gray-800 bg-opacity-50 backdrop-filter backdrop-blur-lg">
        <CardContent className="p-4">
          {messages.map((message, index) => (
            <div key={index} className={`mb-4 ${message.role === 'user' ? 'text-right' : 'text-left'}`}>
              <div className={`inline-block p-2 rounded-lg ${message.role === 'user' ? 'bg-blue-600' : 'bg-gray-700'}`}>
                {message.content}
              </div>
              {message.role === 'assistant' && thinking && (
                <div className="mt-2">
                  <Button
                    variant="ghost"
                    size="sm"
                    onClick={() => setIsThinkingVisible(!isThinkingVisible)}
                    className="text-xs text-gray-400"
                  >
                    {isThinkingVisible ? <ChevronUp className="h-4 w-4" /> : <ChevronDown className="h-4 w-4" />}
                    Thinking Process
                  </Button>
                  {isThinkingVisible && (
                    <div className="mt-2 text-xs text-gray-400 bg-gray-800 p-2 rounded">
                      {thinking.split('\n').map((line, i) => {
                        if (line.startsWith('<thinking>') || line.startsWith('<step>')) {
                          return <p key={i}>{line.replace(/<\/?thinking>|<\/?step>/g, '')}</p>
                        }
                        return null
                      })}
                    </div>
                  )}
                </div>
              )}
            </div>
          ))}
          <div ref={chatEndRef} />
        </CardContent>
      </Card>
      <form onSubmit={handleSubmit} className="flex gap-2">
        <Input
          type="text"
          value={apiKey}
          onChange={(e) => setApiKey(e.target.value)}
          placeholder="Enter API Key (optional)"
          className="flex-grow bg-gray-800 text-white"
        />
        <Textarea
          value={input}
          onChange={(e) => setInput(e.target.value)}
          placeholder="Type your message..."
          className="flex-grow bg-gray-800 text-white"
        />
        <Button type="submit" className="bg-blue-600 hover:bg-blue-700">
          <Send className="h-4 w-4" />
        </Button>
      </form>
    </div>
  )
}
main.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from typing import Dict, List, Optional

import google.generativeai as genai
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from google.generativeai.types import HarmBlockThreshold, HarmCategory
from pydantic import BaseModel
9
+
10
+ load_dotenv()
11
+
12
+ app = FastAPI()
13
+
14
class ChatMessage(BaseModel):
    """One conversation turn as sent by the frontend."""
    # Role string from the client; the UI uses "user" and "assistant".
    role: str
    content: str
17
+
18
class ChatRequest(BaseModel):
    """Request body shared by the /think and /chat endpoints.

    ``api_key`` is optional: when omitted or null, the endpoints fall back
    to the GOOGLE_API_KEY environment variable.
    """
    messages: List[ChatMessage]
    # Optional[str] instead of the original `str = None`: the field
    # legitimately accepts null/absent, and a `str`-typed field with a
    # None default is a typing contradiction.
    api_key: Optional[str] = None
21
+
22
+ chat_history = []
23
+
24
async def stream_response(response):
    """Yield the text of each chunk from a streaming Gemini response.

    ``generate_content_async(..., stream=True)`` returns an *async*
    iterable, so it must be consumed with ``async for``; the original
    synchronous ``for`` would raise TypeError at request time.
    """
    async for chunk in response:
        yield chunk.text
27
+
28
@app.post("/think")
async def think(request: ChatRequest):
    """Stream the model's step-by-step "thinking" pass as plain text.

    Raises HTTP 400 when no API key is supplied in the request or via the
    GOOGLE_API_KEY environment variable.
    """
    api_key = request.api_key or os.getenv("GOOGLE_API_KEY")
    if not api_key:
        raise HTTPException(status_code=400, detail="API key is required")

    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",
        generation_config={
            "temperature": 1,
            "max_output_tokens": 8192,
            # The frontend parses <thinking>/<step> tags out of the raw
            # stream, so request plain text; "application/json" would force
            # JSON mode and conflict with the XML-tag instruction below.
            "response_mime_type": "text/plain",
        },
        safety_settings={
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE
        },
        system_instruction="You are a helpful AI assistant. Analyze the user's request and provide your thoughts in XML tags. Use <thinking> for overall analysis, <step> for individual steps, and <count> for remaining steps."
    )

    # The Gemini API only accepts the roles "user" and "model"; the
    # frontend sends OpenAI-style "assistant", so map it here.
    messages = [
        {"role": "model" if m.role == "assistant" else m.role, "parts": [m.content]}
        for m in request.messages
    ]
    response = await model.generate_content_async(messages, stream=True)
    return StreamingResponse(stream_response(response), media_type="text/plain")
54
+
55
@app.post("/chat")
async def chat(request: ChatRequest):
    """Stream the final answer, conditioned on the prior thinking turn.

    Raises HTTP 400 when no API key is supplied in the request or via the
    GOOGLE_API_KEY environment variable.
    """
    api_key = request.api_key or os.getenv("GOOGLE_API_KEY")
    if not api_key:
        raise HTTPException(status_code=400, detail="API key is required")

    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash",
        generation_config={
            "temperature": 1,
            "max_output_tokens": 8192,
            # The frontend renders this stream as raw text, so request
            # plain text; "application/json" would force JSON mode.
            "response_mime_type": "text/plain",
        },
        safety_settings={
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE
        },
        system_instruction="You are a helpful AI assistant. Provide a concise summary of the thinking process and give a final answer."
    )

    # The Gemini API only accepts the roles "user" and "model"; the
    # frontend sends OpenAI-style "assistant", so map it here.
    messages = [
        {"role": "model" if m.role == "assistant" else m.role, "parts": [m.content]}
        for m in request.messages
    ]
    response = await model.generate_content_async(messages, stream=True)
    return StreamingResponse(stream_response(response), media_type="text/plain")
81
+
82
if __name__ == "__main__":
    # Development entry point; in the Docker image the server is
    # launched by start.sh instead.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
package.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "ai-chat",
3
+ "version": "1.0.0",
4
+ "private": true,
5
+ "scripts": {
6
+ "dev": "next dev",
7
+ "build": "next build",
8
+ "start": "next start",
9
+ "lint": "next lint"
10
+ },
11
+ "dependencies": {
12
+ "next": "13.4.19",
13
+ "react": "18.2.0",
14
+ "react-dom": "18.2.0",
15
+ "@radix-ui/react-slot": "^1.0.2",
16
+ "class-variance-authority": "^0.7.0",
17
+ "clsx": "^2.0.0",
18
+ "lucide-react": "^0.263.1",
19
+ "tailwind-merge": "^1.14.0",
20
+ "tailwindcss-animate": "^1.0.6"
21
+ },
22
+ "devDependencies": {
23
+ "@types/node": "20.5.7",
24
+ "@types/react": "18.2.21",
25
+ "@types/react-dom": "18.2.7",
26
+ "autoprefixer": "10.4.15",
27
+ "eslint": "8.48.0",
28
+ "eslint-config-next": "13.4.19",
29
+ "postcss": "8.4.29",
30
+ "tailwindcss": "3.3.3",
31
+ "typescript": "5.2.2"
32
+ }
33
+ }
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ google-generativeai
4
+ python-dotenv
start.sh ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
set -e

# Start the Next.js frontend in the background
npm start &

# Start the FastAPI backend in the background too. (Originally uvicorn
# ran in the foreground, so the `wait -n` below was unreachable until
# uvicorn itself exited and a dead frontend went unnoticed.)
uvicorn main:app --host 0.0.0.0 --port 8000 &

# Block until either process exits (bash 4.3+)
wait -n

# Exit with status of process that exited first, taking the container down
exit $?