devme committed (verified)
Commit ac86223 · Parent: 4990916

Update src/index.js

Files changed (1):
  src/index.js +159 -159
src/index.js CHANGED
@@ -1,159 +1,159 @@
- const express = require('express')
- const bodyParser = require('body-parser')
- const axios = require('axios')
- const app = express()
- const uuid = require('uuid')
- const { uploadImage } = require('./image')
-
- app.use(bodyParser.json())
- // Set the upload size limit
- app.use(bodyParser.json({ limit: '50mb' }))
- app.use(bodyParser.urlencoded({ limit: '50mb', extended: true }))
-
- app.get('/v1/models', async (req, res) => {
-   try {
-     const response = await axios.get('https://chat.qwenlm.ai/api/models',
-       {
-         headers: {
-           "Authorization": req.headers.authorization,
-           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
-         }
-       })
-     res.json(response.data)
-   } catch (error) {
-     res.status(403)
-       .json({
-         error: "Please provide a valid Authorization token"
-       })
-   }
- })
-
- app.post('/v1/chat/completions', async (req, res) => {
-   if (!req.headers.authorization) {
-     return res.status(403)
-       .json({
-         error: "Please provide a valid Authorization token"
-       })
-   }
-   const messages = req.body.messages
-   let imageId = null
-   const isImageMessage = Array.isArray(messages[messages.length - 1].content) === true && messages[messages.length - 1].content[1].image_url.url
-   if (isImageMessage) {
-     imageId = await uploadImage(messages[messages.length - 1].content[1].image_url.url, req.headers.authorization)
-     messages[messages.length - 1].content[1] = {
-       "type": "image",
-       "image": imageId
-     }
-   }
-
-   const stream = req.body.stream
-
-   const notStreamResponse = async (response) => {
-     const bodyTemplate = {
-       "id": `chatcmpl-${uuid.v4()}`,
-       "object": "chat.completion",
-       "created": new Date().getTime(),
-       "model": req.body.model,
-       "choices": [
-         {
-           "index": 0,
-           "message": {
-             "role": "assistant",
-             "content": response.choices[0].message.content
-           },
-           "finish_reason": "stop"
-         }
-       ],
-       "usage": {
-         "prompt_tokens": JSON.stringify(req.body.messages).length,
-         "completion_tokens": response.choices[0].message.content.length,
-         "total_tokens": JSON.stringify(req.body.messages).length + response.choices[0].message.content.length
-       }
-     }
-     res.json(bodyTemplate)
-   }
-
-   const streamResponse = async (response) => {
-     const id = uuid.v4()
-     const decoder = new TextDecoder('utf-8')
-     let backContent = null
-     response.on('data', (chunk) => {
-       const decodeText = decoder.decode(chunk)
-
-       const lists = decodeText.split('\n').filter(item => item.trim() !== '')
-       for (const item of lists) {
-         const decodeJson = JSON.parse(item.replace(/^data: /, ''))
-         let content = decodeJson.choices[0].delta.content
-
-         if (backContent === null) {
-           backContent = content
-         } else {
-           const temp = content
-           content = content.replace(backContent, '')
-           backContent = temp
-         }
-
-         const StreamTemplate = {
-           "id": `chatcmpl-${id}`,
-           "object": "chat.completion.chunk",
-           "created": new Date().getTime(),
-           "choices": [
-             {
-               "index": 0,
-               "delta": {
-                 "content": content
-               },
-               "finish_reason": null
-             }
-           ]
-         }
-         res.write(`data: ${JSON.stringify(StreamTemplate)}\n\n`)
-       }
-     })
-
-     response.on('end', () => {
-       res.write(`data: [DONE]\n\n`)
-       res.end()
-     })
-
-   }
-
-   try {
-     const response = await axios.post('https://chat.qwenlm.ai/api/chat/completions',
-       req.body,
-       {
-         headers: {
-           "Authorization": req.headers.authorization,
-           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
-         },
-         responseType: stream ? 'stream' : 'json'
-       }
-     )
-     if (stream) {
-       res.set({
-         'Content-Type': 'text/event-stream',
-         'Cache-Control': 'no-cache',
-         'Connection': 'keep-alive',
-       })
-       streamResponse(response.data)
-     } else {
-       res.set({
-         'Content-Type': 'application/json',
-       })
-       notStreamResponse(response.data)
-     }
-
-   } catch (error) {
-     console.log(error)
-     res.status(500)
-       .json({
-         error: "On strike, not working anymore!!!"
-       })
-   }
-
- })
-
- app.listen(3000, () => {
-   console.log('Server is running on port 3000')
- })
-
 
+ const express = require('express')
+ const bodyParser = require('body-parser')
+ const axios = require('axios')
+ const app = express()
+ const uuid = require('uuid')
+ const { uploadImage } = require('./image')
+
+ app.use(bodyParser.json())
+ // Set the upload size limit
+ app.use(bodyParser.json({ limit: '50mb' }))
+ app.use(bodyParser.urlencoded({ limit: '50mb', extended: true }))
+
+ app.get('/api/v1/models', async (req, res) => {
+   try {
+     const response = await axios.get('https://chat.qwenlm.ai/api/models',
+       {
+         headers: {
+           "Authorization": req.headers.authorization,
+           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
+         }
+       })
+     res.json(response.data)
+   } catch (error) {
+     res.status(403)
+       .json({
+         error: "Please provide a valid Authorization token"
+       })
+   }
+ })
+
+ app.post('/api/v1/chat/completions', async (req, res) => {
+   if (!req.headers.authorization) {
+     return res.status(403)
+       .json({
+         error: "Please provide a valid Authorization token"
+       })
+   }
+   const messages = req.body.messages
+   let imageId = null
+   const isImageMessage = Array.isArray(messages[messages.length - 1].content) === true && messages[messages.length - 1].content[1].image_url.url
+   if (isImageMessage) {
+     imageId = await uploadImage(messages[messages.length - 1].content[1].image_url.url, req.headers.authorization)
+     messages[messages.length - 1].content[1] = {
+       "type": "image",
+       "image": imageId
+     }
+   }
+
+   const stream = req.body.stream
+
+   const notStreamResponse = async (response) => {
+     const bodyTemplate = {
+       "id": `chatcmpl-${uuid.v4()}`,
+       "object": "chat.completion",
+       "created": new Date().getTime(),
+       "model": req.body.model,
+       "choices": [
+         {
+           "index": 0,
+           "message": {
+             "role": "assistant",
+             "content": response.choices[0].message.content
+           },
+           "finish_reason": "stop"
+         }
+       ],
+       "usage": {
+         "prompt_tokens": JSON.stringify(req.body.messages).length,
+         "completion_tokens": response.choices[0].message.content.length,
+         "total_tokens": JSON.stringify(req.body.messages).length + response.choices[0].message.content.length
+       }
+     }
+     res.json(bodyTemplate)
+   }
+
+   const streamResponse = async (response) => {
+     const id = uuid.v4()
+     const decoder = new TextDecoder('utf-8')
+     let backContent = null
+     response.on('data', (chunk) => {
+       const decodeText = decoder.decode(chunk)
+
+       const lists = decodeText.split('\n').filter(item => item.trim() !== '')
+       for (const item of lists) {
+         const decodeJson = JSON.parse(item.replace(/^data: /, ''))
+         let content = decodeJson.choices[0].delta.content
+
+         if (backContent === null) {
+           backContent = content
+         } else {
+           const temp = content
+           content = content.replace(backContent, '')
+           backContent = temp
+         }
+
+         const StreamTemplate = {
+           "id": `chatcmpl-${id}`,
+           "object": "chat.completion.chunk",
+           "created": new Date().getTime(),
+           "choices": [
+             {
+               "index": 0,
+               "delta": {
+                 "content": content
+               },
+               "finish_reason": null
+             }
+           ]
+         }
+         res.write(`data: ${JSON.stringify(StreamTemplate)}\n\n`)
+       }
+     })
+
+     response.on('end', () => {
+       res.write(`data: [DONE]\n\n`)
+       res.end()
+     })
+
+   }
+
+   try {
+     const response = await axios.post('https://chat.qwenlm.ai/api/chat/completions',
+       req.body,
+       {
+         headers: {
+           "Authorization": req.headers.authorization,
+           "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
+         },
+         responseType: stream ? 'stream' : 'json'
+       }
+     )
+     if (stream) {
+       res.set({
+         'Content-Type': 'text/event-stream',
+         'Cache-Control': 'no-cache',
+         'Connection': 'keep-alive',
+       })
+       streamResponse(response.data)
+     } else {
+       res.set({
+         'Content-Type': 'application/json',
+       })
+       notStreamResponse(response.data)
+     }
+
+   } catch (error) {
+     console.log(error)
+     res.status(500)
+       .json({
+         error: "On strike, not working anymore!!!"
+       })
+   }
+
+ })
+
+ app.listen(3000, () => {
+   console.log('Server is running on port 3000')
+ })
+
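
For reference, a minimal client-side sketch of calling the renamed routes. It assumes the server above is running locally on port 3000; QWEN_TOKEN is a placeholder environment variable for the upstream Authorization token, and the model id is only an example.

const axios = require('axios')

const BASE_URL = 'http://localhost:3000'            // server started by src/index.js
const auth = `Bearer ${process.env.QWEN_TOKEN}`     // placeholder upstream token

async function main() {
  // List models through the renamed /api/v1/models route.
  const models = await axios.get(`${BASE_URL}/api/v1/models`, {
    headers: { Authorization: auth }
  })
  console.log(models.data)

  // Non-streaming chat completion through the renamed /api/v1/chat/completions route.
  const chat = await axios.post(`${BASE_URL}/api/v1/chat/completions`, {
    model: 'qwen-max-latest',                       // example model id
    stream: false,
    messages: [{ role: 'user', content: 'Hello' }]
  }, {
    headers: { Authorization: auth }
  })
  console.log(chat.data.choices[0].message.content)
}

main().catch(console.error)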