import datetime
import hashlib
import os
import re
from io import BytesIO
from urllib.parse import unquote, urlparse

import add_qwen_libs  # NOQA
import jsonlines
from b2sdk.v2 import B2Api, InMemoryAccountInfo
from qwen_agent.log import logger
from qwen_agent.utils.doc_parser import parse_doc, parse_html_bs
from qwen_agent.utils.utils import print_traceback, save_text_to_file
from qwen_server.schema import Record


class B2Manager:
    """Thin wrapper around a Backblaze B2 bucket for per-user document storage."""

    def __init__(self):
        info = InMemoryAccountInfo()
        b2_api = B2Api(info)
        application_key_id = os.environ.get("b2_key_id")
        application_key = os.environ.get("b2_key")
        b2_api.authorize_account("production", application_key_id, application_key)
        self.b2_bucket = b2_api.get_bucket_by_name(os.environ.get("b2_bucket_name"))
        self.b2_api = b2_api
        self.file_name = None

    def gen_file_name(self, access_token, url):
        # Key files by access token plus the MD5 of the URL: '<token>/<md5 hex>'.
        url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
        self.file_name = f"{access_token}/{url_md5}"
    def get(self):
        # Download the current version of the file into memory and return its bytes.
        in_memory_file = BytesIO()
        self.b2_bucket.download_file_by_name(self.file_name).save(in_memory_file)
        in_memory_file.seek(0)
        return in_memory_file.read()

    def upsert(self, file_path, content):
        # Upload the local file under the current key; B2 keeps the newest
        # version as the visible one. `content` is accepted for interface
        # compatibility but the bytes are always read from `file_path`.
        with open(file_path, 'rb') as file_data:
            self.b2_bucket.upload_bytes(file_data.read(), self.file_name)

    def delete(self):
        # Hiding the file makes it invisible to listing and download without
        # destroying older versions; to hard-delete, remove every file
        # version instead of hiding.
        file_version_info = self.b2_bucket.get_file_info_by_name(self.file_name)
        self.b2_bucket.hide_file(file_version_info.file_name)
    def list_files(self, access_token):
        # List the visible files under this user's prefix.
        files = []
        for file_version_info, _folder_name in self.b2_bucket.ls(folder_to_list=f"{access_token}/", latest_only=True):
            # The upload timestamp is in milliseconds, so we divide by 1000 to convert it to seconds.
            upload_timestamp = datetime.datetime.fromtimestamp(file_version_info.upload_timestamp / 1000.0)
            files.append(f"File Name: {file_version_info.file_name}, "
                         f"\nUpload timestamp: {upload_timestamp}, "
                         f"\nMetadata: {file_version_info.file_info}")
        return files
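

# A minimal usage sketch (assumes the b2_key_id, b2_key and b2_bucket_name
# environment variables are set; the token, URL and local path below are
# hypothetical placeholders, not part of the real application flow):
#
#     b2 = B2Manager()
#     b2.gen_file_name('some-token', 'https://example.com/report.pdf')
#     b2.upsert('/tmp/report.jsonl', content=None)  # upload the local cache file
#     raw_bytes = b2.get()                          # read it back as bytes
#     b2.delete()                                   # hide the latest version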


def is_local_path(path):
    # Anything that is not an http(s) URL is treated as a local path.
    return not path.startswith(('https://', 'http://'))


def sanitize_chrome_file_path(file_path: str) -> str:
    # For Linux and macOS.
    if os.path.exists(file_path):
        return file_path

    # For native Windows, drop the leading '/' in '/C:/'.
    win_path = file_path
    if win_path.startswith('/'):
        win_path = win_path[1:]
    if os.path.exists(win_path):
        return win_path

    # For Windows + WSL, map 'C:/...' to '/mnt/c/...'.
    if re.match(r'^[A-Za-z]:/', win_path):
        wsl_path = f'/mnt/{win_path[0].lower()}/{win_path[3:]}'
        if os.path.exists(wsl_path):
            return wsl_path

    # For native Windows, replace / with \.
    win_path = win_path.replace('/', '\\')
    if os.path.exists(win_path):
        return win_path

    return file_path
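
# For example (illustrative, hypothetical paths): a Chrome-style file URL path
# such as '/C:/Users/me/doc.pdf' resolves to 'C:/Users/me/doc.pdf' on native
# Windows, to '/mnt/c/Users/me/doc.pdf' under WSL, or to 'C:\\Users\\me\\doc.pdf'
# after slash normalization; the first candidate that actually exists is returned.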


def extract_and_cache_document(data, cache_file, cache_root, access_token):
    logger.info('Starting to cache pages...')
    if data['url'].split('.')[-1].lower() in ['pdf', 'docx', 'pptx']:
        date1 = datetime.datetime.now()
        # Append a placeholder record marking this document as processing.
        new_record = Record(url=data['url'],
                            time='',
                            type=data['type'],
                            raw=[],
                            extract='',
                            access_token=access_token,
                            topic='',
                            checked=False,
                            session=[]).to_dict()
        with jsonlines.open(cache_file, mode='a') as writer:
            writer.write(new_record)
        if data['url'].startswith(('https://', 'http://')):
            pdf_path = data['url']
        else:
            parsed_url = urlparse(data['url'])
            pdf_path = unquote(parsed_url.path)
            pdf_path = sanitize_chrome_file_path(pdf_path)
        try:
            pdf_content = parse_doc(pdf_path)
        except Exception:
            print_traceback()
            # Parsing failed: delete the processing record before giving up.
            lines = []
            if os.path.exists(cache_file):
                for line in jsonlines.open(cache_file):
                    if line['access_token'] == access_token and line['url'] != data['url']:
                        lines.append(line)
            with jsonlines.open(cache_file, mode='w') as writer:
                for new_line in lines:
                    writer.write(new_line)
            return 'failed'
        date2 = datetime.datetime.now()
        logger.info(f'Parsing pdf time: {date2 - date1}')
        data['content'] = pdf_content
        data['type'] = 'pdf'
        # Use the file stem (no directories, no extension) as the extract title.
        extract = pdf_path.split('/')[-1].split('\\')[-1].split('.')[0]
    elif data['content'] and data['type'] == 'html':
        # Append a placeholder record marking this page as processing.
        new_record = Record(url=data['url'],
                            time='',
                            type=data['type'],
                            raw=[],
                            extract='',
                            access_token=access_token,
                            topic='',
                            checked=False,
                            session=[]).to_dict()
        with jsonlines.open(cache_file, mode='a') as writer:
            writer.write(new_record)
        try:
            tmp_html_file = os.path.join(cache_root, 'tmp.html')
            save_text_to_file(tmp_html_file, data['content'])
            data['content'] = parse_html_bs(tmp_html_file)
            extract = data['content'][0]['metadata']['title']
        except Exception:
            print_traceback()
            # Parsing failed: delete the processing record and give up,
            # mirroring the PDF branch above.
            lines = []
            if os.path.exists(cache_file):
                for line in jsonlines.open(cache_file):
                    if line['access_token'] == access_token and line['url'] != data['url']:
                        lines.append(line)
            with jsonlines.open(cache_file, mode='w') as writer:
                for new_line in lines:
                    writer.write(new_line)
            return 'failed'
    else:
        logger.error(
            "Only the following file types are supported: ['.html', '.pdf', '.docx', '.pptx']"
        )
        raise NotImplementedError
    today = datetime.date.today()
    new_record = Record(url=data['url'],
                        time=str(today),
                        type=data['type'],
                        raw=data['content'],
                        extract=extract,
                        access_token=access_token,
                        topic='',
                        checked=True,
                        session=[])
    # Rewrite the cache file, replacing this user's placeholder record for
    # the URL with the finished record.
    lines = []
    if os.path.exists(cache_file):
        for line in jsonlines.open(cache_file):
            if line['access_token'] == access_token and line['url'] != data['url']:
                lines.append(line)
    lines.append(new_record.to_dict())  # cache
    with jsonlines.open(cache_file, mode='w') as writer:
        for new_line in lines:
            writer.write(new_line)
    response = 'Cached'
    return response
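

# A minimal calling sketch (hypothetical values throughout; in the real server
# `data` comes from the browser extension and `access_token` identifies the user):
#
#     data = {'url': 'https://example.com/paper.pdf', 'content': '', 'type': 'pdf'}
#     status = extract_and_cache_document(data,
#                                         cache_file='workspace/cache.jsonl',
#                                         cache_root='workspace',
#                                         access_token='some-token')
#     assert status in ('Cached', 'failed')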