code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
from io import BufferedReader
from typing import Optional
from .types import Component, ICatalystMail, ICatalystProject
from .exceptions import CatalystMailError
from . import validator
from ._http_client import AuthorizedHttpClient
from ._constants import RequestMethod, CredentialUser, Components
# Allowed keys of a mail object mapped to the Python type each value must
# have. Email._generate_data validates incoming dicts against this table
# before building the multipart form payload.
_MAIL_OBJ_DICT = {
    'from_email': str,
    'to_email': list,
    'subject': str,
    'content': str,
    'cc': list,
    'bcc': list,
    'reply_to': list,
    'html_mode': bool,
    'display_name': str,
    'attachments': list
}
class ICatalystMailResp(ICatalystMail):
    """Shape returned by Email.send_mail: the mail fields plus the
    optional details of the project that sent the mail."""
    project_details: Optional[ICatalystProject]
class Email(Component):
    """Catalyst Email component: sends mails through the '/email/send' API."""

    def __init__(self, app) -> None:
        self._app = app
        self._requester = AuthorizedHttpClient(self._app)

    def get_component_name(self):
        return Components.MAIL

    def send_mail(self, mail_obj: ICatalystMail) -> ICatalystMailResp:
        """Validate *mail_obj*, post it as multipart form data and return
        the 'data' part of the API response.

        Raises:
            CatalystMailError: if mail_obj is not a non-empty dict or a
                field has an unexpected type.
        """
        validator.is_non_empty_dict(mail_obj, 'mail_obj', CatalystMailError)
        mail_data = self._generate_data(mail_obj)
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/email/send',
            files=mail_data,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    @staticmethod
    def _generate_data(mail_obj):
        """Convert a mail dict into a list of multipart form tuples.

        Lists (cc, bcc, ...) are joined with commas, booleans are lowered
        to 'true'/'false', and each attachment is appended as an open
        binary file.
        """
        data = []
        for key, data_type in _MAIL_OBJ_DICT.items():
            if key not in mail_obj:
                continue
            value = mail_obj[key]
            if not isinstance(value, data_type):
                raise CatalystMailError(
                    'INVALID_MAIL_OBJECT',
                    f'{key} must be an instance of {data_type}'
                )
            if data_type is list:
                if key == 'attachments':
                    for attachment in value:
                        Email._is_valid_attachment(attachment)
                        data.append((key, attachment))
                else:
                    data.append((key, (None, ','.join(value))))
            elif data_type is bool:
                data.append((key, (None, str(value).lower())))
            else:
                data.append((key, (None, value)))
        return data

    @staticmethod
    def _is_valid_attachment(attachment):
        """Ensure an attachment is an open binary file."""
        if not isinstance(attachment, BufferedReader):
            raise CatalystMailError(
                'INVALID_MAIL_OBJECT',
                # fixed: message previously said "a instance of BufferReader",
                # naming a class that does not exist
                'Attachments must be an instance of BufferedReader'
            )
import os
import json
import threading
from typing import Dict, Union
from .types import ICatalystConfig
from . import _constants as APIConstants
from .credentials import Credential
from .exceptions import CatalystAppError, CatalystCredentialError
from .cache import Cache
from .cron import Cron
from .datastore import Datastore
from .filestore import Filestore
from .zcql import Zcql
from .email import Email
from .search import Search
from .functions import Functions
from .authentication import Authentication
from .push_notification import PushNotification
from .zia import Zia
from .circuit import Circuit
from .connection import Connection
from .smart_browz import SmartBrowz
# Env variable that may hold the app options as a JSON object string.
CATALYST_OPTIONS_ENV_KEY = 'CATALYST_OPTIONS'
# Mandatory option keys mapped to the tuple of accepted value types.
CONFIG_MANDATORIES = {
    APIConstants.PROJECT_ID: (int, str),
    APIConstants.PROJECT_KEY: (int, str),
    APIConstants.PROJECT_DOMAIN: (str,)
}
# Fallback used when no environment is supplied in the options.
DEFAULT_ENVIRONMENT = "Development"
class CatalystAppOptions:
    """Validated configuration holder for a CatalystApp.

    Options may be passed explicitly or, when None, loaded from the
    CATALYST_OPTIONS env variable (a JSON object string).
    """

    def __init__(self, options: Dict):
        if options is None:
            options = self._load_options_from_env()
        if not isinstance(options, dict):
            raise CatalystAppError(
                'INVALID_APP_OPTIONS',
                f'Illegal app option type - {type(options)}. App options must be an instance of dict'
            )
        self._config = CatalystAppOptions.validate_options(options)

    @property
    def config(self):
        return self._config

    @staticmethod
    def _load_options_from_env():
        """Load and parse the app options JSON from the environment.

        Raises:
            CatalystAppError: if the env key is missing or the value is
                not parsable as a dict.
        """
        options_json = os.getenv(CATALYST_OPTIONS_ENV_KEY)
        if options_json is None:
            # previously json.loads(None) raised an opaque TypeError here
            raise CatalystAppError(
                'INVALID_APP_OPTIONS',
                f"App options not found in env key '{CATALYST_OPTIONS_ENV_KEY}'"
            )
        try:
            options = json.loads(options_json)
        except ValueError:
            options = None
        if not isinstance(options, dict):
            raise CatalystAppError(
                'INVALID_APP_OPTIONS',
                'App options present in env is invalid.'
                'App options must be stored in env as json string and it must be parsable as dict',
                options
            )
        return options

    @staticmethod
    def validate_options(options: Dict):
        """Check mandatory keys and types; fill optional defaults in place."""
        for key, accepted_types in CONFIG_MANDATORIES.items():
            if not options.get(key):
                raise CatalystAppError(
                    'INVALID_APP_OPTIONS',
                    (f"Either the key '{key}' is missing or "
                     f"value provided for the {key} is None in app options")
                )
            if not isinstance(options[key], accepted_types):
                # renamed loop var: the original shadowed the builtin 'type'
                raise CatalystAppError(
                    'INVALID_APP_OPTIONS',
                    f'{key} must be an instance of '
                    f'{" or ".join(t.__name__ for t in accepted_types)}'
                )
        # If environment is empty, set default environment as Development
        if not options.get(APIConstants.ENVIRONMENT):
            options.update({APIConstants.ENVIRONMENT: DEFAULT_ENVIRONMENT})
        if not options.get(APIConstants.PROJECT_SECRET_KEY):
            options.update({APIConstants.PROJECT_SECRET_KEY: None})
        return options
class CatalystApp:
    """A configured Catalyst application: holds the credential, the
    validated config and a lazily-built, lock-guarded registry of
    service components."""
    def __init__(
        self,
        credential: Credential,
        options: Dict,
        name: str
    ):
        if not name or not isinstance(name, str):
            raise CatalystAppError(
                'INVALID_APP_NAME',
                'App name must be a non-empty string',
                name
            )
        self._name = name
        if not isinstance(credential, Credential):
            raise CatalystCredentialError(
                'INVALID CREDENTIAL',
                f'Illegal credential type - {type(credential)}.'
                'credential must be initialized with valid Credential instance.'
            )
        self._credential = credential
        self._options = CatalystAppOptions(options)
        # guards lazy creation of service instances across threads
        self._lock = threading.RLock()
        self._services = {}
    @property
    def name(self):
        # Name this app was registered under.
        return self._name
    @property
    def credential(self):
        return self._credential
    @property
    def config(self) -> ICatalystConfig:
        # Validated app options (see CatalystAppOptions).
        return self._options.config
    @property
    def services(self):
        # Mapping of service name -> instantiated component.
        return self._services
    @property
    def scope(self) -> str:
        """
        Returns: Scope of the app if initialized with scope, else None
        """
        if hasattr(self._credential, '_strict_scope'):
            if self._credential._strict_scope: # pylint: disable=protected-access
                return self._credential.current_user()
        return None
    def cache(self) -> Cache:
        return self._ensure_service('cache', Cache)
    def cron(self) -> Cron:
        return self._ensure_service('cron', Cron)
    def datastore(self) -> Datastore:
        return self._ensure_service('datastore', Datastore)
    def filestore(self) -> Filestore:
        return self._ensure_service('filestore', Filestore)
    def zcql(self) -> Zcql:
        return self._ensure_service('zcql', Zcql)
    def email(self) -> Email:
        return self._ensure_service('mail', Email)
    def search(self) -> Search:
        return self._ensure_service('search', Search)
    def functions(self) -> Functions:
        return self._ensure_service('functions', Functions)
    def authentication(self) -> Authentication:
        return self._ensure_service('user-management', Authentication)
    def push_notification(self) -> PushNotification:
        return self._ensure_service('push-notification', PushNotification)
    def zia(self) -> Zia:
        return self._ensure_service('zia', Zia)
    def circuit(self) -> Circuit:
        return self._ensure_service('circuit', Circuit)
    def connection(self, properties: Union[str, Dict[str, Dict[str, str]]]) -> Connection:
        # override=True forces a fresh Connection per call, since it
        # depends on the supplied properties.
        return self._ensure_service('connection', Connection, override=True, properties=properties)
    def smart_browz(self) -> SmartBrowz:
        return self._ensure_service('SmartBrowz', SmartBrowz)
    def _ensure_service(self, service_name: str, initializer, **kwargs):
        # Lazily instantiate and cache a service; rebuild when 'override'
        # is truthy. NOTE(review): all kwargs — including 'override'
        # itself — are forwarded to the initializer; confirm the
        # initializers (e.g. Connection) accept that.
        with self._lock:
            if service_name not in self._services or kwargs.get('override'):
                self._services[service_name] = initializer(self, **kwargs)
            return self._services[service_name]
from io import BufferedReader
from typing import Any, Dict, List, Optional, TypedDict, Union
from . import validator
from .types import Component
from ._http_client import AuthorizedHttpClient
from .exceptions import CatalystZiaError
from ._constants import Components, RequestMethod, CredentialUser
from .types.zia import (
ICatalystZiaKeywordExtraction,
ICatalystZiaBarcode,
ICatalystZiaFace,
ICatalystZiaFaceComparison,
ICatalystZiaModeration,
ICatalystZiaOCR,
ICatalystZiaObject,
ICatalystZiaSentimentAnalysis
)
# Optional request options for the Zia ML endpoints. Every key is optional
# (total=False), matching the fact that each endpoint accepts options=None.
ICatalystOCROptions = TypedDict('ICatalystOCROptions', {
    'language': Optional[str],
    'model_type': Optional[str]
}, total=False)
ICatalystBarCodeOptions = TypedDict('ICatalystBarCodeOptions', {
    'format': Optional[str]
}, total=False)
# fixed: the runtime name previously read 'ICatalystImageModerationOpt'
# (missing the trailing 's'), mismatching the variable it is bound to
ICatalystImageModerationOpts = TypedDict('ICatalystImageModerationOpts', {
    'mode': Optional[str]
}, total=False)
ICatalystFaceAnalysisOptions = TypedDict('ICatalystFaceAnalysisOptions', {
    'mode': Optional[str],
    'emotion': Optional[bool],
    'age': Optional[bool],
    'gender': Optional[bool]
}, total=False)
class Zia(Component):
    """Catalyst Zia component: image-recognition, OCR and text-analytics
    ML services exposed under the '/ml/*' endpoints."""

    def __init__(self, app):
        self._app = app
        self._requester = AuthorizedHttpClient(app)

    def get_component_name(self):
        return Components.ZIA

    def _image_request(self, path, files, data=None):
        """POST image file(s) plus optional form fields to a Zia endpoint
        and return the 'data' part of the JSON response."""
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=path,
            files=files,
            data=data,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def _text_request(self, path, req_json):
        """POST a JSON payload to a Zia text-analytics endpoint and return
        the 'data' part of the JSON response."""
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=path,
            json=req_json,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def detect_object(
        self,
        file: BufferedReader
    ) -> ICatalystZiaObject:
        """Detect objects in the given image."""
        self._is_valid_file_type(file)
        return self._image_request('/ml/detect-object', {'image': file})

    def extract_optical_characters(
        self,
        file: BufferedReader,
        options: ICatalystOCROptions = None
    ) -> ICatalystZiaOCR:
        """Run OCR on the given image; *options* may set language/model_type."""
        self._is_valid_file_type(file)
        return self._image_request('/ml/ocr', {'image': file}, data=options)

    def extract_aadhaar_characters(
        self,
        front_img: BufferedReader,
        back_img: BufferedReader,
        language: str
    ) -> ICatalystZiaOCR:
        """Run Aadhaar-specific OCR on the card's front and back images."""
        self._is_valid_file_type(front_img, back_img)
        validator.is_non_empty_string(language, 'language', CatalystZiaError)
        return self._image_request(
            '/ml/ocr',
            {
                'aadhaar_front': front_img,
                'aadhaar_back': back_img
            },
            data={
                'language': language,
                'model_type': 'AADHAAR'
            }
        )

    def scan_barcode(
        self,
        image: BufferedReader,
        options: ICatalystBarCodeOptions = None
    ) -> ICatalystZiaBarcode:
        """Decode a barcode from the given image."""
        self._is_valid_file_type(image)
        return self._image_request('/ml/barcode', {'image': image}, data=options)

    def moderate_image(
        self,
        image: BufferedReader,
        options: ICatalystImageModerationOpts = None
    ) -> ICatalystZiaModeration:
        """Run content moderation on the given image."""
        self._is_valid_file_type(image)
        return self._image_request('/ml/imagemoderation', {'image': image}, data=options)

    def analyse_face(
        self,
        image: BufferedReader,
        options: ICatalystFaceAnalysisOptions = None
    ) -> ICatalystZiaFace:
        """Run face analytics on the given image.

        Boolean option values are lowered to 'true'/'false' strings in
        place before they are sent as form fields.
        """
        self._is_valid_file_type(image)
        if isinstance(options, dict):
            options.update(
                {k: str(v).lower() for k, v in options.items() if isinstance(v, bool)}
            )
        return self._image_request('/ml/faceanalytics', {'image': image}, data=options)

    def compare_face(
        self,
        source_img: BufferedReader,
        query_img: BufferedReader
    ) -> ICatalystZiaFaceComparison:
        """Compare the faces found in the source and query images."""
        self._is_valid_file_type(source_img, query_img)
        return self._image_request(
            '/ml/facecomparison',
            {
                'source_image': source_img,
                'query_image': query_img
            }
        )

    def auto_ml(
        self,
        model_id: Union[int, str],
        data: Dict[str, Any] = None
    ):
        """Invoke an AutoML model by id with the given input record."""
        validator.is_non_empty_string_or_number(model_id, 'model_id', CatalystZiaError)
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=f'/ml/automl/model/{model_id}',
            json=data,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def get_sentiment_analysis(
        self,
        list_of_docs: List[str],
        keywords: Optional[List[str]] = None
    ) -> ICatalystZiaSentimentAnalysis:
        """Run sentiment analysis over the documents, optionally scoped
        to the given keywords."""
        validator.is_non_empty_list(list_of_docs, 'documents list', CatalystZiaError)
        if keywords:
            validator.is_non_empty_list(keywords, 'keywords', CatalystZiaError)
        return self._text_request(
            '/ml/text-analytics/sentiment-analysis',
            {
                'document': list_of_docs,
                'keywords': keywords
            }
        )

    def get_keyword_extraction(
        self,
        list_of_docs: List[str]
    ) -> ICatalystZiaKeywordExtraction:
        """Extract keywords from the documents."""
        validator.is_non_empty_list(list_of_docs, 'documents list', CatalystZiaError)
        return self._text_request(
            '/ml/text-analytics/keyword-extraction',
            {'document': list_of_docs}
        )

    def get_NER_prediction(  # pylint: disable=invalid-name
        self,
        list_of_docs: List[str]
    ) -> ICatalystZiaKeywordExtraction:
        """Run named-entity recognition over the documents."""
        validator.is_non_empty_list(list_of_docs, 'documents list', CatalystZiaError)
        return self._text_request(
            '/ml/text-analytics/ner',
            {'document': list_of_docs}
        )

    def get_text_analytics(
        self,
        list_of_docs: List[str],
        keywords: Optional[List[str]] = None
    ) -> ICatalystZiaSentimentAnalysis:
        """Run the combined text-analytics pipeline over the documents."""
        validator.is_non_empty_list(list_of_docs, 'documents list', CatalystZiaError)
        if keywords:
            validator.is_non_empty_list(keywords, 'keywords', CatalystZiaError)
        return self._text_request(
            '/ml/text-analytics',
            {
                'document': list_of_docs,
                'keywords': keywords
            }
        )

    @staticmethod
    def _is_valid_file_type(*files):
        """Ensure every argument is an open binary file."""
        for file in files:
            if not isinstance(file, BufferedReader):
                raise CatalystZiaError(
                    'Invalid-Argument',
                    # fixed: message previously read "a instance of BufferReader"
                    'File must be an instance of BufferedReader'
                )
from typing import Dict, Union
from .types import Component
from .exceptions import CatalystCircuitError
from . import validator
from ._http_client import AuthorizedHttpClient
from ._constants import RequestMethod, CredentialUser, Components
class Circuit(Component):
    """Catalyst Circuit component: start, poll and abort circuit runs."""

    def __init__(self, app) -> None:
        self._app = app
        self._requester = AuthorizedHttpClient(self._app)

    def get_component_name(self):
        return Components.CIRCUIT

    def execute(
        self,
        circuit_id: Union[int, str],
        name: str,
        inputs: Dict[str, str] = None
    ):
        """Start an execution of the circuit with the given name and inputs."""
        validator.is_non_empty_string_or_number(circuit_id, 'circuit_id', CatalystCircuitError)
        validator.is_non_empty_string(name, 'execution_name', CatalystCircuitError)
        payload = {'name': name, 'input': inputs or {}}
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=f'/circuit/{circuit_id}/execute',
            json=payload,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def status(
        self,
        circuit_id: Union[int, str],
        exec_id: Union[int, str]
    ):
        """Fetch the current state of a circuit execution."""
        return self._execution_call(RequestMethod.GET, circuit_id, exec_id)

    def abort(
        self,
        circuit_id: Union[int, str],
        exec_id: Union[int, str]
    ):
        """Abort a running circuit execution."""
        return self._execution_call(RequestMethod.DELETE, circuit_id, exec_id)

    def _execution_call(self, http_method, circuit_id, exec_id):
        # status/abort share identical validation and URL shape; only the
        # HTTP method differs.
        validator.is_non_empty_string_or_number(circuit_id, 'circuit_id', CatalystCircuitError)
        validator.is_non_empty_string_or_number(exec_id, 'execution_id', CatalystCircuitError)
        resp = self._requester.request(
            method=http_method,
            path=f'/circuit/{circuit_id}/execution/{exec_id}',
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')
from abc import ABC, abstractmethod
import json
import os
from time import time
from typing import Dict, List, Literal, TypedDict, Union
from ._thread_util import ZCThreadUtil
from . import _constants as APIConstants
from ._constants import (
CredentialUser,
RequestMethod,
CredentialType
)
from .exceptions import (
CatalystCredentialError,
CatalystAppError
)
# Env variable holding the default credentials as a JSON string.
_CATALYST_AUTH_ENV_KEY = 'CATALYST_AUTH'
# Keys that must all be present (and truthy) in a refresh-token dict.
_REFRESH_OBJ_KEYS = [
    APIConstants.CLIENT_ID,
    APIConstants.CLIENT_SECRET,
    APIConstants.REFRESH_TOKEN
]
# Accepted values for an app's credential scope.
_CATALYST_SCOPES = ['admin', 'user']
# Shape of the dict expected by RefreshTokenCredential.
ICatalystRefreshObj = TypedDict('ICatalystRefreshObj', {
    'client_id': str,
    'client_secret': str,
    'refresh_token': str
})
# Shape of the dict expected by AccessTokenCredential.
ICatalystTokenObj = TypedDict('ICatalystTokenObj', {
    'access_token': str
})
# Shape of the dict expected by TicketCredential.
ICatalystTicketObj = TypedDict('ICatalystTicketObj', {
    'ticket': str
})
# Credential class for all credentials
class Credential(ABC):
    """Abstract base for every credential type."""
    @abstractmethod
    def token(self):
        """Return the token value(s) used to authorize API requests."""
        pass
    def _switch_user(self, user=None): # pylint: disable=unused-argument
        # Non user-scoped credentials cannot switch users; no-op.
        return None
    def current_user(self):
        # Simple credentials always act as the admin user.
        return CredentialUser.ADMIN
    def current_user_type(self):
        return CredentialUser.ADMIN
class RefreshTokenCredential(Credential):
    """Credential that exchanges an OAuth refresh token for access tokens,
    caching each access token until it expires."""

    def __init__(self, refresh_obj: ICatalystRefreshObj):
        super().__init__()
        RefreshTokenCredential._validate_refresh_obj(refresh_obj)
        self._client_id = refresh_obj.get(APIConstants.CLIENT_ID)
        self._client_secret = refresh_obj.get(APIConstants.CLIENT_SECRET)
        self._refresh_token = refresh_obj.get(APIConstants.REFRESH_TOKEN)
        # cached token dict; 'expires_in' is rewritten to an absolute
        # epoch-seconds expiry once fetched
        self._cached_token: Dict[str, Union[str, int]] = None

    def token(self) -> str:
        """Return a valid access token, refreshing it when missing/expired.

        Raises:
            CatalystCredentialError: if the token endpoint responds without
                an access token.
        """
        if not self._cached_token or self._cached_token.get('expires_in') <= int(round(time())):
            from ._http_client import HttpClient  # pylint: disable=cyclic-import,import-outside-toplevel
            requester = HttpClient(base_url=APIConstants.ACCOUNTS_URL)
            post_data = {
                'grant_type': 'refresh_token',
                'refresh_token': self._refresh_token,
                'client_id': self._client_id,
                'client_secret': self._client_secret
            }
            resp = requester.request(
                method=RequestMethod.POST,
                path='/oauth/v2/token',
                data=post_data
            )
            data: Dict = resp.response_json
            if not data.get('access_token') or not data.get('expires_in'):
                # previously a bare `except:` swallowed this error and
                # re-raised a generic one, hiding the response details
                raise CatalystCredentialError(
                    'AUTHENTICATION_FAILURE',
                    'Unexpected response while fetching access token',
                    str(data)
                )
            # 'expires_in' is a lifetime in seconds; store the absolute
            # expiry. (The previous code multiplied by 1000, mixing
            # milliseconds into an epoch-seconds comparison, so expired
            # tokens stayed cached ~1000x longer than they were valid.)
            data.update({'expires_in': int(round(time())) + data.get('expires_in')})
            self._cached_token = data
        return self._cached_token.get('access_token')

    @staticmethod
    def _validate_refresh_obj(refresh_obj):
        """Ensure client_id, client_secret and refresh_token are present."""
        for key in _REFRESH_OBJ_KEYS:
            if key not in refresh_obj or not refresh_obj[key]:
                raise CatalystCredentialError(
                    'INVALID_CREDENTIAL',
                    f'Unable to get "{key}" in refresh_obj dict'
                )
class AccessTokenCredential(Credential):
    """Credential backed by a pre-acquired OAuth access token."""
    def __init__(self, token_obj: ICatalystTokenObj):
        super().__init__()
        self._token: str = _get_attr(token_obj, 'access_token')
    def token(self):
        return self._token
class TicketCredential(Credential):
    """Credential backed by a Zoho ticket value."""
    def __init__(self, ticket_obj: ICatalystTicketObj):
        super().__init__()
        self._token: str = _get_attr(ticket_obj, 'ticket')
    def token(self):
        return self._token
class CookieCredential(Credential):
    """Credential backed by a raw cookie string plus the CSRF token
    extracted from it."""
    def __init__(self, cookie_obj):
        super().__init__()
        cookie_str = _get_attr(cookie_obj, 'cookie')
        # reuse the CSRF token already cached on this request thread
        csrf_token: str = ZCThreadUtil().get_value(APIConstants.CSRF_TOKEN_COOKIE)
        if not csrf_token:
            # parse '<name>=<value>' pairs out of the raw cookie header
            cookies_list: List = cookie_str.split("; ")
            for cookie in cookies_list:
                splitted_cookie: List = cookie.split("=")
                if splitted_cookie[0] == APIConstants.CSRF_TOKEN_COOKIE:
                    csrf_token = splitted_cookie[1]
                    break
            # cache for subsequent lookups on the same thread
            ZCThreadUtil().put_value(APIConstants.CSRF_TOKEN_COOKIE, csrf_token)
        self._cookie = cookie_str
        self._csrf_header = APIConstants.CSRF_PARAM_PREFIX + csrf_token
    def token(self):
        # Returns (cookie header value, csrf header value).
        return self._cookie, self._csrf_header
class CatalystCredential(Credential):
    """Request-scoped credential assembled from thread-local values
    (set elsewhere via ZCThreadUtil), holding separate admin and user
    credentials and allowing switching between them unless the app was
    initialized with a strict scope."""
    def __init__(self, user: str = None):
        super().__init__()
        thread_obj = ZCThreadUtil()
        self._admin_cred: Union[AccessTokenCredential, TicketCredential] = None
        self._user_cred: Union[AccessTokenCredential, TicketCredential, CookieCredential] = None
        # raw credential values and their types from the request thread
        self._admin_token: str = thread_obj.get_value(APIConstants.ADMIN_CRED)
        self._user_token: str = thread_obj.get_value(APIConstants.CLIENT_CRED)
        self._cookie: str = thread_obj.get_value(APIConstants.COOKIE_CRED)
        self._admin_cred_type = thread_obj.get_value(APIConstants.ADMIN_CRED_TYPE)
        self._user_cred_type = thread_obj.get_value(APIConstants.CLIENT_CRED_TYPE)
        self._current_user = CredentialUser.USER
        self._strict_scope = False
        # whether the request's end user is itself an admin
        self._user_type: Literal['admin', 'user'] \
            = (CredentialUser.ADMIN
            if thread_obj.get_value(APIConstants.USER_TYPE) == CredentialUser.ADMIN
            else CredentialUser.USER)
        if self._is_valid_user(user):
            # explicit scope pins the credential to that user
            self._strict_scope = True
            self._current_user = user.lower()
        # Admin Credentials
        if self._admin_cred_type == CredentialType.ticket:
            self._admin_cred = TicketCredential({
                APIConstants.TICKET: self._admin_token
            })
        elif self._admin_cred_type == CredentialType.token:
            self._admin_cred = AccessTokenCredential({
                APIConstants.ACCESS_TOKEN: self._admin_token
            })
        else:
            raise CatalystCredentialError(
                'INVALID_CREDENTIAL',
                "Admin credential type is unknown"
            )
        # Client Credentials
        if not self._user_token and not self._cookie:
            raise CatalystCredentialError(
                'INVALID_CREDENTIAL',
                "User credentials missing"
            )
        if self._user_cred_type == CredentialType.token:
            self._user_cred = AccessTokenCredential({
                APIConstants.ACCESS_TOKEN: self._user_token
            })
        elif self._user_cred_type == CredentialType.ticket:
            self._user_cred = TicketCredential({
                APIConstants.TICKET: self._user_token
            })
        else:
            # no explicit type: fall back to cookie-based credentials
            self._user_cred = CookieCredential({
                APIConstants.COOKIE: self._cookie
            })
    def token(self):
        """Return (credential class name, token) for the current user."""
        if self._current_user == CredentialUser.ADMIN:
            return self._admin_cred.__class__.__name__, self._admin_cred.token()
        if self._current_user == CredentialUser.USER:
            if not self._user_cred:
                raise CatalystCredentialError(
                    'INVALID_CREDENTIAL',
                    'User credentials are not initialized'
                )
            # check if user credentials provided are admin credentials while following strict scope
            if self._strict_scope and self._user_type == CredentialUser.ADMIN:
                raise CatalystCredentialError(
                    'INVALID_CREDENTIAL',
                    'No user credentials present for catalyst app initialized in user scope'
                )
            return self._user_cred.__class__.__name__, self._user_cred.token()
        raise CatalystCredentialError(
            'INVALID_CREDENTIAL',
            'user provided is not recognized',
            self._current_user
        )
    def current_user(self):
        return self._current_user
    def current_user_type(self):
        # For the 'user' scope, report whether that user is really an admin.
        if self._current_user == CredentialUser.USER:
            return self._user_type
        return self._current_user
    def _switch_user(self, user=None):
        # Toggle (or set) the active scope; ignored under strict scope.
        if self._strict_scope:
            return self._current_user
        if not user:
            user = (CredentialUser.USER
                if self._current_user == CredentialUser.ADMIN
                else CredentialUser.ADMIN)
        self._current_user = user.lower()
        return self._current_user
    @staticmethod
    def _is_valid_user(user):
        # Returns False for None; raises for anything not in _CATALYST_SCOPES.
        if user is None:
            return False
        if not isinstance(user, str) or user.lower() not in _CATALYST_SCOPES:
            raise CatalystAppError(
                'INVALID SCOPE',
                "Scope must be either 'user' or 'admin'"
            )
        return True
class ApplicationDefaultCredential(Credential):
    """Credential loaded from the CATALYST_AUTH env variable, delegating
    to the concrete credential type implied by the keys found there."""
    def __init__(self):
        super().__init__()
        self._credential_obj = None
        self._credential: Union[
            AccessTokenCredential,
            TicketCredential,
            RefreshTokenCredential
        ] = None
        # load credentials from environment
        self._load_credential_from_env()
        if not self._credential_obj:
            raise CatalystCredentialError(
                'INVALID_CREDENTIAL',
                'There is no default credentials in env. Please provide valid credentials.'
            )
        if not isinstance(self._credential_obj, dict):
            raise CatalystCredentialError(
                'INVALID_CREDENTIAL',
                'Credentials present in env is invalid.'
                'Credentials must be stored in env as json string and it must be parsable as dict',
                self._credential_obj
            )
        # pick the concrete credential by which key the env dict carries
        if 'refresh_token' in self._credential_obj:
            self._credential = RefreshTokenCredential(self._credential_obj)
        elif 'access_token' in self._credential_obj:
            self._credential = AccessTokenCredential(self._credential_obj)
        elif 'ticket' in self._credential_obj:
            self._credential = TicketCredential(self._credential_obj)
        else:
            raise CatalystCredentialError(
                'INVALID_CREDENTIAL',
                'The given credential object does not contain proper credentials'
            )
    def _load_credential_from_env(self):
        # Read and json-parse CATALYST_AUTH; stores the parsed value.
        auth_json = os.getenv(_CATALYST_AUTH_ENV_KEY)
        if not isinstance(auth_json, str):
            raise CatalystCredentialError(
                'INVALID_CREDENTIAL',
                'Credentials present in env is invalid.'
                'Credentials must be stored in env as json string.'
            )
        auth_dict = json.loads(auth_json)
        self._credential_obj = auth_dict
    @property
    def credential_obj(self):
        # Raw dict parsed from the env variable.
        return self._credential_obj
    @property
    def credential(self):
        # The concrete credential instance chosen from the env dict.
        return self._credential
    def token(self):
        return self._credential.token()
def _get_attr(src: Dict, key: str):
if not isinstance(src, dict):
raise CatalystCredentialError(
'INVALID_CREDENTIAL',
f'Illegal credential obj type - {type(src)} is provided.'
'Credential obj must be a instance of dict.'
)
if key not in src or not src[key]:
raise CatalystCredentialError(
'INVALID_CREDENTIAL',
f"Unable to get '{key}' in credential dict"
)
return src[key] | zcatalyst-sdk | /zcatalyst_sdk-0.0.2rc3-py3-none-any.whl/zcatalyst_sdk/credentials.py | credentials.py |
from io import BufferedReader
import re
from typing import List
def is_valid_email(email):
    """Check whether *email* looks like an email address.

    Args:
        email: The value to check.
    Returns:
        bool: True when the value has the '<local>@<domain>' shape.
    """
    return re.fullmatch(r'^[^@]+@[^@]+$', email) is not None
def is_bool(value):
"""
validates the given value is a boolean
Args:
value: The value to validate.
Returns:
bool: Whether the value is a boolean or not.
"""
return isinstance(value, bool)
def _is_number(value):
"""
validates the given value is a number
Args:
value: The value to validate.
Returns:
bool: Whether the value is a number or not.
"""
try:
float(value)
return True
except (ValueError, TypeError):
return False
def _is_integer(value):
"""
validates the given value is a integer
Args:
value: The value to validate.
Returns:
bool: Whether the value is a integer or not.
"""
try:
int(str(value))
return True
except (ValueError, TypeError):
return False
def is_string(value):
"""
validates the given value is a string
Args:
value: The value to validate.
Returns:
bool: Whether the value is a string or not.
"""
return isinstance(value, str)
def is_list(value):
"""
validates the given value is a list
Args:
value: The value to validate.
Returns:
bool: Whether the value is a list or not.
"""
return isinstance(value, list)
def is_dict(value):
"""
validates the given value is a dict
Args:
value: The value to validate.
Returns:
bool: Whether the value is a dict or not.
"""
return isinstance(value, dict)
def is_set(value):
"""
validates the given value is a set
Args:
value: The value to validate.
Returns:
bool: Whether the value is a set or not.
"""
return isinstance(value, set)
def is_tuple(value):
"""
validates the given value is a tuple
Args:
value: The value to validate.
Returns:
bool: Whether the value is a tuple or not.
"""
return isinstance(value, tuple)
def is_buffered_reader(value):
"""
validates the given value is a buffered reader or not
Args:
value: The value to validate.
Returns:
bool: Whether the value is a buffered reader or not.
"""
return isinstance(value, BufferedReader)
def is_non_empty_string(
    value,
    attr_name: str = None,
    exception: Exception = None
):
    """Check that *value* is a non-empty string.

    When *exception* is given and the check fails, it is raised with an
    explanatory message instead of returning False.
    """
    if isinstance(value, str) and value:
        return True
    if exception:
        raise exception(
            'Invalid-Argument',
            f'Value provided for {attr_name} is expected to be a non-empty string.',
            value
        )
    return False
def is_non_empty_string_or_number(
    value,
    attr_name: str = None,
    exception: Exception = None
):
    """Check that *value* is a non-empty string or a number.

    When *exception* is given and the check fails, it is raised with an
    explanatory message instead of returning False.
    """
    if isinstance(value, str) and value:
        return True
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        pass
    if exception:
        raise exception(
            'Invalid-Argument',
            f'Value provided for {attr_name} is expected to be a non-empty string or number.',
            value
        )
    return False
def is_parsable_number(
    value,
    attr_name: str = None,
    exception: Exception = None
):
    """Check that *value* can be parsed as a float.

    When *exception* is given and the check fails, it is raised with an
    explanatory message instead of returning False.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        pass
    if exception:
        raise exception(
            'Invalid-Argument',
            f'Value provided for {attr_name} is expected to be a parsable number.',
            value
        )
    return False


def is_parsable_integer(
    value,
    attr_name: str = None,
    exception: Exception = None
):
    """Check that str(*value*) can be parsed as an int.

    When *exception* is given and the check fails, it is raised with an
    explanatory message instead of returning False.
    """
    try:
        int(str(value))
        return True
    except (ValueError, TypeError):
        pass
    if exception:
        raise exception(
            'Invalid-Argument',
            f'Value provided for {attr_name} is expected to be a parsable integer.',
            value
        )
    return False
def is_non_empty_list(
    value,
    attr_name: str = None,
    exception: Exception = None
):
    """Check that *value* is a non-empty list.

    When *exception* is given and the check fails, it is raised with an
    explanatory message instead of returning False.
    """
    if isinstance(value, list) and value:
        return True
    if exception:
        raise exception(
            'Invalid-Argument',
            f'Value provided for {attr_name} is expected to be a non-empty list.',
            value
        )
    return False


def is_non_empty_dict(
    value,
    attr_name: str = None,
    exception: Exception = None
):
    """Check that *value* is a non-empty dict.

    When *exception* is given and the check fails, it is raised with an
    explanatory message instead of returning False.
    """
    if isinstance(value, dict) and value:
        return True
    if exception:
        raise exception(
            'Invalid-Argument',
            f'Value provided for {attr_name} is expected to be a non-empty dict.',
            value
        )
    return False
def is_keys_present(
    obj: dict,
    keys: List[str],
    attr_name: str = None,
    exception: Exception = None
):
    """Check that *obj* is a non-empty dict in which every key in *keys*
    is present with a non-None value (single level only).

    When *exception* is given and a check fails, it is raised with an
    explanatory message instead of returning False.
    """
    if not isinstance(obj, dict) or not obj:
        if exception:
            raise exception(
                'Invalid-Argument',
                f'Value provided for {attr_name} is expected to be a non-empty dict.',
                obj
            )
        return False
    for key in keys:
        if obj.get(key) is None:
            if exception:
                raise exception(
                    'Invalid-Argument',
                    f"Value for the key '{key}' cannot be None or undefined in {attr_name} dict",
                    obj
                )
            return False
    return True
def is_valid_url(url):
    """
    validates the given value is a valid url
    Args:
        url: The value to validate.
    Returns:
        bool: Whether the value is a valid url or not.
    """
    # fixed: the pattern was previously a triple-quoted string whose line
    # break embedded a literal newline plus indentation into the regex,
    # so no real URL could ever match
    regex = (
        r'^https?:\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}'
        r'\.[a-zA-Z0-9()]{1,6}\b(?:[-a-zA-Z0-9()@:%_\+.~#?&\/=]*)$'
    )
    if re.match(regex, url):
        return True
    return False
import os
from typing import Dict
import requests
from requests import Response
from urllib3.util import retry
from .exceptions import CatalystAPIError
from .credentials import (
AccessTokenCredential,
RefreshTokenCredential,
TicketCredential,
CatalystCredential
)
from ._constants import (
APP_DOMAIN,
APP_VERSION_V1,
USER_AGENT,
SDK_VERSION,
AUTHORIZATION,
TICKET_PREFIX,
OAUTH_PREFIX,
COOKIE_HEADER,
CSRF_HEADER,
IS_LOCAL,
PROJECT_KEY,
PROJECT_KEY_NAME,
ENVIRONMENT_KEY_NAME,
ENVIRONMENT,
PROJECT_SECRET_KEY,
USER_KEY_NAME,
PROJECT_DOMAIN,
AcceptHeader,
CredentialUser,
ProjectHeader,
CatalystService
)
# User-Agent sent with every SDK request, tagged with the SDK version.
USERAGENT_HEADER = {USER_AGENT: "zc-python-sdk/" + SDK_VERSION}
# Default timeout for connect and read operation in seconds.
# Per requests' (connect, read) tuple convention: 60s connect, 30s read.
DEFAULT_TIMEOUT = (60, 30)
# Retry policy applied to the underlying urllib3 connection pool:
# retries connection errors, read errors and the listed 5xx statuses
# with exponential backoff; does not raise on exhausted status retries
# (the response is returned and handled by DefaultHttpResponse instead).
DEFAULT_RETRY_CONFIG = retry.Retry(
    connect=2,
    read=1,
    status=4,
    status_forcelist=[500, 502, 503, 504],
    raise_on_status=False,
    backoff_factor=0.5
)
class DefaultHttpResponse:
    """Wrapper over a requests.Response that validates the HTTP status at
    construction time, raising CatalystAPIError for any non-2xx response."""

    def __init__(self, resp: Response):
        self._response = resp
        self._status_code = resp.status_code
        self._headers = resp.headers
        # Fail fast: surface API errors as soon as the response is wrapped.
        self.check_status()

    @property
    def response(self):
        return self._response

    @property
    def status_code(self):
        return self._status_code

    @property
    def headers(self):
        return self._headers

    @property
    def response_json(self):
        """Parsed JSON body of the response.

        Raises:
            CatalystAPIError: if the body is not valid JSON.
        """
        try:
            return self._response.json()
        # requests raises a ValueError subclass (JSONDecodeError) on
        # malformed JSON; the previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.
        except ValueError:
            raise CatalystAPIError(
                'UNPARSABLE_RESPONSE',
                'unable to parse the response json'
            ) from None

    def check_status(self):
        """Raise CatalystAPIError when the response is not a 2xx success.

        Prefers the error_code/message pair from the API's error payload;
        falls back to a generic API_ERROR carrying the raw response data.
        """
        if self._status_code is None:
            raise CatalystAPIError(
                'UNKNOWN_STATUSCODE',
                'unable to obtain status code from response', self.response_json
            )
        if self._status_code not in range(200, 300):
            data = self.response_json.get('data')
            if data:
                error_code = data.get('error_code')
                message = data.get('message')
                if error_code and message:
                    raise CatalystAPIError(
                        error_code,
                        message,
                        http_status_code=self._status_code
                    )
            raise CatalystAPIError(
                'API_ERROR',
                (f'Request failed with status {self._status_code} and '
                 f'response data : {self.response_json}'),
                # Previously this fallback raise dropped the status code,
                # so err.status_code was None for unstructured errors.
                http_status_code=self._status_code
            )
class HttpClient:
    """Base HTTP client over a shared requests.Session with retry mounting.

    Builds the final request URL from a base url + path, injects SDK and
    project headers when an app is attached, and wraps every response in
    DefaultHttpResponse (which raises on non-2xx statuses).
    """

    def __init__(
        self,
        app=None,
        base_url=None,
        retries=DEFAULT_RETRY_CONFIG,
        timeout=DEFAULT_TIMEOUT
    ):
        self._session = requests.session()
        self._timeout = timeout
        self._base_url = base_url
        if retries:
            # Mount the retry adapter for both schemes on this session.
            self._session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
            self._session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
        self._app = app

    @property
    def session(self):
        return self._session

    @property
    def base_url(self):
        return self._base_url

    @property
    def timeout(self):
        return self._timeout

    def request(
        self,
        method: str,
        url: str = None,
        path: str = None,
        user=CredentialUser.ADMIN,
        catalyst_service=None,
        **kwargs
    ):
        """Issue an HTTP request and return a DefaultHttpResponse.

        When an app is attached, project/environment/user headers are added
        and the URL may be rewritten for local (CLI) mode or a specific
        catalyst service. Remaining kwargs go straight to requests.
        """
        if 'timeout' not in kwargs:
            kwargs['timeout'] = self._timeout
        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        headers = kwargs['headers']
        headers.update(USERAGENT_HEADER)
        if self._app is not None:
            # Imported lazily to avoid a circular import with catalyst_app.
            from .catalyst_app import CatalystApp # pylint: disable=import-outside-toplevel
            if not isinstance(self._app, CatalystApp):
                raise ValueError(
                    'Invalid app provided to make requests. App must be an instance of CatalystApp'
                )
            headers[PROJECT_KEY_NAME] = str(self._app.config.get(PROJECT_KEY))
            headers[ENVIRONMENT_KEY_NAME] = self._app.config.get(ENVIRONMENT)
            headers[USER_KEY_NAME] = self._app.credential.current_user_type()
            # setting project secret key in headers if it's present
            if self._app.config.get(PROJECT_SECRET_KEY):
                headers[ProjectHeader.project_secret_key] = self._app.config['project_secret_key']
            user = self._app.credential.current_user()
            # special handling for CLI/local development: route admin calls to
            # the console domain and user calls to the project domain.
            # NOTE(review): this mutates self._base_url on a shared client --
            # presumably safe because clients are per-app; verify if sessions
            # are ever shared across apps.
            if IS_LOCAL == 'true':
                if user == CredentialUser.ADMIN:
                    self._base_url = \
                        'https://' + APP_DOMAIN.replace('https://', '').replace('http://', '')
                elif user == CredentialUser.USER:
                    self._base_url = 'https://' + self._app.config.get(PROJECT_DOMAIN)
            if catalyst_service:
                # Prefix the path with the service name, API version and
                # project id, e.g. "baas/v1/project/<id>/...".
                project_id = self._app.config.get('project_id')
                path = catalyst_service + APP_VERSION_V1 + f'/project/{project_id}' + (path or '')
                headers[AcceptHeader.KEY] = AcceptHeader.VALUE
        self._base_url = self._base_url or APP_DOMAIN
        # NOTE(review): os.path.join for URL building is platform-dependent;
        # works on POSIX -- confirm behaviour on Windows runtimes.
        url = url or (os.path.join(self._base_url, path))
        resp = self._session.request(method, url, **kwargs)
        return DefaultHttpResponse(resp)

    def close(self):
        # Close the underlying session; the client is unusable afterwards.
        self._session.close()
        self._session = None
class AuthorizedHttpClient(HttpClient):
    """HTTP client that attaches the app's credential (OAuth token, ticket
    or cookie pair) to every outgoing request before delegating to
    HttpClient.request."""

    def __init__(
        self,
        app
    ):
        super().__init__(app)

    def request(
        self,
        method: str,
        url: str = None,
        path: str = None,
        user=CredentialUser.USER,
        catalyst_service=CatalystService.SERVERLESS,
        **kwargs
    ):
        """Switch the credential to the requested user scope, set the auth
        headers, then perform the request."""
        self._app.credential._switch_user(user)  # pylint: disable=protected-access
        self._authenticate_request(kwargs)
        return super().request(method, url, path, user, catalyst_service, **kwargs)

    def _authenticate_request(self, kwargs):
        """Populate kwargs['headers'] with the credential's auth header(s)."""
        # Idiomatic replacement for the conditional dict initialisation
        # (the old commented-out variant was removed).
        headers = kwargs.setdefault('headers', {})
        credential = self._app.credential
        if isinstance(credential, (AccessTokenCredential, RefreshTokenCredential)):
            AuthorizedHttpClient.set_oauth_header(headers, credential.token())
        if isinstance(credential, TicketCredential):
            AuthorizedHttpClient.set_ticket_header(headers, credential.token())
        if isinstance(credential, CatalystCredential):
            # CatalystCredential resolves to a concrete credential kind at
            # call time and returns (kind, token).
            cred_type, token = credential.token()
            if cred_type == 'AccessTokenCredential':
                AuthorizedHttpClient.set_oauth_header(headers, token)
            elif cred_type == 'TicketCredential':
                AuthorizedHttpClient.set_ticket_header(headers, token)
            elif cred_type == 'CookieCredential':
                # token is a (cookie, csrf-token) pair for cookie based auth.
                headers.update({COOKIE_HEADER: token[0]})
                headers.update({CSRF_HEADER: token[1]})

    @staticmethod
    def set_oauth_header(headers: Dict, token: str):
        headers.update({AUTHORIZATION: OAUTH_PREFIX + token})

    @staticmethod
    def set_ticket_header(headers: Dict, ticket: str):
        headers.update({AUTHORIZATION: TICKET_PREFIX + ticket})
from typing import Union
from .types import Component
from .exceptions import BrowserLogicError
from ._http_client import AuthorizedHttpClient
from . import validator
from ._constants import (
RequestMethod,
CredentialUser,
Components,
CatalystService
)
from .types.smart_browz import (
OutputOptions,
PdfOptions,
PdfPageOptions,
ScreenShotOptions,
ScreenShotPageOptions,
NavigationOptions
)
class SmartBrowz(Component):
    """Interface to the Catalyst SmartBrowz (Browser360) component:
    PDF rendering, screenshots and template based document generation."""

    def __init__(self, app) -> None:
        self._app = app
        self._requester = AuthorizedHttpClient(self._app)

    def get_component_name(self):
        return Components.SMART_BROWZ

    def _send_convert_request(self, req_json):
        # Shared dispatch for all conversion APIs: POSTs the payload to the
        # Browser360 '/convert' endpoint and returns the raw HTTP response
        # (the body is binary pdf/image content, not JSON).
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/convert',
            json=req_json,
            user=CredentialUser.ADMIN,
            catalyst_service=CatalystService.BROWSER360
        )
        return resp.response

    @staticmethod
    def _set_source(req_json, source: str):
        # Validate the source and store it under 'url' (valid URL) or
        # 'html' (inline markup) as the convert API expects.
        validator.is_non_empty_string(source, 'source', BrowserLogicError)
        if validator.is_valid_url(source):
            req_json['url'] = source
        else:
            req_json['html'] = source

    def convert_to_pdf(
        self,
        source: str,
        pdf_options: PdfOptions = None,
        page_options: PdfPageOptions = None,
        navigation_options: NavigationOptions = None,
        **kwargs
    ):
        '''
        convert the given source (a URL or raw HTML string) into pdf
        '''
        req_json = {"output_options": {"output_type": "pdf"}}
        self._set_source(req_json, source)
        req_json.update({
            "pdf_options": pdf_options,
            "page_options": page_options,
            "navigation_options": navigation_options
        })
        req_json.update(kwargs)
        return self._send_convert_request(req_json)

    def take_screenshot(
        self,
        source: str,
        screenshot_options: ScreenShotOptions = None,
        page_options: ScreenShotPageOptions = None,
        navigation_options: NavigationOptions = None,
        **kwargs
    ):
        '''
        Take screenshot of the given source (a URL or raw HTML string)
        '''
        req_json = {"output_options": {"output_type": "screenshot"}}
        self._set_source(req_json, source)
        req_json.update({
            "screenshot_options": screenshot_options,
            "page_options": page_options,
            "navigation_options": navigation_options
        })
        req_json.update(kwargs)
        return self._send_convert_request(req_json)

    def generate_from_template(
        self,
        template_id: Union[str, int],
        template_data: dict = None,
        output_options: OutputOptions = None,
        pdf_options: PdfOptions = None,
        screenshot_options: ScreenShotOptions = None,
        page_options: Union[PdfPageOptions, ScreenShotPageOptions] = None,
        navigation_options: NavigationOptions = None,
        **kwargs
    ):
        '''
        Generate outputs using existing templates with dynamic template datas
        '''
        validator.is_non_empty_string_or_number(template_id, 'template_id', BrowserLogicError)
        req_json = {
            "template_id": template_id,
            "template_data": template_data,
            "output_options": output_options,
            "pdf_options": pdf_options,
            "screenshot_options": screenshot_options,
            "page_options": page_options,
            "navigation_options": navigation_options
        }
        req_json.update(kwargs)
        return self._send_convert_request(req_json)
from typing import Union, List
from .types import (
Component,
ICatalystCron,
ICatalystGResponse
)
from .exceptions import CatalystCronError
from . import validator
from ._http_client import AuthorizedHttpClient
from ._constants import (
RequestMethod,
CredentialUser,
Components
)
class ICatalystCronReq(ICatalystCron):
    # Payload shape for creating a cron job (same fields as ICatalystCron).
    pass


class ICatalystCronUpdateReq(ICatalystCron):
    # Update payload additionally carries the id of the cron to modify.
    id: str  # pylint: disable=invalid-name


class ICatalystCronRes(ICatalystCron, ICatalystGResponse):
    # Response shape returned by the cron APIs, including run counters.
    id: str  # pylint: disable=invalid-name
    success_count: int
    failure_count: int
class Cron(Component):
    """Interface to the Catalyst Cron component (CRUD on cron jobs)."""

    def __init__(self, app) -> None:
        self._app = app
        self._requester = AuthorizedHttpClient(self._app)

    def get_component_name(self):
        return Components.CRON

    def _call(self, method, cron_path, body=None):
        # Issue an admin-scoped request and return the 'data' payload.
        resp = self._requester.request(
            method=method,
            path=cron_path,
            json=body,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def get_all_cron(self) -> List[ICatalystCronRes]:
        """List every cron job in the project."""
        return self._call(RequestMethod.GET, '/cron')

    def get_cron_details(self, cron_id: Union[int, str]) -> ICatalystCronRes:
        """Fetch a single cron job by its id."""
        validator.is_parsable_integer(cron_id, 'cron_id', CatalystCronError)
        return self._call(RequestMethod.GET, f'/cron/{cron_id}')

    def create_cron(self, cron_details: ICatalystCronReq) -> ICatalystCronRes:
        """Create a new cron job from the given details."""
        self._validate_cron(cron_details)
        return self._call(RequestMethod.POST, '/cron', cron_details)

    def update_cron(self, cron_details: ICatalystCronUpdateReq) -> ICatalystCronRes:
        """Update an existing cron job; details must include its 'id'."""
        self._validate_cron(cron_details, {'id'})
        return self._call(
            RequestMethod.PUT,
            f"/cron/{cron_details.get('id')}",
            cron_details
        )

    def delete_cron(self, cron_id: Union[int, str]) -> bool:
        """Delete a cron job by id; True on success."""
        validator.is_parsable_integer(cron_id, 'cron_id', CatalystCronError)
        return bool(self._call(RequestMethod.DELETE, f'/cron/{cron_id}'))

    @staticmethod
    def _validate_cron(cron_details, mandatories=None):
        # Reject anything that is not a non-empty dict, then enforce the
        # presence of every mandatory key (when any are required).
        if not isinstance(cron_details, dict) or not cron_details:
            raise CatalystCronError(
                'INVALID_CRON_DETAILS',
                'cron details must be passed as a non empty dict'
            )
        for mand in mandatories or ():
            if mand not in cron_details:
                raise CatalystCronError(
                    'INVALID_CRON_DETAILS',
                    f'cron details must contain the mandatory keys {str(mandatories)}'
                )
import json
from typing import List, Literal, Optional, Union
from .types import (
Component,
ICatalystCustomTokenDetails,
ICatalystCustomTokenResponse,
ICatalystSignupConfig,
ICatalystUserDetails,
ICatalystUser,
ICatalystSignupValidationReq
)
from .exceptions import CatalystAuthenticationError
from . import validator
from ._http_client import AuthorizedHttpClient
from ._constants import RequestMethod, CredentialUser, Components
# Allowed values for toggling a project user's status.
UserStatus = Literal['enable', 'disable']


class ICatalystNewUser(ICatalystSignupConfig):
    # Signup response: the config echoed back along with the created user.
    user_details: ICatalystUser
class Authentication(Component):
    """Interface to the Catalyst Authentication component: project users,
    orgs, signup/reset flows and custom token generation."""

    def __init__(self, app) -> None:
        self._app = app
        self._requester = AuthorizedHttpClient(self._app)

    def get_component_name(self):
        return Components.AUTHENTICATION

    def get_current_user(self) -> ICatalystUser:
        """Return the details of the user in the current request scope."""
        resp = self._requester.request(
            method=RequestMethod.GET,
            path='/project-user/current',
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def get_all_users(self, org_id: str = None) -> List[ICatalystUser]:
        """List project users, optionally filtered to a single org."""
        resp = self._requester.request(
            method=RequestMethod.GET,
            path='/project-user',
            user=CredentialUser.ADMIN,
            params={
                'org_id': org_id
            } if org_id else None
        )
        return resp.response_json.get('data')

    def get_user_details(self, user_id: Union[int, str]) -> ICatalystUser:
        """Fetch a single project user by id."""
        validator.is_non_empty_string_or_number(user_id, 'user_id', CatalystAuthenticationError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/project-user/{user_id}',
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def delete_user(self, user_id: Union[int, str]) -> bool:
        """Delete a project user by id; True on success."""
        validator.is_non_empty_string_or_number(user_id, 'user_id', CatalystAuthenticationError)
        resp = self._requester.request(
            method=RequestMethod.DELETE,
            path=f'/project-user/{user_id}',
            user=CredentialUser.ADMIN
        )
        return bool(resp.response_json.get('data'))

    def register_user(
        self,
        signup_config: ICatalystSignupConfig,
        user_details: ICatalystUserDetails
    ) -> ICatalystNewUser:
        """Sign up a new user into the project."""
        validator.is_keys_present(
            signup_config, ['platform_type', 'zaid'], 'signup_config', CatalystAuthenticationError
        )
        validator.is_keys_present(
            user_details, ['last_name', 'email_id'], 'user_details', CatalystAuthenticationError
        )
        signup_config['user_details'] = user_details
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/project-user/signup',
            json=signup_config,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def add_user_to_org(
        self,
        signup_config: ICatalystSignupConfig,
        user_details: ICatalystUserDetails
    ) -> ICatalystNewUser:
        """Add a user to an existing org (user_details must carry 'zaaid')."""
        validator.is_keys_present(
            signup_config, ['platform_type'], 'signup_config', CatalystAuthenticationError
        )
        validator.is_keys_present(
            user_details,
            ['last_name', 'email_id', 'zaaid'],
            'user_details',
            CatalystAuthenticationError
        )
        signup_config['user_details'] = user_details
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/project-user',
            json=signup_config,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def get_all_orgs(self):
        """List all orgs associated with this project."""
        resp = self._requester.request(
            method=RequestMethod.GET,
            path='/project-user/orgs',
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def update_user_status(
        self,
        user_id: Union[str, int],
        status: UserStatus
    ):
        """Enable or disable a project user."""
        validator.is_non_empty_string_or_number(user_id, 'user_id', CatalystAuthenticationError)
        validator.is_non_empty_string(status, 'status', CatalystAuthenticationError)
        if status not in ['enable', 'disable']:
            raise CatalystAuthenticationError(
                'INVALID_USER_STATUS',
                "Status must be either 'enable' or 'disable'."
            )
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=f'/project-user/{user_id}/{status}',
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def update_user_details(
        self,
        user_id: str,
        user_details: ICatalystUserDetails
    ):
        """Update details of an existing project user ('email_id' required)."""
        validator.is_non_empty_string(user_id, 'user_id', CatalystAuthenticationError)
        validator.is_keys_present(
            user_details, ['email_id'], 'user_details', CatalystAuthenticationError
        )
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=f'/project-user/{user_id}',
            json=user_details,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def reset_password(
        self,
        signup_config: ICatalystSignupConfig,
        user_details: ICatalystUserDetails
    ) -> str:
        """Trigger the forgot-password flow for the given user email."""
        validator.is_keys_present(
            signup_config,
            ['platform_type', 'zaid'],
            'signup_config',
            CatalystAuthenticationError
        )
        validator.is_keys_present(
            user_details, ['email_id'], 'user_details', CatalystAuthenticationError
        )
        signup_config['user_details'] = user_details
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/project-user/forgotpassword',
            json=signup_config,
            user=CredentialUser.USER,
            headers={
                'project_id': signup_config['zaid']
            }
        )
        return resp.response_json.get('data')

    @staticmethod
    def get_signup_validation_request(bio_req) -> Optional[ICatalystSignupValidationReq]:
        """Extract the signup-validation request details from a BasicIO object.

        Returns None when the request is not an 'add_user' validation call.

        Raises:
            CatalystAuthenticationError: for a non-BasicIO argument or when
                'request_details' cannot be parsed.
        """
        if bio_req.__class__.__name__ != 'BasicIO':
            raise CatalystAuthenticationError(
                'Invalid-Argument',
                'Please pass the valid basicio param'
            )
        if bio_req.get_argument('request_type') != 'add_user':
            return None
        request_details = bio_req.get_argument('request_details')
        if isinstance(request_details, dict):
            return request_details
        try:
            return json.loads(request_details)
        # json.JSONDecodeError is a ValueError subclass; previously only
        # TypeError was caught, so malformed JSON strings escaped unwrapped.
        except (TypeError, ValueError) as err:
            raise CatalystAuthenticationError(
                'Invalid request details',
                "Unable to parse 'request_details' from basicio args",
                request_details
            ) from err

    def generate_custom_token(
        self,
        custom_token_details: ICatalystCustomTokenDetails
    ) -> ICatalystCustomTokenResponse:
        """Generate a custom auth token from the given details."""
        validator.is_non_empty_dict(
            custom_token_details, 'custom_token_details', CatalystAuthenticationError
        )
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/authentication/custom-token',
            json=custom_token_details,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')
import os
from os import path
def env_override(env_name: str, default_value: str):
    """Return the value of *env_name* from the environment, falling back to
    *default_value* when the variable is unset or empty."""
    return os.getenv(env_name) or default_value
# Load the package version from the adjacent __version__.py file.
meta_file = path.join(path.dirname(path.abspath(__file__)), '__version__.py')
meta = {}
with open(meta_file, encoding="utf-8") as fp:
    exec(fp.read(), meta) # pylint: disable=exec-used
# SDK version
SDK_VERSION = meta['__version__']
# Json variables (keys used in API response envelopes)
JSON_RESPONSE_KEY = "data"
JSON_RESPONSE_STATUS = "status"
JSON_RESPONSE_MESSAGE = "message"
JSON_RESPONSE_CODE = "error_code"
SUCCESS_STATUS = "success"
FAILURE_STATUS = "failure"
# Environment Variable / config keys
PROJECT_KEY = "project_key"
PROJECT_ID = "project_id"
PROJECT_DOMAIN = "project_domain"
ENVIRONMENT = "environment"
PROJECT_SECRET_KEY = "project_secret_key"
ADMIN_CRED = "admin_cred"
CLIENT_CRED = "client_cred"
COOKIE_CRED = "cookie_cred"
ACCESS_TOKEN = "access_token"
CLIENT_ACCESS_TOKEN = "client_token"
CLIENT_COOKIE = "client_cookie"
CLIENT_ID = "client_id"
EXPIRES_IN = "expires_in"
CLIENT_SECRET = "client_secret"
AUTH_URL = "auth_url"
REFRESH_URL = "refresh_url"
REDIRECT_URL = "redirect_url"
GRANT_TYPE = "grant_type"
CODE = "code"
TICKET = "ticket"
ADMIN_CRED_TYPE = "admin_cred_type"
CLIENT_CRED_TYPE = "client_cred_type"
REFRESH_TOKEN = "refresh_token"
USER_TYPE = "user_type"
CONNECTOR_NAME = "connector_name"
ENVIRONMENT_KEY_NAME = "X-Catalyst-Environment"
USER_KEY_NAME = "X-CATALYST-USER"
# URL constants
PROJECT_URL = "project"
PROJECT_KEY_NAME = "PROJECT_ID"
# NOTE(review): "SEPERATOR" is a typo of "SEPARATOR"; kept as-is since the
# name is part of the module's public surface.
FILE_SEPERATOR = "/"
# Defaults below can be overridden through environment variables.
IS_LOCAL = env_override("X_ZOHO_CATALYST_IS_LOCAL", "False")
CSRF_TOKEN_COOKIE = "ZD_CSRF_TOKEN"
APP_DOMAIN = env_override("X_ZOHO_CATALYST_CONSOLE_URL", "https://console.catalyst.localzoho.com")
APP_VERSION_V1 = "/v1"
ACCOUNTS_URL = env_override("X_ZOHO_CATALYST_ACCOUNTS_URL", "https://accounts.localzoho.com")
# Header Constants
CONTENT_TYPE = "Content-Type"
CLIENT_HEADER = "PROJECT_ID"
COOKIE_HEADER = "Cookie"
CSRF_HEADER = "X-ZCSRF-TOKEN"
USER_AGENT = "USER-AGENT"
# Auth Constants
AUTHORIZATION = "Authorization"
COOKIE = "cookie"
USER_SCOPE_HEADER = "X-CATALYST-USER"
ADMIN_SCOPE = "admin"
USER_SCOPE = "user"
# Prefixes carry a trailing space on purpose: "<prefix><token>".
OAUTH_PREFIX = "Zoho-oauthtoken "
TICKET_PREFIX = "Zoho-ticket "
CSRF_PARAM_PREFIX = "zd_csrparam="
class AcceptHeader:
    # Accept header pinning the v2 Catalyst API media type.
    KEY = 'Accept'
    VALUE = 'application/vnd.catalyst.v2+json'
class CredentialUser:
    # User scopes a request can be issued under.
    ADMIN = 'admin'
    USER = 'user'
class RequestMethod:
    # HTTP verbs used by the SDK's http clients.
    GET = "GET"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    PATCH = "PATCH"
class Components:
    # Display names of the Catalyst components exposed by this SDK.
    CACHE = "Cache"
    FILE_STORE = "FileStore"
    MAIL = "Mail"
    SEARCH = "Search"
    ZCQL = "ZCQL"
    ZIA = "Zia"
    CRON = "Cron"
    DATA_STORE = "DataStore"
    FUNCTION = "Function"
    AUTHENTICATION = "Authentication"
    CIRCUIT = "Circuit"
    PUSH_NOTIFICATION = "PushNotification"
    SMART_BROWZ = "SmartBrowz"
class CredentialType:
    # Kinds of credential material a request can carry.
    token = 'token'
    ticket = 'ticket'
class ProjectHeader:
    # Incoming request headers that identify the Catalyst project.
    project_id = 'X-ZC-ProjectId'
    domain = 'X-ZC-Project-Domain'
    key = 'X-ZC-Project-Key'
    environment = 'X-ZC-Environment'
    project_secret_key = 'X-ZC-PROJECT-SECRET-KEY'
class CredentialHeader:
    # Incoming request headers that carry admin/user credentials.
    admin_cred_type = 'X-ZC-Admin-Cred-Type'
    user_cred_type = 'X-ZC-User-Cred-Type'
    admin_token = 'X-ZC-Admin-Cred-Token'
    user_token = 'X-ZC-User-Cred-Token'
    cookie = 'x-zc-cookie'
    zcsrf = 'X-ZCSRF-TOKEN'
    user = 'X-ZC-User-Type'
class CatalystService:
    # Service prefixes used when building service-scoped API paths.
    SERVERLESS = 'baas'
    BROWSER360 = 'browser360'
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SDK for Catalyst by Zoho """
import threading
from typing import Dict, Literal
from .types import ICatalystOptions
from . import credentials
from .catalyst_app import CatalystApp
from ._thread_util import ZCThreadUtil, get_attr
from .exceptions import CatalystAppError
from . import _constants as APIConstants
from ._constants import ProjectHeader, CredentialHeader
from ._util import parse_headers_from_request
# Registry of initialized apps, keyed by "<app name>_<thread id>" so each
# thread gets its own app instance; guarded by a re-entrant lock.
_app_collection = {}
_app_lock = threading.RLock()
_DEFAULT_APP_NAME = '[DEFAULT]'
# Scopes under which an app can be initialized.
CatalystScopes = Literal['admin', 'user']
def initialize(
    name=_DEFAULT_APP_NAME,
    scope: CatalystScopes = None,
    req = None
):
    """
    Initializes a new CatalystApp from request
    Args:
        name: Name of the catalyst app (optional).
            If app name is None, default name will be used.
        scope: The scope in which the app gets initialized (optional).
            If no scope provided, catalyst will switch scopes automatically.
        req: Request object to initialize the SDK
    Returns:
        CatalystApp: A newly initialized catalyst app instance.
    Raises:
        CatalystAppError: If the given scope or other app properties are invalid.
        CatalystCredentialError: If the credentials are missing.
    """
    if req:
        # Copy catalyst headers from the request object into thread storage.
        parse_headers_from_request(req)
    thread_obj = ZCThreadUtil()
    catalyst_headers: Dict = thread_obj.get_value("catalyst_headers")
    if not catalyst_headers:
        raise CatalystAppError(
            'FATAL ERROR',
            'Catalyst headers are empty'
        )
    # creating options from catalyst config
    options = {
        APIConstants.PROJECT_KEY: get_attr(catalyst_headers, ProjectHeader.key),
        APIConstants.PROJECT_ID: get_attr(catalyst_headers, ProjectHeader.project_id),
        APIConstants.PROJECT_DOMAIN: get_attr(catalyst_headers, ProjectHeader.domain),
        APIConstants.ENVIRONMENT: get_attr(catalyst_headers, ProjectHeader.environment),
        APIConstants.PROJECT_SECRET_KEY: get_attr(
            catalyst_headers,
            ProjectHeader.project_secret_key
        )
    }
    # Stash each credential kind found in the headers into thread storage;
    # CatalystCredential later picks the right one for the active scope.
    admin_token = get_attr(catalyst_headers, CredentialHeader.admin_token)
    if admin_token:
        thread_obj.put_value(APIConstants.ADMIN_CRED, admin_token)
        thread_obj.put_value(
            APIConstants.ADMIN_CRED_TYPE,
            get_attr(catalyst_headers, CredentialHeader.admin_cred_type)
        )
    user_token = get_attr(catalyst_headers, CredentialHeader.user_token)
    if user_token:
        thread_obj.put_value(APIConstants.CLIENT_CRED, user_token)
        thread_obj.put_value(
            APIConstants.CLIENT_CRED_TYPE,
            get_attr(catalyst_headers, CredentialHeader.user_cred_type)
        )
    cookie_str = get_attr(catalyst_headers, CredentialHeader.cookie)
    if cookie_str:
        thread_obj.put_value(APIConstants.COOKIE_CRED, cookie_str)
    user_type = get_attr(catalyst_headers, CredentialHeader.user)
    if user_type:
        thread_obj.put_value(APIConstants.USER_TYPE, user_type)
    credential = credentials.CatalystCredential(scope)
    app = CatalystApp(credential, options, name)
    # Register per-thread so concurrent requests do not share app state.
    with _app_lock:
        _app_collection[f'{app.name}_{threading.get_ident()}'] = app
    return app
def initialize_app(
    credential: credentials.Credential = None,
    options: ICatalystOptions = None,
    name=_DEFAULT_APP_NAME
):
    """
    Initializes a new CatalystApp
    Args:
        credential: A credential object of valid Credential type which is initialized from
            catalyst credential module (optional). If credential is None,
            first will check for valid credentials in credential path file and next in env.
        options: A dictionary of key-value pairs (optional). If passed, it must contains the
            mandatory keys - 'project_id', 'project_key' and 'project_domain'.
            If no options provided will check it in env.
        name: Name of the catalyst app (optional).
            If app name is None, default name will be used.
    Returns:
        CatalystApp: A newly initialized catalyst app instance.
    Raises:
        CatalystAppError: If duplicate app name provided or app options are invalid.
        CatalystCredentialError: If the given credentials are invalid.
    """
    if not isinstance(name, str) or not name:
        raise CatalystAppError(
            'INVALID_APP_NAME',
            'App name must be a non-empty string',
            name
        )
    # Apps are keyed per thread, so the same name may coexist across threads.
    with _app_lock:
        if f'{name}_{threading.get_ident()}' in _app_collection:
            raise CatalystAppError(
                'DUPLICATE_APP',
                f'There is already an app named "{name}".'
            )
    if credential is None:
        # Fall back to the application-default credential lookup chain.
        credential = credentials.ApplicationDefaultCredential().credential
    app = CatalystApp(credential, options, name)
    with _app_lock:
        _app_collection[f'{app.name}_{threading.get_ident()}'] = app
    return app
def get_app(name=_DEFAULT_APP_NAME) -> CatalystApp:
    """Return a previously initialized CatalystApp for the current thread.

    Raises:
        CatalystAppError: if name is not a string or no such app exists.
    """
    if not isinstance(name, str):
        raise CatalystAppError(
            'INVALID_APP_NAME',
            'app name must be a string.'
        )
    app_key = f'{name}_{threading.get_ident()}'
    if app_key not in _app_collection:
        if name == _DEFAULT_APP_NAME:
            err_msg = 'Default app does not exist. Make sure to initialize the default app.'
        else:
            err_msg = f'There is no app named "{name}". Make sure to initialize the app.'
        raise CatalystAppError(
            'INVALID_APP_NAME',
            err_msg
        )
    with _app_lock:
        return _app_collection[app_key]
class CatalystError(Exception):
    """Base class for every error raised by the Catalyst SDK.

    Carries an error code, a human readable message and an optional
    offending value; the stringified JSON form becomes the Exception text.
    """

    def __init__(self, code, message, value=None):
        self._code = code
        self._message = message
        self._value = value
        super().__init__(self.to_string())

    @property
    def code(self):
        return self._code

    @property
    def message(self):
        return self._message

    @property
    def value(self):
        return self._value

    @property
    def status_code(self):
        # Non-API errors have no HTTP status; CatalystAPIError overrides this.
        return None

    def to_json(self):
        """Return the error as a dict; 'value' is included only if truthy."""
        details = {
            'code': self._code,
            'message': self._message
        }
        if self._value:
            details['value'] = self._value
        return details

    def to_string(self):
        return str(self.to_json())
# Component-specific error types. Each subclass only tags the error with its
# component; all carry the same (code, message, value) signature as the base.
class CatalystCredentialError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystAppError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystAPIError(CatalystError):
    # Raised for failed HTTP API calls; additionally carries the HTTP status.
    def __init__(self, code, message, value=None, http_status_code=None):
        self.http_status_code = http_status_code
        CatalystError.__init__(self, code, message, value)

    @property
    def status_code(self):
        return self.http_status_code


class CatalystCacheError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystDatastoreError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystFunctionError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystMailError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystFilestoreError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystAuthenticationError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystZCQLError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystCronError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystCircuitError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystConnectorError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystPushNotificationError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystSearchError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class CatalystZiaError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)


class BrowserLogicError(CatalystError):
    def __init__(self, code, message, value=None):
        CatalystError.__init__(self, code, message, value)
from typing import Union, List
from ..types import Component, ICatalystFolder
from ..exceptions import CatalystFilestoreError
from .._http_client import AuthorizedHttpClient
from .._constants import RequestMethod, CredentialUser, Components
from ._folder import Folder
from .. import validator
class Filestore(Component):
    """Entry point for the Catalyst Filestore component: create, list and
    access folders (file operations live on the Folder objects)."""

    def __init__(self, app) -> None:
        self._app = app
        self._requester = AuthorizedHttpClient(self._app)

    def get_component_name(self):
        return Components.FILE_STORE

    def create_folder(self, name: str):
        """Create a new folder and return it as a Folder instance."""
        validator.is_non_empty_string(name, 'folder_name', CatalystFilestoreError)
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/folder',
            json={
                'folder_name': name
            },
            user=CredentialUser.ADMIN
        )
        return Folder(self, resp.response_json.get('data'))

    def get_all_folders(self):
        """List every folder in the filestore as Folder instances."""
        resp = self._requester.request(
            method=RequestMethod.GET,
            path='/folder',
            user=CredentialUser.USER
        )
        return [Folder(self, details) for details in resp.response_json.get('data')]

    def get_folder_details(self, folder_id: Union[int, str]) -> ICatalystFolder:
        """Fetch the raw details dict of a single folder."""
        validator.is_non_empty_string_or_number(folder_id, 'folder_id', CatalystFilestoreError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/folder/{folder_id}',
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def folder(self, folder_id: Union[int, str]):
        """Return a lightweight Folder handle without fetching its details."""
        validator.is_non_empty_string_or_number(folder_id, 'folder_id', CatalystFilestoreError)
        return Folder(self, {'id': folder_id})
from io import BufferedReader
from typing import Dict, Optional, Union
from ..exceptions import CatalystFilestoreError
from .._http_client import AuthorizedHttpClient
from .. import validator
from .._constants import (
RequestMethod,
CredentialUser,
Components
)
from ..types import (
ParsableComponent,
ICatalystFile,
ICatalystFolder,
ICatalystGResponse,
ICatalystProject,
ICatalystSysUser
)
class ICatalystFolderDetails(ICatalystFolder):
    # Folder details enriched with audit and project metadata.
    created_time: Optional[str]
    created_by: Optional[ICatalystSysUser]
    project_details: Optional[ICatalystProject]


class ICatalystFileDetails(ICatalystFile, ICatalystGResponse):
    # File details plus the generic response metadata fields.
    pass
class Folder(ParsableComponent):
    """Wrapper over a single file-store folder exposing file CRUD operations."""

    def __init__(self, filestore_instance, folder_details: Dict):
        """
        Args:
            filestore_instance: parent file-store component whose authorized
                requester is reused for all API calls.
            folder_details: folder record; must at least contain the 'id' key.

        Raises:
            CatalystFilestoreError: if ``folder_details`` is not a non-empty dict.
        """
        validator.is_non_empty_dict(folder_details, 'folder_details', CatalystFilestoreError)
        self._requester: AuthorizedHttpClient = filestore_instance._requester
        self._folder_details = folder_details
        self._id = folder_details.get('id')

    def __repr__(self) -> str:
        return str(self._folder_details)

    def get_component_name(self):
        """Return the Catalyst component identifier for the file store service."""
        return Components.FILE_STORE

    def update(self, name: str) -> ICatalystFolderDetails:
        """Rename this folder and return the updated folder record."""
        validator.is_non_empty_string(name, 'folder_name', CatalystFilestoreError)
        resp = self._requester.request(
            method=RequestMethod.PUT,
            path=f'/folder/{self._id}',
            json={
                'folder_name': name
            },
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def delete(self):
        """Delete this folder; returns True when the API reports success."""
        resp = self._requester.request(
            method=RequestMethod.DELETE,
            path=f'/folder/{self._id}',
            user=CredentialUser.ADMIN
        )
        return bool(resp.response_json.get('data'))

    def get_file_details(self, file_id: Union[int, str]) -> ICatalystFileDetails:
        """Fetch the metadata of one file inside this folder."""
        validator.is_non_empty_string_or_number(file_id, 'file_id', CatalystFilestoreError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/folder/{self._id}/file/{file_id}',
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def delete_file(self, file_id: Union[int, str]) -> bool:
        """Delete one file from this folder; True when the API reports success."""
        validator.is_non_empty_string_or_number(file_id, 'file_id', CatalystFilestoreError)
        resp = self._requester.request(
            method=RequestMethod.DELETE,
            path=f'/folder/{self._id}/file/{file_id}',
            user=CredentialUser.USER
        )
        return bool(resp.response_json.get('data'))

    def upload_file(
        self,
        name: str,
        file: BufferedReader
    ) -> ICatalystFileDetails:
        """Upload ``file`` into this folder under the given file name.

        Args:
            name: name to store the upload as.
            file: open binary reader providing the file content.

        Raises:
            CatalystFilestoreError: if the name is empty or ``file`` is not a
                BufferedReader.
        """
        Folder._validate_file_details(name, file)
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=f'/folder/{self._id}/file',
            files={
                # 'code' is the multipart field name expected by the API.
                'code': ('', file, 'application/octet-stream')
            },
            data={
                'file_name': name
            },
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def download_file(self, file_id: Union[int, str]):
        """Download the whole file content into memory and return it as bytes."""
        return self.get_file_stream(file_id).content

    def get_file_stream(self, file_id: Union[int, str]):
        """Return a streaming HTTP response for the file's content."""
        validator.is_non_empty_string_or_number(file_id, 'file_id', CatalystFilestoreError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/folder/{self._id}/file/{file_id}/download',
            user=CredentialUser.USER,
            stream=True
        )
        return resp.response

    @staticmethod
    def _validate_file_details(name, file):
        """Validate upload arguments; raises CatalystFilestoreError on bad input."""
        if not isinstance(file, BufferedReader):
            # Fixed message: the expected class is io.BufferedReader
            # (previous message said 'Code ... BufferReader').
            raise CatalystFilestoreError(
                'INVALID_FILE_DETAILS',
                'File must be an instance of BufferedReader and cannot be empty'
            )
        validator.is_non_empty_string(name, 'file_name', CatalystFilestoreError)

    def to_string(self):
        return repr(self)

    def to_dict(self) -> ICatalystFolderDetails:
        return self._folder_details
from typing import List, Optional, TypedDict, Dict, Union
# --- Zia image-analysis response typings ---

class ObjectParams(TypedDict):
    # Bounding box, label and confidence for one detected object.
    co_ordinates: List[int]
    object_type: str
    confidence: str

class ICatalystZiaObject(TypedDict):
    object: List[ObjectParams]

class ICatalystZiaOCR(TypedDict):
    confidence: Optional[str]
    text: str

class ICatalystZiaBarcode(TypedDict):
    content: str

class ICatalystZiaModeration(TypedDict):
    probability: Dict[str, str]
    confidence: int
    prediction: str

class ICatalystZiaCom(TypedDict):
    # Generic prediction/confidence pair reused by face attributes below.
    prediction: str
    confidence: Dict[str, str]

class FaceParams(TypedDict):
    confidence: int
    id: str
    co_ordinates: List[int]
    emotion: ICatalystZiaCom
    age: ICatalystZiaCom
    gender: ICatalystZiaCom
    landmarks: Optional[Dict[str, List[int]]]

class ICatalystZiaFace(TypedDict):
    faces: List[FaceParams]

class ICatalystZiaFaceComparison(TypedDict):
    confidence: Optional[int]
    matched: bool

class ICatalystZiaAutoML(TypedDict):
    # Exactly one of the two result fields is populated per model type.
    regression_result: Optional[int]
    classification_result: Optional[Dict[str, int]]

# --- Text analysis responses ---

class ConfidenceScores(TypedDict):
    negative: int
    neutral: int
    positive: int

class SentenceAnalyticsResponse(TypedDict):
    sentence: str
    sentiment: str
    confidence_scores: ConfidenceScores

class SentimentAnalysisResponseParams(TypedDict):
    sentiment: str
    sentence_analytics: List[SentenceAnalyticsResponse]
    overall_score: int
    keyword: Optional[str]

class ICatalystZiaSentimentAnalysisResponse(TypedDict):
    feature: str
    response: SentimentAnalysisResponseParams
    status: str

class ICatalystZiaSentimentAnalysis(TypedDict):
    response: List[ICatalystZiaSentimentAnalysisResponse]
    id: str
    status: str

class KeywordExtractionResponseParams(TypedDict):
    keywords: List[str]
    keyphrases: List[str]

class ICatalystZiaKeywordExtractionResponse(TypedDict):
    feature: str
    response: KeywordExtractionResponseParams
    status: str

class ICatalystZiaKeywordExtraction(TypedDict):
    response: List[ICatalystZiaKeywordExtractionResponse]
    id: str
    status: str

class GeneralEntitiesParams(TypedDict):
    NERTag: str
    start_index: int
    confidence_score: int
    end_index: int
    Token: str
    processed_value: Optional[int]

class NERPredictionResponseParams(TypedDict):
    general_entities: List[GeneralEntitiesParams]

class ICatalystZiaNERPredictonResponse(TypedDict):
    feature: str
    response: NERPredictionResponseParams
    status: str
    statusCode: int

class ICatalystZiaNERPrediction(TypedDict):
    response: List[ICatalystZiaNERPredictonResponse]
    id: str
    statusCode: int
    status: str

class ICatalystZiaTextAnalytics(TypedDict):
    # Combined response when multiple text-analytics features are requested.
    response: List[Union[ICatalystZiaSentimentAnalysisResponse,
                         ICatalystZiaKeywordExtractionResponse,
                         ICatalystZiaNERPredictonResponse]]
    id: str
    status: str
from abc import ABC, abstractmethod
from io import BufferedReader
from typing import Any, List, Literal, Optional, TypedDict, Dict, Union
class Component(ABC):
    """Base type for all Catalyst components; each names itself for diagnostics."""
    @abstractmethod
    def get_component_name(self) -> str:
        """Return the service name of this component."""
        pass
class ParsableComponent(Component):
    """A component whose instances can be serialized for logging/inspection."""
    @abstractmethod
    def to_string(self):
        """Return a human-readable string form of the instance."""
        pass
    @abstractmethod
    def to_dict(self):
        """Return the underlying details as a plain dict."""
        pass
# --- App configuration and core resource typings ---

class ICatalystOptions(TypedDict):
    # Minimal project credentials needed to initialize the SDK.
    project_id: Union[int, str]
    project_key: Union[int, str]
    project_domain: str
    environment: Optional[str]

class ICatalystConfig(ICatalystOptions):
    project_secret_key: Optional[str]

class ICatalystSysUser(TypedDict):
    # System user record attached to audit fields (created_by/modified_by).
    userId: str
    email_id: str
    first_name: str
    last_name: str
    zuid: Optional[str]
    is_confirmed: Optional[bool]

class ICatalystProject(TypedDict):
    id: str
    project_name: str

class ICatalystGResponse(TypedDict):
    # Generic audit metadata shared by most API responses.
    created_time: Optional[str]
    created_by: Optional[ICatalystSysUser]
    modified_time: Optional[str]
    modified_by: Optional[ICatalystSysUser]
    project_details: Optional[ICatalystProject]

class ICatalystSegment(TypedDict):
    id: str
    segment_name: str

class ICatalystCache(TypedDict):
    cache_name: str
    cache_value: str
    expires_in: str
    expiry_in_hours: str
    ttl_in_milliseconds: str
    segment_details: ICatalystSegment

class ICatalystFolder(TypedDict):
    id: str
    folder_name: Optional[str]

class ICatalystFile(TypedDict):
    id: str
    file_location: Optional[str]
    file_name: str
    file_size: int
    folder_details: ICatalystFolder

class ICatalystColumn(TypedDict):
    # Data-store column schema record.
    table_id: str
    column_sequence: str
    column_id: str
    column_name: str
    category: int
    data_type: int
    max_length: str
    is_mandatory: bool
    default_value: Optional[Any]
    decimal_digits: Optional[str]
    is_unique: bool
    search_index_enabled: bool

class ICatalystRow(TypedDict):
    CREATORID: str
    CREATEDTIME: str
    MODIFIEDTIME: str
    ROWID: str

class ICatalystRows(TypedDict):
    # Paged row listing; next_token is set when more_records is True.
    status: str
    data: List[ICatalystRow]
    more_records: Optional[bool]
    next_token: Optional[str]

class ICatalystTable(TypedDict):
    table_id: Optional[str]
    table_name: Optional[str]
    table_scope: Optional[str]
    project_id: Optional[ICatalystProject]
    modified_time: Optional[str]
    modified_by: Optional[ICatalystSysUser]

class ICatalystMail(TypedDict, total=False):
    # Outgoing mail payload; only from_email/to_email/subject are required in practice.
    from_email: str
    to_email: List[str]
    subject: str
    content: Optional[str]
    cc: Optional[List[str]]  # pylint: disable=invalid-name
    bcc: Optional[List[str]]
    reply_to: Optional[List[str]]
    html_mode: Optional[bool]
    display_name: Optional[str]
    attachments: Optional[List[BufferedReader]]
# --- User-management, cron and notification typings ---

class ICatalystUserRoleDetails(TypedDict):
    role_id: str
    role_name: str

class ICatalystSignupConfig(TypedDict, total=False):
    zaid: str
    platform_type: str
    redirect_url: Optional[str]

class ICatalystUser(TypedDict):
    zuid: str
    zaaid: str
    status: str
    user_id: str
    is_confirmed: bool
    email_id: str
    first_name: str
    last_name: str
    created_time: str
    modified_time: str
    invited_time: str
    role_details: ICatalystUserRoleDetails

class ICatalystUserDetails(TypedDict, total=False):
    first_name: Optional[str]
    last_name: str
    email_id: str
    zaaid: str

class ICatalystUserParticulars(TypedDict):
    email_id: str
    first_name: str
    last_name: str
    org_id: Optional[str]

class ICatalystCronUrl(TypedDict):
    # Target endpoint a cron job invokes.
    url: str
    headers: Optional[Dict[str, str]]
    params: Optional[Dict[str, str]]
    request_method: str
    request_body: Optional[str]

class ICatalystCronJob(TypedDict):
    # Schedule definition; fields used depend on repetition_type.
    time_of_execution: Union[str, int, None]
    repetition_type: Optional[str]
    hour: Optional[int]
    minute: Optional[int]
    second: Optional[int]
    days: Optional[List[int]]
    weeks_of_month: Optional[List[int]]
    week_day: Optional[List[int]]
    months: Optional[List[int]]
    timezone: Optional[str]

class ICatalystCron(TypedDict, total=False):
    cron_name: str
    description: Optional[str]
    cron_type: str
    status: bool
    cron_url_details: ICatalystCronUrl
    job_detail: ICatalystCronJob

class ICatalystPushDetails(TypedDict, total=False):
    message: str
    additional_info: Optional[Dict[str, Any]]
    badge_count: Optional[int]
    reference_id: Optional[str]
    expiry_time: Optional[int]

class ICatalystMobileNotification(TypedDict):
    recipients: List[str]
    push_details: ICatalystPushDetails

class ICatalystSearchQuery(TypedDict, total=False):
    search: str
    search_table_columns: Dict[str, List[str]]
    select_table_columns: Optional[Dict[str, List[str]]]
    order_by: Optional[Dict[str, Any]]
    start: Optional[int]
    end: Optional[int]

class ICatalystSignupUserDetails(ICatalystUserParticulars):
    role_details: Optional[ICatalystUserRoleDetails]

class ICatalystSignupValidationReq(TypedDict):
    user_details: ICatalystSignupUserDetails
    auth_type: Literal['web', 'mobile']
# --- Custom-token and bulk-job typings (functional TypedDict syntax is used
# where keys are valid identifiers but a consistent literal style was chosen) ---

CustomTokenUserDetails = TypedDict('CustomTokenUserDetails', {
    'role_name': Optional[str],
    'phone_number': Optional[str],
    'country_code': Optional[str]
}, total=False)

class ICatalystCustomTokenDetails(TypedDict):
    type: Literal['web', 'mobile']
    user_details: CustomTokenUserDetails

class ICatalystCustomTokenResponse(TypedDict):
    jwt_token: str
    client_id: str
    scopes: List[str]

# One filter condition inside a bulk-read criteria group.
Group = TypedDict('Group', {
    'column_name': str,
    'comparator': str,
    'value': str
})

BulkReadCriteria = TypedDict('BulkReadCriteria', {
    'group_operator': str,
    'group': List[Group]
})

class ICatalystBulkReadQuery(TypedDict, total=False):
    page: Optional[int]
    select_columns: Optional[List[str]]
    criteria: Optional[BulkReadCriteria]

# Foreign-key mapping entry for bulk writes.
FkMapping = TypedDict('FkMapping', {
    'local_column': str,
    'reference_column': str
})

class ICatalystBulkWriteInput(TypedDict, total=False):
    operation: Optional[Literal['insert', 'update', 'upsert']]
    find_by: Optional[str]
    fk_mapping: Optional[List[FkMapping]]

class ICatalystBulkCallback(TypedDict, total=False):
    # Endpoint notified when a bulk job completes.
    url: str
    headers: Optional[Dict[str, str]]
    params: Optional[Dict[str, str]]

QueryResultDetails = TypedDict('QueryResultDetails', {
    'page': Optional[int],
    'file_id': Optional[str]
})

BulkJobQueryResult = TypedDict('BulkJobQueryResult', {
    'table_id': str,
    'details': QueryResultDetails
})

BulkJobResultDetails = TypedDict('BulkJobResultDetails', {
    'table_id': str,
    'records_processed': int,
    'more_records': Optional[bool]
})

BulkJobResults = TypedDict('BulkJobResults', {
    'download_url': Optional[str],
    'description': str,
    'details': Optional[BulkJobResultDetails]
})

class ICatalystBulkJob(TypedDict):
    # Status record of a submitted bulk read/write job.
    job_id: str
    status: Literal['In-Progress', 'Completed', 'Failed']
    operation: str
    project_details: ICatalystProject
    created_by: ICatalystSysUser
    created_time: str
    query: Optional[List[BulkJobQueryResult]]
    callback: Optional[ICatalystBulkCallback]
    results: Optional[BulkJobResults]
from typing import Dict, Optional
from ..exceptions import CatalystCacheError
from ..types import (
ICatalystCache,
ICatalystProject,
ICatalystSegment,
ParsableComponent
)
from .._constants import (
RequestMethod,
CredentialUser,
Components
)
from .._http_client import AuthorizedHttpClient
from .. import validator
class ICatalystCacheResp(ICatalystCache):
    """Cache record as returned by the API, including owning project details."""
    project_details: Optional[ICatalystProject]
class Segment(ParsableComponent):
    """A cache segment; targets the default segment when no id is supplied."""

    def __init__(self, cache_instance, segment_details: Dict):
        """
        Args:
            cache_instance: parent cache component whose requester is reused.
            segment_details: segment record with an 'id', or falsy for the
                default segment.
        """
        if segment_details:
            validator.is_non_empty_dict(segment_details, 'segment_details', CatalystCacheError)
        self._requester: AuthorizedHttpClient = cache_instance._requester
        self._id = segment_details.get('id') if segment_details else None
        self._segment_details = segment_details

    def __repr__(self) -> str:
        return str(self.to_dict())

    def get_component_name(self):
        """Return the Catalyst component identifier for the cache service."""
        return Components.CACHE

    def _cache_path(self) -> str:
        # Segment-scoped path when this instance targets a named segment,
        # otherwise the default-segment path.
        return f'/segment/{self._id}/cache' if self._id else '/cache'

    def _write(self, method, key: str, value: str, expiry: int) -> ICatalystCacheResp:
        """Shared implementation for put/update (they differ only in HTTP verb)."""
        validator.is_non_empty_string(key, 'cache_key', CatalystCacheError)
        resp = self._requester.request(
            method=method,
            path=self._cache_path(),
            json={
                'cache_name': key,
                'cache_value': value,
                'expiry_in_hours': expiry
            },
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def put(
        self,
        key: str,
        value: str,
        expiry: int = None
    ) -> ICatalystCacheResp:
        """Insert a cache entry; ``expiry`` is in hours (server default when None)."""
        return self._write(RequestMethod.POST, key, value, expiry)

    def update(
        self,
        key: str,
        value: str,
        expiry: int = None
    ) -> ICatalystCacheResp:
        """Overwrite an existing cache entry; ``expiry`` is in hours."""
        return self._write(RequestMethod.PUT, key, value, expiry)

    def get(
        self,
        key: str
    ) -> ICatalystCacheResp:
        """Fetch the full cache record stored under ``key``."""
        validator.is_non_empty_string(key, 'cache_key', CatalystCacheError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=self._cache_path(),
            params={'cacheKey': key},
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')

    def get_value(self, key: str) -> str:
        """Fetch only the stored value for ``key``."""
        return self.get(key).get('cache_value')

    def delete(self, key: str) -> bool:
        """Delete ``key`` from this segment; True when the request succeeded."""
        validator.is_non_empty_string(key, 'cache_key', CatalystCacheError)
        resp = self._requester.request(
            method=RequestMethod.DELETE,
            path=self._cache_path(),
            params={'cacheKey': key},
            user=CredentialUser.ADMIN
        )
        return bool(resp)

    def to_dict(self) -> ICatalystSegment:
        return self._segment_details

    def to_string(self):
        return repr(self)
from typing import Dict
from .. import validator
from .._http_client import HttpClient
from ..exceptions import CatalystConnectorError
from .._constants import (
CLIENT_ID,
CLIENT_SECRET,
AUTH_URL,
REFRESH_URL,
CONNECTOR_NAME,
REFRESH_TOKEN,
EXPIRES_IN,
REDIRECT_URL,
GRANT_TYPE,
CODE,
RequestMethod,
ACCESS_TOKEN
)
class Connector:
    """OAuth2 helper for one named third-party connection.

    Exchanges a grant code for an access token, refreshes the token when the
    cached one has expired, and persists the latest access token in the
    Catalyst cache (default segment) for one hour.
    """

    def __init__(self, connection_instance, connector_details: Dict[str, str]) -> None:
        self._app = connection_instance._app
        self._requester: HttpClient = connection_instance._requester
        self.connector_name = connector_details.get(CONNECTOR_NAME)
        self.auth_url = connector_details.get(AUTH_URL)
        self.refresh_url = connector_details.get(REFRESH_URL)
        self.refresh_token = connector_details.get(REFRESH_TOKEN)
        self.client_id = connector_details.get(CLIENT_ID)
        self.client_secret = connector_details.get(CLIENT_SECRET)
        # expires_in is optional; normalize it to an int when present.
        self.expires_in = (int(connector_details.get(EXPIRES_IN))
                           if connector_details.get(EXPIRES_IN)
                           else None)
        self.redirect_url = connector_details.get(REDIRECT_URL)
        self.access_token = None

    @property
    def _connector_name(self):
        # Cache-segment key under which the access token is persisted.
        return 'ZC_CONN_' + self.connector_name

    def generate_access_token(self, code: str) -> str:
        """Exchange an authorization code for an access token.

        Stores the returned refresh token and expiry on this instance and
        persists the access token in the cache.

        Raises:
            CatalystConnectorError: if ``code`` is empty or the auth response
                lacks the expected fields.
        """
        validator.is_non_empty_string(code, 'grant_token', CatalystConnectorError)
        resp = self._requester.request(
            # Fix: use the class attribute directly; the previous code
            # instantiated RequestMethod just to read POST.
            method=RequestMethod.POST,
            url=self.auth_url,
            data={
                GRANT_TYPE: 'authorization_code',
                CODE: code,
                CLIENT_ID: self.client_id,
                CLIENT_SECRET: self.client_secret,
            }
        )
        token_obj = resp.response_json
        try:
            self.access_token = token_obj[ACCESS_TOKEN]
            self.refresh_token = token_obj[REFRESH_TOKEN]
            self.expires_in = token_obj[EXPIRES_IN]
        except KeyError as err:
            raise CatalystConnectorError(
                'Invalid Auth Response',
                f'{str(err)} is missing in the response json',
                token_obj
            ) from None
        self._persist_token_in_cache()
        return self.access_token

    def get_access_token(self):
        """Return a valid access token, refreshing when the cached one is stale.

        Raises:
            CatalystConnectorError: if a refresh is needed but no refresh
                token is available, or the refresh response is malformed.
        """
        cached_token = self._app.cache().segment().get(self._connector_name)
        value = cached_token['cache_value']
        if value:
            # Cached entries live 1 hour; compute elapsed ms since storage.
            time = 3600000 - int(cached_token['ttl_in_milliseconds'])
            if not self.expires_in:
                return value
            if self.expires_in and time <= (self.expires_in * 1000):
                return value
        validator.is_non_empty_string(self.refresh_token, 'refresh_token', CatalystConnectorError)
        resp = self._requester.request(
            method=RequestMethod.POST,
            url=self.refresh_url,
            data={
                GRANT_TYPE: 'refresh_token',
                CLIENT_ID: self.client_id,
                CLIENT_SECRET: self.client_secret,
                REFRESH_TOKEN: self.refresh_token
            }
        )
        token_obj = resp.response_json
        try:
            self.access_token = token_obj[ACCESS_TOKEN]
            self.expires_in = int(token_obj[EXPIRES_IN])
        except KeyError as err:
            raise CatalystConnectorError(
                'Invalid Auth Response',
                f'{str(err)} is missing in the response json',
                token_obj
            ) from None
        self._persist_token_in_cache()
        return self.access_token

    def _persist_token_in_cache(self):
        # Store the access token for 1 hour in the default cache segment.
        return self._app.cache().segment().put(self._connector_name, self.access_token, 1)
import json
from typing import Dict
from .._http_client import HttpClient
from ..exceptions import CatalystConnectorError
from ._connector import Connector
from .._constants import (
CLIENT_ID,
CLIENT_SECRET,
AUTH_URL,
REFRESH_URL,
CONNECTOR_NAME
)
CONNECTOR_PARAMS = set([CLIENT_ID, CLIENT_SECRET, AUTH_URL, REFRESH_URL])
class Connection:
def __init__(self, app, **kwargs) -> None:
self._app = app
self._requester = HttpClient(self._app)
self.connection_json: Dict = self._get_connection_json(kwargs.get('properties'))
def get_connector(self, connector_name: str) -> Connector:
connector = self.connection_json.get(connector_name)
if connector is None:
raise CatalystConnectorError(
'Invalid-Connector',
'Provided connector does not exists.'
'Kindly initialize connection with proper properties.'
)
if not isinstance(connector, dict):
raise CatalystConnectorError(
'Invalid Connector details',
'Connector details must be a dictionary of key-value pairs'
)
for key in CONNECTOR_PARAMS:
if not connector.get(key):
raise CatalystConnectorError(
'Invalid Connector details',
(f"Either the key '{key}' is missing or value "
f"provided for the {key} is None in {connector_name} dict")
)
connector_copy = connector.copy()
connector_copy[CONNECTOR_NAME] = connector_name
return Connector(self, connector_copy)
@staticmethod
def _get_connection_json(properties):
if not properties or not isinstance(properties, (str, dict)):
raise CatalystConnectorError(
'Invalid-Properties',
'Connection properties must be passed as dict or string path to json file'
)
if isinstance(properties, dict):
return properties
try:
with open(properties, encoding="utf-8") as json_file:
json_dict = json.load(json_file)
except:
raise CatalystConnectorError(
'Invalid-Properties',
f'Unable to parse the property json from the file path: {properties}'
) from None
return json_dict | zcatalyst-sdk | /zcatalyst_sdk-0.0.2rc3-py3-none-any.whl/zcatalyst_sdk/connection/__init__.py | __init__.py |
from typing import Any, Dict, List, Literal, TypedDict, Union
from ..exceptions import CatalystDatastoreError
from ..types import (
ICatalystColumn,
ICatalystRow,
ICatalystRows,
ParsableComponent
)
from .._constants import (
RequestMethod,
CredentialUser,
Components
)
from .. import validator
from .._http_client import AuthorizedHttpClient
from ._bulk_job import BulkRead, BulkWrite
# Minimal row payload for update operations: only ROWID is mandatory.
ICatalystRowInput = TypedDict('ICatalystRowInput', {'ROWID': str})
# Supported bulk-job operations.
BulkOperation = Literal['read', 'write']
class Table(ParsableComponent):
    """CRUD operations on one data-store table, addressed by id or name."""

    def __init__(self, datastore_instance, table_details: Dict):
        """
        Args:
            datastore_instance: parent datastore component whose requester is
                reused.
            table_details: table record; 'table_id' is preferred over
                'table_name' as the identifier in API paths.

        Raises:
            CatalystDatastoreError: if ``table_details`` is not a non-empty dict.
        """
        validator.is_non_empty_dict(table_details, 'table_details', CatalystDatastoreError)
        self._requester: AuthorizedHttpClient = datastore_instance._requester
        self._identifier = table_details.get('table_id') or table_details.get('table_name')
        self._table_details = table_details

    def __repr__(self) -> str:
        return str(self._table_details)

    def get_component_name(self):
        """Return the Catalyst component identifier for the data store."""
        return Components.DATA_STORE

    def get_all_columns(self) -> List[ICatalystColumn]:
        """Fetch the schema of every column in this table."""
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/table/{self._identifier}/column',
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def get_column_details(self, col_id: Union[str, int]) -> ICatalystColumn:
        """Fetch the schema of a single column."""
        validator.is_non_empty_string_or_number(col_id, 'column_id', CatalystDatastoreError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/table/{self._identifier}/column/{col_id}',
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def insert_row(self, row: Dict[str, Any]) -> ICatalystRow:
        """Insert one row; returns the created row including generated fields."""
        validator.is_non_empty_dict(row, 'row', CatalystDatastoreError)
        return self.insert_rows([row])[0]

    def insert_rows(self, row_list: List[Dict]) -> List[ICatalystRow]:
        """Insert multiple rows in one request."""
        validator.is_non_empty_list(row_list, 'row_list', CatalystDatastoreError)
        resp = self._requester.request(
            method=RequestMethod.POST,
            path=f'/table/{self._identifier}/row',
            json=row_list,
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def get_paged_rows(
        self,
        next_token: str = None,
        max_rows: int = None
    ) -> ICatalystRows:
        """Fetch one page of rows; pass the previous response's next_token
        to continue iteration."""
        req_params = {
            'next_token': next_token,
            'max_rows': max_rows
        }
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/table/{self._identifier}/row',
            params=req_params,
            user=CredentialUser.USER
        )
        return resp.response_json

    def get_iterable_rows(self):
        """Yield every row in the table, transparently following pagination."""
        next_token: str = None
        while True:
            rows_output = self.get_paged_rows(next_token)
            yield from rows_output.get('data')
            next_token = rows_output.get('next_token')
            if next_token is None:
                break

    def get_row(self, row_id: Union[str, int]) -> ICatalystRow:
        """Fetch a single row by its ROWID."""
        validator.is_non_empty_string_or_number(row_id, 'row_id', CatalystDatastoreError)
        resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/table/{self._identifier}/row/{row_id}',
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def delete_row(self, row_id: Union[str, int]) -> bool:
        """Delete a single row; True when the API reports success."""
        validator.is_non_empty_string_or_number(row_id, 'row_id', CatalystDatastoreError)
        resp = self._requester.request(
            method=RequestMethod.DELETE,
            path=f'/table/{self._identifier}/row/{row_id}',
            user=CredentialUser.USER
        )
        return bool(resp.response_json.get('data'))

    def delete_rows(self, ids: List[Union[str, int]]) -> bool:
        """Delete multiple rows in one request; True when the API reports success."""
        validator.is_non_empty_list(ids, 'row_ids', CatalystDatastoreError)
        # Join without rebinding the caller-supplied parameter.
        req_param = {
            'ids': ','.join(map(str, ids))
        }
        resp = self._requester.request(
            method=RequestMethod.DELETE,
            path=f'/table/{self._identifier}/row',
            params=req_param,
            user=CredentialUser.USER
        )
        return bool(resp.response_json.get('data'))

    def update_row(self, row: ICatalystRowInput) -> ICatalystRow:
        """Update one row (identified by its ROWID field)."""
        validator.is_non_empty_dict(row, 'row', CatalystDatastoreError)
        resp = self.update_rows([row])
        # Truthiness instead of len(resp) > 0; empty responses pass through.
        return resp[0] if resp else resp

    def update_rows(self, row_list: List[ICatalystRowInput]) -> List[ICatalystRow]:
        """Update multiple rows in one request."""
        validator.is_non_empty_list(row_list, 'row_list', CatalystDatastoreError)
        resp = self._requester.request(
            method=RequestMethod.PATCH,
            path=f'/table/{self._identifier}/row',
            json=row_list,
            user=CredentialUser.USER
        )
        return resp.response_json.get('data')

    def bulk_read(self):
        """Return a BulkRead job factory bound to this table."""
        return BulkRead(self)

    def bulk_write(self):
        """Return a BulkWrite job factory bound to this table."""
        return BulkWrite(self)

    def to_dict(self):
        return self._table_details

    def to_string(self):
        return repr(self)
from abc import ABC
from typing import Union
from ..exceptions import CatalystDatastoreError
from ..types import (
Component,
ICatalystBulkJob,
ICatalystBulkCallback,
ICatalystBulkReadQuery,
ICatalystBulkWriteInput
)
from .._constants import (
RequestMethod,
CredentialUser,
Components
)
from .. import validator
from .._http_client import AuthorizedHttpClient
class BulkJob(Component, ABC):
    """Common behaviour shared by bulk read/write jobs on a datastore table."""

    def __init__(self, table_instance, operation: str):
        self._requester: AuthorizedHttpClient = table_instance._requester
        self._identifier = table_instance._identifier
        self._operation = operation

    def get_component_name(self):
        """Return the Catalyst component identifier for the data store."""
        return Components.DATA_STORE

    def get_status(self, job_id: Union[str, int]) -> ICatalystBulkJob:
        """Look up the current state of a previously created bulk job."""
        validator.is_non_empty_string_or_number(job_id, 'job_id', CatalystDatastoreError)
        status_resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/bulk/{self._operation}/{job_id}',
            user=CredentialUser.ADMIN
        )
        return status_resp.response_json.get('data')

    def get_result(self, job_id: Union[str, int]):
        """Stream the downloadable result of a finished bulk job."""
        validator.is_non_empty_string_or_number(job_id, 'job_id', CatalystDatastoreError)
        download_resp = self._requester.request(
            method=RequestMethod.GET,
            path=f'/bulk/{self._operation}/{job_id}/download',
            user=CredentialUser.ADMIN,
            stream=True
        )
        return download_resp.response
class BulkRead(BulkJob):
    """Bulk export of rows from a table."""

    def __init__(self, table_instance):
        super().__init__(table_instance, 'read')

    def create_job(
        self,
        query: ICatalystBulkReadQuery = None,
        callback: ICatalystBulkCallback = None
    ) -> ICatalystBulkJob:
        """Submit a bulk read job; ``callback`` (if given) is notified on completion."""
        payload = {
            'table_identifier': self._identifier,
            'query': query,
            'callback': callback
        }
        create_resp = self._requester.request(
            method=RequestMethod.POST,
            path='/bulk/read',
            json=payload,
            user=CredentialUser.ADMIN
        )
        return create_resp.response_json.get('data')
class BulkWrite(BulkJob):
    """Bulk import of rows into a table from a previously uploaded CSV file."""

    def __init__(self, table_instance):
        super().__init__(table_instance, 'write')

    def create_job(
        self,
        file_id: Union[str, int],
        options: ICatalystBulkWriteInput = None,
        callback: ICatalystBulkCallback = None
    ) -> ICatalystBulkJob:
        """Submit a bulk write job for the uploaded file.

        Args:
            file_id: id of the uploaded CSV file in the file store.
            options: optional write configuration (operation, find_by, fk_mapping).
            callback: optional URL details invoked when the job finishes.

        Raises:
            CatalystDatastoreError: if ``file_id`` is empty or ``options`` is
                provided but not a non-empty dict.
        """
        validator.is_non_empty_string_or_number(
            file_id, 'file_id', CatalystDatastoreError
        )
        if options:
            validator.is_non_empty_dict(
                options, 'options', CatalystDatastoreError
            )
        req_json = {
            'table_identifier': self._identifier,
            'file_id': file_id,
            'callback': callback
        }
        # Bug fix: only merge options when provided — the previous code called
        # req_json.update(options) unconditionally, which raises TypeError
        # when options is None.
        if options:
            req_json.update(options)
        resp = self._requester.request(
            method=RequestMethod.POST,
            path='/bulk/write',
            json=req_json,
            user=CredentialUser.ADMIN
        )
        return resp.response_json.get('data')
# dbutils
## 使用方法 (Usage)
```python
from zcb_dbutils import DBConnection
import logging
import pandas as pd
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d ] %(levelname)s %(message)s',
datefmt='%d %b %Y,%a %H:%M:%S', #日 月 年 ,星期 时 分 秒
)
dbconn = DBConnection(host='localhost', port=3306, user='root', password='', database='basic')
ret = dbconn.fetch_one("select * from tb_user1 where id = 1000")
print(ret)
ret = dbconn.showtable('basic', 'tb_user')
print(pd.DataFrame.from_dict(ret))
ret = dbconn.show_table_index('basic', 'tb_user')
print(pd.DataFrame.from_dict(ret))
rows = dbconn.fetch_list('select user_id,id from tb_user')
print(rows)
id = dbconn.insert('insert into tb_role (role_name,role_type,remark_info,created,updated) value (%s,1,%s,0,0)',('xxx', 'xxxx'))
dbconn.commit()
print(id)
``` | zcb-dbutils | /zcb_dbutils-0.0.7.tar.gz/zcb_dbutils-0.0.7/README.md | README.md |
import pymysql
import logging
class DBConnection:
def __init__(self, host, port, user, password, database, charset='utf8'):
self.database = database
self.conn = pymysql.connect(host=host, port=port, user=user, passwd=password, db=database, charset=charset)
self.cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
def commit(self):
self.conn.commit()
def close(self):
self.conn.close()
def exec_sql_file(self, file):
fd = open(file, 'r', encoding='utf-8')
sql = fd.read()
fd.close()
sqlCommands = sql.split(";\n")
for command in sqlCommands:
try:
self.cursor.execute(command)
self.conn.commit()
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return False
return True
def insert(self, sql, args=None):
"""
插入一条,返回自增ID
:param sql:
:param args:
:return:
"""
try:
self.cursor.execute(sql, args)
return self.cursor.lastrowid
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def update(self, sql, args=None):
"""
更新,返回影响行数
:param sql:
:param args:
:return:
"""
try:
self.cursor.execute(sql, args)
return self.cursor.rowcount
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def delete(self, sql, args=None):
"""
删除,返回影响行数
:param sql:
:param args:
:return:
"""
try:
self.cursor.execute(sql, args)
return self.cursor.rowcount
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def exec_sql(self, command):
"""
执行一条语句语句
:param command:
:return:
"""
try:
self.cursor.execute(command)
return True
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return False
def batch_exec(self, command, args=[]):
"""
批量执行,依赖args的数量
:param args:
:param command:
:return:
"""
try:
self.cursor.executemany(command, args)
return True
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return False
def fetch_one(self, sql, args=None):
"""
查询一行数据,如果有多个结果,也只取第一行
:param sql:
:param args:
:return:
"""
try:
self.cursor.execute(sql, args)
result = self.cursor.fetchone()
return result
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def fetch_list(self, sql, args=None):
"""
查询结果为二维数组
:param sql:
:param args:
:return:
"""
try:
self.cursor.execute(sql, args)
result = self.cursor.fetchall()
ret = []
for item in result:
values = list(item.values())
ret.append(values)
return ret
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def fetch_column_by_index(self, sql, args=None, index=0):
"""
查询结果取第index个字段的值,返回数组
:param sql:
:param args:
:param index:
:return:
"""
try:
r = self.cursor.execute(sql, args)
result = self.cursor.fetchall()
ret = []
for item in result:
values = list(item.values())
ret.append(values[index])
return ret
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def fetch_rows(self, sql, args=None):
"""
查询对象数组的结果
:param sql:
:param args:
:return:
"""
try:
self.cursor.execute(sql, args)
result = self.cursor.fetchall()
return result
except Exception as e:
logging.info(e, exc_info=True, stack_info=True)
return None
def showtable(self, database, table_name):
    """Describe the columns of *database*.*table_name*.

    Returns a list of dicts, one per column:
    [{'field': column name, 'type_name': base type (varchar, bigint, ...),
      'comment': column comment, 'type': full type, e.g. varchar(32)}]

    SECURITY FIX: the query used to be built with ``%`` string
    interpolation, which allowed SQL injection through *database* /
    *table_name*; it is now parameterized.

    :param database: schema name
    :param table_name: table name
    :return: list of column-description dicts
    """
    with self.conn.cursor() as cursor:
        sqllist = '''
            select aa.COLUMN_NAME,
            aa.DATA_TYPE,aa.COLUMN_COMMENT, cc.TABLE_COMMENT, aa.COLUMN_TYPE
            from information_schema.`COLUMNS` aa LEFT JOIN
            (select DISTINCT bb.TABLE_SCHEMA,bb.TABLE_NAME,bb.TABLE_COMMENT
            from information_schema.`TABLES` bb ) cc
            ON (aa.TABLE_SCHEMA=cc.TABLE_SCHEMA and aa.TABLE_NAME = cc.TABLE_NAME )
            where aa.TABLE_SCHEMA = %s and aa.TABLE_NAME = %s;
        '''
        cursor.execute(sqllist, (database, table_name))
        result = cursor.fetchall()
        # Rows are positional tuples from the default (non-dict) cursor.
        td = [
            {
                'field': i[0],
                'type_name': i[1],
                'type': i[4],
                'comment': i[2],
            } for i in result
        ]
        return td
def show_table_index(self, database, table_name):
    """List the secondary indexes of *database*.*table_name*.

    Returns [{'id': index id, 'name': 'idx_xxx', 'type': index type,
    'fields': 'field1,field2'}], reading InnoDB metadata tables
    (``information_schema.INNODB_INDEXES`` etc.); ``TYPE != 1`` filters
    out the clustered/primary index.

    SECURITY FIX: the query used to embed *database*/*table_name* with
    ``%`` interpolation (SQL injection); the ``schema/table`` value is
    now passed as a bound parameter.

    :param database: schema name
    :param table_name: table name
    :return: list of index-description dicts
    """
    with self.conn.cursor() as cursor:
        sqllist = '''
            select aa.INDEX_ID, aa.`NAME`,aa.TYPE,group_concat(cc.`NAME` order by cc.POS)
            from information_schema.INNODB_INDEXES aa
            left JOIN information_schema.`INNODB_TABLES` bb on aa.TABLE_ID = bb.TABLE_ID
            left join information_schema.INNODB_FIELDS cc on aa.INDEX_ID = cc.INDEX_ID
            where aa.TYPE != 1 and bb.`NAME` = %s
            group by cc.INDEX_ID order by cc.POS;
        '''
        cursor.execute(sqllist, ('%s/%s' % (database, table_name),))
        result = cursor.fetchall()
        td = [
            {
                'id': i[0],
                'name': i[1],
                'type': i[2],
                'fields': i[3]
            } for i in result
        ]
        return td
# Name of the extraction result file.
OUTPUT = "result.csv"
# --- State-machine states for the document parser ---
START = 0  # start state
TITLE = 1  # extracting the document title
TYPE = 2  # extracting the document type
NUMBER = 3  # case number
YUANGAO = 4  # plaintiff information
BEIGAO = 5  # defendant information
PARTY = 10  # new style: single "party" state replacing plaintiff/defendant/third-party
THIRD_PERSON = 6  # third-party information
CONTENT = 7  # extracting the document body
FOOTER = 8  # extracting the signature block
END = 9  # end state
CASE_DOMAIN = ["民事", "行政", "刑事", "执行"]  # case domains (civil/administrative/criminal/enforcement)
LAW_TYPE = ["裁决书", "判决书", "裁定书", "调解书"]  # document types
# Role labels that terminate a party block.
PARTIES_STOP = ["申诉人", "反诉原告", "原告人", "原告",
                "上诉单位", "本诉原告", "上诉人", "公诉机关", "再审申请人", "起诉人", "申请人",
                "申请再审人", "自诉人", "复议申请人", "附带民事诉讼原告人", "申请执行人", "申请追加人",
                "解除保全申请人", "原告1", "原告2", "原告3", "异议人", "申请复议人"]
# Plaintiff-side role labels.
PARTIES_YG = ["原审上诉人", "申诉人", "原审原告", "反诉原告", "一审原告", "原告人",
              "上诉单位", "本诉原告", "上诉人", "公诉机关", "再审申请人", "起诉人", "申请人",
              "申请再审人", "自诉人", "复议申请人", "附带民事诉讼原告人", "申请执行人", "申请追加人",
              "解除保全申请人", "再审上诉人", "原告1", "原告2", "原告3", "原告一", "原告二",
              "原告三", "原告",
              "异议人", "申请复议人", "申请执行机关", "申请执行", "申请复议人", "案外人"]
# Defendant-side role labels.
PARTIES_BG = ["原审被上诉人", "被申诉人", "被上诉人", "被诉人", "再审被申请人", "被申请人",
              "原上诉人", "被起诉人", "本诉被告", "原公诉机关", "原审被告", "反诉被告",
              "一审被告", "被告人", "附带民事诉讼被告人", "被执行人", "被申请追加人", "罪犯",
              "再审被上诉人", "被告1", "被告2", "被告3", "被告一", "被告二", "被告三", "被告"]
# Third-party role labels.
PARTIES_DSR = ["一审第三人", "原审第三人", "反诉第三人", "第三人", "第三",
               "一审原告、二审上诉人", "一审原告、二审被上诉人", "一审被告、二审上诉人", "一审被告、二审被上诉人",
               "一审第三人、二审上诉人", "一审第三人、二审被上诉人"]
PARTIES = []  # all party role labels (plaintiff + defendant + third-party)
PARTIES.extend(PARTIES_YG)
PARTIES.extend(PARTIES_BG)
PARTIES.extend(PARTIES_DSR)
# Variant spellings of "legal representative" (including common OCR typos).
LEGAL = ["定代表人", "法等代表人", "法定代笔人", "法定代表人", "法定代表", "法定代理人",
         "法定代人", "法定借助人", "法人代表人", "法人代表"]
PRINCIPAL = ["全权代表", "授权代表", "授权经理人", "授权签署人", "授权签字代表",
             "授权签字人", "授权人", "义乌办事处首席代表", "代表人", "负责人", "委托代理", "代理人",
             "诉讼代表人", "委托代表人"]  # authorized representatives
OTHERS = ["监护人", "经营者", "投资人", "执行合伙企业事务的合伙人", "执行合伙人", "执行合伙事务合伙人",
          "执行事务合伙人", "专家辅助人", "翻译人员", "法定代理人", "业主"]  # other attached roles
PROXY = ["辩护人", "委托代理人", "委托人", "委托人理人", "诉讼代理人",
         "委托地利人", "委托大理人", "委托诉讼代理人", "委托代付"]  # litigation proxies
RELATIVES = []  # every role that attaches to a party rather than being one
RELATIVES.extend(PROXY)
RELATIVES.extend(PRINCIPAL)
RELATIVES.extend(OTHERS)
RELATIVES.extend(LEGAL)
ROLES = ["经营者", "审判长", "代理审判员", "书记员", "人民陪审员"]  # all roles
ROLES.extend(PARTIES)
ROLES.extend(RELATIVES)
AFFAIR = ["经营者", "法定代表人", "委托代理人"]
FOOTER_ROLE = ["审判长", "代理审判长", "审判员", "代理审判员",
               "人民陪审员", "法官助理", "书记员", "速录员"]  # roles in the signature block
# Full-width sentence punctuation used to trim trailing tokens.
# NOTE(review): the items u",u" and u":u" look like typos for u"," and
# u":" (a stray "u" prefix migrated inside the string) — confirm; as-is,
# full-width comma/colon are never matched by this set.
PUNCTUATION = set([u",u", u"。", u"?", u"!", u":u", u";"])
# Both full-width and half-width punctuation.
PUNCTUATION_ALL = set([u",", u"。", u"?", u"!", u":",
                       u";", ",", "?", "!", ":", ";"])
LINE_COUNT = 0  # global counter of processed lines
DICT = {}  # loaded lookup dictionaries
# Chinese numeral -> integer mapping.
CHINESE_NUM = {"一": 1, "二": 2, "三": 3, "四": 4, "五": 5, "六": 6, "七": 7, "八": 8,
               "九": 9, "十": 10, "两": 2, "十一": 11, "十二": 12, "十三": 13,
               "十四": 14, "十五": 15}
# Characters recognised as digits (Chinese numerals and ASCII digits).
NUMBERS = ["一", "二", "三", "四", "五", "六", "七", "八", "九",
           "十", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# --- Regular expressions ---
JUDGE_PATTERN = "(裁决|裁定|判决)(如下|)(:|:)"  # "the judgment reads as follows"
YEAR_MONTH_DATE = "(\\d+)年(\\d+)月(\\d+)(日|)"  # a person's date of birth
# Case-number regex.
ANHAO = "(\(|\[|(|【|〔|(|)(\d+)(\)|\]|】|〕|)|)(.{3,40})([\d、\-—xX]+)(号)(之一)?"
# Case-number regex (first-instance judgment references).
YISHENANHAO = "(\(|\[|(|【|〔|(|)(\d+)(\)|\]|】|〕|)|)(.{5,40})(\d+)"
YISHENANHAO += "(号)(之一)?(民事|行政|)(判决|裁定|)"
ANHAO_YEAR = "\d{4}"  # year inside a case number
TRADEMARK_ID = "商评字(\(|\[|(|【|〔)(\d+)(\)|\]|)|】|〕)第(\d+)号"  # trademark review decision number
PATENT_ID = "(专利复审委员会|知识产权局)(.+)第((\d|\.)+)号专利"  # patent decision number
FOOTER_YEAR_MONTH_DATE = "(.+)年(.+)月(.+)日"  # signature-block date
# A third party sharing a proxy with the plaintiff/defendant; %s is the role.
COMMON_ROLES_PATTERN1 = '(被告|原告|第三人|上诉人|被上诉人|原审被告|原审原告|原审第三人|)'
COMMON_ROLES_PATTERN1 += '(和|,|,|、|与|)(被告|原告|第三人|上诉人|被上诉人|原审被告|原审原告|原审第三人|)'
COMMON_ROLES_PATTERN1 += '(|之|的)(共同|)%s'
# "the above N persons' common representative".
# NOTE(review): "十]" contains a stray "]" inside the alternation — likely a
# typo for "十"; confirm before relying on this pattern.
COMMON_ROLES_PATTERN2 = '(一|二|两|三|四|五|六|七|八|九|十])?(人|%s)(|之|的)%s'
# [pattern, normalized label] pairs for first-instance civil judgments.
YISHEN_MIN_PAN = ["(驳回(.{0,30})(诉讼|本诉)请求|的(诉讼|本诉)请求不予支持)", "驳回原告诉讼请求"]
# [pattern, normalized label] pairs for first-instance civil rulings.
YISHEN_MIN_CAI = []
YISHEN_MIN_CAI_1 = ["(按撤诉处理|按(.{0,20})(撤回|撤销)(起诉|反诉|本诉|诉讼)处理)", "按撤诉处理"]
YISHEN_MIN_CAI_2 = ["((撤回|撤销)(.{0,10})(起诉|反诉|本诉|诉讼)|撤诉)", "撤诉"]
YISHEN_MIN_CAI_3 = ["(驳回(.{0,20})起诉|诉讼|反诉|本诉)", "驳回起诉"]
YISHEN_MIN_CAI_4 = ["(不予受理|不予立案)", "不予受理"]
YISHEN_MIN_CAI_5 = ["中止(诉讼|审理)", "中止诉讼"]
YISHEN_MIN_CAI_6 = ["恢复(诉讼|审理)", "恢复审理"]
YISHEN_MIN_CAI_7 = ["驳回(.{0,10})管辖(权|)异议", "驳回管辖权异议"]
YISHEN_MIN_CAI_8 = ["(扣押|冻结|查封|保全|禁令)", "保全程序"]
YISHEN_MIN_CAI_9 = ["本案(指定|(交|)由)(,{0,10})人民法院(审理|管辖)", "指定审理"]
YISHEN_MIN_CAI_10 = ["并入(.{1,30})号案", "并案审理"]
YISHEN_MIN_CAI_11 = ["(另立案号|另案审理|分(为|立)(二|两|三|四|五|六|七|八|九|十)(件|)案)", "分案审理"]
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_1)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_2)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_3)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_4)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_5)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_6)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_7)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_8)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_9)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_10)
YISHEN_MIN_CAI.append(YISHEN_MIN_CAI_11)
# --- Regexes (Jokers) ---
PATENT_NUMBER = '((ZL|CN|)[ 0-90-9Xx×Xx**]{8,12}\.[0-90-9XxXx×*×])'  # patent number
TRADEMARK_NUMBER = '(诉争商标|引证商标)?\w*第([0-9]+)号[^,。;、:]*?商标\w*'  # trademark number
# [pattern, normalized label] pairs for second-instance civil judgments.
ERSHEN_MIN_PAN = []
ERSHEN_MIN_PAN_1 = ["撤销(.{1,10})号(民事判决|原审判决|一审判决)", "撤销一审判决"]
ERSHEN_MIN_PAN_2 = ["(维持原判|维持原审判决|维持一审判决|维持+号民事判决)", "维持一审判决"]
ERSHEN_MIN_PAN.append(ERSHEN_MIN_PAN_1)
ERSHEN_MIN_PAN.append(ERSHEN_MIN_PAN_2)
# [pattern, normalized label] pairs for second-instance administrative judgments.
ERSHEN_XING_PAN = []
ERSHEN_XING_PAN_1 = ["(驳回上诉(.+)维持一审判决|维持原审判决)", "维持一审判决"]
ERSHEN_XING_PAN.append(ERSHEN_XING_PAN_1)
# [pattern, normalized label] pairs for second-instance rulings.
ERSHEN_CAI = []
ERSHEN_XING_CAI_1 = ["(维持原裁定|维持一审裁定|维持原审裁定)", "维持一审裁定"]
ERSHEN_XING_CAI_2 = ["撤销(.+)裁定", "撤销一审裁定"]
ERSHEN_XING_CAI_3 = ["(撤销(.+)判决)(.+)发回(.+)(重审|重新审理)", "撤销一审判决,发回一审重新审理"]
ERSHEN_XING_CAI_4 = ["(撤销(.+)判决)(.+)(驳回(.+)起诉)", "撤销一审判决,驳回原告起诉"]
ERSHEN_XING_CAI_5 = ["(撤回(.+)起诉|撤回(.+)上诉|撤诉)", "撤诉"]
ERSHEN_XING_CAI_6 = ["(恢复诉讼|恢复审理)", "恢复审理"]
ERSHEN_CAI.append(ERSHEN_XING_CAI_1)
ERSHEN_CAI.append(ERSHEN_XING_CAI_2)
ERSHEN_CAI.append(ERSHEN_XING_CAI_3)
ERSHEN_CAI.append(ERSHEN_XING_CAI_4)
ERSHEN_CAI.append(ERSHEN_XING_CAI_5)
ERSHEN_CAI.append(ERSHEN_XING_CAI_6)
# --- Keywords ---
SUMMARY = ["不服", "受理", "本院", "原告", "被告"]  # heuristic cue words for the summary paragraph
END_JUDGE = ["提起上诉", "受理费"]  # markers that end the judgment-result section
# Ethnic-group regex.
NATION = "^(汉|哈尼|保安|白|阿昌|布朗|布依|朝鲜|达斡尔|傣|德昂|东乡|侗|独龙"
NATION += "|鄂伦春|俄罗斯|鄂温克|高山|仡佬|哈萨克|赫哲|回|基诺|景颇|京|柯尔克孜|拉祜|"
NATION += "珞巴|僳僳|黎|满|毛南|门巴|蒙古|苗|仫佬|纳西|怒|普米|羌|撒拉|畲|水|塔吉克|"
NATION += "塔塔尔|土家|土|佤|维吾尔|乌孜别克|锡伯|瑶|彝|裕固|藏|壮)族?$"
# Job-title regex: matches official position/title strings attached to party descriptions.
POSITION = "副?总?(董事长|无业|新闻资源合作专员|登记注册科科员|法律事务科科员|合伙事务执行人|技术资产所有人|\
总制片人经营人|产业部负责人|法律事务人员|IT工程师|保护科科长|党委副书记|法定代表人|法规股股长|法务部经理|\
法政科科员|高级工程师|个体工商户|合伙执行人|技术部部长|迁西代理商|商标代理人|事务协调员|首席财务官|首席技术官|\
首席运营官|首席执行官|授权签字人|药品注册员|职业摄影师|专利代理人|专利工程师|财务人员|财务总监|代表理事|\
党委书记|党组书记|法律顾问|法律顺间|法务专员|副董事长|副理事长|副调研员|副研究员|副总代表|高级记者|工作人员|\
管理部长|高级管理人员|管理人员|行政副总|行政管理|行政人员|行政助理|行政专员|技术副总|技术人员|技术总监|经销商户|律师助理|\
内容总监|企管人员|人事专员|社长助理|实习律师|事务专员|首席摄影|授权代表|授权官员|维权专员|文职人员|项目经理|\
销售代表|运营总监|支部书记|执行懂事|指导老师|总裁助理|总工程师|总机械师|总设计师|财务长|承包人|出资人|代理人|\
代理商|法律部负责人|负责人|复审员|副部长|副区长|副书记|副团长|副镇长|副政委|助理副总裁|副总裁|工程师|管理员|合伙人|技术员|经营者|\
理事长|配镜师|评审员|评审审查员|审查员|投资人|业务员|营业员|执行官|总编辑|总干事|总指挥|编辑|法律部部长|\
助理部长|部长|财务|厂长|场长|处长|\
代表|店员|店长|店主|执行董事|董事|队长|法律|法人|法务|副理|副总|评审干部|退休干部|干部|工人|股东|股长|\
知识产权顾问|顾问|雇员|馆长|行长|会计|会员|会长\
|监事|教师|教授|经理|局长|科员|科长|课长|矿长|老师|理事|律师|助理秘书|秘书|区长|社长|所长|台长|团长|网管|文员|县长\
|乡长|校长|协理|药师|业主|园长|员工|院长|运营|站长|镇长|职工|法律部职员|职员|主编|主管|助理主任|\
主任|主席|专利代理人助理|助理|住持|专务|\
专员|总编|总裁|总监|组长|CE0|CEO|PaulBuysse长和AlbrechtDeGraeve董事\
|财务副董事长、首席财务官、财务主管兼秘书|公司法律顾问与商标及版权事务知识产权主管|\
副总裁、法律及知识产权事务助理总法律顾问|公司董事会助理秘书及公司助理首席法律顾问|\
法务部和事务部高级副总裁、法律顾问及秘书|助理秘书、高级副总裁兼首席知识产权顾问|\
董事长董事兼现金及衍生产品市场部负责人|副总法律兼交易和战略部首席知识产权顾问|\
授权代表、知识产权专利申请及诉讼部主管|副总裁、总法律顾问、首席法务官兼秘书|\
执行董事兼环球银行及资本市场业务主管|副总裁、总法律顾问、首席合规官兼秘书|\
副总裁、副总法律顾问兼助理董事会秘书|副总法律兼交易与策略首席知识产权顾问|\
第一副总裁、知识产权总顾问及助理秘书|法律顾问、商标及版权处知识产权负责人|\
董事会助理秘书及公司助理首席法律顾问|高级副总裁、首席法律顾问及董事会秘书|\
助理秘书、高级副总裁及知识产权总顾问|高级副总裁、首席法律顾问、董事会秘书|\
副总法律和交易与策略首席知识产权顾问|周吉宜兼以上十五位之共同委托代理人|副总裁、副总法律顾问兼公司助理秘书|\
执行副总裁兼首席行政官和总法律顾问|副总裁、副总法律顾问兼公司秘书助理|法务和企业联合部副总裁兼授权签字人|\
高级董事首席知识产权和诉讼法律顾问|事务高级副总裁、总法律顾问兼秘书|执行副总裁兼总法律顾问和公司秘书|\
财务副总裁兼首席财务官兼助理秘书|副总兼首席交易和战略知识产权顾问|董事兼现金及衍生产品市场部负责人|\
副总裁、助理总法律顾问兼助理秘书|高级副总裁、总法律顾问兼公司秘书|执行副总裁、总法律顾问兼公司秘书|\
高级副总裁、法务总监兼董事会秘书|高级副总裁兼集团总顾问、集团法务|董事会秘书及公司助理首席法律顾问|\
商标、法律及知识产权事务法律顾问|知识产权及标准部(中国区)负责人|首席财政官、财务主管及执行副总裁|\
首席财务官、运营副总裁及法律代表|首席法务官、法律总顾问及公司秘书|高级副总裁、法律总顾问及公司秘书|\
高级执行副总裁及知识产权管理经理|商业开发、法律及知识产权事务总监|董事会助理秘书及助理首席法律顾问|\
部门主任和公司名称或商标法律主管|法律企业、EMEA和APAC主管|助理公司秘书和知识产权助理总顾问|\
副总裁、副总法律顾问兼助理秘书|法律顾问兼全球商标实践组负责人|副总裁、助理法律顾问兼助理秘书|\
高级副总裁-法律顾问兼公司秘书|副总裁、副总法律顾问兼总裁助理|董事兼副社长执行董事兼财务总监|\
副总裁、副总法律顾问兼秘书助理|亚太、大中华及南非区首席执行官|副总裁、董事会秘书及总法律顾问|\
公司管理委员会及执行理事会主席|专利操作法律及知识产权事务总监|执行副总裁、法律总监及公司秘书|\
助理秘书及知识产权副总法律顾问|知识产权助理法律顾问及助理秘书|全球知识产权交易和战略高级顾问|\
职务资深法律顾问和知识产权主管|执行副总裁、商务法务和助理秘书|副总法律顾问兼法务审计部部长|\
高级副总裁、总法律顾问兼秘书|副总裁兼知识产权副总法律顾问|副总裁兼首席知识产权法律顾问|\
副总裁、总法律顾问兼公司秘书|高级副总裁-法律总顾问兼秘书|授权代表人兼商标事务法律顾问|\
会主席、首席执行官兼常务董事|财务和行政副总裁兼首席执行官|高级副总裁、法律总顾问兼秘书|\
副总裁(法律事务)兼公司秘书|高级副总裁、总顾问兼公司秘书|高级副总裁兼全球副总法律顾问|\
全球公司副总裁、总顾问兼秘书|财务副总裁、首席财务官兼主管|管理委员会董事长兼首席执行官|\
执行副总裁、总法律顾问兼秘书|董事执行官兼研究开发部总经理|执行副总裁兼总法律顾问及秘书|\
CRB‘‘LLC经理兼合伙人|上海古籍出版社副社长兼副总编|副总裁、法律总顾问兼公司秘书|高级副总裁及知识产权法律顾问|\
助理总法律顾问及首席商标律师|副总裁及知识产权副总法律顾问|副执行官、法律顾问及公司秘书|\
总理及原住民事务和糖业部部长|职务律师、知识产权部长及董事|高级副总裁、法律总顾问及秘书|\
职务副总法律顾问及执法副总裁|执行副总裁、总法律顾问及秘书|高级副总裁、总法律顾问及秘书|\
知识产权及标准部中国区负责人|执行副总裁和首席法务官及秘书|高级副总裁、总顾问及公司秘书|\
首席法律顾问助理和商标部主管|投票代理人和高级知识产权律师|公共事务和法律事务部执行总监|\
高级知识产权顾问和集群负责人|执行副总裁、公司总顾问和秘书|研究合作伙伴和技术转移部经理|\
副总裁、助理总顾问和助理秘书|副总裁、总法律顾问和公司秘书|助理总法律顾问和商标部门主管|\
总经理助理兼规划发展部部长|助理秘书兼全球反盗版副总裁|副本部长兼知识产权中心所长|高级副总裁、法务总监兼秘书|\
副总裁兼商标总法律顾问助理|秘书兼职业道德与合规副总裁|副总裁、副总顾问兼助理秘书|副总裁兼知识产权副法律顾问|\
助理秘书长兼商标与品牌总监|副总裁、总法律顾问兼总秘书|董事长、总经理兼首席执行官|顾问、高级副总裁兼公司秘书|\
运营执行副总裁兼法律总顾问|代表取缔役会长兼社长CEO|副总裁、法律顾问兼执行秘书|高级执行副总裁兼首席财务官|\
代表取缔役社长兼首席执行官|法律事务副总裁兼总法律顾问|职务集团董事长兼首席执行官|副总裁兼北美公司职能总顾问|\
北美地区董事长兼首席执行官|北美地区董事长兼首席运营官|高级副总裁兼全球总法律顾问|副总裁-法律兼公司助理秘书|\
行政副总裁、法律顾问兼秘书|副总兼首席知识产权法律顾问|执行副总裁、法律总监及秘书|专利、商标及许可证部总经理|\
第一副总裁及知识产权总顾问|财政、行政、采购及操作总监|金融、产品及区域高级副总裁|执行副总裁、总顾问及总秘书|\
专利法律及知识产权事务总监|高级副总裁、法律顾问及秘书|副总知识产权顾问及助理秘书|职务法律顾问及知识产权主管|\
副总法律顾问及知识产权总监|知识产权及诉讼副总法律顾问|专利及商标部门经理和代理人|总经理及广东保信事务所律师|\
公司法律董事及法定公司秘书|法律及公司事务部执行副总裁|公司和医疗事务部执行副总裁|高级财务副总监和首席财务官|\
资深法律顾问和知识产权主管|知识产权法副总裁和助理秘书|首席运营官和公司战略执行官|高级副总裁和首席法律执行官|\
副总裁兼首席知识产权顾问|副总裁兼总法律和税务顾问|助理秘书兼助理法律总顾问|执行副总裁、总顾问兼秘书|\
副总裁兼首席知识产权律师|法务执行副总裁兼公司秘书|执行董事兼质量保证部部长|秘书、总顾问兼高级副总裁|\
副总裁兼商业食品法律顾问|财务总监、秘书兼财务主管|副总裁、法律总顾问兼秘书|副总裁、总顾问兼公司秘书|\
高级副总裁、总顾问兼秘书|集团执行董事兼首席执行官|助理总法律顾问兼助理秘书|总经理(法务)兼公司秘书|\
董事会副主席兼首席执行官|党委书记、董事长兼总经理|高级执行总裁兼首席执行官|商标及版权顾问兼授权代表|\
法务副总裁兼首席专利顾问|高级副总裁兼首席法律顾问|副总裁兼知识产权法律顾问|副总裁兼副国际总法律顾问|\
助理秘书长兼副总法律顾问|知识产权全球总监兼代理人|合伙人兼美国区总法律顾问|高级副董事长兼总法务顾问|\
副总裁、首席财务官兼主管|董事该公司董事长兼总经理|总裁、首席执行官兼董事长|高级副总裁兼知识产权顾问|\
副总裁、助理总顾问兼秘书|执行(常务)董事兼总经理|大中华地区及亚太区代理人|集团财务及行政管理总经理|\
助理总法律顾问及助理秘书|集团首席法务官及公司秘书|律师、知识产权部长及董事|副总法律顾问及执行副总裁|\
商务及法律事务高级副总裁|高级副总裁及高级法律顾问|副总法律顾问及执法副总裁|顾问及全球商标运营组主管|\
技术管理和标准化部负责人|高级公司法律顾问和代理人|普通合伙人和独立法定代表|欧洲、中东和非洲运营经理|\
副总裁、总法律顾问和秘书|副总法律顾问和高级副总裁|知识产权律师和法定代理人|法律和公司事务高级副总裁|\
高级副总裁和公司法律顾问|品牌执行和商标法高级总监|董事长、首席执行官和主席|公司秘书和职业操守副总裁|\
副总裁、法律总顾问和秘书|总顾问和法律事务部负责人|战略发展顾问兼法律顾问|法定代表人兼实际经营者|\
委副书记兼办公室主任|副总工程师兼机械部主任|高级副总裁兼助理总顾问|法务副总裁兼总法律顾问|\
执行副总裁兼总法律顾问|高级副总裁兼首席财务官|职务高级副总裁兼总顾问|董事会主席兼首席执行官|\
董事局主席兼首席执行官|副总法律顾问兼助理秘书|共同创办人兼首席运营官|副总裁、秘书兼财务主管|\
集团首席执行官兼董事长|高级副总裁兼总法律顾问|助理法律顾问兼助理秘书|法律总顾问兼董事会秘书|\
助理秘书兼高级专利经理|高级副总裁兼法律总顾问|副总裁兼总法律顾问助理|执行副总裁兼首席财务官|\
副总裁、法律总监兼秘书|集团副总裁兼总法律顾问|副总裁兼法务及公司秘书|副总裁兼助理法律总顾问|\
副总裁兼助理总法律顾问|副总裁兼首席法律执行官|总法律顾问兼董事会秘书|资深法律顾问兼助理秘书|\
助理秘书兼高级法律顾问|高级副总裁兼公司总顾问|财务副总裁兼首席财务官|助理总顾问兼高级副总裁|\
营销总监兼授权高级职员|职务董事长兼首席执行官|公司行政总裁兼执行董事|秘书兼(美洲)法务顾问|\
执行副总裁兼首席运营官|高级副总裁兼董事会秘书|首席行政官兼总法律顾问|副总裁兼董事会常务主席|\
公司副总裁兼法律总顾问|公司副主席及助理总顾问|首席执行官及首席财务官|销售、市场及技术副总裁|\
法律顾问及知识产权律师|董事会主席及首席执行官|执行副总裁及法律总顾问|首席商标及版权顾问律师|\
法律董事及法定公司秘书|商标及知识产权部总顾问|副总裁及高级总法律顾问|高级副总裁及总法律顾问|\
董事总经理及法律总顾问|知识产权及许可部总经理|高级副总裁及法律总顾问|副总裁及知识产权总顾问|\
执行合伙人及总法律顾问|人力资源及法律部副总裁|法定代表人及实际经营者|副教授及原告法定代表人|\
董事会主席和首席执行官|总法律顾问和董事会秘书|专利、商标和许可总经理|首席执行官和董事会主席|\
知识产权部和商标部主任|欧洲、中东和非洲区主管|秘书和道德与守法副总裁|法律顾问和知识产权主管|\
副总裁、法律和助理秘书|高级副总裁和公司总顾问|高级法律顾问和商标经理|董事会董事和授权签字人|\
全球知识产权和执行总监|董事会成员和授权签字人|知识产权和打击仿冒经理|首席法律和知识产权总监|\
销售和营销副总裁、董事|公共关系部和财务部经理|高级副总裁和总法律顾问|商务和法务部高级副总裁|\
总经理助理兼法律顾问|高级副总裁兼公司秘书|副总裁兼助理法律顾问|副总裁兼副总法律顾问|常务董事兼首席执行官|\
高级副总裁兼副总顾问|总顾问、副总裁兼秘书|助理总法律顾问兼秘书|董事局主席兼集团总裁|副总裁兼首席专利顾问|\
副董事长兼首席执行官|执行副总裁兼法务总监|总法律顾问兼公司秘书|董事会主席兼行政总裁|公司秘书兼法定代表人|\
公司秘书兼高级副总裁|集团副总裁兼法律顾问|副总裁兼首席法律顾问|副总裁兼福总法律顾问|执行副总裁兼代表董事|\
助理秘书兼助理总顾问|副总裁、总顾问兼秘书|高级副总裁兼助理秘书|副总裁兼知识产权顾问|副总裁兼高级商标顾问|\
法律顾问兼董事会秘书|集团总顾问兼公司秘书|财务总监兼高级副总裁|董事局主席兼行政总裁|副总裁兼高级法律顾问|\
执行副总裁兼法律顾问|首席运营官兼公司秘书|副总裁兼法律顾问助理|、副总经理兼执行董事|执行副总裁兼财务主管|\
特别副总裁兼助理秘书|副总裁兼诉讼事务主管|副总裁兼副法律总顾问|法律顾问兼法务副总裁|高级副总裁兼法律顾问|\
邯郸县兼庄乡退休干部|助理总兼首席照明顾问|助理总顾问兼助理秘书|兼副总经理、执行董事|董事兼授权市场总经理|\
副会长兼任首席执行官|副总经理兼法律总顾问|副董事长兼总律师助理|系公司执行董事兼经理|副总裁兼助理公司秘书|\
副总裁兼秘书兼总顾问|职务执行董事兼总经理|副总裁及法律总监助理|商标及版权总法律顾问|知识产权及认证部经理|\
法务及知识产权部部长|助理总顾问及助理秘书|知识产权及法务部职员|法务及知识产权部主管|副董事长及首席执行官|\
法务及知识产权总经理|财务及成本控制部主管|商标、设计及域名主管|专利商标及许可总经理|集团首席法务官及秘书|\
知识产权及战略部部长|执行董事及总务部部长|副总裁及法律总裁助理|特许销售总经理及董事|副总裁及知识产权顾问|\
创新及企业关系部主任|商标、市场及广告总监|首席法务官及公司秘书|副总裁及副总法律顾问|高级副总裁及法务总监|\
许可及商业事务副总裁|集团总顾问及公司秘书|商务及法务高级副总裁|执行副总裁及财务总监|知识产权及认证部董事|\
总法律顾问及助理秘书|财务及行政执行副总裁|营销及商业部法律总监|职务许可及合同代理人|法律及公司事务部主管|\
法律及知识产权部主管|公司总顾问及公司秘书|销售及医务执行副总裁|管理董事及授权签字人|高级工程师及全权代表|\
市场营销及通讯部经理|高级副总裁及公司秘书|律师、商标及版权经理|技术中心及服务部董事|副总裁和副总法律顾问|\
首席运营官和公司秘书|抚顺市天和地板厂业主|副总裁、总顾问和秘书|涂料和精加工部副总裁|法务总监和总经理秘书|\
商务和法律事务副总裁|法律和商业事务副总裁|商标和知识产权总顾问|系佳和购物中心经营者|执行董事和高级副总裁|\
高级副总裁和法务总监|法律总顾问和公司秘书|高级副总裁和副总顾问|董事主席和首席执行官|集团首席法务官和秘书|\
商标和版权总法律顾问|知识产权和战略副总裁|法定代表人兼总经理|总裁助理兼法律顾问|副总经理兼技术总监|\
副董事长兼总工程师|总经理兼法定代表人|总经理兼党委副书记|副总裁兼授权签字人|副总裁兼总顾问助理|\
董事长兼首席执行官|总经理兼首席执行官|首席执行官兼董事长|副总裁兼首席执行官|总裁兼副总法律顾问|\
副总裁兼法律总顾问|副总裁兼总法律顾问|副总裁兼助理总顾问|法律顾问兼助理秘书|执行副总裁兼总顾问|\
副总裁兼副专利顾问|法务部主任兼代理人|副总裁兼首席财务官|助理部长兼法律顾问|董事总经理兼总顾问|\
高级副总裁兼总顾问|副总顾问兼助理秘书|公司副总裁兼总顾问|商标经理兼授权代表|董事会主席兼总经理|\
执行副总裁兼董事长|总经理兼总法律顾问|法定代表兼授权代表|副总裁兼副法律顾问|副会长兼首席执行官|\
高级副总裁兼总经理|首席财务官兼副总裁|助理秘书兼高级律师|代表兼常务执行董事|副总裁秘书兼总顾问|\
董事长兼首席运营官|总裁兼行政总执行官|助理顾问兼助理秘书|集团副总裁兼总顾问|助理秘书兼高级顾问|\
法律顾问兼公司秘书|法律顾问兼代理律师|副董事长兼财务总监|职务董事长兼总经理|重庆兼善事务所律师|\
职务执行董事兼经理|总编辑兼常务副社长|副总裁兼总经理秘书|法务主管兼公司秘书|驻以色列经理兼董事|\
副社长兼首席执行官|总经理兼首席财务官|系执行董事兼总经理|公司顾问兼公司秘书|职务董事长兼CEO|\
首席商标兼副总顾问|副主席兼首席执行官|总法律顾问兼副总裁|财务总监兼公司秘书|行政总裁兼执行董事|\
副总裁兼法定代表人|副总裁兼代理总顾问|副总裁兼总专利顾问|资深副总裁兼总顾问|执行董事兼任总经理|\
副总裁及首席执行官|首席商标及版权顾问|董事长及首席执行官|助理秘书及高级顾问|高级副总裁及总顾问|\
副总顾问及助理秘书|执行副总裁及总顾问|授权代表及财务总监|商标及版权事务总监|副总裁及首席运营官|\
知识产权及许可总监|执行董事及机要秘书|执行副及亚太区总裁|民事保护及诉讼总监|秘书部长及法务总监|\
法务及合规部副总裁|首席运营官及总顾问|董事总经理及总顾问|助理部长及高级顾问|财务总监及公司秘书|\
法律及公司事务经理|副总裁及总税务顾问|副总经理及财务主管|清算组成员及负责人|国际销售和服务经理|\
内部管理和审计经理|技术开发和保护经理|商标经理和助理秘书|销售和市场部副总裁|商标和产品法律顾问|\
商标和版权法律顾问|资深副总裁和总顾问|首席行政官和代理人|副总裁和总法律顾问|业务发展和执行董事|\
法律顾问和知产主管|首席执行官和董事长|知识产权部长和董事|法务和知识产权部长|集团顾问和法律顾问|\
广告和品牌管理顾问|财务总监和销售经理|执行副总和财务总监|商标和版权事务总监|董事会成员和代理人|\
实际控制人、总经理|法人代表兼总经理|会计兼办公室主任|董事长兼党委书记|总经理兼总工程师|董事长兼总工程师|\
销售部经理兼厂长|副总经理兼负责人|执行董事兼总经理|股东兼实际经营者|总法律顾问兼秘书|总裁兼首席执行官|\
主席兼首席执行官|董事兼总法律顾问|首席执行官兼总裁|高级副总裁兼秘书|秘书兼法律总顾问|副总裁兼助理秘书|\
副总裁兼审计主任|副总裁兼财务主管|会主席兼执行董事|执行副总裁兼秘书|总经理兼执行董事|副总裁兼法务主管|\
副总裁兼法律顾问|总裁兼首席运营官|副董事长兼总顾问|副总裁兼副总顾问|副总裁兼商标律师|副会长兼代表理事|\
法律总顾问兼秘书|总经理兼党委书记|董事长兼执行总裁|律师兼法律总顾问|董事兼首席执行官|秘书兼总法律顾问|\
总顾问兼公司秘书|副总裁兼专利律师|代表理事兼副会长|副总裁兼专利顾问|代理董事兼总经理|副总裁兼公司秘书|\
公司副总裁兼董事|公司董事兼总经理|法务副总裁兼秘书|代表董事兼总经理|社长兼首席执行官|秘书兼商务副总裁|\
副总裁兼秘书助理|副董事长兼总经理|秘书兼助理总顾问|副总裁兼法律总监|董事会成员兼秘书|副总裁兼首席顾问|\
总裁兼首席财务官|公司副总裁兼秘书|首席法律官兼秘书|集团总裁兼董事长|副主席兼首席顾问|职务董事兼总经理|\
副总裁兼法务总监|首席法务官兼秘书|法律部主任兼董事|副总裁兼商标顾问|秘书兼首席财务官|首席商标兼总顾问|\
高级副总裁兼总监|首席执行官兼董事|公司兼职工作人员|总裁兼董事会主席|首席财务官兼董事|首席财务官兼秘书|\
董事长兼研发总监|首席执行官兼社长|职务总经理兼董事|总裁及首席执行官|专利及商标代理人|执行副总裁及秘书|\
副总裁及法律顾问|总经理及授权代表|副总裁以及总顾问|副社长及代表董事|副总裁及助理秘书|总顾问及公司秘书|\
董事及授权签字人|首席执行官及董事|首席法务官及秘书|替任董事及副总裁|公司董事及总经理|执行董事及总经理|\
财务及运营负责人|销售及营销副总裁|专利代理人及律师|法定代表人及股东|专利代理人及职员|副总裁及法务总监|\
董事会主席及总裁|秘书及总法律顾问|副总裁及公司秘书|会主席及常务董事|秘书及首席财务长|总顾问和公司秘书|\
代理人和授权官员|主席和首席执行官|公司董事和总经理|商标和著作权总监|执行董事和总经理|总经理和法定代表|\
法律和税务部总监|董事和总法律顾问|首席执行官和总裁|精益眼镜店的员工|常务董事和代理人|董事和共同代理人|\
董事长兼总经理|执行董事兼经理|前台兼行政助理|股东兼副总经理|董事兼副总经理|法务兼信息专员|副总经理兼监事|\
副总裁兼总顾问|厂长兼技术总监|主席兼行政总裁|董事兼高级顾问|秘书兼财务主管|律师兼授权官员|总经理兼董事长|\
代理经理兼董事|董事兼助理秘书|董事长兼CEO|董事兼高级律师|董事兼执行秘书|合伙人兼总经理|副总裁兼总经理|\
法务兼商务总监|副社长兼总编辑|社长兼执行董事|CEO兼董事长|秘书兼法务总监|公司顾问兼秘书|法律顾问兼秘书|\
公司律师兼董事|顾问兼常务董事|代表董事兼总裁|秘书兼法律总监|主管兼助理秘书|董事兼财务经理|代表董事兼社长|\
董事长兼执行官|董事兼法务主管|财务主管兼秘书|经理兼执行董事|执行董事兼院长|总裁兼执行董事|所有人兼总经理|\
副长兼常务董事|唯一董事兼总裁|CE0兼董事长|党组书记兼局长|财务及行政主管|商标及版权主管|专利及许可部长|\
助理总法律顾问|公司顾问及秘书|市场及销售总监|副总裁及总经理|副总裁及总顾问|行政及财务总监|营销及服务总监|\
律师及律师助理|律师及实习律师|商标及版权经理|董事长及总经理|管理及销售经理|经理及公司秘书|总经理及副总裁|\
商标及外观顾问|资深顾问及律师|总经理及董事长|执行董事及经理|销售及市场经理|商标及设计法务|亿家酒店经营者|\
人事及财务主管|商标和外观主管|董事和授权代表|商标和版权顾问|财务和审计总监|工业和商务总监|行政和审计总监|\
代表董事和社长|副总裁和总经理|行政和财务总监|董事和法律顾问|商标和外观顾问|商标和版权经理|秘书和法务总监|\
董事和公司秘书|科学和运营总监|主席和执行总裁|执行总裁和董事|法律顾问和秘书|专利和商标专员|精益眼镜店员工|\
总经理和董事长|董事兼总经理|股东兼总经理|副总裁兼秘书|总经理兼董事|董事长兼厂长|董事长兼总裁|董事长兼社长|\
合伙人兼经理|长兼常务董事|总裁兼董事长|董事兼副总裁|长兼专务董事|社长兼总编辑|董事长兼校长|社长兼总经理|\
总顾问兼秘书|长兼代表董事|总裁兼CEO|副总裁兼董事|董事长兼经理|董事兼副主席|董事及总经理|董事长及总裁|\
董事及副总裁|总顾问及秘书|副总裁及秘书|个体户经营者|社长及总编辑|总经理及董事|律师及其助理|股东及负责人|\
销售部经营者|副主席和总裁|总裁和总经理|销售中心业主|信息中心主任|董事兼经理|个体工商户|兼法律顾问|股东兼员工|\
会长兼社长|董事兼秘书|董事兼总裁|主管兼秘书|总裁兼秘书|总裁兼董事|社长兼主编|社长兼总编|书记兼院长|及常务董事|\
董事及经理|董事及总裁|董事及秘书|主席及董事|个体经营者|法定代表人|律师及助理|主管工程师|总裁及董事|主管和董事|\
运营部经理|执行副总裁|总裁和秘书|总经理助理|仓库经营者|实际经营者|法制股股长|办公室主任|供销部长|商标评审员|\
助理总法律顾问|法务主管|法务副总裁|法务总监|法务经理|法务职员|法务董事|法务部长|\
法律事务部主管|法律事务部副总裁|法律事务部总监|法律事务部经理|法律事务部职员|法律事务部董事|法律事务部部长)$" | zcb-gz | /zcb_gz-1.1.tar.gz/zcb_gz-1.1/zcb_gz/main_conf.py | main_conf.py |
class Person:
    """One natural/legal person mentioned in a judgment document.

    All fields default to empty strings; the extractor fills them in as
    it walks the document.
    """

    def __init__(self):
        """Initialize every extracted field to an empty value."""
        self.name_ = ""                # full name
        self.usedname_ = ""            # former name
        self.role_ = ""                # role label
        self.roleid_ = ""              # 1=plaintiff 2=defendant 3=third party
        self.identity_ = ""            # 1=party 2=legal rep 3=litigation rep 4=authorized rep 5=other
        self.firrole_ = ""             # first-instance role
        self.secrole_ = ""             # second-instance role
        self.nation_ = ""              # ethnic group
        self.sex_ = ""                 # sex
        self.birth_ = ""               # date of birth
        self.b_year_ = ""
        self.b_month_ = ""
        self.b_day_ = ""
        self.address_ = ""             # residence address
        self.registered_address_ = ""  # registered address
        self.business_address_ = ""    # business address
        self.country_ = ""             # nationality
        self.company_ = ""             # employer / company
        self.title_ = ""               # job title
        self.other_ = ""               # anything else
        self.common_ = ''              # shared-proxy marker (1-10 | 共同)

    def __repr__(self):
        """Compact 'role,name,birth' debug form."""
        return '%s,%s,%s' % (self.role_, self.name_, self.birth_)

    def to_csv(self):
        """Serialize the key fields as comma-joined 'key=value' pairs."""
        return ",".join((
            "name=%s" % self.name_,
            "sexy=%s" % self.sex_,
            "birth=%s" % self.birth_,
            "address=%s" % self.address_,
            "country=%s" % self.country_,
            "company=%s" % self.company_,
            "job_title=%s" % self.title_,
        ))

    def to_list(self):
        """Return the key fields as a plain list, matching to_csv() order."""
        return [self.name_, self.sex_, self.birth_, self.address_,
                self.country_, self.company_, self.title_]
class RelativePerson:
    """A party bundled with the people attached to them.

    Wraps the party's own ``Person`` plus their litigation proxies and
    legal representatives.
    """

    def __init__(self):
        """Create an empty party with no attached people."""
        self.person_ = Person()          # the party themselves
        self.proxy_ = []                 # litigation proxies
        self.legal_representative_ = []  # legal representatives
        self.relative_ = []              # new style: proxies and legal reps together
        self.previous_id_ = ""

    def __repr__(self):
        """Multi-line debug dump: party, proxies, legal representatives."""
        sections = [
            '<当事人 % r>' % self.person_.name_,
            '<代理人 % r>' % self.proxy_,
            '<法人代表 % r>' % self.legal_representative_,
        ]
        return '\n'.join(sections)

    def to_csv(self):
        """Flatten to a '-'-joined record: party info, rep names, proxy triples."""
        fields = [self.person_.name_, self.person_.address_,
                  self.person_.country_]
        fields.extend(rep.name_ for rep in self.legal_representative_)
        for proxy in self.proxy_:
            fields.extend((proxy.name_, proxy.company_, proxy.title_))
        return "-".join(fields)
class LawEntity:
    """Structured representation of one court judgment document.

    Collects header metadata (court, case number, document type), the
    parties and their representatives, the bench, and the extracted
    judgment results.
    """

    def __init__(self):
        """Initialize every extracted field to an empty value."""
        self.id_ = "-1"
        self.title_ = ''                 # document title
        self.court_ = ""                 # trial court
        self.court_level_ = ""           # court level
        self.area_ = ""                  # case region
        self.is_foreign_ = False         # foreign-related case flag
        self.year_to_accept_ = None      # filing year
        self.type_ = ""                  # document type
        self.nature_ = ""                # case nature (civil / administrative ...)
        self.number_ = ""                # case number
        self.start_time_ = ""            # case start time
        self.level_ = ""                 # litigation level (一审/二审/三审)
        self.parent_ = ""                # previous-instance case number
        self.judge_time_ = ""            # judgment time
        self.judge_results_ = []         # judgment results
        self.reception_time_ = ''        # acceptance time
        self.court_time_ = ''            # hearing time
        self.trademark_id_ = ""          # trademark review decision number
        self.patent_id_ = ""             # patent decision number
        self.yuangao_ = []               # plaintiffs
        self.beigao_ = []                # defendants
        self.third_people_ = []          # third parties
        self.party_ = []                 # new style: all parties combined
        self.presiding_judge_ = []       # presiding judges
        self.judges_ = []                # judges
        self.juror_ = []                 # jurors
        self.clerk_ = []                 # court clerks
        self.judge_assistant_ = []       # judge assistants
        self.pre_judge_ = []             # previous-instance case numbers
        self.pre_law_type_ = []          # previous-instance document types
        self.next_judge_ = []
        self.cause_ = ""                 # cause of action
        self.law_ = set()                # cited statutes
        self.patent_number_ = set()      # patent numbers
        self.review_ = set()             # patent re-examination doc numbers
        self.trademark_number_ = set()   # trademark numbers
        self.trademark_review_ = set()   # trademark review doc numbers
        self.appeal_money_ = ''
        self.judge_money_ = ''
        self.accuser_ = ''
        self.Judement_ = ''              # NOTE(review): misspelling kept for compatibility

    def __repr__(self):
        """Human-readable dump of all extracted fields (for debugging)."""
        yuangao = []
        yuangao_rep = []
        yuangao_proxy = []
        for yg in self.yuangao_:
            yuangao.append(yg.person_.name_)
            for rep in yg.legal_representative_:
                yuangao_rep.append(rep.name_)
            for proxy in yg.proxy_:
                yuangao_proxy.append(proxy.name_)
        beigao = []
        beigao_rep = []
        beigao_proxy = []
        for bg in self.beigao_:
            beigao.append(bg.person_.name_)
            for rep in bg.legal_representative_:
                beigao_rep.append(rep.name_)
            for proxy in bg.proxy_:
                beigao_proxy.append(proxy.name_)
        third_people = []
        third_people_rep = []
        third_people_proxy = []
        for tp in self.third_people_:
            third_people.append(tp.person_.name_)
            for rep in tp.legal_representative_:
                third_people_rep.append(rep.name_)
            for proxy in tp.proxy_:
                third_people_proxy.append(proxy.name_)
        presiding_judge = []
        for pj in self.presiding_judge_:
            presiding_judge.append(pj.name_)
        judges = []
        for jud in self.judges_:
            judges.append(jud.name_)
        juror = []
        for jur in self.juror_:
            juror.append(jur.name_)
        clerk = []
        for cle in self.clerk_:
            # BUGFIX: clerks were previously appended to `juror`, so the
            # 书记员 line below always printed empty.
            clerk.append(cle.name_)
        judge_assistant = []
        for ja in self.judge_assistant_:
            judge_assistant.append(ja.name_)
        return '<文书标题 % r>' % self.title_ + '\n' \
            '<审理法院 % r>' % self.court_ + '\n' \
            '<法院级别 % r>' % self.court_level_ + '\n' \
            '<案件地区 % r>' % self.area_ + '\n' \
            '<是否涉外 % r>' % self.is_foreign_ + '\n' \
            '<立案年度 % r>' % self.year_to_accept_ + '\n' \
            '<文书类型 % r>' % self.type_ + '\n' \
            '<案件性质 % r>' % self.nature_ + '\n' \
            '<案件号 % r>' % self.number_ + '\n' \
            '<案件开始时间 % r>' % self.start_time_ + '\n' \
            '<诉讼等级 % r>' % self.level_ + '\n' \
            '<上一级诉讼号 % r>' % self.parent_ + '\n' \
            '<判决时间 % r>' % self.judge_time_ + '\n' \
            '<判决结果 % r>' % self.judge_results_ + '\n' \
            '<受理时间 % r>' % self.reception_time_ + '\n' \
            '<开庭时间 % r>' % self.court_time_ + '\n' \
            '<商标号: % r>' % self.trademark_id_ + '\n' \
            '<专利号 % r>' % self.patent_id_ + '\n' \
            '<审判长 % r>' % presiding_judge + '\n' \
            '<审判员 % r>' % judges + '\n' \
            '<陪审员 % r>' % juror + '\n' \
            '<书记员 % r>' % clerk + '\n' \
            '<法官助理 % r>' % judge_assistant + '\n' \
            '<案由 %r>' % self.cause_ + '\n' \
            '<前一级案号 %r>' % self.pre_judge_ + '\n' \
            '<前一级类型 %r>' % self.pre_law_type_ + '\n' \
            '<法条 %r>' % self.law_ + '\n' \
            '<专利号 %r>' % self.patent_number_ + '\n' \
            '<专利复审文书号 %r>' % self.review_ + '\n' \
            '<商标号 %r>' % self.trademark_number_ + '\n' \
            '<商标评审文书号 %r>' % self.trademark_review_

    def to_csv(self):
        """Serialize the entity into one '#'-separated record line."""
        tmp = []
        tmp.append(self.id_)  # document id 0
        tmp.append(self.number_)  # case number 1
        tmp.append(self.court_)  # court name 2
        tmp.append(self.court_level_)  # court level 3
        tmp.append(self.area_)  # case region 4
        tmp.append(self.nature_)  # case nature 5 (administrative, civil)
        tmp.append(self.type_)  # document type 6 (judgment, mediation)
        # Filing year 7. BUGFIX: this can still be None, which used to make
        # the final "#".join() raise TypeError; coerce to a string.
        tmp.append("" if self.year_to_accept_ is None
                   else str(self.year_to_accept_))
        tmp.append(self.level_)  # litigation level 8
        tmp.append("1" if self.is_foreign_ else "0")  # foreign-related 9
        tmp.append("#".join(self.to_db(self.yuangao_)))  # plaintiffs 10-16
        tmp.append("#".join(self.to_db(self.beigao_)))  # defendants 17-23
        tmp.append("#".join(self.to_db(self.third_people_)))  # third parties 24-30
        faguans = [p.name_ for p in self.presiding_judge_]  # presiding judges 31
        shenpanyuan = [p.name_ for p in self.judges_]  # judges 32
        peishenyuan = [p.name_ for p in self.juror_]  # jurors 33
        shujiyuan = [p.name_ for p in self.clerk_]  # clerks 34
        faguanzhuli = [p.name_ for p in self.judge_assistant_]  # assistants 35
        tmp.append(",".join(faguans))
        tmp.append(",".join(shenpanyuan))
        tmp.append(",".join(peishenyuan))
        tmp.append(",".join(shujiyuan))
        tmp.append(",".join(faguanzhuli))
        # tmp.append(self.trademark_id_)  # trademark number 36
        # tmp.append(self.patent_id_)  # patent number 37
        judge_result = "" if len(self.judge_results_) == 0\
            else self.judge_results_[
                len(self.judge_results_) - 1]
        tmp.append(judge_result)  # judgment result 38
        yishenanhao = ""
        ershenanhao = ""
        sanshenanhao = ""
        yishenjieguo = ""
        ershenjieguo = ""
        sanshenjieguo = ""
        if self.level_ == "二审":
            ershenanhao = self.number_
            ershenjieguo = judge_result
            yishenjieguo = "" if len(self.judge_results_) < 2 else "~".join(
                self.judge_results_[:-1])
            yishenanhao = "" if len(
                self.pre_judge_) == 0 else self.pre_judge_[0]
        elif self.level_ == "一审":
            yishenanhao = self.number_
            yishenjieguo = judge_result
        elif self.level_ == "三审":
            sanshenanhao = self.number_
            yishenanhao = "" if len(
                self.pre_judge_) != 2 else self.pre_judge_[0]
            ershenanhao = "" if len(self.pre_judge_) == 0 else self.pre_judge_[
                len(self.pre_judge_) - 1]
            sanshenjieguo = judge_result
            ershenjieguo = "" if len(
                self.judge_results_) < 2 else self.judge_results_[-2]
            yishenjieguo = "" if len(self.judge_results_) < 3 else "~".join(
                self.judge_results_[:-1])
        tmp.append(yishenanhao)  # first-instance case number 39
        tmp.append(ershenanhao)  # second-instance case number 40
        # NOTE(review): judgment time is emitted between the second- and
        # third-instance case numbers; kept as-is for downstream compatibility.
        tmp.append(self.judge_time_)
        tmp.append(sanshenanhao)  # third-instance case number
        tmp.append(yishenjieguo)  # first-instance result
        tmp.append(ershenjieguo)  # second-instance result
        tmp.append(sanshenjieguo)  # third-instance result
        caijueshuhao = ""
        if self.trademark_id_ != "":
            caijueshuhao = self.trademark_id_
        elif self.patent_id_ != "":
            caijueshuhao = self.patent_id_
        tmp.append(caijueshuhao)
        return "#".join(tmp)

    def to_db(self, parties):
        """Flatten a list of RelativePerson into 7 comma-joined field strings.

        NOTE: when a party has no proxies, an empty placeholder Person is
        appended to ``party.proxy_`` — the input is mutated — so the
        proxy columns stay aligned with the party columns.
        """
        names = []
        address = []
        countries = []
        proxies_name = []
        proxies_company = []
        proxies_title = []
        legal_representatives = []
        for party in parties:
            names.append(party.person_.name_)
            address.append(party.person_.address_)
            countries.append(party.person_.country_)
            for p in party.legal_representative_:
                legal_representatives.append(p.name_)
            if len(party.legal_representative_) == 0:
                legal_representatives.append("")
            if len(party.proxy_) == 0:
                p = Person()
                party.proxy_.append(p)
            for p in party.proxy_:
                proxies_name.append(p.name_)
                proxies_company.append(p.company_)
                proxies_title.append(p.title_)
        return [
            self.to_combination_str(names), self.to_combination_str(address),
            self.to_combination_str(countries),
            self.to_combination_str(legal_representatives),
            self.to_combination_str(proxies_name),
            self.to_combination_str(proxies_company),
            self.to_combination_str(proxies_title)]

    def to_combination_str(self, data_list):
        """Join a list of strings with commas."""
        return ",".join(data_list)

    def to_json(self):
        """Render the entity as a human-readable multi-line string.

        (Despite the name this is plain text, not JSON.)
        """
        yuangao = [p.to_csv() for p in self.yuangao_]
        beigao = [p.to_csv() for p in self.beigao_]
        third_people = [p.to_csv() for p in self.third_people_]
        tmp = "审理法院 : %s\n" % self.court_
        tmp += "法院级别 : %s\n" % self.court_level_
        tmp += "是否涉外 : %s\n" % ("是" if self.is_foreign_ else "否")
        tmp += "案件号 : %s\n" % self.number_
        tmp += "立案年度 : %s\n" % self.year_to_accept_
        tmp += "案件地区 : %s\n" % self.area_
        tmp += "案件性质 : %s\n" % self.nature_
        tmp += "文书类型 : %s\n" % self.type_
        tmp += "案件开始时间 : %s\n" % self.start_time_
        tmp += "诉讼等级 : %s\n" % self.level_
        tmp += "上一级诉讼号 : %s\n" % (",".join(self.pre_judge_))
        tmp += "判决时间 : %s\n" % self.judge_time_
        # BUGFIX: the original read `self.judge_result_`, an attribute that
        # does not exist (AttributeError); join the collected results instead.
        tmp += "裁决结果 : %s\n" % "~".join(self.judge_results_)
        tmp += "原告 : %s\n" % "~".join(yuangao)
        tmp += "被告 : %s\n" % "~".join(beigao)
        tmp += "第三人 : %s\n" % "~".join(third_people)
        tmp += "审判长 : %s\n" % "~".join([judge.to_csv()
                                        for judge in self.presiding_judge_])
        tmp += "审判员 : %s\n" % "~".join([judge.to_csv()
                                        for judge in self.judges_])
        tmp += "陪审员 : %s\n" % "~".join([judge.to_csv()
                                        for judge in self.juror_])
        tmp += "书记员 : %s\n" % "~".join([judge.to_csv()
                                        for judge in self.clerk_])
        # BUGFIX: judge_assistant_ is a list, not a Person; join its
        # members like the other bench lists instead of calling .to_csv()
        # on the list itself.
        tmp += "法官助理 : %s\n" % "~".join([judge.to_csv()
                                         for judge in self.judge_assistant_])
        tmp += "审理时间 : %s\n" % self.judge_time_
        return tmp
import collections
import jieba
from .entity import LawEntity
from .main_tool import clean, get_next_line2, load_dict
from .main_parse import process_title, process_type, process_number, \
extract_relative_person, parse_cause, parse_content
from .ronghui.Money.AllMoney import Get_Appeal_Judge_Money
def main_extract(file_path):
    """Top-level entry: clean the document at *file_path*, then extract it.

    Returns a (cleaned_text, law_entity) tuple.
    """
    cleaned = clean(file_path)
    entity = extract_from_rawdata(cleaned)
    return cleaned, entity
def extract_from_rawdata(rawdata):
    """Secondary entry: extract a LawEntity from already-cleaned raw text."""
    return extract_main(get_next_line2(rawdata))
def extract_main(data_generator):
    """Run the extraction state machine over a (line, raw_line) generator.

    Walks the document section by section (title, type, case number,
    parties, body) and returns the populated LawEntity.
    """
    load_dict()
    # Load custom segmentation entries (court names) into jieba.
    # NOTE(review): relative path — resolves against the process CWD, not
    # this module's directory; confirm callers run from the project root.
    jieba.load_userdict("config/self_dict.txt")
    jieba.initialize()
    law_entity = LawEntity()
    relative_person_list = collections.OrderedDict()
    line, raw_line = next(data_generator)
    # A first line ending in 书 is the document title (e.g. 判决书).
    if line[-1:] == '书':
        law_entity.title_ = line  # document title
        line, raw_line = next(data_generator)
    # Court name, court level, case region, foreign-related flag.
    line, raw_lines = process_title(line, law_entity, data_generator)
    # Document type, case nature.
    line, raw_lines = process_type(line, law_entity, data_generator)
    # Case number.
    line, raw_lines = process_number(line, law_entity, data_generator)
    # PARTIES = ["原告", "被告", "上诉人", "被上诉人", "第三人"]
    line, raw_line = extract_relative_person(
        line, law_entity, data_generator, relative_person_list)
    # for pp in relative_person_list:
    #     print(relative_person_list[pp])
    #     for rp in relative_person_list[pp]:
    #         print(rp.relative_)
    # --- Document body ---
    # Cause of action.
    parse_cause(line, law_entity)
    # Patent numbers, patent re-examination doc numbers, trademark numbers,
    # trademark review doc numbers, summary, judgment results.
    patentNumber = set()
    trademarkNumber = set()
    line, raw_line = parse_content(
        line, law_entity, data_generator, patentNumber, trademarkNumber)
    # Fold every collected party list into the entity's combined party list.
    for role, role_list in relative_person_list.items():
        law_entity.party_.extend(role_list)
    # print(law_entity.judge_results_)
    return law_entity
if __name__ == '__main__':
    # Ad-hoc manual test entry point.
    filePath = '/home/jokers/python/projects/_file/广知/5.txt'  # hard-coded local sample
    rawdata, law_entity = main_extract(filePath)
    print(law_entity)
    Get_Appeal_Judge_Money(law_entity, rawdata)
    # NOTE(review): LawEntity defines `judge_money_` / `appeal_money_`
    # (with trailing underscores); the names below only resolve if
    # Get_Appeal_Judge_Money sets plain `judge_money` / `appeal_money`
    # attributes — confirm, otherwise this raises AttributeError.
    print(law_entity.judge_money)
    print(law_entity.appeal_money)
from .main_conf import PUNCTUATION, PUNCTUATION_ALL, NUMBERS, YISHEN_MIN_PAN,\
ERSHEN_MIN_PAN, YISHEN_MIN_CAI, ERSHEN_XING_PAN, ERSHEN_CAI
import jieba
from jieba import posseg
import codecs
from .dicts import NationalDict, AddressDict
import re
# ABSPATH = os.path.abspath(__file__)
# ABSPATH = os.path.dirname(ABSPATH)+"/"
# NOTE(review): all config paths below are relative and resolve against the
# process CWD — confirm callers run from the project root.
# Ethnic groups.
nationalDict = NationalDict("national", "config/national.txt")
# Countries.
countryDict = NationalDict("country", "config/country.txt")
# Primary people's courts.
primaryCourtDict = NationalDict("primaryCourt", "config/primary_courts.txt")
# Intermediate people's courts.
middleCourtDict = NationalDict("middleCourt", "config/middle_courts.txt")
# High people's courts.
advancedCourtDict = NationalDict("advancedCourt", "config/advanced_courts.txt")
# Keywords that introduce a residence/address description.
addressKeyWordDict = NationalDict(
    "address_key_word", "config/address_key_word.txt")
# City -> province mapping.
cityToProvinceDict = AddressDict("city_province", "config/city_province.txt")
# County -> province mapping.
townToProvinceDict = AddressDict("town_province", "config/town_province.txt")
def load_dict():
    """Load every lookup dictionary used by the extractor into memory."""
    dictionaries = (
        nationalDict,
        countryDict,
        primaryCourtDict,
        middleCourtDict,
        advancedCourtDict,
        addressKeyWordDict,
        cityToProvinceDict,
        townToProvinceDict,
    )
    for dictionary in dictionaries:
        dictionary.load_dict()
def seg_words(text):
    """Tokenize *text* with jieba and drop any trailing punctuation tokens.

    input : text string
    output : list of word tokens
    """
    tokens = list(jieba.cut(text, cut_all=False))
    while tokens and tokens[-1] in PUNCTUATION:
        tokens.pop()
    return tokens
def seg_words_with_pos(text):
    """Tokenize *text* with POS tags, dropping trailing punctuation tokens.

    input : text string
    output : list of (word, flag) pair objects from jieba.posseg
    """
    tokens = list(posseg.cut(text))
    while tokens and tokens[-1].word in PUNCTUATION:
        tokens.pop()
    return tokens
def get_next_line(file_path):
    """Yield (processed_line, raw_line) pairs from a local file.

    A final ('EOF', '') pair marks the end of the document. The file is
    decoded as GBK, falling back to UTF-8 for the whole file.

    FIXES relative to the original:
    - ``LINE_COUNT`` was never defined in this module, so the old
      ``global LINE_COUNT; LINE_COUNT += 1`` raised NameError on the first
      increment; the counter is now created on first use.
    - the UTF-8 fallback used to reopen the file mid-iteration and restart
      from position 0, duplicating already-yielded lines; the encoding is
      now decided once for the whole file.
    """
    global LINE_COUNT
    try:
        with codecs.open(file_path, "r", 'GBK') as f:
            raw_lines = f.readlines()
    except UnicodeDecodeError:
        with codecs.open(file_path, "r", 'UTF-8') as f:
            raw_lines = f.readlines()
    for raw_line in raw_lines:
        line = pre_process(raw_line)
        if not line:
            continue
        yield line, raw_line
        # Count only the lines actually consumed by the caller.
        LINE_COUNT = globals().get("LINE_COUNT", 0) + 1
    yield 'EOF', ""
def get_next_line2(raw):
    """Yield (processed_line, raw_line) pairs from in-memory text.

    Blank lines are skipped; a final ('EOF', '') pair marks the end.
    """
    for raw_line in raw.split('\n'):
        if not raw_line.strip():
            continue
        yield pre_process(raw_line), raw_line
    yield 'EOF', ""
def pre_process(line):
    """Preprocess a text line: remove all whitespace, including full-width
    ideographic spaces (e.g. 审判长\u3000某某)."""
    return re.sub(r"[ \u3000\t\s]*", "", line)
def startWith(key_words, line):
    """Return the first keyword the line's first clause starts with, else ''."""
    tokens = segmentSentence(line)
    if not tokens:
        return ''
    first_clause = tokens[0]
    for word in key_words:
        if first_clause.startswith(word):
            return word
    return ''
def segmentSentence(line):
    """Split a line into clauses at punctuation, keeping text inside
    (full- or half-width) parentheses as one piece."""
    clauses = []
    buf = ""
    in_paren = False
    for ch in line:
        if ch in PUNCTUATION_ALL and not in_paren:
            clauses.append(buf)
            buf = ""
            continue
        if ch in ('(', '('):
            in_paren = True
        elif ch in (')', ')'):
            in_paren = False
        buf += str(ch)
    if len(buf) > 0:
        clauses.append(buf)
    return clauses
def contains(key_words, line):
    """Return True if the line contains any of the given keywords."""
    return any(word in line for word in key_words)
def fixname(person):
    """Strip parenthesized alias/annotation suffixes from a party's name
    (以下简称/组织机构代码/原名/... clauses), in place."""
    cleaned = person.name_
    for pattern in ('\(以下简称[^\)]*?\)',
                    '\(组织机构代码[^\)]*?\)',
                    '\(下称[^\)]*?\)',
                    '\(统一社会信用代码[^\)]*?\)',
                    '\(反诉[^\)]*?\)',
                    '[\[\(]原名[^\)]*?[\)\]]',
                    '\(系[^\)]*?经营者\)',
                    '\(系[^\)]*?业主\)'):
        cleaned = re.sub(pattern, '', cleaned)
    person.name_ = cleaned
def start_with_number(test):
    """Return True if ANY character of the text is in NUMBERS.

    (Despite the name, this does not check only the first character.)
    """
    return any(ch in NUMBERS for ch in test)
def is_address(text):
    """Return True if the text contains a place-name token (POS tag "ns").

    BUG FIX: the original `for ... return True / else return False` returned
    after inspecting only the FIRST token, so any address not beginning
    with a place name was rejected; now every token is scanned.
    """
    text = text.strip()
    if not text:
        return False
    return any(word.flag == "ns" for word in posseg.cut(text))
def add_labels(raw_txt, labels):
    """Wrap recognized text in a <span> carrying its labels for UI display.

    e.g. the label "court" turns 北京市人民法院 into
    <span class="court">北京市人民法院</span>. Returns raw_txt unchanged
    when there are no labels.
    """
    if not labels:
        return raw_txt
    return "<span class=\"%s\">%s</span>" % (" ".join(labels), raw_txt)
def rexMatch(pattern, str):
    """Search str with pattern; return the match object, or [] when absent.

    (The falsy [] sentinel is kept for compatibility with callers that
    only truth-test the result.)
    """
    found = re.compile(pattern).search(str)
    return found if found else []
def rexMatchAll(pattern, str, index):
    """Return group `index` of the FIRST findall tuple, or '' when no match."""
    found = re.compile(pattern).findall(str)
    if not found:
        return ""
    return found[0][index]
def rexMatchAll3(pattern, str):
    """Join the groups of the FIRST findall tuple into one string; '' if none."""
    found = re.compile(pattern).findall(str)
    if not found:
        return ""
    return "".join(list(found[0]))
def rexMatchAll2(pattern, str):
    """Return a list with each findall tuple's groups joined; [] if none."""
    found = re.compile(pattern).findall(str)
    if not found:
        return []
    return ["".join(list(groups)) for groups in found]
def rexMatchAllYiShenAnHao(pattern, str):
    """Extract first-instance docket numbers and their document types.

    For each findall tuple, the last two groups are the document-type pair
    and the remaining groups join into the docket number. Returns
    (numbers, types); ([], []) when nothing matches.
    """
    found = re.compile(pattern).findall(str)
    if not found:
        return [], []
    numbers = []
    types = []
    for groups in found:
        parts = list(groups)
        types.append(parts[-2:])
        numbers.append("".join(parts[0:-2]))
    return numbers, types
def format_yishen_panjue(case_type, law_type, judge_result):
    """Normalize a first-instance outcome string into a canonical label.

    Civil/administrative judgments and rulings (裁定) each use their own
    pattern tables; the raw judge_result is returned when nothing matches.
    """
    result = judge_result
    # print '一审结果 : %s' % judge_result
    if case_type == "民事" and "判决" in law_type:
        # NOTE(review): this default label has unbalanced parentheses
        # ("(部分)...请求)") - confirm the intended text.
        result = "(部分)支持原告诉讼请求)"
        match = rexMatch(YISHEN_MIN_PAN[0], judge_result)
        if match:
            result = YISHEN_MIN_PAN[1]
    elif case_type == "行政" and "判决" in law_type:
        if "维持" in judge_result:
            result = "维持行政裁决"
        elif "驳回" in judge_result:
            result = "驳回原告诉讼请求"
        elif "撤销" in judge_result:
            result = "撤销行政裁决"
    elif "裁定" in law_type:
        if "移送" in judge_result:
            result = "移送审理"
        elif "终结诉讼" in judge_result:
            result = "终结诉讼"
        elif "转为普通程序" in judge_result:
            result = "转为普通程序"
        else:
            for pattern in YISHEN_MIN_CAI:
                match = rexMatch(pattern[0], judge_result)
                if match:
                    result = pattern[1]
                    break
    return result
def format_ershen_panjue(case_type, law_type, judge_result):
    """Normalize a second-instance outcome string into a canonical label.

    Uses the ERSHEN_* pattern tables per case nature; falls back to a
    nature-specific default, or the raw judge_result when nothing applies.
    """
    # print '二审结果 : %s' % judge_result
    result = judge_result
    if case_type == "民事" and "判决" in law_type:
        result = "部分维持、部分撤销一审判决"
        for pattern in ERSHEN_MIN_PAN:
            match = rexMatch(pattern[0], judge_result)
            if match:
                result = pattern[1]
                break
    elif case_type == "行政" and "判决" in law_type:
        result = "撤销一审判决"
        for pattern in ERSHEN_XING_PAN:
            match = rexMatch(pattern[0], judge_result)
            if match:
                result = pattern[1]
                break
    elif "裁定" in law_type:
        if "移送" in judge_result:
            result = "移送审理"
        elif "终结诉讼" in judge_result:
            result = "终结诉讼"
        elif "中止诉讼" in judge_result:
            result = "中止诉讼"
        else:
            for pattern in ERSHEN_CAI:
                match = rexMatch(pattern[0], judge_result)
                if match:
                    result = pattern[1]
                    break
    return result
def clean(file_path):
    """Clean one judgment file and return its normalized text.

    Tries GBK, then the platform default, then UTF-16 encodings; strips
    whitespace, HTML tags, blank lines and known boilerplate; truncates
    after the last 书记员 (clerk) line.
    """
    print('清洗文书:%s' % file_path)
    rawdata = ''
    # with codecs.open(file_path, "r", 'GBK') as f:
    with open(file_path, "r", encoding='GBK') as f:
        try:
            rawdata = f.read()
        except UnicodeDecodeError:
            # f = codecs.open(file_path, "r", 'UTF-8')
            try:
                f = open(file_path, "r")
                rawdata = f.read()
            except UnicodeDecodeError:
                f = open(file_path, "r", encoding='UTF-16')
                rawdata = f.read()
    rawdata = rawdata.strip()
    if rawdata.startswith('<meta'):
        raise Exception("文件格式错误,<meta 开头")
    # Remove the judgment-summary preamble found in Beijing IP Court texts
    rawdata = re.sub('北京知识产权法院(.|\n)*本摘要并非判决之组成部分,不具有法律效力。', '', rawdata)
    rawdata = re.sub(r"[ \u3000\t]*", "", rawdata)
    # Truncate everything after the last clerk (书记员) line
    match = re.findall('书记员.*', rawdata)
    if len(match) > 0:
        rawdata = rawdata[:rawdata.rfind(match[-1]) + len(match[-1])]
    # Strip HTML tags
    regHtml = '["“]?<(html|meta|style|title|h1|h2|h3|h4|h5|link'
    regHtml += '|script|head|body|div).*?>["”]?'
    rawdata = re.sub(regHtml, '', rawdata)
    # Remove blank lines
    rawdata = re.sub('\n\s*\n', '\n', rawdata)
    # Fix first lines where the court name and the document type share a line
    rawdataArr = rawdata.split('\n')
    for i in range(len(rawdataArr)):
        if i > 3:
            break
        match = re.search(
            '(.*法 *院).*((行 *政|刑 *事|民 *事).*(判 *决 *书|裁 *定 *书))', rawdataArr[i])
        if match:
            court = match.group(1)
            court = re.sub(' *', '', court)
            txtType = match.group(2)
            txtType = re.sub(' *', '', txtType)
            replace = court + '\n' + txtType
            rawdata = re.sub(
                '(.*法 *院).*((行 *政|刑 *事|民 *事).*(判 *决 *书|裁 *定 *书))',
                replace, rawdata)
            break
    return rawdata
def clean_num(anhao):
    """Normalize a docket number: drop zero padding after docket keywords,
    unify brackets to full-width parentheses, collapse dashes, and wrap a
    bare leading year in parentheses."""
    anhao = anhao.strip()
    anhao = re.sub(
        '(第|字|执|申|初|再|终|恢|保|审|更|特)0+([\d一\--、\-]+号)', r'\1\2', anhao)
    for pattern, repl in (('[(\[【〔]', '('),
                          ('[)\]】〕]', ')'),
                          ('—+', '-')):
        anhao = re.sub(pattern, repl, anhao)
    if not anhao.startswith('('):
        anhao = re.sub('(\d{4})年?(.*)', r'(\1)\2', anhao)
    return anhao
from .main_tool import seg_words, seg_words_with_pos, contains, \
segmentSentence, fixname, rexMatchAllYiShenAnHao, \
start_with_number, rexMatch, startWith, clean_num
from .main_conf import ANHAO, PARTIES_BG, PARTIES_DSR, PARTIES_STOP, \
LEGAL, PARTIES, RELATIVES, ROLES, \
CHINESE_NUM, PARTIES_YG, PROXY, PRINCIPAL, OTHERS, \
NATION, POSITION, JUDGE_PATTERN, PATENT_NUMBER, \
TRADEMARK_NUMBER, SUMMARY, TRADEMARK_ID, PATENT_ID, \
END_JUDGE, YISHENANHAO, FOOTER_YEAR_MONTH_DATE, \
FOOTER_ROLE
from .entity import RelativePerson, Person
import re
import copy
import sre_constants
from .ronghui.PatentAndTrademark import patent_review, trademark_review
def process_title(line, law_entity, data_generator):
    """Parse the title section: court name, court level and region.

    Consumes lines from data_generator until a court name is found; sets
    law_entity.court_, court_level_, area_ and is_foreign_; returns the
    next (line, raw_line) pair.
    """
    if '中华人民共和国' == line:
        law_entity.is_foreign_ = True  # foreign-related case
        line, raw_line = next(data_generator)
    while law_entity.court_ == "":
        if line == "EOF":
            break
        words = seg_words(line)
        for w in words:
            if "法院" in w:
                law_entity.court_ = w
                break
        if law_entity.court_ == "":
            line, raw_line = next(data_generator)
            continue
        # and candidate_court in DICT["advanced_court"].content_:
        if '高级' in law_entity.court_:
            law_entity.court_level_ = '高级'
        # and candidate_court in DICT["middle_court"].content_:
        elif '中级' in law_entity.court_:
            law_entity.court_level_ = '中级'
        else:  # candidate_court in DICT["primary_court"].content_:
            law_entity.court_level_ = '基层'
        match = re.search('.*[省市区]', law_entity.court_)
        if match:
            law_entity.area_ = match.group()
        else:
            tokens = seg_words_with_pos(line)
            address = [token for token in tokens]
            for add in address:
                if add.flag == 'ns':  # POS tag 'ns' marks a place name
                    law_entity.area_ = add.word
                    break
    if "北京高级人民法院" in law_entity.court_:
        law_entity.area_ = "北京"  # hard-coded: this court name does not segment cleanly
    return next(data_generator)
def process_type(line, law_entity, data_generator):
    """Identify document type (判决书/裁定书/...) and case nature (民事/行政/...).

    Sets law_entity.nature_ and law_entity.type_; raises when the title
    line does not match; returns the next (line, raw_line) pair.
    """
    compact = re.sub(' *', '', line)
    title_match = re.search(
        '((民事|行政|刑事|执行)(附带民事)?)\w*(裁决书|判决书|裁定书|调解书)', compact)
    if not title_match:
        raise Exception('文件不规范,案件类型识别出错')
    law_entity.nature_ = title_match.group(1)
    law_entity.type_ = title_match.group(4)
    return next(data_generator)
def process_number(line, law_entity, data_generator):
    """Parse the case-number line: docket number, filing year, instance level.

    Sets law_entity.number_, year_to_accept_ and level_; raises when no
    docket number is found; returns the next (line, raw_line) pair.
    """
    line = line.replace(":", "")
    line = line.replace(":", "")
    match = re.search(ANHAO, line)
    if not match:
        raise Exception('文件不规范,案号识别出错')
    anhao = match.group()
    anhao = clean_num(anhao)
    law_entity.number_ = anhao
    law_entity.year_to_accept_ = match.group(2)
    # NOTE(review): '(.*).{1}' consumes the whole string, so `anhao`
    # becomes '' here and the 再审 regex below can never match; the level
    # is effectively decided by the 终/初 checks on the full line. Possibly
    # meant to keep only the trailing character - confirm intent.
    anhao = re.sub('(.*).{1}', '', anhao)
    level = "再审"
    if re.search('.*(再|抗|监|提|重|申).*', anhao):
        level = "再审"
    elif "终" in line:
        level = "二审"
    elif "初" in line:
        level = "一审"
    law_entity.level_ = level
    return next(data_generator)
def extract_relative_person(line, law_entity, data_generator,
                            relative_person_list):
    """Extract the litigants section (parties, representatives, agents).

    Populates relative_person_list, a dict mapping a party role (e.g. 原告)
    to a list of RelativePerson entries; stops at section-end markers and
    returns the first unconsumed (line, raw_line) pair.
    """
    relative_role = ''
    while True:
        if line == "EOF":
            break
        line = line.replace("委托诉讼代理人", "委托代理人").strip()
        line = line.replace("社团法人", "法定代表人").strip()
        if re.search(r'审理终结|审查终结|审理完毕|审查完毕', line):
            break
        if len(line) > 100 and contains(["审理", "不服"], line):
            break
        if contains(["案由", "纠纷"], line):
            break
        if len(line) > 200:
            break
        if relative_role in PARTIES_BG or relative_role in PARTIES_DSR:
            # NOTE(review): this break only leaves the PARTIES_STOP scan,
            # not the enclosing while loop - confirm whether the outer loop
            # was meant to stop here.
            for ps in PARTIES_STOP:
                if line.startswith(ps):
                    break
        p = parsePerson(line, law_entity.level_)
        for n in p.name_.split('、'):
            person = copy.deepcopy(p)
            person.name_ = n
            if person.company_ == 'PRE' or startWith(LEGAL, line):
                # 'PRE' apparently marks "of the preceding organisation":
                # inherit the name of the last party recorded for this role.
                try:
                    person.company_ = relative_person_list[relative_role][-1].\
                        person_.name_
                except Exception as e:
                    person.company_ = ''
            if person.role_ in PARTIES:
                relative_role = person.role_
                person_relative = RelativePerson()
                person_relative.person_ = person
                # relative_person_list = {'原告': [...]}
                if person.role_ not in relative_person_list:
                    relative_person_list[person.role_] = []
                relative_person_list[person.role_].append(person_relative)
            elif person.role_ in RELATIVES:
                if person.common_:
                    addCommonPersons(
                        relative_person_list, person, relative_role)
                else:
                    relative_person_list[relative_role][-1].relative_.\
                        append(person)
        line, raw_line = next(data_generator)
    return line, raw_line
def addCommonPersons(relative_person_list, person, relative_role):
    """Attach a shared (共同) representative to several parties of a role.

    person.common_ is either the string '共同' (attach to every party of
    the role) or an int n (attach to n parties).
    """
    common = person.common_
    if relative_role:
        if common == '共同':
            for i in range(len(relative_person_list[relative_role])):
                relative_person_list[relative_role][i].relative_.append(person)
        elif isinstance(common, int):
            # NOTE(review): range(common) starts at 0, so the first
            # iteration uses index -0 == 0 (the FIRST party); likely the
            # last n parties were intended - confirm.
            for i in range(common):
                if i <= len(relative_person_list[relative_role]):
                    relative_person_list[relative_role][-i].relative_.\
                        append(person)
def parsePerson(text, level):
    """Extract one litigant's details (name, sex, employer, title, ...).

    params:
        text: the paragraph describing the litigant
        level: instance level of the action (一审/二审/再审)
    Returns a populated Person.
    """
    person = Person()
    text = text.replace(":", "").strip()
    text = text.replace(":", "").strip()
    if len(text) == 0:
        return person
    # Extract a former name (曾用名)
    match = re.search('曾用名(\w+)', text)
    if match:
        person.usedname_ = match.group(1)
        text = re.sub('曾用名(\w+)', '', text)
    # e.g. 被告人xx,xx职务 -> tokens = ['被告人xx', 'xx职务']
    tokens = segmentSentence(text)
    hasrole = ''
    for sentence_count in range(len(tokens)):
        token = tokens[sentence_count]
        words = seg_words_with_pos(token)
        if len(words) == 0:
            continue
        if sentence_count == 0:
            # Extract the litigant's role
            # hasrole = startWith(ROLES, token)
            reg = '|'.join(ROLES)
            # print('(%s)'%reg)
            match = re.search('^(%s)[^、]+' % reg, token)
            if match:
                hasrole = match.group(1)
            else:
                if token in ROLES:
                    hasrole = token
            if not hasrole:
                # Shared representatives, e.g. "...二人共同委托代理人..."
                regCommon = '.*(一|二|两|三|四|五|六|七|八|九|十|十一|十二|十三|十四|十五).*?共同.*?'
                regCommon += '((委托)?(诉讼)?(代表人|代理人))'
                match = re.search(regCommon, token)
                if match:
                    hasrole = match.group(2)
                    person.common_ = CHINESE_NUM.get(match.group(1)) if \
                        CHINESE_NUM.get(match.group(1)) else '共同'
                    token = re.sub(regCommon, '', token)
            if hasrole:
                person.role_ = hasrole
                if person.role_ in PARTIES_YG:
                    person.roleid_ = 1
                    person.identity_ = 1
                elif person.role_ in PARTIES_BG:
                    person.roleid_ = 2
                    person.identity_ = 1
                elif person.role_ in PARTIES_DSR:
                    person.roleid_ = 3
                    person.identity_ = 1
                elif person.role_ in LEGAL:
                    person.identity_ = 2
                elif person.role_ in PROXY:
                    person.identity_ = 3
                elif person.role_ in PRINCIPAL:
                    person.identity_ = 4
                elif person.role_ in OTHERS:
                    person.identity_ = 5
                token = re.sub(hasrole, '', token)
            # Extract the litigant's role in previous instances, by level
            match = re.search(r'[((](.*?)[))]', token)
            if match:
                preRole = match.group(1)
                bl = pre_role(person, level, preRole)
                if bl:
                    # print(token)
                    # token = re.sub(r'[((].*?[))]', '', token)
                    token = re.sub('[((]%s[))]' % preRole, '', token)
            # After removing the historic role, what remains is the name
            token = re.sub(r'^[((].*?[))]', '', token)
            person.name_ = re.sub(r'[((].*?[))]$', '', token)
            try:
                tokens[0] = re.sub(person.name_, '', token)
            except sre_constants.error as e:
                # The name contained regex metacharacters; strip bracketed
                # parts, then escape the remaining specials and retry.
                token = re.sub(r'[((].*[))]?', '', token)
                token = re.sub(r'[((]?.*[))]', '', token)
                token = re.sub(r'[\[].*[\]]?', '', token)
                person.name_ = token
                try:
                    repName = person.name_.replace('*', '\*')
                    repName = repName.replace('+', '\+')
                    repName = repName.replace('[', '\[')
                    # print(repName)
                    tokens[0] = re.sub(repName, '', token)
                except Exception as e:
                    raise e
        # Extract ethnicity, birth date, sex, title, employer, registered
        # address, business address, residence and the remainder
        if person.role_:
            if not person.nation_:
                match = re.search(NATION, token)
                if match:
                    person.nation_ = match.group()
                    tokens[sentence_count] = ''
            if not person.birth_:
                reg = r'出?生?于?((\w{1,4})年(\w{1,4})月(\w{1,4})日)'
                reg += '(生|出生|$)'
                match = re.search(reg, token)
                if match:
                    person.birth_ = match.group(1)
                    person.b_year_ = match.group(2)
                    person.b_month_ = match.group(3)
                    person.b_day_ = match.group(4)
                    tokens[sentence_count] = ''
            if not person.sex_:
                match = re.search(r'^[男女]$', token)
                if match:
                    person.sex_ = match.group()
                    tokens[sentence_count] = ''
            if not person.title_:
                match = re.search(POSITION, token)
                if match:
                    person.title_ = match.group()
                    # token = re.sub(person.title_, '', token)
                    token = re.sub('(.*)%s' % person.title_, r'\1', token)
                    if re.search('^系?该.*', token) and \
                            person.role_ not in PARTIES:
                        person.company_ = 'PRE'
                    else:
                        if token.startswith('均系'):
                            token = token[2:]
                        elif token.startswith('系'):
                            token = token[1:]
                        person.company_ = token
                    tokens[sentence_count] = ''
            if not person.registered_address_:
                match = re.search(r'^注册地?(.*)', token)
                if match:
                    person.registered_address_ = match.group(1)
                    tokens[sentence_count] = ''
            if not person.address_:
                match = re.search(r'^住址?所?地?(.*)', token)
                if match:
                    person.address_ = match.group(1)
                    tokens[sentence_count] = ''
            if not person.business_address_:
                match = re.search(r'^(经营|营业)(场所|地址|地)?(.*)', token)
                if match:
                    person.business_address_ = match.group(3)
                    tokens[sentence_count] = ''
    # End of loop over clauses
    if not person.name_ and len(tokens) > 1:
        person.name_ = tokens[1]
        tokens[1] = ''
    person.other_ = ''.join(tokens)
    fixname(person)
    # print('-------%s'%person)
    return person
def parse_cause(line, law_entity):
"""提取案由 (案由分段)(区分民事和行政)."""
if law_entity.cause_:
return
# 行政案件类的关键字
# keyAdministrative = ['纠纷', '一案', '向本院提起', '向本院提出', '向本院申请', '审理终结',
# '审查终结', '提起行政诉讼']
# 民事案件类的关键字
keyCivil = ['一案', '二案', '三案', '四案', '五案', '六案', '七案', '八案', '九案', '十案',
'系列案', '两案', '为由', '纠纷']
if law_entity.nature_ == '行政':
# for k in keyAdministrative:
# index = line.find(k)
# if index != -1:
# law_entity.cause_ = line[:index+len(k)].strip()
# break
law_entity.cause_ = line[:line.find('。')]
if law_entity.nature_ == '民事':
for k in keyCivil:
m = re.search(r'(.*?)%s' % k, line)
if m is not None:
subStr = m.group(1)
reg = r'(\[.*?\]|【.*?】|\(.*?\)|(.*?)|《.*?》|".*?"|'
reg += '“.*?”|\'.*?\'|‘.*?’)'
subStr = re.sub(reg, '', subStr)
array = subStr.split("。")
law_entity.cause_ = array[len(array)-1].strip()
break
def parse_content(
        line, law_entity, data_generator, patentNumber, trademarkNumber):
    """Scan the judgment body.

    Collects patent/trademark numbers and review-decision numbers, parses
    the summary paragraph, the judgment result and the footer; returns the
    first unconsumed (line, raw_line) pair.
    """
    parsed_summary = False
    judge_pattern_compiler = re.compile(JUDGE_PATTERN)  # judgment-result regex
    while True:
        if line == "EOF":
            break
        # Extract patent numbers
        match = re.search(PATENT_NUMBER, line)
        if match:
            law_entity.patent_number_.add(match.group())
        # Extract Patent Reexamination Board decision numbers
        law_entity.review_ = law_entity.review_.union(patent_review(line))
        # Extract trademark numbers
        match = re.search(TRADEMARK_NUMBER, line)
        trademarkTypeDic = {'诉争商标': 1, '引证商标': 2}
        if match:
            trademarkType = match.group(1)
            if not trademarkType:
                reg0 = match.group()
                if '诉争商标' in reg0:
                    trademarkType = '诉争商标'
                elif '引证商标' in reg0:
                    trademarkType = '引证商标'
            law_entity.trademark_number_.add(
                (match.group(2),
                 trademarkTypeDic.get(trademarkType)))
        # Extract Trademark Review Board decision numbers
        law_entity.trademark_review_ = law_entity.trademark_review_.\
            union(trademark_review(line))
        if not parsed_summary and contains(SUMMARY, line):
            parsed_summary = True
            parse_summary(line, law_entity)
        # Judgment result
        match = judge_pattern_compiler.search(line)
        if match:
            line, raw_line = parse_judge_result(
                line, law_entity, data_generator)
        if line.startswith("审判长") or line.startswith("审判员")\
                or line.startswith("代理审判长") or line.startswith("代理审判员"):
            line, raw_line = process_footer(line, law_entity, data_generator)
            break
        line, raw_line = next(data_generator)
    return line, raw_line
def parse_summary(line, law_entity):
    """Parse the opening/summary paragraph of the judgment body.

    Extracts prior-instance docket numbers, trademark/patent review
    decision ids (administrative cases), and acceptance/hearing dates.
    """
    # trademark_id_pattern = "商评字(\(|\[|(|【|〔)(\d+)(\)|\]|)|】|〕)第(\d+)号"
    trademark_id_pattern = TRADEMARK_ID
    # patent_id_pattern = "(专利复审委员会|知识产权局)(.+)第(\d+)号专利"
    patent_id_pattern = PATENT_ID
    sentences = segmentSentence(line)
    for sentence in sentences:
        if law_entity.level_ == "二审" or law_entity.level_ == "再审"\
                or law_entity.level_ == "三审":
            # matches = rexMatchAll2('([\(\[(【〔]?\d+[\)\]】〕)]?)(.{5,30})(\d+)\
            # (号)(之一)?', sentence)
            matches, law_types = rexMatchAllYiShenAnHao(YISHENANHAO, sentence)
            if matches:
                # print 'Sentence2 : %s' % sentence
                # print ",".join(matches)
                law_entity.pre_judge_.extend(matches)
                law_entity.pre_law_type_.extend(law_types)
        if law_entity.nature_ == "行政":
            pattern = re.compile(trademark_id_pattern)
            match = pattern.findall(sentence)
            if match and match[0][0] != "" and match[0][1] != "" \
                    and match[0][2] != "" and match[0][3] != "":
                trademark_id = "商评字%s%s%s第%s号" % (
                    match[0][0], match[0][1], match[0][2], match[0][3])
                law_entity.trademark_id_ = trademark_id
            pattern = re.compile(patent_id_pattern)
            match = pattern.findall(sentence)
            if match and match[0][2] != "":
                patent_id = "第%s号" % (match[0][2])
                law_entity.patent_id_ = patent_id
        # Match the acceptance date and the hearing date
        reg = r'([1-2]\d{3}年(1[0-2]|[1-9])月([1-9]|[1-3][0-9])日)\S*'
        reg += '受理'
        match = re.search(reg, sentence)
        if match:
            receptionTime = match.group(1)
            law_entity.reception_time_ = receptionTime
        reg = r'([1-2]\d{3}年(1[0-2]|[1-9])月([1-9]|[1-3][0-9])日)\S*'
        reg += '开庭'
        match = re.search(reg, sentence)
        if match:
            courtTime = match.group(1)
            law_entity.court_time_ = courtTime
def parse_judge_result(line, law_entity, data_generator):
    """Accumulate the judgment-result paragraph into law_entity.judge_results_.

    Collects sentences line by line; an END_JUDGE marker stops collection
    of the current line, and a following line containing no numbering
    characters (see start_with_number) ends the block.
    """
    # judge_end_indicator = ["提起上诉", "不服", "受理费"]
    judge_end_indicator = END_JUDGE
    judge_result = ""
    while True:
        if line == "EOF":
            break
        sentences = segmentSentence(line)
        for sentence in sentences:
            # Skip the rest of this line once an end marker appears.
            if contains(judge_end_indicator, sentence):
                break
            judge_result += sentence
        line, raw_line = next(data_generator)
        if not start_with_number(line):
            break
        judge_result += '\n'
    law_entity.judge_results_.append(judge_result)
    return line, raw_line
def process_footer(line, law_entity, data_generator):
    """Parse the signature block at the end of a judgment.

    Extracts judge/clerk names by role and the judgment date; returns the
    first unconsumed (line, raw_line) pair.
    """
    # Robustness: raw_line was unbound when the very first line is "EOF".
    raw_line = ""
    while True:
        if line == "EOF":
            break
        if "本件与原本核对无异" == line:
            # BUG FIX: the original `continue` here never advanced the
            # generator, so this line looped forever.
            line, raw_line = next(data_generator)
            continue
        matches = rexMatch(FOOTER_YEAR_MONTH_DATE, line)
        if matches:
            law_entity.judge_time_ = matches.group(0)
            line, raw_line = next(data_generator)
            continue
        segment_words = seg_words(line)
        person = Person()
        for sw in segment_words:
            if sw in FOOTER_ROLE:
                person.role_ = sw
            elif person.role_:
                person.name_ += sw
        if person.name_:
            # "代理审判长"/"代理审判员" also match via the `in` checks below.
            if "审判长" in person.role_:
                law_entity.presiding_judge_.append(person)
            elif "审判员" in person.role_:
                law_entity.judges_.append(person)
            elif "人民陪审员" in person.role_:
                law_entity.juror_.append(person)
            elif "法官助理" in person.role_:
                law_entity.judge_assistant_.append(person)
            elif "书记员" in person.role_:
                law_entity.clerk_.append(person)
        line, raw_line = next(data_generator)
    return line, raw_line
def pre_role(person, level, preRole):
    """Fill in the litigant's role(s) from previous instances.

    Sets person.firrole_ (first instance) and, for retrials, person.secrole_
    (second instance) from the parenthesized role text; returns True when
    anything was recognized.
    """
    if level == "一审":
        for marker, role in (('反诉原告', '反诉原告'),
                             ('反诉被告', '反诉被告'),
                             ('反诉第三人', '反诉第三人')):
            if marker in preRole:
                person.firrole_ = role
                break
        if person.firrole_:
            return True
    elif level == "二审":
        for markers, role in ((('原审原告', '一审原告'), '原告'),
                              (('原审被告', '一审被告'), '被告'),
                              (('原审第三人', '一审第三人'), '第三人')):
            if any(m in preRole for m in markers):
                person.firrole_ = role
                break
        if person.firrole_:
            return True
    elif level == "再审":
        for marker, role in (('一审原告', '原告'),
                             ('一审被告', '被告'),
                             ('一审第三人', '第三人')):
            if marker in preRole:
                person.firrole_ = role
                break
        for markers, role in ((('原审原告', '二审原告', '二审上诉人'), '上诉人'),
                              (('原审被告', '二审被告', '二审被上诉人'), '被上诉人'),
                              (('原审第三人', '二审第三人'), '第三人')):
            if any(m in preRole for m in markers):
                person.secrole_ = role
                break
        if person.firrole_ or person.secrole_:
            return True
    return False
import re
import os
def get_law(line):
    """Extract standardized statute citations (law + 条/款/项/目) from a text.

    Law names are normalized via config/article_standardization.txt;
    returns a set of citation strings.
    """
    ABSPATH = os.path.abspath(__file__)
    ABSPATH = os.path.dirname(ABSPATH) + "/"
    # NOTE(review): the standardization table is re-read from disk on
    # every call; consider caching it at module level.
    regList = get_txt(ABSPATH + 'config/article_standardization.txt')
    reList = re.findall(r'((?:\d{4}年)?《.*?》[^。;《]*)', line)
    lawArticle = set()
    for s in reList:
        resub = re.search(r'(?:.*《.*?》.*[条款项目]*)', s)  # 编/章/节 are skipped
        if not resub:
            continue
        article = resub.group(0)
        lawTxt = re.search(r"《.*?》", article).group(0)
        lawResp = re.search(r'.*《.*?》', article)
        law = lawResp.group(0)
        articleResp = re.findall(r'第\w*?条', article)
        lawStandar = standardlized(regList, lawTxt)
        if articleResp and lawStandar:
            # NOTE(review): lawStandar is overwritten inside this loop, so
            # a citation with several 条 items feeds the previous result
            # into the next substitution - confirm intended.
            for item in articleResp:
                itemSub = re.search(r'%s[^条]*[款项目]' % item, article)
                if itemSub:
                    lawStandar = re.sub(
                        r'《.*?》', '《' + lawStandar + '》',
                        law + itemSub.group(0))
                    lawArticle.add(lawStandar)
                else:
                    lawStandar = re.sub(r'《.*?》', '《' + lawStandar + '》', law)
                    lawArticle.add(lawStandar)
        elif lawStandar:
            lawArticle.add(lawStandar)
    return lawArticle
def standardlized(regList, lawTxt):
    """Return the standardized law name for the first (pattern, standard)
    rule matching lawTxt; None when nothing matches."""
    for pattern, standard in regList:
        if re.search(r'%s' % pattern, lawTxt):
            return standard
    return None
def parse_content(id, content):
    """Split a document at 本院认为 and collect statute citations per section.

    Returns a list of dicts {'id', 'type': section index, 'articles': [...]};
    citations keep their 编/章/节/条/款/项/目 suffix when present.
    """
    sections = re.split(r'\s+本院认为', content)
    collected = []
    for section_idx, section in enumerate(sections):
        citations = []
        for fragment in re.findall(r'((?:\d{4}年)?《.*?》[^。;《]*)', section):
            found = re.search(r'(?:.*[编章节条款项目]+)', fragment)
            if found is None:
                found = re.search(r'.*《.*》', fragment)
            if found is not None:
                citations.append(found.group(0))
        if citations:
            collected.append({'id': id, 'type': section_idx,
                              'articles': citations})
    return collected
def parse_record(result):
    """Run parse_content over (id, content, ...) rows and concatenate
    every extracted citation dict."""
    parsed = []
    for record in result:
        case_id, content = record[0], record[1]
        if content is not None and content != '':
            parsed.extend(parse_content(case_id, content))
    return parsed
def parseArticle(article):
    """Split a citation like 《law》第x条,第y条 into (law, [clauses]).

    Each clause keeps any trailing 款/项/目 suffix; returns (None, None)
    when no 《》-delimited law name is present.
    """
    law_match = re.search(r'《.*?》', article)
    if law_match is None:
        return None, None
    law = law_match.group(0)
    clauses = re.findall(r'第\w*?条', article)
    if clauses is None:  # findall never returns None; kept for parity
        return None, None
    parsed = []
    for clause in clauses:
        extended = re.search(r'%s[^条]*[款项目]' % clause, article)
        parsed.append(clause if extended is None else extended.group(0))
    return law, parsed
def get_txt(fileName):
    """Read a mapping file of "pattern ——> standard" lines into a list of
    (pattern, standard) tuples.

    BUG FIX: the original leaked the file handle; `with` now closes it.
    """
    cList = []
    with open(fileName) as txt:
        for line in txt:
            cArray = line.split("——>")
            cTuple = (cArray[0].strip(), cArray[1].strip())
            cList.append(cTuple)
    return cList
def standardizedArticle(article1, article2, regList):
    """Replace the 《law》 part of article1 with the standard name whose
    pattern matches article2; None when no rule applies."""
    for pattern, standard in regList:
        if re.search(r'%s' % pattern, article2) is not None:
            return re.sub(r'《.*?》', '《' + standard + '》', article1)
    return None
def extractLaw(article):
    """Return the first 《...》 segment of the citation, or '' when absent.

    BUG FIX: the original called .group() unconditionally, raising
    AttributeError when the text had no 《》 marks.
    """
    if article is None:
        return ''
    found = re.search(r"《.*?》", article)
    return found.group(0) if found else ''
def extract_detail(content):
    """Decompose a citation into (law, article, paragraph, item).

    The numeric parts (条/款/项) are normalized via converter(); every
    element is None when no 《》-delimited law name is present.
    """
    law_match = re.search('《(.*?)》', content)
    if not law_match:
        return None, None, None, None
    law = law_match.group(1)
    article = re.search('》\)?)?第?(.*?)条', content)
    if article:
        article = article.group(1)
        content = re.sub('》\)?)?第?(.*?)条', '》', content)
    paragraph = re.search('》第?(.*?)款', content)
    if paragraph:
        paragraph = paragraph.group(1)
        content = re.sub('》第?(.*?)款', '》', content)
    item = re.search('》第?(.*?)项', content)
    if item:
        item = item.group(1)
    return law, converter(article), converter(paragraph), converter(item)
def converter(number):
    """Convert a (possibly parenthesized) Chinese numeral to Arabic digits.

    Returns '' for empty input, the string unchanged when already digits,
    and None when an unknown character is encountered.
    """
    if not number:
        return ''
    number = re.sub(r'\(|(|\)|)', '', number)
    if len(number) < 1:
        return ''
    if number.isdigit():
        return number
    digit_map = {
        '零': '0', '一': '1', '二': '2', '三': '3', '四': '4',
        '五': '5', '六': '6', '七': '7', '八': '8', '九': '9',
        '十': '10', '百': '100'}
    # Pad implicit digits: 十 -> 一十, trailing 十 -> +零, trailing 百 -> +零零
    if number[0] == '十':
        number = '一' + number
    if number[-1] == '十':
        number = number + '零'
    if number[-1] == '百':
        number = number + '零' + '零'
    digits = []
    for ch in number:
        if ch not in digit_map:
            return None
        if ch != '百' and ch != '十':
            digits.append(digit_map[ch])
    return ''.join(digits)
import pymysql
import re
import os
import sys
def getTxt(path):
    """Read a cause-standardization file of "pattern ——> standard" lines
    into a list of (pattern, standard) tuples.

    BUG FIX: the original leaked the file handle; `with` now closes it.
    """
    cList = []
    with open(path) as txt:
        for line in txt:
            cArray = line.split("——>")
            cTuple = (cArray[0].strip(), cArray[1].strip())
            cList.append(cTuple)
    return cList
def truncate(content):
"""
民事文书.
对原始文书截断
(一案|二案|三案|四案|五案|六案|七案|八案|九案|十案|纠纷)从前到后优先级查找
去掉之前的中文字符集【】,[],(),(),《》,“”,"",'‘,’'之间的内容
截取一案之前的整句
"""
keywords = ['一案', '二案', '三案', '四案', '五案', '六案', '七案', '八案', '九案', '十案',
'系列案', '两案', '为由', '纠纷']
m = None
match = re.search('案由[::](.*)', content)
if match:
return match.group(1).strip()
for key in keywords:
m = re.search(r'(.*?)%s' % key, content)
if m is not None:
break
if m is None:
return ''
subStr = m.group(1)
if key == '纠纷':
subStr = subStr + '纠纷'
subStr = re.sub(
r'(\[.*?\]|【.*?】|\(.*?\)|(.*?)|《.*?》|".*?"|“.*?”|\'.*?\'|‘.*?’)',
'', subStr)
array = subStr.split("。")
return array[len(array) - 1].strip()
def clean(coastr, results):
    """Strip everything up to and including each given fragment from coastr.

    Fragments are hand-escaped for the few regex metacharacters that
    appear in party names; falsy fragments are skipped.
    """
    for fragment in results:
        if not fragment:
            continue
        for raw, escaped in (('(', '\('), (')', '\)'), ('*', '\*'),
                             ('+', '\+'), ('[', '\['), (']', '\]')):
            fragment = fragment.replace(raw, escaped)
        try:
            coastr = re.sub('.*%s' % fragment, '', coastr)
        except Exception as e:
            print('par:%s,coastr:%s' % (fragment, coastr))
            raise
    return coastr
def standardized(cause):
    """Standardize a civil cause-of-action string.

    Splits compound causes on 、/和/及 and maps each part through
    config/cause_standardization.txt (rules built for the cleaned anyou2
    column of t_add_case_anyou).

    Returns: a joined string when at most one cause matched, otherwise a
    list of matched causes; an empty LIST for empty input (note the
    inconsistent return types).
    """
    ABSPATH = os.path.dirname(os.path.abspath(sys.argv[0]))
    regList = getTxt(ABSPATH + "/config/cause_standardization.txt")
    returnList = []
    if cause is None or cause == '':
        return returnList
    arr = []
    # Keep "著作权权属、侵权" style compounds intact instead of splitting.
    if re.search('著作权(权属)?(、|和|及)(权属)?侵权(纠纷)?', cause):
        arr.append(cause)
    else:
        arr = re.split('、|和|及', cause)
    for sa in arr:
        anyou = None
        for regTuple in regList:
            reg = regTuple[0]
            match = re.search(r'%s' % reg, sa)
            if match is not None:
                anyou = regTuple[1]
                break
        if anyou is not None:
            returnList.append(anyou)
    if len(returnList) <= 1:
        return ''.join(returnList)
    return returnList
def truncate_administrative(line):
    """Extract the administrative cause (e.g. 驳回复审) from a judgment
    opening line; '' when nothing matches.

    The line is cut at the first stop word, then a trademark/patent locator
    pattern isolates the cause if possible, otherwise the whole cut prefix
    is returned.
    """
    if not line:
        return ''
    stop_words = ['纠纷', '一案', '向本院提起', '向本院提出', '向本院申请', '审理终结',
                  '审查终结', '提起行政诉讼']
    locate_regs = ['商标([\u4e00-\u9fa5]+)行政', '商标([\u4e00-\u9fa5]+)裁定',
                   '商标([\u4e00-\u9fa5]+)决定', '专利权?([\u4e00-\u9fa5]+)行政',
                   '专利权?([\u4e00-\u9fa5]+)裁定',
                   '专利权?([\u4e00-\u9fa5]+)决定']
    for stop in stop_words:
        idx = line.find(stop)
        if idx == -1:
            continue
        cause = line[:idx + len(stop)].strip()
        for reg in locate_regs:
            found = re.search(reg, cause)
            if found:
                return found.group(1)
        return cause
    return ''
# Pattern -> standard administrative cause name; scanned in insertion
# order by standardized_administrative().
reg_administrative = {'驳回': '驳回复审', '无效': '无效宣告', '异议': '异议复审', '争议': '争议',
                      '撤销复审': '撤销复审',
                      '专利(权)?(申请)?(复审)?(行政)?(确权)?(纠纷|诉讼)': '其他专利行政授权确权纠纷',
                      '(其他|因)?商标(行政)?纠纷': '其他商标行政授权确权纠纷',
                      '商标.*转让(申请)?': '其他商标行政授权确权纠纷',
                      '行政处罚': '行政处罚', '商标.*核准转让': '其他商标授权确权纠纷',
                      '商标申请不予核准': '不予注册复审',
                      '商标撤销行政纠纷': '撤销复审', '商标不予注册复审行政纠纷': '不予注册复审'}
def standardized_administrative(cause):
    """Map a raw administrative cause onto its standard name via the
    reg_administrative table; '' when empty or unmatched."""
    if not cause:
        return ''
    for pattern, standard in reg_administrative.items():
        if re.search(pattern, cause):
            return standard
    return ''
def truncate_crime(line):
    """Return the clause ending at a criminal-judgment marker
    (一案/审理终结/...); '' when none is present."""
    marker_reg = '.*(一案|审查终结|审理终结|审理完毕|组成合议庭|本院受理后|向本院提起公诉)'
    found = re.search(marker_reg, line)
    return found.group() if found else ''
def standardized_crime(cause):
    """Standardize a criminal cause via
    config/cause_crime_standardization.txt; '' when unmatched."""
    ABSPATH = os.path.dirname(os.path.abspath(sys.argv[0]))
    regList = getTxt(ABSPATH + "/config/cause_crime_standardization.txt")
    for regTuple in regList:
        reg = regTuple[0]
        match = re.search(r'%s' % reg, cause)
        if match is not None:
            return regTuple[1]
    return ''
def truncate_main(nature, line):
    """Dispatch cause truncation by case nature: 民事/行政 share truncate(),
    刑事 uses truncate_crime(); '' for anything else."""
    if nature in ('民事', '行政'):
        return truncate(line)
    if nature == '刑事':
        return truncate_crime(line)
    return ''
def standardized_main(nature, cause):
    """Dispatch cause standardization by case nature; '' for unknown natures."""
    dispatch = {'民事': standardized,
                '行政': standardized_administrative,
                '刑事': standardized_crime}
    handler = dispatch.get(nature)
    return handler(cause) if handler else ''
def administrative_cause():
    """Backfill 无效宣告/驳回复审 causes with a (专利)/(商标) suffix.

    For administrative cases, decides whether the dispute concerns a
    patent or a trademark from the defendant agency's name and updates
    t_case_coa_relations. Intended to be merged into the cause
    standardization pipeline.
    """
    sql = '''SELECT id, caseid, anyou from ip_infos.t_case_coa_relations
          where id>%s and (anyou = '无效宣告' or anyou = '驳回复审')
          order by id limit 1000'''
    sqlParties = '''SELECT name from ip_infos.t_case_parties
                 where identity = 1 and caseid = %s'''
    sqlUpdate = '''UPDATE ip_infos.t_case_coa_relations set anyou = %s
                where id = %s'''
    # NOTE(security): the database host/user/password are hard-coded and
    # committed to source; move them to configuration or environment vars.
    conn = pymysql.connect(host="rm-2ze0ek7c57w65y4t6o.mysql.rds.aliyuncs.com",
                           user="ip_infos_root", passwd="zcb!1511",
                           charset="utf8")
    cur = conn.cursor()
    # Keyset pagination: pageNum carries the last id seen.
    pageNum = 0
    while True:
        print(pageNum)
        cur.execute(sql, pageNum)
        results = cur.fetchall()
        if len(results) == 0:
            break
        for record in results:
            id = record[0]
            pageNum = id
            caseid = record[1]
            anyou = record[2]
            cur.execute(sqlParties, caseid)
            print(id)
            for r in cur.fetchall():
                if re.search('专利局|国家知识产权局|知识产权局|专利评审委员会|专利复审委员会', r[0]):
                    cur.execute(sqlUpdate, (anyou + "(专利)", id))
                    print('%s,%s' % (caseid, anyou + "(专利)"))
                    break
                elif re.search('商标局|商标复审委员会|商标评审委员会', r[0]):
                    cur.execute(sqlUpdate, (anyou + "(商标)", id))
                    print('%s,%s' % (caseid, anyou + "(商标)"))
                    break
        conn.commit()
# Script entry point: backfill the (专利)/(商标) cause suffixes in the DB.
if __name__ == '__main__':
    administrative_cause()
import re
class Judgement(object):
"""docstring for ."""
    def __init__(self):
        """Initialize the docket-number regex and the per-(level, nature)
        rule tables (filled elsewhere)."""
        self.ANHAO = "(撤销|维持|变更).*([\(\[(【〔(]\d{4}[\)\]】〕)]"
        self.ANHAO += "\w+(初|终)\w+[\d、\-—]+号).*(判决|裁定)"
        self.regRuleFirCivil = []
        self.regRuleFirAdmini = []
        self.regRuleFirCriminal = []
        self.regRuleSecCivil = []
        self.regRuleSecAdmini = []
        self.regRuleSecCriminal = []
        self.regRuleReCivil = []
        self.regRuleReAdmini = []
        self.regRuleReCriminal = []
        self.regJudgeFirCivil = []  # first-instance civil judgments
        self.regJudgeFirAdmini = []  # first-instance administrative judgments
        self.regJudgeFirCriminal = []  # first-instance criminal judgments
        self.regJudgeSecCivil = []  # second-instance civil judgments
        self.regJudgeSecAdmini = []  # second-instance administrative judgments
        self.regJudgeSecCriminal = []  # second-instance criminal judgments
        self.regJudgeReCivil = []  # retrial civil judgments
        self.regJudgeReAdmini = []  # retrial administrative judgments
        self.regJudgeReCriminal = []  # retrial criminal judgments
def generate_judgement_cd(self, law_entity, level, item):
"""裁定书."""
offen, denfen = self.parse_rule(law_entity, item, level)
return offen, denfen
def generate_judgement_pj(self, law_entity, level, results):
"""判决书."""
offen, denfen = self.parse_judge(law_entity, results, level)
return offen, denfen
def parse_rule(self, law_entity, line, resulttype):
"""裁定书的判决结果提取."""
offen = 0
denfen = 0
regRule = []
if law_entity.level_ == '一审' and law_entity.nature_ == '民事':
regRule = self.regRuleFirCivil
elif law_entity.level_ == '一审' and law_entity.nature_ == '行政':
regRule = self.regRuleFirAdmini
elif law_entity.level_ == '一审' and law_entity.nature_ == '刑事':
regRule = self.regRuleFirCriminal
elif law_entity.level_ == '二审' and law_entity.nature_ == '民事':
regRule = self.regRuleSecCivil
elif law_entity.level_ == '二审' and law_entity.nature_ == '行政':
regRule = self.regRuleSecAdmini
elif law_entity.level_ == '二审' and law_entity.nature_ == '刑事':
regRule = self.regRuleSecCriminal
elif law_entity.level_ == '再审' and law_entity.nature_ == '民事':
regRule = self.regRuleReCivil
elif law_entity.level_ == '再审' and law_entity.nature_ == '行政':
regRule = self.regRuleReAdmini
elif law_entity.level_ == '再审' and law_entity.nature_ == '刑事':
regRule = self.regRuleReCriminal
if line.find('反诉') >= 0 and law_entity.nature_ == '民事'\
and law_entity.level_ == '一审':
return -1, -1
for reg in regRule:
if reg[3] == resulttype and re.search(reg[0], line):
offen = reg[1]
denfen = reg[2]
break
return offen, denfen
def parse_judge(self, law_entity, results, resulttype):
"""判决书的判决结果提取."""
offen = 0
denfen = 0
regList = []
if law_entity.level_ == '一审' and law_entity.nature_ == '民事':
regList = self.regJudgeFirCivil
elif law_entity.level_ == '一审' and law_entity.nature_ == '行政':
regList = self.regJudgeFirAdmini
elif law_entity.level_ == '一审' and law_entity.nature_ == '刑事':
regList = self.regJudgeFirCriminal
elif law_entity.level_ == '二审' and law_entity.nature_ == '民事':
regList = self.regJudgeSecCivil
elif law_entity.level_ == '二审' and law_entity.nature_ == '行政':
regList = self.regJudgeSecAdmini
elif law_entity.level_ == '二审' and law_entity.nature_ == '刑事':
regList = self.regJudgeSecCriminal
elif law_entity.level_ == '再审' and law_entity.nature_ == '民事':
regList = self.regJudgeReCivil
elif law_entity.level_ == '再审' and law_entity.nature_ == '行政':
regList = self.regJudgeReAdmini
elif law_entity.level_ == '再审' and law_entity.nature_ == '刑事':
regList = self.regJudgeReCriminal
for line in results:
if line.find('反诉') >= 0 and law_entity.nature_ == '民事'\
and law_entity.level_ == '一审':
return -1, -1
for reg in regList:
if reg[3] == resulttype and re.search(reg[0], line):
if reg[1]:
offen = 1
if reg[2]:
denfen = 1
break
return offen, denfen
def parse_pre(self, judge, txt):
"""提取历审案号和判决结果以及文书类型."""
if judge.trial_level_ == '二审':
match = re.search(self.ANHAO, txt)
if match:
judge.firjudgement_ = match.group(1)
judge.fircasenum_ = match.group(2)
judge.firitype_ = match.group(4)
elif judge.trial_level_ == '再审':
print(re.findall(self.ANHAO, txt))
for match in re.findall(self.ANHAO, txt):
if "终" in match[2]:
judge.secjudgement_ = match[0]
judge.seccasenum_ = match[1]
judge.secitype_ = match[3]
elif "初" in match[2]:
judge.firjudgement_ = match[0]
judge.fircasenum_ = match[1]
judge.firitype_ = match[3]
def parse_administrative(self, judge, txt):
"""解析行政文书号和判决结果."""
match = re.search(
'[0-9]*号?.*((重新)(作出)?.*(争议|复审|无效)?.*(决定|裁定|申请))', txt)
if match:
judge.administrative_judgement_ = match.group(2)
judge.administrative_ = re.sub(match.group(2), '', match.group(1))
return
match = re.search(
'(重新|维持|撤销|变更).*?((商评字|[\[〔[][0-9]{4}[]〕\]]|第?[0-9]+号?)\
.*(争议|无效|无效宣告|驳回复审|复审|审查|争议).*(决定|裁定|申请|裁定)书?》?)', txt)
if match:
judge.administrative_judgement_ = match.group(1)
judge.administrative_ = match.group(2)
return
match = re.search(
'([0-9]*\.[0-9xX].*(无效|争议|复审).*(决定|裁定|申请)).*((重新)(作出)?)', txt)
if match:
judge.administrative_judgement_ = match.group(5)
judge.administrative_ = match.group(1)
return | zcb-gz | /zcb_gz-1.1.tar.gz/zcb_gz-1.1/zcb_gz/jokers/judgement_extract.py | judgement_extract.py |
import re
import time
import jieba
class Address(object):
    """Splits a free-form Chinese address into
    (country, province, city, district, detail).

    The g* lookup tables (country lists/maps, province/city/district regex
    strings and numeric region-code maps) start empty and must be populated
    by the caller before parsing; with empty tables every address falls
    through to ``detail``.
    """

    def __init__(self):
        """Create empty lookup tables; the caller is expected to fill them."""
        self.gcountry = []          # known country tokens (mapped names)
        self.gcountryAdd = []       # extra country tokens used verbatim
        self.gcountryMap = dict()   # token -> canonical country name
        self.gprovince = ""         # province-matching regex (group 1)
        self.gcity = ""             # city-matching regex (group 1)
        self.gdistrict = ""         # district-matching regex
        self.gdistrictMap = dict()  # district name -> numeric region code
        self.gcityMap1 = dict()     # numeric region code -> name
        self.gcityMap2 = dict()     # name -> numeric region code

    def get_address(
            self, id, business_address, address, registered_address):
        """Parse up to three address variants of one record.

        Returns ``(country, params)``. ``country`` is whatever the LAST
        non-empty address parsed yielded (registered > contact > business).
        ``params`` holds one row per non-empty input:
        (id, raw_address, type_code, country, province, city, district,
        detail, ctime), with type_code 3=business, 2=contact, 1=registered.
        """
        country = ''
        province = ''
        city = ''
        district = ''
        detail = ''
        ctime = time.strftime('%Y-%m-%d %H:%M:%S')
        params = []
        if business_address:
            country, province, city, district, detail = self.parse_address(
                business_address)
            # BUGFIX: previously stored `address` here instead of the
            # business address that was actually parsed.
            params.append((id, business_address, 3, country, province,
                           city, district, detail, ctime))
        if address:
            country, province, city, district, detail = self.parse_address(
                address)
            params.append((id, address, 2, country, province,
                           city, district, detail, ctime))
        if registered_address:
            country, province, city, district, detail = self.parse_address(
                registered_address)
            # BUGFIX: previously stored `address` instead of
            # `registered_address`.
            params.append((id, registered_address, 1, country, province,
                           city, district, detail, ctime))
        return country, params

    def parse_address(self, address):
        """Parse one address string.

        Returns (country, province, city, district, detail).
        """
        country = ''
        province = ''
        city = ''
        district = ''
        detail = ''
        addCut = jieba.lcut(address)
        try:
            # The leading token may name a country.
            if addCut[0] in self.gcountry:
                country = self.gcountryMap.get(addCut[0])
            elif addCut[0] in self.gcountryAdd:
                country = addCut[0]
            if country:
                addCut.remove(addCut[0])
            # Foreign address: everything left is detail.
            if country and country != '中国':
                return (country, '', '', '', ''.join(addCut))
            # Province; the four municipalities double as their own city.
            if not province:
                match = re.search(self.gprovince, addCut[0])
                if match:
                    country = '中国'
                    province = match.group(1)
                    if match.group(1) in ['北京', '天津', '上海', '重庆']:
                        city = match.group(1)
                    addCut.remove(addCut[0])
            if not city:
                match = re.search(self.gcity, addCut[0])
                if match:
                    city = match.group(1)
                    if not province:
                        # Derive the province from the city's region code.
                        id = self.gcityMap2.get(match.group(1))
                        province = self.gcityMap1.get(int(id / 10000) * 10000)
                        country = "中国"
                    addCut.remove(addCut[0])
            if city:
                match = re.search(self.gdistrict, addCut[0])
                if match:
                    district = match.group()
                    addCut.remove(addCut[0])
            elif province and not city and not district:
                # Province known but no city: infer the city through a
                # district whose region code belongs to this province.
                match = re.findall(self.gdistrict, addCut[0])
                provinceNum = self.gcityMap2.get(province)
                if provinceNum:
                    provinceNum = int(provinceNum / 10000)
                    for m in match:
                        districtNum = self.gdistrictMap.get(m)
                        if provinceNum == int(districtNum / 10000):
                            district = m
                            city = self.gcityMap1.get(
                                int(districtNum / 100) * 100)
                            addCut.remove(addCut[0])
            detail = ''.join(addCut)
        except IndexError as e:
            # Token list exhausted mid-parse; return whatever was found.
            print(e)
        return (country, province, city, district, detail)
from .First_sub import MainCut1
from .Second_sub import MainCut2
from .Appealmoney1 import MainLawsuit1
from .Jmoney1 import Main_Judge1
import re
## 一审的诉讼请求,数据
###数据
def get_first_asuit(rawdata):
    """Return the first-instance claim sentence (from its keyword to the
    end of that line), or '' when no claim keyword is present."""
    hit = re.search("(诉讼请求|请求[^。]{,6}判令|请求判决[:: ]|诉称[::]|诉讼请求为)[^\n]*",
                    rawdata, re.S)
    return hit.group() if hit else ""
## 最后判决结果
def Sub_Judge1(content):
    """Split a judgment into (text_before_verdict, verdict_section, found).

    Scans line by line; a line matching a verdict header ("判决如下" etc.,
    but not an annex header) restarts the verdict accumulator, and later
    lines are appended to it. The split is only trusted when the verdict
    section also contains a signature keyword (审判长/审判员/...);
    otherwise ('' , accumulated_text, False) is returned.
    """
    header_pat = "(判如下|判令如下|判决:|判决如下|综上,依据《中华人民共和|规定,如下:|(综上所述|综上).{,1}.*规定:)"
    annex_pat = "判决[^。,;]*(附图|附页):"
    whole = ""
    verdict = ""
    found = False
    for line in content.split("\n"):
        whole += line + "\n"
        if re.search(header_pat, line) and not re.search(annex_pat, line):
            found = True
            verdict = line + "\n"
        else:
            verdict += line + "\n"
    if re.search("审判长|审判员|书记员|速录员|陪审员|速记员|法官助理", verdict):
        before = whole.replace(verdict, "")
    else:
        found = False
        before = ""
    return before, verdict, found
##shu
###一审判决结果
def get_firstjudge(rawdata):
    """Return the first-instance verdict fragment (keyword to end of its
    line), or '' when absent."""
    hit = re.search("(判决(如下|)[:::]{1}|综上,依据《中华人民共和)[^\n]*", rawdata, re.S)
    return hit.group() if hit else ""
def Get_Appeal_Judge_Money(entity, rawdata):
    """Populate claim/judgment amount fields on *entity* from a raw
    judgment text.

    Criminal (刑事) cases: only the verdict section is extracted and
    priced. Civil (民事) cases: first-instance (一审) texts go through
    MainCut1 + MainLawsuit1/Main_Judge1; second-instance (二审) through
    MainCut2 + MainLawsuit2/Main_Judgment2, each with a degraded fallback
    when segmentation fails. Any other nature is returned untouched.
    """
    if entity.nature_ == "刑事":
        pre_txt, st, flag = Sub_Judge1(rawdata)
        l = [(1, '3', '3')] * 11
        l[8] = (1, '3', st)  # slot 8 carries the verdict text for Main_Judge1
        entity.Judement = l[8][2]
        Jmoney = Main_Judge1(l)
        entity.judge_money = Jmoney
        return entity
    if entity.nature_ != "民事":
        return entity
    if entity.level_ == "一审":
        l = MainCut1((1, rawdata))
        if len(l) == 11:
            money = MainLawsuit1(l)
            # NOTE(review): "不祥" here assumes MainLawsuit1's sentinel
            # spelling; confirm against Appealmoney1 (Main_Judge1 itself
            # uses "不详").
            if money == "不祥":
                money = "-1"
            entity.appeal_money = money
            Jmoney = Main_Judge1(l)
            # BUGFIX: Main_Judge1 returns "不详" (see Jmoney1), not "不祥";
            # the old comparison could never fire, so unknown amounts were
            # never normalised to "-1".
            if Jmoney == "不详":
                Jmoney = "-1"
            entity.judge_money = Jmoney
            entity.accuser = l[2][2] + l[3][2]
            entity.Judement = l[8][2]
        else:
            # Segmentation failed: build a stub 11-slot list and price only
            # the verdict section when one can be located.
            l = [(1, '2', '')] * 11
            asuit = get_first_asuit(rawdata)
            l[3] = (1, 3, asuit)
            pre_txt, st, flag = Sub_Judge1(rawdata)
            if flag:
                judgcontent = st
                l[8] = (1, "2", judgcontent)
                Jmoney = Main_Judge1(l)
            else:
                Jmoney = "不详"
            entity.judge_money = Jmoney
    elif entity.level_ == "二审":
        l, flag = MainCut2((1, rawdata))
        if len(l) == 17:
            # NOTE(review): MainLawsuit2 / Main_Judgment2 are not imported
            # in this module; this path raises NameError as written.
            # Import them alongside MainCut2.
            money = MainLawsuit2(l)
            Jmoney = Main_Judgment2(l)
            if Jmoney == "不详":
                Jmoney = "-1"
            entity.judge_money = Jmoney
            entity.appeal_money = money
            entity.Judement = l[8][1] + l[14][1]
            entity.accuser = ''
        else:
            l = [(1, '2', '')] * 17
            pre_txt, st, flag = Sub_Judge1(rawdata)
            first_judge = get_firstjudge(rawdata)
            if flag:
                judgcontent = st
                # NOTE(review): text goes into index 1 here, but
                # Main_Judge1 reads i[8][2] -- it will always see "" on
                # this path; verify the intended slot layout.
                l[14] = (1, judgcontent, '8')
                l[8] = (1, first_judge, "")
                Jmoney = Main_Judge1(l)
            else:
                Jmoney = "不详"
            entity.judge_money = Jmoney
    return entity
import re
"""
一审判决金额
"""
def Main_Judge1(i):
    """Compute the first-instance judgment amount from a segmented
    judgment: ``i[8][2]`` must hold the verdict section text.

    Returns the amount in yuan as a string: "不详" when amounts are masked
    (XX/某元), "0" when no amount survives the cleanup, otherwise the
    (possibly summed) figure.
    """

    def _strip_all(pattern, text, repl=""):
        # Repeatedly delete (or substitute) every match of *pattern*.
        # Replaces the scattered search/replace loops of the original,
        # several of which re-tested with a *different* pattern than the
        # one that gated the loop (copy-paste drift).
        hit = re.search(pattern, text, re.S)
        while hit:
            text = text.replace(hit.group(), repl)
            hit = re.search(pattern, text, re.S)
        return text

    def cut_judge1_Money(x):
        # Truncate the verdict before court-fee/appeal boilerplate and
        # signatures, so only awarded amounts remain.
        x1 = re.search("(.*?)((加倍支付迟延履行期间的债务利息|于本判决生效之日起十日内付清,逾期不付,|如当事人未按本判决)|(\n如[果]{,1}被告)|(案件受理)|(案受理费)|(案件受理[0-9]+)|(案件保全费)|(公告费)|(财产保全费)|(本诉诉讼费)|(\n[^。,]{,3}诉讼费用)|(本案本诉部分受理费)|(受理费为)|(本案案件受理[0-9])|(本案征收受理费)|(.案受理费)|(本案诉讼受理费)|(各案受理费人民币)|(受理费[0-9]+\.?[0-9]*元)|(本案.{1,6}费})|([\r\n\t]诉讼费)|(本案一审受理费)|(本案应交纳诉讼费)|(本案本诉受理费)|(本案本诉诉讼费)|(本案的受理费)|(本案受理费)|(案件受理费)|(案件受理[0-9]{1})|([两三四五六七八九]案受理费)|(本案诉讼费)|(案件诉讼费)|(本案案件费)|(如不服从判决)|(受理费人民币)|(如不服本判决)|(本案[一二]审诉讼费)|(一审诉讼费)|(如(果)?未按(本)?判决指定的期间履行给付(金钱)?义务)|(如被告逾期不履行本判决所确定的金钱给付义务)|(审判长)|(审判员))", x, flags=re.S)
        if x1:
            x = x1.group(1)
        return x

    def cn2dig(cn):
        # re.sub callback: normalise one money expression (Chinese
        # numerals, comma-grouped digits, or digits + magnitude word) to
        # plain "<digits>元"; returns "no" for unrecognised tokens.
        cn = cn.group()
        # Drop a leading stray unit/zero character.
        if set(cn[0]).issubset(set([u'零', u'0', u'〇', u'.', u','] + list(u'兆亿億萬万仟千佰百角分里厘毫元圆'))):
            cn = cn[1:]
        CN_NUM = {u'〇': 0, u'一': 1, u'二': 2, u'三': 3, u'四': 4, u'五': 5, u'六': 6, u'七': 7, u'八': 8, u'九': 9,
                  u'零': 0, u'壹': 1, u'贰': 2, u'叁': 3, u'肆': 4, u'伍': 5, u'陆': 6, u'柒': 7, u'捌': 8, u'玖': 9,
                  u'貮': 2, u'两': 2,
                  }
        CN_UNIT = {u'毫': 0.0001, u'厘': 0.001, u'分': 0.01, u'角': 0.1, u'元': 1, u'圆': 1,
                   u'十': 10, u'拾': 10, u'百': 100, u'佰': 100, u'千': 1000, u'仟': 1000,
                   u'万': 10000, u'萬': 10000, u'亿': 100000000, u'億': 100000000, u'兆': 1000000000000, u'美': 7,
                   }
        # Plain digits (possibly with a decimal point), e.g. "123.00元".
        CN_DIGITS = [str(i) for i in range(10)]
        CN_DIGITS.append(u"元")
        CN_DIGITS.append(u".")
        # Comma-grouped digits, e.g. "12,765,89元".
        CN_DIGITS_C = [str(i) for i in range(10)]
        CN_DIGITS_C.extend([u",", u"元", u"."])
        # Digits followed by a magnitude word, e.g. "1.34万元", "2.3亿元".
        CN_DIG_CHAR = [str(i) for i in range(10)]
        CN_DIG_CHAR.extend([u"元", u".", u",", u"万", u"千", u"百", u"十", u"十万", u"百万", u"千万", u"亿", u"兆", u'美'])
        CN_DIG_CHAR_DICT = {u"万": 10000, u"千": 1000, u"百": 100, u"十": 10, u"十万": 100000, u"百万": 1000000, u"千万": 10000000, u"亿": 100000000, u'十亿': 1000000000, u'百亿': 10000000000, u'千亿': 10000000000, u'万亿': 1000000000000, u"兆": 1000000000000, u'美': 7}
        CN_ALL = list(CN_NUM.keys()) + list(CN_UNIT.keys())
        if set(cn).issubset(set(CN_ALL)):
            # Fully written in Chinese numerals: evaluate right-to-left,
            # marking 万/亿/兆/美 positions so mixed magnitudes compose.
            lcn = list(cn)
            unit = 0        # current positional unit
            ldig = []       # digits and magnitude markers
            while lcn:
                cndig = lcn.pop()
                if cndig in CN_UNIT:
                    unit = CN_UNIT.get(cndig)
                    if unit == 10000:
                        ldig.append('w')    # 万 marker
                        unit = 1
                    elif unit == 100000000:
                        ldig.append('y')    # 亿 marker
                        unit = 1
                    elif unit == 1000000000000:
                        ldig.append('z')    # 兆 marker
                        unit = 1
                    elif unit == 7:
                        ldig.append('d')    # 美 (USD) marker
                        unit = 1
                else:
                    dig = CN_NUM.get(cndig)
                    if unit:
                        dig = dig * unit
                        unit = 0
                    ldig.append(dig)
            if unit == 10:
                # A leading 十 acts as the digit, e.g. 十五 = 15.
                ldig.append(10)
            ret = 0
            tmp = 0
            while ldig:
                x = ldig.pop()
                if x == 'w':
                    tmp *= 10000
                    ret += tmp
                    tmp = 0
                elif x == 'y':
                    tmp *= 100000000
                    ret += tmp
                    tmp = 0
                elif x == 'z':
                    tmp *= 1000000000000
                    ret += tmp
                    tmp = 0
                elif x == 'd':
                    tmp *= 7
                    ret += tmp
                    tmp = 0
                else:
                    tmp += x
            ret += tmp
            return str(ret) + u'元'
        elif set(cn).issubset(set(CN_DIGITS)):
            return cn
        elif set(cn).issubset(set(CN_DIGITS_C)):
            return ''.join([i for i in list(cn) if i != ','])
        elif set(cn).issubset(set(CN_DIG_CHAR)):
            if re.search(".*?(?=\d)", cn) is None:
                cn_pre = ''
            else:
                cn_pre = re.search(".*?(?=\d)", cn).group()
            cn = re.search("\d.*元", cn).group()
            cn_l = re.search("^\d[\d,\.]*\d?", cn).group()
            cn_l = ''.join([i for i in list(cn_l) if i != ','])
            cn_l = float(cn_l)
            cn_m = re.search("[^\d元\.,]{1,2}", cn)
            if cn_m is None:
                return cn_pre + cn
            else:
                return cn_pre + str(cn_l * CN_DIG_CHAR_DICT.get(cn_m.group())) + "元"
        else:
            return "no"

    def Deal_repeat(x):
        # Drop clauses for dismissed claims ("驳回...元...") so their
        # amounts are not counted.
        for clause in re.findall("驳回[^\r\n\t;;]*元[^\r\n\t;;]*", x):
            x = x.replace(clause, "")
        return x

    def Deal_interest(x):
        # Remove interest/penalty/unit-price restatements of amounts that
        # are already counted elsewhere.
        x = re.sub("(每[案件]{1}[^,。\n]*[0-9/.]*元[,,]{,1})([^。,,\n]*(计|共|共计|合计人民币|共计|总计|合计|共合|合共|总共)(人民币|)[0-9/.]+)元", lambda t: t.group(2), x)
        reg_interest = "(利息|违约金|复利|担保费|罚息|尚需支付|已支付|还需支付|还需付款|尚需付款)[\(][^\(\)]*?[\)]"
        reg_Every = "每.{0,3}[0-9]+\.?[0-9]*?元"
        reg_deal = "(扣除|余款|已支付|还需支付)[^;;,,。]*?[,;。;,]"
        # Parenthesised interest notes such as "利息(...)".
        x = _strip_all(reg_interest, x)
        # Per-unit prices ("每...元").
        # BUGFIX: the original loop did `x = x2.replace(x2, "")`, which
        # wiped the entire working string on the first match.
        x = _strip_all(reg_Every, x)
        # "扣除/余款/已支付..." sub-clauses.
        x = _strip_all(reg_deal, x)
        # Enumerated single digits ("、三元"-style name fragments).
        x = _strip_all("、[0-9]元", x)
        x = _strip_all("以(人民币|价款)?[0-9]+\.?[0-9]*元为(本金|基数|限)", x)
        x = _strip_all("((总额)?超过|周年庆|按本金)[0-9]+\.?[0-9]*元", x)
        x = _strip_all("至被告退还原告保证金(人民币)?[0-9]+\.?[0-9]*元之日止", x)
        x = _strip_all(".*上述第一、二项相互抵顶", x)
        x = _strip_all("元/", x)
        x = _strip_all("在(人民币)?[0-9]+\.?[0-9]*元(人民币)?的范围内", x)
        # "损失 X 元之 N% 即 Y 元": drop the base amount X, keep the result.
        hit = re.search("损失(人民币)?([0-9]+\.?[0-9]*元)(人民币)?之[0-9]{1,2}%即[0-9]+\.?[0-9]*元", x, re.S)
        while hit:
            x = x.replace(hit.group(2), "")
            hit = re.search("损失(人民币)?([0-9]+\.?[0-9]*元)(人民币)?之[0-9]{1,2}%即[0-9]+\.?[0-9]*元", x, re.S)
        x = _strip_all("租金[0-9]+\.?[0-9]*元(人民币)?从[0-9]{4}年[0-9]{1,2}月[0-9]{1,2}日起至实际支付日止的", x)
        return x

    def Judge1Stander(x):
        # Normalise punctuation/width variants so one regex family
        # suffices downstream.
        x = x.replace("x", "X")
        x = x.replace("(", "(")
        x = x.replace(")", ")")
        x = x.replace("[", "(")
        x = x.replace("]", ")")
        x = x.replace("【", "(")
        x = x.replace("】", ")")
        x = x.replace(";", ";")
        x = x.replace("O", "0")
        x = x.replace(".", ".")
        x = x.replace(" ", "")
        # Strip thousands separators; run twice for alternating groups
        # such as "1,234,567".
        x = re.sub("\d+(,)\d+", lambda x: (x.group()).replace(",", ""), x)
        x = re.sub("\d+(,)\d+", lambda x: (x.group()).replace(",", ""), x)
        # Protect decimal points as "_" while brackets are processed;
        # Trans() restores them afterwards. Run twice for chained dots.
        x = re.sub("\d+(\.)\d+", lambda x: (x.group()).replace(".", "_"), x)
        x = re.sub("\d+(\.)\d+", lambda x: (x.group()).replace(".", "_"), x)
        return x

    def Delsp(i):
        # Remove parenthesised asides and book-title (《》) references that
        # contain amounts, so bracketed restatements are not counted.
        # NOTE: findall returns the captured parenthesised group, which is
        # the fragment removed (as in the original).
        t1 = '[;;,\.。,、::]?[^;;,\.。,、::\(\)]*元[^;;,\.。,、::]*(\([^\(\)]*元[^\(]*[\)])[^\(\)]*?[;;,\.。,、::]'
        k1 = '[;;,\.。,、::]?[^;;,\.。,、::]*(\([^\(\)]*元[^\(]*[\)])[^\(\);;,\.。,、::]*元[^\(\)]*?[;;,\.。,、::]'
        reg_title = "《[^《》]*元[^《]*?》"
        for frag in re.findall(t1, i) + re.findall(k1, i):
            i = i.replace(frag, "")
        for frag in re.findall(reg_title, i):
            i = i.replace(frag, "")
        return i

    def Deal_responsible(x):
        # Remove joint-liability ("连带责任") restatements that repeat
        # amounts already awarded.
        xall = ""
        head = re.search("^.*?[元¥]{1}", x, re.S)
        if head:
            xall = head.group()
        x = re.sub("上述[^;。\n]*承担连带(赔偿|清偿|)责任", "", x, flags=re.S)
        x = re.sub("对其中[^;\n。]*承担连带(赔偿|清偿|)责任", "", x, flags=re.S)
        x = _strip_all("上述[^,,。;\r\n\t]*连带(赔偿|责任)", x)
        # "(其中 ... 元 ...)" breakdowns duplicate the enclosing total.
        x = _strip_all("\(其中.*元.*?\)", x)
        x = _strip_all("(其中[^,;。]*元.*?)[。;,]", x)
        x = re.sub("(元)([^。,\n,]*在[0-9/.]+元范围内[^。,,\n]*连带(赔偿|清偿|)责任)", lambda t: t.group(1), x)
        x = re.sub("在[^。,;\n]*范围内承担连带赔偿责任", "", x, flags=re.S)
        # Patent numbers ("ZL...元" OCR noise) are not amounts.
        x = _strip_all("ZL[0-9Xx]+\.?[0-9Xx]*元", x, "。")
        if x.count("元") < 1:
            # Everything was stripped: fall back to the text up to the
            # first amount marker.
            x = xall
        x = re.sub("元[^\n。,;:]*在[0-9\.]+元[^\n,。,]*负连带责任", "元", x, flags=re.S)
        return x

    def Trans(x):
        # Restore the decimal points protected by Judge1Stander.
        return re.sub("[0-9]+(_)[0-9]+", lambda x: (x.group()).replace("_", "."), x)

    def deal_dollars(x):
        # Convert US dollar (x7) and euro (x7.27) figures to yuan, then
        # drop stray single-digit "元" artefacts that are not amounts.
        hit = re.search("(美元|美金)([0-9]+[\.]{,1}[0-9]*)(元)", x, re.S)
        while hit:
            frag = hit.group()
            x = x.replace(frag, str(float(frag[2:len(frag) - 1]) * 7) + "元")
            hit = re.search("(美元|美金)([0-9]+[\.]{,1}[0-9]*)(元)", x, re.S)
        hit = re.search("([0-9]+[\.]{,1}[0-9]*)(美元)", x, re.S)
        while hit:
            frag = hit.group()
            x = x.replace(frag, str(float(frag[:len(frag) - 2]) * 7) + "元")
            hit = re.search("([0-9]+[\.]{,1}[0-9]*)(美元)", x, re.S)
        hit = re.search("[0-9]+\.?[0-9]*(欧元)", x)
        while hit:
            frag = hit.group()
            # NOTE: the original appends no "元" after euro conversion;
            # behaviour preserved.
            x = x.replace(frag, str(float(frag[:len(frag) - 2]) * 7.27))
            hit = re.search("[0-9]+\.?[0-9]*(欧元)", x)
        for frag in re.findall("[^0-9\.]{2}[0-9]{1}元", x):
            if not re.search("(损失|民币|罚款|开支)", frag):
                x = x.replace(frag, "")
        return x

    def Deal_all_money(x):
        # Line-by-line fallback totaliser used when the simple cases do
        # not apply: per line, decide whether a "合计/共计" figure subsumes
        # the itemised figures or the items must be summed.
        x11 = re.findall("[\r\n\t]?[^\r\t\n]*元.*?[\r\t\n]", x)
        temp = []
        for i in x11:
            x1 = re.findall("([0-9]+\.?[0-9]*)元", i)
            x2 = re.findall("合计人民币|合计|共计|总计|共合|合共|计[0-9]+\.?[0-9]*|共[0-9]+\.?[0-9]*", i, re.S)
            if len(x1) > 0 and len(x2) == 0:
                money = round(sum((float(j) for j in x1)), 3)
                temp.append(money)
            elif len(x1) == 1 and len(x2) == 1:
                money = round(float(x1[0]), 3)
                temp.append(money)
            elif len(x1) == 2 and len(x2) == 2:
                money = round(sum(float(j) for j in x1), 3)
                temp.append(money)
            elif len(x1) > 1 and len(x2) == 1:
                l = len(x2)
                if l > 2:
                    l = x2[0][0]
                else:
                    l = x2[0]
                l1 = i.find("元")
                e1 = i.find(l)
                strl = "(?<=" + l + ")" + "([0-9]+\.?[0-9]*)元"
                x_all_money = re.findall(strl, i)
                if x_all_money:
                    x_all = round(float(x_all_money[0]), 3)
                else:
                    x_all = 0
                money = [round(float(j), 3) for j in x1]
                Money = round(sum(money), 3)
                if Money == x_all * 2 or Money - min(money) + min(money) * 10000 == x_all * 2 or l1 < e1:
                    temp.append(x_all)
                else:
                    money = round(sum(float(j) for j in x1), 3)
                    temp.append(money)
            else:
                money = sum(float(b) for b in x1)
                money = round(money, 3)
                temp.append(money)
        money = sum(temp)
        return money

    def MainJudge1(content):
        # Orchestrates the cleanup passes, then sums whatever survives.
        content = re.sub("[一二三四五六七八九十壹贰叁肆伍陆柒捌玖0-9]+案", "", content)
        content = content.replace(",", "")
        content = re.sub("([一二三四五六七八九十壹贰叁肆伍陆柒捌玖]{1}[元分厘]{1}(公司|店))", "XX公司", content, flags=re.S)
        content = re.sub("([0-9]+)([,,]{1})([0-9]+元)", lambda t: t.group(1) + t.group(3), content, flags=re.S)
        content = cut_judge1_Money(content)
        AllMoney = ""
        # Append "元" after bare "损失<number>" amounts.
        # NOTE(review): the replacement keeps only group(2)+"元", dropping
        # the "损失" prefix and the following character -- preserved as-is.
        content = re.sub("(损失)([一二三四五六七八九十百千万零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九,\.兆亿億萬万仟千佰百十拾零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九角分里厘毫]+)([^零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九角分里厘毫美元一二三四五六七八九十百千万零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九,\.兆亿億萬万仟千佰百十拾]{1})", lambda content: content.group(2).replace(content.group(2), content.group(2) + "元"), content)
        content = content.replace(".", ".")
        # Normalise every money expression to "<digits>元".
        reg = "[零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九,\.兆亿億萬万仟千佰百十拾]{1,16}[元圆]{,1}[零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九角分里厘毫万]{0,9}[元角分里厘毫]{1}"
        content = re.sub(reg, cn2dig, content)
        # BUGFIX: the original lambda called match.group() with two
        # arguments and raised on any "1,234美元" match; mirror the
        # comma-stripping sub used for "元" above.
        content = re.sub("([0-9]+)([,,]{1})([0-9\.]+美元)", lambda t: t.group(1) + t.group(3), content)
        content = deal_dollars(content)
        x = Deal_repeat(content)
        # Neutralise "元" with no digit before it (e.g. in names).
        x = re.sub("[^\dXx美]元", lambda x: x.group().replace("元", "yuan"), x)
        x = Judge1Stander(x)
        x = Delsp(x)
        x = Trans(x)
        x = Deal_responsible(x)
        x = Deal_interest(x)
        # "人民币123<char>" with a missing unit: close the amount with 元.
        x1 = re.findall("人民币[1-9]{1}[0-9\.]+[^某元0-9\.]{1}", x)
        for j in x1:
            if j:
                x = x.replace(j, j[:len(j) - 1] + "元")
        x = x.replace("人民币", "")
        nu_money = re.findall("(XX|X0|某)元", x, re.S)
        money = re.findall("([0-9]+[\.]{,1}[0-9]*)元", x)
        x_all = re.findall("(合计人民币|共计|总计|合计|共合|合共|共[0-9]+\.?[0-9]*?元|计[0-9]+\.?[0-9]*?元)", x)
        if len(nu_money) > 0:
            AllMoney = "不详"        # masked amounts: unknown
        elif len(money) == 0:
            AllMoney = "0"
        elif len(money) == 1:
            AllMoney = str(money[0])
        elif len(money) > 1 and len(x_all) == 0:
            money = sum(float(j) for j in money)
            money = round(money, 3)
            AllMoney = str(money)
        elif len(money) > 1 and len(x_all) == 1:
            # One "合计/共计" total plus itemised amounts: decide whether
            # the total subsumes the items or must be summed per line.
            l = len(x_all)
            if l > 2:
                l = x_all[0][0]
            else:
                l = x_all[0]
            l1 = x.find("元")
            e1 = x.find(l)
            l_money = []
            money = re.findall("([0-9/.]+)元", x)
            tempmoney = sum(float(m) for m in money)
            Alltemp = "0"
            tall = re.search("((总计|共计人民币|合计人民币|合计|共计|计|经济损失[及和]{0,1}合理费用|共|合))([0-9\.]+)(元)", x, re.S)
            if tall:
                Alltemp = tall.group(3)
            tempmoney = round(float(tempmoney), 3)
            Alltemp = round(float(Alltemp), 3)
            if Alltemp * 2 == tempmoney:
                AllMoney = Alltemp
            if e1 < l1:
                money = re.findall("([0-9]+[\.]{,1}[0-9]*)元", x)
                AllMoney = sum(float(m) for m in money)
                AllMoney = round(AllMoney, 3)
                AllMoney = str(AllMoney)
            else:
                x1 = x.split('\n')
                all_ = "([^。\n]*)(总计|共计人民币|合计人民币|合计|共计|计|经济损失[及和]{0,1}合理费用|共|合)([0-9\.]+)元"
                re_pall = "损失[^。元]*(违约金|律师费|合理费用|保全费|诉讼费|支出费用|技术转让费|合理的维权费|诉讼费|反诉费|服务费|管理费|分成费|支出费用|维权费)[^。]*元"
                re_aall = "([一-龥]{2,}[款金额钱费]{1}|违约金|律师费|合理费用|保全费|诉讼费|支出费用|技术转让费|合理的维权费|诉讼费|反诉费|服务费|管理费|分成费|支出费用|维权费)[^。元]*损失[^。]*元"
                for j in x1:
                    count = j.count("元")
                    All = re.search(all_, j)
                    l1 = j.find('元')
                    money = re.findall("([0-9]+[\.]{,1}[0-9]*)元", j)
                    # NOTE(review): `(re_pall or re_aall)` tests the pattern
                    # strings themselves (always truthy), not a regex
                    # match; preserved as-is.
                    if count > 1 and (re_pall or re_aall) and All:
                        if len(l_money) == 0:
                            Money = All.group(3)
                            Money = round(float(Money), 3)
                            l_money.append(Money)
                        else:
                            Money = sum(float(m) for m in money)
                            Money = round(Money, 3)
                            Money1 = All.group(3)
                            Money1 = round(float(Money1), 3)
                            NewMoney = sum(float(m) for m in money) + sum(float(m) for m in l_money)
                            NewMoney = round(NewMoney, 3)
                            if Money == Money1 * 2:
                                l_money.append(Money1)
                            elif Money1 * 2 == NewMoney:
                                l_money = []
                                l_money.append(Money1)
                            else:
                                l_money.append(Money)
                    else:
                        Money = sum(float(m) for m in money)
                        l_money.append(Money)
                AllMoney = sum(float(m) for m in l_money)
                AllMoney = round(AllMoney, 3)
                AllMoney = str(AllMoney)
        elif len(money) == 2 and len(x_all) == 2:
            money = sum(float(j) for j in money)
            AllMoney = str(money)
        else:
            money = Deal_all_money(x)
            AllMoney = (str(money))
        return AllMoney

    content = i[8][2]
    money = MainJudge1(content)
    return money
import re
def MainCut1(i):
"""
判决结果 :驳回|部分支持|全部驳回|
"""
def Sub_Judge1(content):
x=content.split("\n")
st=""
txt=""
flag=False
for j in x:
txt+=j+"\n"
x1=re.search("(判如下|判令如下|判决:|判决如下|综上,依据《中华人民共和|规定,如下:|(综上所述|综上).{,1}.*规定:)",j)
fx=re.search("判决[^。,;]*(附图|附页):",j)
###
if x1 and not fx:
flag=True
st=j+"\n"
else:
st+=j+"\n"
x1=re.search("审判长|审判员|书记员|速录员|陪审员|速记员|法官助理",st)
if x1:
pre_txt=txt.replace(st,"")
else:
flag=False
pre_txt=""
##需要存储于记录一下入库的错误类型
return pre_txt,st,flag
"""
文书尾部
"""
def Sub_Judge2(content):
content=content.replace("(","(")
content=content.replace(")",")")
x1=re.search("(^.*(判决如下|判决:)[:]{0,1}).*",content)
x2=re.search("(^[^\n]*(综上,依据《中华人民共和((?!(驳回|撤销|维持|\n)).)*|规定,如下:|(综上所述|综上).{,1}[^\n]*规定:))",content)
if x1:
DesBaise=x1.group(1)
x2=content.replace(DesBaise,"")
elif x2:
DesBaise=x2.group(1)
x2=content.replace(DesBaise,"")
else:
DesBaise=""
x2=content
x=x2.split("\n")
flag=False
Inscribe=""
x2=""
for j in x:
Rex=re.search("审判长|审判员|法官助理|速录员|速记员|陪审员",j)
if Rex:
flag=True
if flag:
Inscribe+=j+"\n"
else:
x2+=j+"\n"
Judgment=""
x1=re.search("(^.*?)([一-龥]*(未按照(判决|)指定|如不服|在指定期限内|案件受理费|受理费|可以在判决书送达之日起|如未按(本|)判决|案件保全费|公告费|财产保全费|本诉诉讼费|本案诉讼受理费|(本案.{1,6}费})|[\r\n\t]诉讼费|本案应交纳诉讼费|如(果|)?未按(本|)判决指定的期间履行给付(金钱|)义务|一审诉讼费))",x2,re.S)
if x1:
Judgment=x1.group(1)
footer=x2.replace(Judgment,"")
else:
Judgment=x2
footer=""
return [DesBaise,Judgment,footer,Inscribe]
"""
主函数
处理一下一审分段的
"""
def Optimization1(x):
x=re.sub("^[\n]+","",x)
x1=re.search("(^本院认为.*。)([^。]*(依据|依照|综上所述).*)",x)
m=[]
if x1:
x=x1.group(1)
x1=x1.group(2)
m.append(x)
m.append(x1)
else:
x=""
x1=x
m.append(x1)
return m
"""
##对于长度不够的在经过一次分段处理
"""
def Optimization2(x):
x=re.sub("^[\n]+","",x)
x1=re.search("(^[^。]{,5}(本院(经|)审理查明|本案(经|)审理查明|一审查明).*。)([^。]*(依据|依照|综上所述).*)",x)
m=[]
if x1:
x1=x1.group(1)
x2=x.replace(x1,"")
m.append(x1)
m.append(x2)
else:
x=""
x1=x
m.append(x1)
return m
"""
文书首
"""
def Stander(x):
x=re.sub(">","",x)
x=re.sub("×","×",x)
x=x.replace("&temp;","")
x=x.replace(""","")
x=x.replace("{C}","")
x=x.replace("&","")
x=re.sub(" ","",x)
x=re.sub("&ldqu0;","",x)
x=re.sub("&lsqu0;","",x)
x=re.sub("&rsqu0;","",x)
x=x.replace("lt;","")
x=x.replace("\xe3","")
x=x.replace("\x80","")
x=x.replace("\xc2","")
x=x.replace("\xa0","")
x=x.replace("\x7f","")
x=x.replace("\u3000","")
x=x.replace("当事人原审的意见\n","")
x=x.replace("\t", "")
x=x.replace("&rdqu0;","")
x=re.sub("[ ]+","",x)
x=re.sub("<[^<>]+>","",x)
x=re.sub("\(此页无正文\)","",x)
x=re.sub("判([\n]*|[?]+|)决([\n]*|[?]+|)如([\n]*|[?]+|)下","判决如下",x)
x=re.sub("判([\n]*)决([\n]*|):","判决:",x)
x=re.sub("(|[\n]*)年([\n]*|)","年",x)
x=re.sub("(\n|[\n]*)月(|[\n]*)","月",x)
x=re.sub("[?]{3,}","\n",x)
x=re.sub("[?]+","",x)
x=re.sub("[‘’']","",x)
x=re.sub("[zzZ]{1}[lLl]{1}","ZL",x)
x=re.sub("[\r\n]+","\n",x)
x=re.sub("...: ","",x)
x=x.replace("\x0b","\n")
x=re.sub("[\r\n]+","\n",x)
x=re.sub("[:::::::]{1}",":",x)
x=re.sub("^[\n]+","",x)
x=re.sub("(本页无正文)","",x)
x=re.sub("\(本页无正文\)","",x)
x=re.sub("本判决为终审判决。","",x)
x=re.sub("(\n)日","日",x)
x=re.sub("审([\n]*|[?]+|)判([\n]*|[?]+|)长([\n]+|[?]+)","审判长 ",x)
x=re.sub("代([\n]*|[?]+|)理([\n]*|[?]+|)审判长","代理审判长 ",x)
x=re.sub("审([\n]*|[?]+|)判([\n]*|)员([\n]*|[?]+)","审判员 ",x)
x=re.sub("代([\n]*|[?]+|)理([\n]*|[?]+|)审判员","代理审判员 ",x)
x=re.sub("陪([\n]*|[?]+|)审([\n]*|[?]+|)员([\n]+|[?]+)","陪审员 ",x)
x=re.sub("人([\n]*|[?]+|)民([\n]*|[?]+|)陪审员([\n]+|[?]+)","人民陪审员 ",x)
x=re.sub("书([\n]*|[?]+|)记([\n]*|[?]+|)员([\n]+|[?]+)","书记员 ",x)
x=re.sub("速([\n]*|[?]+)记([?]*|[\n]*)员","速记员",x)
x=re.sub("速记员\n","速记员 ",x)
x=re.sub("速([\n]*)录[\n]*员","速录员",x)
x=re.sub("速录员\n","速录员",x)
x=re.sub("法([\n]*|[?]+|)官([\n]*|[?]+|)助([\n]+|[?]+)理","法官助理 ",x)
#清除开始赘余信息
x1=re.search("(^签发.*?[\n])([^\n]{,30}法院)",x,re.S)
if x1:
x1=x1.group(1)
x=x.replace(x1,"")
x1=re.search("(^.*(已审理终结。|已审理完结。|已审理完毕。))([^\n]{1}.*)",x,re.S)
if x1:
x2=x1.group(3)
x1=x1.group(1)
x=x1+"\n"+x2
x1=re.search("(^.*(已审理终结。|已审理完结。|已审理完毕。))([^\n]{1}.*)",x,re.S)
if x1:
x2=x1.group(3)
x1=x1.group(1)
x=x1+"\n"+x2
###对尾部文书进行基本的规范化
x=re.sub("pt;''>","",x)
x=re.sub("当事人二审的意见\n","",x)
x=re.sub("\(原审判决附图一\)\(原审判决附图二\)","",x)
x1=re.search("^((?!(法院|\n)).)*\n",x,re.S)
###只能整体进行不能单独的进行其他的计算
if x1:
tx1=x1.group()
x=x.replace(tx1,"")
#x=re.sub(tx1,"",x)
#附
x1=re.search("(附:本判决书所依据法律规定的具体条文:|附本判决书引用的主要法律条文:|附.{,1}本判决适用法律条文:|附.{,1}本判决适用法律条款:|附:本案适用的法律条款|附:本案适用的法律条款|附:本案适用的法律条款).+",x,re.S)
if x1:
xx=x1.group()
x=x.replace(xx,"")
return x
def Deal_all(x):
x=re.sub("\n案件相关情况|\n本案相关情况\n|\n判决结果\n|\n裁判理由与结果\n","\n",x)
start=0
first=1
second=2
three=3
four=4
four_1=5
four_2=6
rex="反诉称"
rex1="[^0-9、]*[0-9Xx]{2,}[^0-9]{1}[^。,、]+[0-9-、Xx]+号|\([0-9]{4}\[一-龥0-9]+第[0-9]+号|初字第[0-9]+号"
rex2="于[一-龥0-9]+年[一-龥0-9]+月[一-龥0-9]+日(公开|)[一-龥0-9\(\)]*开庭审理了本案|案由:|审理终结|审理完结|审理完毕|审理结|当庭宣告判决|依法组成合议庭|侵[害犯权]{1}[^\n。]{,30}纠纷一案|无正当理由拒不到庭|^[^,。]*被告[^。,,]{,20}(拒|)不到庭|(服务|技术)合同纠纷一案|纠纷一案"
rex3="(判令被告:|诉请法院判令:|^原告[^。,,;;::、?]*诉(称|):|原告[^。]*诉请:|诉请判令:|请求判决:|请求:|^原告诉请|^[^。]{,30}诉讼[.]{1}称|^[^。]*不服[^。]{,10}(裁|决)定[^。]{,10}向[^。]{,20}诉讼称|原告[^。]{,30}诉(讼|)称|诉讼请求为:|诉讼请求:|[^。,]{,10}向本院起诉要求:|原告[^\n。]{,30}(起诉认为|请求判令|诉称|诉讼请求|起诉请求)|诉称:|提出诉讼请求|请求判令:|请求.院判令|请求判令[^。,;]{,10}:|公司诉称|诉称|诉讼称|提出[^。,]{,5}诉讼请求})"
rex4="(^被告[^,。]*辩|反诉请求:|第三人[^。,、]*述称|^被告[一-龥、]+答辩|被告[一-龥、]+拒不到庭|被告[^。,;、:]+抗辩理由是:|反诉称:|第三人[^。0-9a-zA-Z,、]*陈述意见|被告[^。]{,20}(坚持|认为)[^。]{,20}(认定|意见)|^[^。;]{,30}(辩称|辨称|答辩|答辨)|被告[^。]{,30}对原告所诉事实|被告对原告主张|被告[^。]{,30}(没有到到庭|拒不到庭)|(被告|公司)[^。,](放弃抗[辨辩]{1}|未答[辩辨]{1}|未提出答辩意见))"
rex5="^双方有争议|^双方对以下事|^相关事实|^[^。]{,30}(^(经|)庭前准备会议|^(经|)庭前交换证据|庭审中,双方当事人对以下事实无争议:|.*本院确认如下:|.*本院组织[一-龥]+证据交换和质证|.*根据原告的当庭陈述和举证意见|.*查明如下事实:|^经过庭审|本案认定事实如下:|.*本院根据证据,认定事实如下:|经庭审确认:|经本院审理,查明事实如下:|^查明事实|.*确认本案(的|)(法律|)事实(如下|):|本院[一-龥]*认定:|经本院[一-龥]*讨论认为:|^综上,本院认定|通过[^。,、]*当庭举证、[^。]*事实可以确认:|.*本院予以认定。|.*本院予以确认。|.*经本院庭审质证|.*经庭审调查,|本院认定的事实[^,,。:;:、]*事实|.*经本院审核后认为|^根据证据审核认定的规则|.*本院[^。]*认证如下:|.*综上,本院[^。]*认定(本案|)(事实如下|):|.*经审理[^。]*事实予以认定|^根据证据审核认定[^。,],*本院[^,。:]*予以认定,|^另查明|.*归纳案件的焦点是:|.*本院[^。]*,据此确认以下事实:|.*本院认(证|定)(案件|)(意见|事实|)(如下|):|本院审查认为:|经过庭审[一-龥、]+,本院对上述证据认证如下:|.*经审理查明|根据[^。]*,本院认定的本案事实如下:|庭审中,原、被告对如下事实没有异议:|^综上所述,本院认定事实|^在本院确定的举证期|^经查,|^本院为查明案件事实,|[^。]*本院予以确认:|[^。]*本院经认证查明如下事实:|^经开庭审理,|本院[一-龥]+认定以下事实:|.*本院予以确认:|.*本院对以下事实予以确认:|^综合上述证据,本院认定的基本事实如下:|经过庭审[^。]*[一-龥,,]+确认:|本院根据[^。]*,[一-龥]+事实:|.*原告举证及被告质证如下:|^经开庭审理|另查明:|^本院根据[一-龥]+提交的证据|^本院公开开庭审理,[一-龥]+证据|本院认定的基本事实如下:|^综合分析上述证据及庭审|本院对双方的证据作如下认定:|^综合原、被告双方的举证|经审理查明:|对证据认证如下:|认证如下事实|查明以下案件事实|经庭审质证|^还查明,|本院对案件事实认定如下|本院确定以下与本案有关的事实|本院对本案证据认证如下|查明以下事实|根据[^。]{,30}(陈述|证据),本院(确认如下事实|认定事实如下|认定如下事实|确认事实如下)|查明:|本院经查|本院依法认定本案事实如下|本院确认本案事实如下|本院根据上述[^。]{,30}确认以下事实|经审理|本院经查|根据上述[^。]{,30}本院确认以下事实|经审理查明|经庭审[^。,]{,5}比对|一审查明|经审查|本院查明|本院审理查明|本案相关事实|^[^。]{,30}本院(确认如下事实|认定事实如下|认定如下事实|确认事实如下))"
rex6="^(.{,30})(^双方争议事项为|.*双方争议的焦点在于:|.*本案[一-龥]*争议焦点在于|.*本案中,[一-龥]*争议焦点|根据[一-龥]+认证,本院确认下列事实:|^本院经审核认为|^本院认为,|综合本院对事实的认定,本案的争议焦点|本案[一-龥]*争议焦点[一-龥]+:|本案中,原被告争议的焦点|.*本案[一-龥]+争议焦点[一-龥]+:|.*本院认为:|法院一审认为|本院认证认为|本院认为|一审认为)"
temp=x.split("\n")
state=start
st=""
l=[]
for j in temp:
if state==start:
x1=re.search(rex1,j)
if x1:
st+=j+"\n"
l.append(st)
st=""
state=first
else:
st+=j+"\n"
elif state==first:
x1=re.search(rex2,j)
x2=re.search(rex3,j)
if x1:
l.append(st)
st=j+"\n"
state=second
elif x2:
l.append(st)
l.append('')
st=j+"\n"
state=three
else:
st+=j+"\n"
elif state==second:
x0=re.search(rex3,j)
x01=re.search(rex4,j)
Rex1=re.search("(审理终结|审理完毕|审理完结|审理完毕|审理终|审理完|审理毕|合议庭[^\n]*本院缺席审理)",j,re.S)
###本院审理查明
x1=re.search(rex5,j)
x2=re.search(rex6,j)###本院认为
x1_t=re.search("经(审|)查[,,]{1}本院认为",j)
Rex=re.search("反诉称|辩称:|反诉请求:",j)
if x0 and not Rex and not Rex1:
l.append(st)
st=j+"\n"
state=three
elif x01 and not Rex1 :
l.append(st)
st=''
l.append(st)
st=j+"\n"
state=four
elif x1 and not x1_t and not Rex1:
l.append(st)
st=''
l.extend(['',''])
st=j+"\n"
state=four_1
elif x2 and not Rex1:
l.append(st)
st=''
l.extend(['','',''])
st=j+"\n"
state=four_2
else:
st+=j+"\n"
elif state==three:
x0=re.search(rex4,j)
x1=re.search(rex5,j)
x2=re.search(rex6,j)###本院认为
x1_t=re.search("经(审|)查[,,]{1}本院认为",j)
Rexx=re.search("(审理终结|审理完毕|审理完结|审理完毕|审理终|审理完|审理毕|合议庭[^\n]*本院缺席审理)",j,re.S)
if x0 and not Rexx:
###原告诉称
l.append(st)
st=j+"\n"
Rex=re.search("本院确认[^。,、;::]*本案事实:",j)
Rex1=re.search("(本案争议焦点(是|为|):|本院认为:)",j)
Rex2=re.search("(^.*。)([^。]*(经审理查明|经审查|经查明))",j)
if Rex:
l.append('')
state=four_1
elif Rex1:
l.extend(['',''])
state=four_2
elif Rex2:
l.append(Rex2.group(1)+"\n")
st=Rex2.group(2)+"\n"
state=four_1
else:
state=four
elif x1 and not x1_t:
l.append(st)
st=''
l.append(st)
st=j+"\n"
state=four_1
elif x2:
l.append(st)
st=''
l.extend([st,st])
st=j+"\n"
state=four_2
else:
st+=j+"\n"
elif state==four:
rex="^[^。]{,30}(.*本院[^。]*认证如下:|^本院[一-龥]*证据:|.*经庭审质证|再查明|原审查明事实:|^本案在诉讼过程中|原告[^。,、;::]*证据|^依据[一-龥]*所确认的证据和事实|原告[^。,]*有如下异议:|为证明自己主张,[^。]*向本院提供如下证据:|.*向本院提交如下证据:|.*举证据如下:|原告[^。,,]*,向[^,。、;?::]*提交|^原告[^。]*,[^。]*举证如下:|^原告[^。]*,[^。]*证据资料:|公司[^。]*提供了下列证据材料:|.*本院[^。,]事实(如下):|.*原告[^。,]*材料:|.*本院[^。,]*事实:|.*原告[^。,]*材料:|本院认证意见如下:|原告[^。]*质证如下:|本院[^。]*,据此确认以下事实:|原告[^。]*证据:|.*本院确认以下事实:|.*被告对原告证据质证如下:|原告提供的证据材料如下:|原告[^。,,]*提供如下证据:|^综上,本院[^,。]*事实一致|.*经本院审查|.*在原一审程序中提交的证据有:|[一-龥,,]+质证意见:|^在本案审理过程中,[一-龥]+提供[一-龥]+证据材料:|原告[^。,,;::;]+,提交[^。,;;、]+证据材料:|^法庭质证时|^在本案审理过程中,原告[一-龥]+证据材料|^原告[^,。,、:;;]*,[^:;;,。,、]*证据:|[被原]{1}告[一-龥龥]+质证意见(如下|):|^庭审中[^。]*提交了以下证据|^原告为支持其主张,提供以下证据:|^(被|原)告质证认为|^(原告|被告|第三人)[^。,、]*提供证据如下|经审理查明:|对证据认证如下:|认证如下事实|查明以下案件事实|经庭审质证|^还查明,|本院对案件事实认定如下|本院确定以下与本案有关的事实|本院对本案证据认证如下|查明以下事实|根据[^。]{,30}(陈述|证据),本院(确认如下事实|认定事实如下|认定如下事实|确认事实如下)|查明:|本院经查|本院依法认定本案事实如下|本院确认本案事实如下|本院根据上述[^。]{,30}确认以下事实|经审理|本院经查|根据上述[^。]{,30}本院确认以下事实|经审理查明|经庭审[^。,]{,5}比对|一审查明|经审查|本院查明|本院审理查明|本案相关事实|^[^。]{,30}本院(确认如下事实|认定事实如下|认定如下事实|确认事实如下))"
x0=re.search(rex,j)##审理查明
x_flag=re.search(rex5,j)
x1_t=re.search("经(审|)查[,,]{1}本院认为",j)
x1=re.search(rex6,j)###本院认为
if(x0 or x_flag) and not x1_t:
l.append(st)
st=j+"\n"
state=four_1
elif x1:##
l.append(st)
st=''
l.append('')
st=j+"\n"
state=four_2
else:
st+=j+'\n'
###审理查明
elif state==four_1:
x0=re.search(rex6,j)###本院认为
if x0:
l.append(st)
state=four_2
st=j+'\n'
else:
st+=j+'\n'
elif state==four_2:
st+=j+"\n"
l.append(st)
return l
"""
一审分段过程
"""
    def Main_Div1(i):
        # Segment a first-instance judgment into the 11 canonical sections.
        # i is (doc_id, full_text); returns a list of (doc_id, section_name,
        # section_text) tuples when exactly 11 sections were recovered,
        # otherwise the raw (possibly padded) section list, or ['无'].
        x=Stander(i[1])
        key=["标题","当事人信息","审理经过","原告诉称","被告辩称","审理查明","本院认为","判决依据","判决主文","判决尾部","落款"]
        # Sub_Judge1 splits body vs. judgment part; flag says the split worked.
        pre_,judge,flag=Sub_Judge1(x)
        ###尾部信息的获取
        ls=Sub_Judge2(judge)
        ###金额的
        if flag:
            l=Deal_all(pre_)
            #l_1.append(l)
            # Deal_all yields 3-7 leading sections.  Each branch below pads the
            # missing middle sections with '' and, when Optimization1/2 can
            # recover an extra boundary from the judgment head (len(ln)==2),
            # moves that fragment into the padded slot.
            if len(l)==7:
                ln=Optimization1(ls[0])
                if len(ln)==2:
                    # NOTE(review): here the title l[0] is appended onto l[6],
                    # while the len==6 branch appends l[0] as a new element --
                    # presumably intentional, but worth confirming.
                    l[6]=l[6]+"\n"+l[0]
                    ls[0]=ln[1]
                l.extend(ls)
            elif len(l)==6:
                ln=Optimization1(ls[0])
                if len(ln)==2:
                    l.append(l[0])
                    ls[0]=ln[1]
                else:
                    l.append('')
                l.extend(ls)
            elif len(l)==5:
                ln=Optimization2(ls[0])
                if len(ln)==2:
                    l.append(ln[0])
                    l.append('')
                    ls[0]=ln[1]
                else:
                    l.extend(['',''])
                l.extend(ls)
            #Optimization
            elif len(l)==4:
                ln=Optimization1(ls[0])
                ln1=Optimization2(ls[0])
                if len(ln)==2:
                    l.extend(['','',ln[0]])
                    ls[0]=ln[1]
                elif len(ln1)==2:
                    l.extend(['',ln1[0],''])
                    ls[0]=ln1[1]
                else:
                    l.extend(['','',''])
                l.extend(ls)
            elif len(l)==3:
                ln=Optimization1(ls[0])
                ln1=Optimization2(ls[0])
                if len(ln)==2:
                    l.extend(['','','',ln[0]])
                    ls[0]=ln[1]
                elif len(ln1)==2:
                    l.extend(['','',ln1[0],''])
                    ls[0]=ln1[1]
                else:
                    l.extend(['','','',''])
                l.extend(ls)
            else:
                l=['无']
        # NOTE(review): if flag is False, l is never assigned and the next
        # line raises NameError -- confirm Sub_Judge1 always sets flag here.
        if len(l)==11:
            l1=[]
            l1.append(i[0])
            l1=l1*11
            # Pair each section with its name and the document id.
            l=list(zip(l1,key,l))
        return l
l=Main_Div1(i)
return l | zcb-gz | /zcb_gz-1.1.tar.gz/zcb_gz-1.1/zcb_gz/ronghui/Money/First_sub.py | First_sub.py |
import re
"""
二次提取数据
根据关键字进行粗略的段落的提取
"""
def MainLawsuit1(i):
def Key_new(key,x):
temp="0"
for i in key:
x1=x.count(i)
if x1>0:
temp=i
break
if temp!="0":
x1=temp+".*"
x=re.search(x1,x)
x=x.group()
else:
x="请求法院判令被告赔偿原告XX元。"
return x
"""
1.段落的提取
金额
"""
    def cut_(data):
        # Trim a complaint paragraph down to the sentence(s) that actually
        # carry the plaintiff's monetary claim: drop evidence listings,
        # prefer the post-"审理终结" claim statement, and when the claim was
        # amended mid-trial keep whichever half carries the amended amounts.
        data=re.sub("^\n","",data,flags=re.S)
        if data[len(data)-1]!="\n":
            data=data+"\n"
        x1=re.search("(审理终结|审理完毕|审理完结|审理终|审理终|审理毕|纠纷一案)",data,re.S)
        # Single-line paragraph that is just the case header: keep as-is.
        if data.count("\n")<=1 and x1:
            return data
        ###数据
        # Strip "N分公司/店" names (their numerals look like amounts) and
        # evidence-list tails, which never contain the claimed sum.
        data=re.sub("[一二三四五六七八九十壹贰叁肆伍陆柒捌玖]{1}[分元角]{1}(公司|店)","",data,flags=re.S)
        data=re.sub("([^\n。]{,10}(原告[^\n。,,;]*提供下列证据证明其主张|提交了以下证据|提供了以下证据|提供以下证据|提供如下证据|提交如下证据|提供[了]{,1}下列证据|提供证据如下|提交证据如下).*)","",data,flags=re.S)
        #data=re.sub("((提供下列证据证明其主张|提供如下证据|提交了以下证据|提供以下证据|提供如下证据|提交如下证据|提供[了]{,1}下列证据|提供证据如下|提交证据如下).*)","",data,flags=re.S)
        # Split around the "审理终结" header line: x is the header part,
        # xx the remainder; keep whichever side holds the claim wording.
        x1=re.search(".*(审理终结|审理完结|审理完毕|审理完|审理终|审理毕|纠纷一案)[^\n]*",data,re.S)
        if x1:
            x=x1.group()
            xx=data.replace(x,"")
            x0=re.search("[0-9/.一二三四五六七八九十百千万亿]+元|(人民币|美元|日元|美金|合理支出|合理费用|经济损失)[0-9/.一二三四五六七八九十百千万]+",x,re.S)
            x1=re.search("[诉]{,1}[请要]{1}[^。,,]{,10}判令:|[请要]{1}求判令|请求判决|判令.{,1}被告[::]{1}|诉称[::]{1}|诉称[^。]*[0-9一二三四五六七八九十万亿百/.]+[元角分厘]{1}",x,re.S)
            x2=re.search("诉讼请求[::]{1}|请求[^。,、\n]{,5}法院判令:|请求[.]{,3}判令[一二三四五六七八九十各两叁肆伍陆柒捌玖拾]{,1}被告:|诉至.{,5}院[.]{,1}(请求|要求)判令:|现起诉请求法院判令|请求法院依法判令被告:|诉讼请求为:|请求判令[.]{,2}(被告|):|请求(判令|判决)[^。,]{,20}:|原告[^。,]{,30}提出的诉讼请求|原告[^。,]{,30}起诉|诉至本院[.]{,3}:|故我公司要求.{2,15}:|诉请法院判令[::]{1}|提起诉讼,请支持:|其诉讼请求为:|请求法院:|故我.{2,}?要求.{3,15}向我|现我请求法院判令.{3,15}赔偿|要求[^。,,]{,20}向我支付|故我请求法院|请求法院判令|请求法院判令|(要求|请求)[^,。、]{,20}[::]{1}|故要求[,,]{1}|(要求|判令)[^,。;:;\n]{,10}公司|故我诉至法院请求|诉讼请求|诉称|现要求|起诉要求|并判令|判令[^。,]{,30}公司:|故我要求|现我公司要求|依法判决[::]{1}|诉请",xx,re.S)
            if x1 and x0:
                data=x
            elif x2:
                data=xx
        #x0=re.search("将其诉讼请求变更为")
        #在本案审理过程中,原告治历公司将其请求变更为:
        #原告明确第一项诉讼请求
        # Claim amendments: when the text says the request was changed, keep
        # only the amended version.
        data=re.sub("(.*)(并变更诉请为:.*)",lambda t:t.group(2),data,flags=re.S)
        x1=re.search("((庭审前|庭审中|庭审时|审理过程中|庭审中)[^\n]*将[其]{,1}(诉讼|)请求变更为:)(.*)",data,re.S)
        x2=re.search("(庭审中|审理时|审理过程中|审理中)[^\n。]*原告主张.*",data,re.S)
        if x1:
            data="请求判令:"+x1.group(4)
        elif x2 and "变更" not in data:
            data=data.replace(x2.group(),"")
        x1=re.search("(.*)(庭审中明确为|庭审中确认|在审理中,原告明确|在审理中,确认|庭审中,明确|庭审中,确认为)(.*)",data,re.S)
        if x1:
            count1=(x1.group(1)).count("元")
            if count1==0:
                # Nothing monetary before the clarification: keep the tail.
                data="请求判令:"+x1.group(3)
        x1=re.search("(庭审中,原告[^\n。,,;;]*明确|在审理中,原告(明确|确认)|原告庭审中(明确|确认)|(庭审中|审理中)[,,]{,1}(原告|)(明确|确认)).*",data,re.S)
        if x1:
            x1=x1.group()
            data=data.replace(x1,"")
        x1=re.search("(庭审中|审理过程中|庭审时|诉讼过程中|在庭审过程中|审理中|诉讼中).*",data,re.S)
        ##: 出现撤回,增加至|更改[0-9一二三四五六七八九十]项元进行更改在进行计算
        ##该项诉求变更为519615.00元整
        #x1=re.search("([0-9一二三四五六七八九十该]{1}项[0-9一二三四五六七八九十百千万](元|¥)")
        if x1:
            # x21: text before the mid-trial note, x22: the note itself.
            # Compare their amount counts to decide which half to keep, and
            # splice an amended per-item amount back in when recognisable.
            x22=x1.group()
            ###
            x21=data.replace(x22,"")
            count1=x21.count("元")+x21.count("¥")
            count2=x22.count("元")+x22.count("¥")
            if (count2==1 and count1<=1) or count2==count1:
                #x21money=re.search("[0-9]万元",)
                data=x22
            elif count2==1 and count1>1:
                #key={"一":"1","二":2,"三":"3","四":"4","五":5,"六":"6","七":"7","八":"8","九":"9","十":"10"}x1=re.search("[0-9一二三四五六七八九十]{1}项[^;;。]*[0-9一二三四五六七八九十百千万亿,][美万]{,1}[元¥]{1}",x22,re.S)
                x=re.search("([0-9一二三四五六七八九十]{1})(项[^;;。,,]*?)([0-9\.一二三四五六七八九十百千万亿,,]+[美万]{,1}[元¥]{1})",x22,re.S)
                if x:
                    rex_change=re.search("变更|增加至|修改|更改",x22,re.S)
                    rex_vanish=re.search("撤销|撤消|撤回",x22,re.S)
                    x2=x.group(3)
                    x3=x.group(1)
                    if rex_change:
                        # Item x3 was amended to amount x2: rewrite it in x21.
                        data=re.sub("("+x3+")(、[^。;;]*[0-9,\.一二三四五六七八九十百千万][万美]{,1}[元¥]{1})",lambda t:t.group(1)+'、'+x2,x21)
                    elif rex_vanish:
                        # Item x3 was withdrawn: delete it from x21.
                        data=re.sub("("+x3+")(、[^。;;]*[0-9,\.一二三四五六七八九十百千万][万美]{,1}[元¥]{1})",'',x21)
                    else:
                        data=x21
                else:
                    data=x21
            elif count2>count1:
                data=x22
            else:
                data=x21
        return data
    def match_multi_word(word_list,text):
        # Scan *word_list* in order and return the claim sentence that starts
        # at the first matching phrase; amounts appearing after a defence
        # ("事实和理由"/"反诉称" etc.) marker are ignored.  Falls back to
        # Key_new's generic extraction after the last keyword.
        key=["判令被告:","因此,请求判令","诉请人民法院判令:","诉称","要求、","要求判令:","原告据此请求法院判令:","请求判令二被告:","请求.{0,6}判令:","故原告起诉要求判决:","请求.{0,5}判决","要求:","故我公司提起诉讼,要求","原告起诉仅主张","请求判令被告:","原告起诉来院,","依法诉请判令","请求判令","恳请判令","请批评指正法院制止科技中心的侵权行为,","(原告)?.{0,15}诉称"]
        for i in word_list:
            j=text.find(i)
            #辩称|未作答辩|答辩称|
            w=re.findall("事实和理由|未提交书面答辩状|反诉称",text)#如果找到的即是第一个找到的
            if w and "据以上事实和理由" not in text:
                w=w[0]
                w=text.find(w)
            else:
                w=-1
            # Skip keywords that only occur AFTER the defence marker (note:
            # text.find returns -1 when absent, so j>w with w!=-1 covers it).
            if j>w and w!=-1:#and ":" not in w and ":" not in w
                continue
            if i in text:
                # NOTE(review): *i* is interpolated into a regex unescaped --
                # callers pass plain phrases, but metacharacters would change
                # the match; confirm against the call sites.
                #reg = "("+i+")"+".*[\r\n\t]"
                reg = "("+i+")"+".*"
                reg=re.search(reg,text)
                if reg:
                    reg=reg.group()
                    count=reg.count("。")+reg.count(";")
                    if count==0:
                        # No sentence end on this line: extend across lines
                        # up to the first full stop.
                        reg="("+i+")"+".*?。"
                        reg = re.search(reg,text,re.S)
                        if reg is None:
                            return text
                        else:
                            return reg.group()
                    else:
                        return reg
                else:
                    return ""
                break  # unreachable: every path above returns
            if i==word_list[len(word_list)-1]:
                # Last keyword exhausted without a hit: generic fallback.
                reg=Key_new(key,text)
                return reg
"""
诉称的赘余
重新定制[之前的规则]
if "诉称:" in x and x2:
x3=x2.group()+".*"
x1=re.findall(x3,x)
if x1 and x1[0].count("元")>0:
x=x1[0]
"""
def DeleteRepeat(x):
x1="(^.*)((因此[.]{,1}请求判令|故诉请判令:|,诉至法院|要求判令[::]{1}|诉至法院请求判令|我.{0,10}要求.{0,10}公司|我.{0,10}(请求|要求).{0,10}(被告|撤销|返还|赔偿|偿还|电影基金会|公司|解除|停止|删除|支付|履行)|故.{2,10}法院.{0,30}(请求|要求).{0,20}(公司|确认|解除|删除)|我?.{0,20}(要求|请求).{0,30}(:|:)|故向法院提起诉讼,诉请一、))"
x2=re.search(x1,x,re.S)
if x2:
x1=x2.group(1)
x2=x.replace(x1,"")
if x2.count('元')>0:
x=x2
return x
"""
除去后边没有换行的部分数句
换行的后边他如果没有标点符号的话直接替换成;
分号来计算以避免数据有可能出现的问题
那么就以:分号数据的
这个的先不添加主要是新添加了尾部处理的功能
"""
def Remove(x):
x1=re.search("(事实和理由|辩称|未作答辨).*",x,re.S)
if x1:
x=x.replace(x1.group(),"")
x1=re.search("(.*?[。,,]{1})([^。,,]*(变更|变更第[一二三四五六七八九十0-9]{1}项诉讼请求为).*)",x,re.S)
if x1:
x21=x1.group(1)
x22=x1.group(2)
x2=re.search("[一二三四五六七八九0-9]{1}项",x22)
count1=x21.count("元")+x22.count("¥")
count2=x22.count("元")+x22.count("¥")
if x2:
if count1==count2:
x=x22
elif count2==0:
x=x21
else:
x=x21
##庭审中|审理过程中|庭审时|诉讼中|诉讼过程中|在庭审过程中|审理中: 出现撤回,增加至|更改[0-9一二三四五六七八九十]项元进行更改在进行计算
##1)如果没有不影响 2)如果有的话如何设置规则 ###
t="(.*)((审理中|事实和理由:|庭审中|更改诉讼请求|审理过程中|庭审时|审理过程中,原告变更诉讼请求为:|故撤回第一项诉讼请求|诉讼中|更改诉讼请求|本案诉讼过程中|诉讼过程中|在庭审过程中|上述网域名称注册证明、授权委托书经台湾台北地方法院公证处公证,上海市公证协会认证|庭审中,原告明确|审理中,原告确认在诉讼过程中|审理中,原告申请撤回|诉讼中,原告变更第二项诉讼请求为|诉讼中,原告变更第二项诉讼请求|诉讼中,原告变更第二项诉讼请求|审理中,原告变更诉讼请求为|审理中,原告明确|审理中,原告变更诉讼请求|庭审中,原告明确诉请|庭审中,原告明确合理费用|诉讼中,原告撤回|审理中,原告申请将|辩称|未作答辩|庭审中,原告明确第一项诉讼请求为:|故撤回第一项诉讼请求|两原告明确第二项诉讼请求|故申请撤回了第一项诉讼请求|答辩称|未提交书面答辩意见|庭审中原告撤回|庭审中,原告撤回|审理中,原告向本院撤回|在审理中,原告增加诉讼请求|审理中,原告确认被告已停止|审理中,原告撤回|审理中,原告撤回第二项诉讼请求).*)"
## 撤回金额在做细分
st=re.search(t,x,re.S)
if st:
x1=st.group(1)
x2=st.group(2)
Count1=x1.count("元")+x1.count("¥")
Count2=x2.count("元")+x1.count("¥")
if Count1==Count2 or Count1<count2:
x=x2
else:
x=x1
return x
####诉讼更改的数据要抽出来一批数据
###除去括号的表达式的问题
def ClaimStander(x):
x=x.replace("(","(")
x=x.replace(")",")")
x=x.replace("[","(")
x=x.replace("]",")")
x=x.replace("【","(")
x=x.replace("】",")")
x=x.replace(";",";")
x=x.replace("[","(")
x=x.replace("]",")")
x=x.replace(" ","")###首先除空格在除逗号
x=re.sub("[a-zA-ZXx]+万元","XX元",x)
x=re.sub("\d+(\.)\d+",lambda x:(x.group()).replace(".","_"),x)
x=re.sub("\d+(.)\d+",lambda x:(x.group()).replace(".","_"),x)
x=re.sub("\d+(,)\d+",lambda x:(x.group()).replace(",",""),x)
x=re.sub("\d+(,)\d+",lambda x:(x.group()).replace(",",""),x)
return x
    def cn2dig(cn):
        # re.sub callback: convert one matched Chinese/mixed money literal to
        # "<arabic digits>元".  Handles pure Chinese numerals (壹佰元 -> 100元),
        # plain digits, comma-grouped digits, and digit+unit forms (1.5万元).
        # Returns "no" when the match fits none of the recognised shapes.
        cn = cn.group()
        # Drop a leading stray zero/unit/punctuation character.
        if set(cn[0]).issubset(set([u'零',u'0',u'〇',u'.',u','] + list(u'兆亿億萬万仟千佰百角分里厘毫元圆'))):
            cn = cn[1:]
        CN_NUM = {u'〇' : 0,u'一' : 1,u'二' : 2,u'三' : 3,u'四' : 4,u'五' : 5,u'六' : 6,u'七' : 7,u'八' : 8,u'九' : 9,
                u'零' : 0,u'壹' : 1,u'贰' : 2,u'叁' : 3,u'肆' : 4,u'伍' : 5,u'陆' : 6,u'柒' : 7,u'捌' : 8,u'玖' : 9,
                u'貮' : 2,u'两' : 2,
                }
        CN_UNIT = {u'毫' : 0.0001,u'厘' : 0.001,u'分' : 0.01,u'角' : 0.1,u'元' : 1,u'圆' : 1,
                u'十' : 10,u'拾' : 10,u'百' : 100,u'佰' : 100,u'千' : 1000,u'仟' : 1000,
                u'万' : 10000,u'萬' : 10000,u'亿' : 100000000,u'億' : 100000000,u'兆' : 1000000000000,
                }
        CN_DIGITS = [str(i) for i in range(10)] # 用来判断单纯数字,当然包括小数在内 可以直接返回,如123.00元,123元
        CN_DIGITS.append(u"元")
        CN_DIGITS.append(u".")
        CN_DIGITS_C = [str(i) for i in range(10)] # 用科学计数法来存储的数字,比如12,765,89元
        CN_DIGITS_C.extend([u",",u"元",u"."])
        CN_DIG_CHAR = [str(i) for i in range(10)] # 处理数字加大小写的数字 比如:123万元,1亿元,2.3亿元
        CN_DIG_CHAR.extend([u"元",u".",u",",u"万",u"千",u"百",u"十",u"十万",u"百万",u"千万",u"亿",u"兆"])
        CN_DIG_CHAR_DICT = {u"万":10000,u"千":1000,u"百":100,u"十":10,u"十万":100000,u"百万":1000000,u"千万":10000000,u"亿":100000000,u'十亿':1000000000,u'百亿':10000000000,u'千亿':10000000000,u'万亿':1000000000000,u"兆":1000000000000}
        # CN_DIG_CHAR_DICT = {u"万":10000,u"千":1000,u"百":100,u"十":10,u"十万":100000,u"百万":1000000,u"千万":10000000,u"亿":100000000,u"兆":1000000000000}
        CN_ALL = list(CN_NUM.keys()) + list(CN_UNIT.keys()) # 用大写小写存储的数字
        if set(cn).issubset(set(CN_ALL)):
            # Pure Chinese-numeral form: evaluate digit*unit pairs from the
            # right; 'w'/'y'/'z' mark 万/亿/兆 group boundaries.
            lcn = list(cn) # 将cn拆分为列表
            unit = 0 #当前的单位
            ldig = []#临时数组
            while lcn:
                cndig = lcn.pop() # 从cn最后一个开始
                if cndig in CN_UNIT: # 对分离出的进行单位判断
                    unit = CN_UNIT.get(cndig)
                    if unit==10000:
                        ldig.append('w') #标示万位
                        unit = 1
                    elif unit==100000000:
                        ldig.append('y') #标示亿位
                        unit = 1
                    elif unit==1000000000000:#标示兆位
                        ldig.append('z')
                        unit = 1
                else: ## 否则进行数字判断
                    dig = CN_NUM.get(cndig)
                    if unit: # 计算每一个单位的数 比如 四百部分:4*100
                        dig = dig*unit
                        unit = 0
                    ldig.append(dig) # ldig 9 30 400 unit 10
            if unit==10: ## 单独处理10-19的数字因为 此时十作为的是数字而不是单位
                ldig.append(10)
            ret = 0
            tmp = 0
            while ldig: # 对ldig中各部分数字进行叠加
                x = ldig.pop()
                if x=='w': # 单独对万进行处理,因为前面不可以直接相乘,下面同理
                    tmp *= 10000
                    ret += tmp
                    tmp=0
                elif x=='y':
                    tmp *= 100000000
                    ret += tmp
                    tmp=0
                elif x=='z':
                    ret += tmp
                    tmp=0
                else:
                    tmp += x
            ret += tmp
            return str(ret)+u'元'
        elif set(cn).issubset(set(CN_DIGITS)): ## 这种情况相当于为全为数字类型,可以直接返回
            return cn
        elif set(cn).issubset(set(CN_DIGITS_C)):
            # Comma-grouped digits: just strip the separators.
            return ''.join([i for i in list(cn) if i != ','])
        elif set(cn).issubset(set(CN_DIG_CHAR)): ## 对形如1.34万元进行转换
            if re.search(".*?(?=\d)",cn) is None:# 处理数字的前缀部分
                cn_pre = ''
            else:
                cn_pre = re.search(".*?(?=\d)",cn).group()
            cn = re.search("\d.*元",cn).group()
            cn_l = re.search("^\d[\d,\.]*\d?",cn).group() # 截取数字部分
            cn_l = ''.join([i for i in list(cn_l) if i != ',']) # 去逗号
            cn_l = float(cn_l)
            cn_m = re.search("[^\d元\.,]{1,2}",cn)
            if cn_m is None:
                return cn_pre+cn
            else:
                return cn_pre+str(cn_l*CN_DIG_CHAR_DICT.get(cn_m.group()))+"元"
        else:
            pass
        return "no"
"""
除去没有元的括号的内容
"""
def Deletesp(x):
x1=re.findall("\([^\(元]*\)",x)
y1=re.findall("《[^《》]*》",x)
for i in x1:
x=x.replace(i,"")
for j in y1:
x=x.replace(j,"")
return x
#一般的括号去重主要是钱的去重
def Delsp(i):
t1='[;;,\.。,、::]?[^;;,\.。,、::\(\)]*元[^;;,\.。,、::]*(\([^\(\)]*元[^\(]*[\)])[^\(\)]*?[;;,\.。,、::]'
k1='[;;,\.。,、::]?[^;;,\.。,、::]*(\([^\(\)]*元[^\(]*[\)])[^\(\);;,\.。,、::]*元[^\(\)]*?[;;,\.。,、::]'
x=re.findall(t1,i)
y=re.findall(k1,i)
s=x+y
l=len(s)
j=0
while j<l:
i=i.replace(s[j],"")
j=j+1
return i
#
#1)处理特殊符号元的数据 并且的替换到的是数据的问题
#把单价和价格的去掉
#
def deal_money(x):
x=re.sub("[0-9\.一二三四五六七八九十百千万亿]+元等不等的价格","",x)
rm1=re.findall("¥([0-9]+\.?[0-9]*)[^元0-9\.]",x)
###输出数据读额
while rm1:
x=x.replace("¥"+rm1[0],rm1[0]+"元")
rm1=re.findall("¥([0-9]+\.?[0-9]*)[^元0-9\.]",x)
rm2=re.findall("([0-9]+\.?[0-9]*)¥",x)
while rm2:
rm2=rm2.group()
x=x.replace(rm2[0]+"¥",rm2[0]+"元")
###钱的结果数据问题
rm2=re.findall("([0-9]+\.?[0-9]*)¥",x)
return x
#
#1)美元、美金、欧元处理数据的钱
def deal_dollars(x):
####仅仅是对美元的转化数据
x1=re.search("(美元|美金)([0-9]+\.?[0-9]*)(元)",x,re.S)
while x1:
x1=x1.group()
####完成美元的转化问题数据
x2=str(round(float(x1[2:len(x1)-1])*7,3))+"元"
x=x.replace(x1,x2)
x1=re.search("(美元|美金)([0-9]+\.?[0-9]*)(元)",x,re.S)
x1=re.search("([0-9]+\.?[0-9]*)(美元)",x,re.S)
while x1:
x1=x1.group()
####完成美元的转化问题数据
x2=str(round(float(x1[:len(x1)-2])*7,3))+"元"
x=x.replace(x1,x2)
x1=re.search("([0-9]+\.?[0-9]*)(美元)",x,re.S)
###仅仅是美元的转化
####万美元的转换数据
x1=re.search("([0-9]+\.?[0-9]*)(万美元)",x,re.S)
while x1:
x1=x1.group()
x2=str(round(float(x1[:len(x1)-3]),3)*7*10000)+"元"
x=x.replace(x1,x2)
x1=re.search("([0-9]+\.?[0-9]*)(万美元)",x,re.S)
###欧元的计算的问题
x1=re.search("[0-9]+\.?[0-9]*(欧元)",x)
while x1:
x1=x1.group()
x2=str(round(float(x1[:len(x1)-2])*7.27,3))
x=x.replace(x1,x2)
x1=re.search("[0-9]+\.?[0-9]*(欧元)",x)
x1=re.findall("[^0-9\.]{2}[0-9]{1}元",x)
#####除一些被告原告的有元的数据
x1=re.findall("[^0-9\.]{2}[0-9]{1}元",x)
for j in x1:
x2=re.search("(损失|民币|罚款|开支)",j)
if not x2:
x=x.replace(j,"")
return x
"""
利息
"""
def Deal_interest(x):
x=re.sub("(每[件案]{1}(经济损失(含维权合理支出)人民币|经济损失|注册商标|)[0-9/.]+元)","",x)
reg_interest="(利息|违约金|复利|担保费|罚息|尚需支付|已支付|还需支付|还需付款|尚需付款)[\(][^\(\)]*?[\)]"
reg_Every="每.{0,3}[0-9]+\.?[0-9]*?元"
reg_deal="(扣除|余款|已支付|还需支付)[^;;,,。]*?[,;。;,]"
#####利息中关于本金的去重
x1=re.search(reg_interest,x,re.S)
while x1:
x1=x1.group()
x=x.replace(x1,"")
x1=re.search(reg_interest,x,re.S)
####利息中的每的重复
x2=re.search(reg_Every,x,re.S)
while x2:
x2=x2.group()
x=x2.replace(x2,"")
x2=re.search(reg_Every,x,re.S)
####关于扣除和余款方面的
x3=re.search(reg_deal,x,re.S)
while x3:
x3=x3.group()
x=x.replace(x3,"")
x3=re.search(reg_deal,x,re.S)
####处理一些非钱的数字【名字中的刘三元,以及六圆都有可能翻译成6元这个要具体的看文书看看能不能改变】
x4=re.search("、[0-9]元",x,re.S)
while x4:
x4=x4.group()
x=x.replace(x4,"")
x4=re.search("、[0-9]元",x,re.S)
x5 = re.search("以(人民币|价款)?[0-9]+\.?[0-9]*元为(本金|基数|限)",x,re.S)
while x5:
x5 = x5.group()
x = x.replace(x5,"")
x5 = re.search("以(人民币)?[0-9]+\.?[0-9]*元为(本金|基数|限)",x,re.S)
x6 = re.search("((总额)?超过|周年庆|按本金)[0-9]+\.?[0-9]*元",x,re.S)
while x6:
x6 = x6.group()
x = x.replace(x6,"")
x6 = re.search("((总额)?超过|周年庆|按总欠款|按本金|该)[0-9]+\.?[0-9]*元",x,re.S)
x7 = re.search("至被告退还原告保证金(人民币)?[0-9]+\.?[0-9]*元之日止",x,re.S)
while x7:
x7 = x7.group()
x = x.replace(x7,"")
x7 = re.search("至被告退还原告保证金(人民币)?[0-9]+\.?[0-9]*元之日止",x,re.S)
x8 = re.search(".*上述第一、二项相互抵顶",x,re.S)
while x8:
x8 = x8.group()
x = x.replace(x8,"")
x8 = re.search(".*上述第一、二项相互抵顶",x,re.S)
x9 = re.search("元/",x,re.S)
while x9:
x9 = x9.group()
x = x.replace(x9,"")
x9 = re.search("元/",x,re.S)
x10 = re.search("在(人民币)?[0-9]+\.?[0-9]*元(人民币)?的范围内",x,re.S)
while x10:
x10 = x10.group()
x = x.replace(x10,"")
x10 = re.search("在(人民币)?[0-9]+\.?[0-9]*元(人民币)?的范围(之)内",x,re.S)
x11 = re.search("损失(人民币)?([0-9]+\.?[0-9]*元)(人民币)?之[0-9]{1,2}%即[0-9]+\.?[0-9]*元",x,re.S)
while x11:
x11 = x11.group(2)
x = x.replace(x11,"")
x11 = re.search("损失(人民币)?([0-9]+\.?[0-9]*元)(人民币)?之[0-9]{1,2}%即[0-9]+\.?[0-9]*元",x,re.S)
x12 = re.search("租金[0-9]+\.?[0-9]*元(人民币)?从[0-9]{4}年[0-9]{1,2}月[0-9]{1,2}日起至实际支付日止的",x,re.S)
while x12:
x12 = x12.group()
x = x.replace(x12,"")
x12 = re.search("租金[0-9]+\.?[0-9]*元(人民币)?从[0-9]{4}年[0-9]{1,2}月[0-9]{1,2}日起至实际支付日止的",x,re.S)
return x
"""
#前期的规范化处理,数据读额
"""
def pre_regular(x):
x=x.replace(".",".")
x=re.sub("([0-9一二三四五六七八九十壹贰叁肆伍陆柒捌玖拾]+分(店|公司))","公司",x,flags=re.S)
x=x.replace("o","0")
x=x.replace("O","0")
return x
"""
恢复
"""
def Trans(x):
x=re.sub("[0-9]+(_)[0-9]+",lambda x:(x.group()).replace("_","."),x)
return x
"""
钱的规范化
"""
def Add_money(x):
x1=re.search("(损失赔偿|经济赔偿|经济损失|合理支出人民币|合理支出|合理费用|合理费用为|共计|人民币|损失|损失为|总记|共计|共合|合共|罚款|利息)([0-9]+\.[0-9]*[^元0-9\.])",x)
while x1:
x2=x1.group(1)
x3=x1.group(2)
x1=x1.group()
if "万" in x3:
x3=str(round(float(x3[:len(x3)-1]),8)*10000)+"元"
x3=x2+x3
x=x.replace(x1,x3)
else:
x3=x3[:len(x3)-1]+"元"+x3[len(x3)-1]
x3=x2+x3
x=x.replace(x1,x3)
x1=re.search("(合理费用|合理费用为|共计|人民币|损失|损失为|总记|共计|共合|合共|罚款|利息)([0-9]+\.[0-9]*[^元0-9\.])",x)
return x
####
"""
连带赔偿??? 连带问题???解释说明错误?
"""
def Deal_responsible(x):
x=re.sub("(对其中的[0-9/.]+元承担连带(赔偿|清偿|)责任)","",x)
x=re.sub("([^\n]*元[^。\n]*)([,,]{1}其中包括[^\n。]*)",lambda t:t.group(1),x,flags=re.S)
x=re.sub("(元)([,,]{1}(包含|包括)((?!(;|;|[一二三四五六七八九十0-9]{1}、|\n|。)).)*)",lambda t :t.group(1),x,flags=re.S)
return x
######################
#诉讼金额去重的主函数
#1)文书的规范化
#2)文书的合计、共计、总计、共合、合共、共、合
#4)合理费用有时候是一个共计的意思如何的能把他设定为共计的意思
    def Deal_countMoney(x):
        # Sum the amounts in a claim that contains "共计/合计"-style subtotal
        # markers without double counting: per sentence, when the largest
        # amount equals the sum of the others it is taken as a restated
        # total; an overall total spanning all sentences is returned alone
        # via l_money.  Returns the total as a string.
        temp_money=[]###作为临时储存的位置
        #a_money_pre="([^0-9\.X]{2})[0-9\.]+元"
        a_key="(共计|合计|总计|共合|共|计).*?([0-9]+\.?[0-9]*)元"
        # Split into sentence-like chunks that each contain an amount.
        x_money=re.findall("[;;。]?[^;;。]*[0-9]+\.?[0-9]*元.*?[;;。\r\n\t]",x)
        x_count=0
        l_money=0#最后的钱的个数
        for i in x_money:
            x_count=x_count+1
            x1=re.findall("([0-9]+\.?[0-9]*)元",i)
            xll_k=re.findall(a_key,i)
            ###一个元
            if len(x1)==1:
                temp_money.append(round(float(x1[0]),3))
            ###多个元的
            elif len(x1)>1 and len(xll_k)==0:
                Money=[round(float(b),3) for b in x1]
                Money=sum(Money)
                Money=round(Money,3)
                temp_money.append(Money)
            # NOTE(review): this branch is unreachable -- len(x1)==1 is
            # already caught by the first branch above (and it appends a
            # str, which would break sum(temp_money) if it ever ran).
            elif len(x1)==1 and len(xll_k)==1 and x_count!=len(x_money):
                Money=round(float(x1[0]),3)
                Money=str(Money)
                temp_money.append(Money)
            elif len(x1)>1 and len(xll_k)==1 and x_count!=len(x_money):
                money=[round(float(b),3) for b in x1]
                Money=round(sum(money),3)
                # max*2 == sum  =>  the max is a restated subtotal.
                if round(max(money)*2,3)==Money:
                    temp_money.append(max(money))
                else:
                    temp_money.append(Money)
            ###只有一个合计的
            elif len(x1)>1 and len(xll_k)==1 and x_count==len(x_money):
                money=[round(float(b),3) for b in x1]
                Money=sum(money)
                Money=round(Money,3)
                if round(max(money)*2,3)==Money:
                    temp_money.append(round(max(money),3))
                elif max(money)==Money+sum(temp_money):
                    # The max restates the grand total of everything so far.
                    l_money=round(max(money),3)
                else:
                    temp_money.append(Money)
            elif len(x1)==2 and len(xll_k)==2 and x_count!=len(x_money):
                money=[round(float(b),3) for b in x1]
                Money=sum(money)
                Money=round(Money,3)
                temp_money.append(Money)
            elif len(x1)==2 and len(xll_k)==2 and x_count==len(x_money):
                money=[round(float(b),3) for b in x1]
                Money=round((sum(money)+sum(temp_money)),3)
                if Money==2*max(money):
                    l_money=max(money)
                else:
                    temp_money.append(round(sum(money),3))
            else:####要共计的钱XX+上后边的钱等于后边的共计的钱
                money=[round(float(b),3) for b in x1]
                m_l=[round(float(b[1]),3)for b in xll_k]
                Max=max(money)
                if Max*2==sum(money):
                    temp_money.append(Max)
                elif Max*2==sum(money)+sum(temp_money)and x_count==len(x_money):
                    l_money=Max
                elif sum(m_l)*2==sum(money) and x_count!=len(x_money):
                    temp_money.append(sum(m_l))
                else:
                    temp_money.append(sum(money))
        if l_money!=0:
            return str(round(l_money,3))
        else:
            return str(round(sum(temp_money),3))
###注意小数点的保存后边三位小数点
def Mycount_(x):
a_key="(共计|合计|总计|共合|共|计).*?([0-9]+\.?[0-9]*)元"
x=x.replace("人民币","")
xll_k=re.findall(a_key,x)##合计的代码
#文书
x0=re.findall("[xa-zX某]+元",x,re.I)###不祥的
x1=re.findall("([0-9]+\.?[0-9]*)元",x)###提取出来元的集合
money=""
#unknown=re.search("(赔偿|返还|支付).{0,20}(经济损失|合理费用|合理支出|律师费|损失)",x)
if len(x0)>0:
return "不祥"
elif len(x1)==0:
return '0'
elif len(x1)==1:
return (x1[0])
elif len(x1)>1 and len(xll_k)==0:
temp_money=[round(float(b),3) for b in x1]
money=str(round(sum(temp_money),3))
return money
elif len(x1)==2 and len(xll_k)==2:
temp_money=[round(float(b),3) for b in x1]
money=str(round(sum(temp_money),3))
return money
####一个合计得如何进行操作
elif len(x1)>1 and len(xll_k)==1:
money=Deal_countMoney(x)
return money
###多个合计的话如何
elif len(x1)>2 and len(xll_k)>1 :
money=Deal_countMoney(x)
return money
###诉求金额
def Main_Money_law_suit(content):
content=re.sub("[\((]{1}[^\(()\)元钱。\n]{,30}[\))]{1}","",content)
k=["判令被告:","故请求判令1、","故请求判令一、","故诉请判令:","因此,请求判令","故起诉请求人民法院判令","请求判令:","故原告诉至本院,要求判令:","现起诉请求法院判令","请求法院依法判令被告:","原告吴伟敏的诉讼请求为:","请求判令被告:","综上,请求判令:","请求判令被告三峰公司:","请求判令二被告:","原告音集协向本院提出的诉讼请求","原告安徽隆平公司诉称","故原告起诉至法院,请求判决:","为此,原告诉至本院要求:","请求判令被告:","原告为此诉请法院判令:","特提起诉讼,请求判令被告:","故我公司要求.{2,15}:","现诉请法院要求:","故向贵院提起诉讼,请支持:","其诉讼请求为:","请求法院:","现诉请法院:","故我公司要求.{3,20}向我","故我公司要求我乐公司:","故我.{2,}?要求.{3,15}向我","故我协会要求阿里巴巴公司:","现我要求联华快客公司:","现我请求法院判令.{3,15}赔偿","故我要求不二家公司向我支付","我公司要求闽龙轩公司:","我公司现要求糖潮公司我公司现要求银马休闲第二分公司::","要求骆驼公司和奥特莱斯购物中心:","现我公司要求捷报互动公司和酷粉网络公司:","故我请求法院解除双方签订的,要求铂洛德公司","故根据关于单方解除合同的规定,我请求法院判令","故我公司诉之法院,请求法院判令解除双方之间签订的并判令乾像时空","我公司请求法院判令纳森新源科技公司现我公司要求苏珊珊:","我公司现请求法院判令张智安","现我请求法院撤销双方签订的,要求铂洛德公司","现我根据的规定请求法院解除双方签订的,要求铂洛德公司","我现请求法院确认双方因无法实现合同目的而解除,并判令奥科视清公司","故我诉至法院请求解除我与铂洛德公司","故我协会要求北青网公司:","我请求法院撤销双方签订的,要求铂洛德公司","故我请求法院解除双方签订的,要求施德佳公司","现我公司起诉要求新传在线公司","故我诉至法院请求撤销我与铂洛德公司","现我公司起诉要求新传在线公司","故我社要求京东公司、广东音像公司、京凰公司:","故我公司起诉要求绿野影视","现我公司诉至法院请求判令阿里巴巴","现我公司起诉要求风网信息公司","现我公司起诉要求风网信息公司立即停止侵权行为、","故诉至法院,请求判令时越公司","故我社诉至法院请求判令育才图书公司","现我公司起诉要求海圣康","我公司现要求海圣康泰公司:","故我诉至法院请求判令风火云公司","东方家园公司已经构成了根本违约,故我公司请求","现我公司诉至法院请求判令解除我公司与思杰乐创公司","现我公司请求法院判令银星英派公司支付","。我要求中医药出版社、","为此,我公司要求五八公司:",",故我诉至法院请求依法解除我与极鳌公司","故我公司现诉至法院请求判令博集天卷公司","现我中心诉至法院请求判令锐邦公司支付我","为此,我诉至法院请求判令天盈九州公司","故我要求人民网公司在人民网","现我诉至法院请求判令锦辉富业","故我诉至法院请求判令幸福蓝海","综上,我诉至法院请求判令幸福蓝海","我公司请求法院判令凯旋万嘉公司","现我公司诉至法院请求确认双方所签订的已经解除,并判令大安映画","现我公司诉至法院请求判令解除双方所签,笑游公司返还","我请求法院判令白小红、家和家美家居公司:","故我要求人民网公司立即停止使用我享有著作权的漫画作品,在人民网上刊","现我公司诉至法院请求判令天凡","现我公司要求东方昌喜公司","鉴于上述原因,现我诉至法院请求判令","故我公司请求法院判令光影长河","提请人民法院判决:","请求人民法院依法判决:","诉至法院请求判令二被告:","故我公司请求法院判令祥云医院公司:","故我公司请求法院判令凯盈公司、天猫公司:","故我公司请求法院判令国德公司、天猫公司:","故我公司起诉至法院请求判令","。我公司请求法院判令多米在线公司、好音","庭审中诉请:","故我公司要求酷溜网公司停止侵权","故我请求法院判令彭立冲:","故我公司提起诉讼,请求法院判令如下:","故我公司提起诉讼,请求法院判令如下:","故我公司提起诉讼,请求法院判令如下:","故我公司要求和讯公司停止侵权,赔偿我公司经济","原告据此请求法院判令被告:","故诉请被告停止对原告著作权的侵害","请求法院适用\"多余指定原则\"判令:","以及我国有关民事司法政策之规定,判令:","故诉至本院,要求依据我国","诉诸法院,要求:","原告要求追加赵某某、张某某为本案被告参加诉讼,并要求:","我现请求法院解除双方的合同,要求藏今阁公司","现我要求
宏章文化公司","现我公司要求金好风公司、金英马公司继续","故我校现请求法院判令尚德慧公司:","我公司要求豆网公司","现我公司诉至法院请求判令中视丰","现我公司诉至法院请求判令丹颜","故我要求尚德汇智公司","故我要求尚德汇智公司向我公开赔礼道歉,","故我方要求东方昂泰公司立即停止侵权行为","故我公司要求英氏营养食品公司","故我起诉请求法院解除双方签订的","工业大学、马重芳已经构成了违约,我公司请求法院","我公司要求:","我要求必尔得公司:","天海金时公司违反了合同约定,故我公司请求法院","为此,我公司请求法院判令解除双方签订的","故我公司现要求:","现我起诉要求法迪公司","故我公司要求赖一凡","故我向法院提起诉讼,要求判令","现我公司要求博纳西亚公司","现我公司要求京乐福祥公司","故我公司请求法院判令解除双方的,要求恩氏","现我公司要求京东电子商务公司:","故我诉至法院请求判令玻艺坊公司","故我诉至法院请求判令玻艺","故我公司要求酷溜网","现我公司诉至法院请求判令金领艺","为此,我公司诉至法院,请求确认我公司","故我诉至法院请求解除我与大众文艺","我公司要求麦德龙京顺路商场:","故我诉至法院请求判令撤销","故我诉至法院请求撤销我与水晶","现我公司要求周宝柱和万仝旭日公司:","现意林杂志社、杜务请求法院判令海南出版公司、盛世金图公司:","故我公司要求我乐公司和千钧公司:","故我公司要求我乐公司和千钧公司:","我公司要求恒德众宇公司:","特向法院提起诉讼,请求法院作出以下判决:","故诉请要求:","原告特向法院提起诉讼,并当庭明确如下诉讼请求:","故我现要求解除双方签订的,要求","故我请求法院解除双方签订的,并要求","现我公司要求三度空间公司","故我公司起诉要求指云时代公司:","因此,我公司诉至法院,请求","故我公司要求珍审合美公司朝阳水椎子店:","故我公司现要求少儿出版社公司、家乐福公司:","现我要求海鹰国信公司继续履行合同","现我公司要求尊尚壁公司:","故我公司要求瑞联文众公司:","故我公司要求任游网络公司:","故我公司现请求法院判令樊榕公司向","故我要求中国文联出版社","故我公司要求山合水易公司、要雁峥和郭帆:","我公司要求酷溜网公司立即停止侵权行为","故我公司要求北京盛世通公司停止","故我公司要求旅游卫视公司向我公司赔偿","故我公司要求旅游卫视公司向我公司赔偿","故我请求法院判令解除双方签订的,并要求","我请求法院解除双方签订的,要求大雄义谨公司","我请求法院判令解除双方签订的,并要求","故我公司要求华普公司立即停止","故我公司要求飘亮购物中心立即","故我公司要求洁美捷公司立即","故我公司要求华普公司立即","我公司要求阿里云公司和阿里巴巴公司:","故我公司现要求恩迪安达公司:","故我公司请求法院判令农业出版社:","诉请人民法院依法判令1.","原告特向法院提起诉讼,并当庭明确如下诉讼请求:","特向法院提起诉讼,请求法院作出以下判决:","原告特诉至人民法院,请求人民法院判决如下:","特向法院提起诉讼,请求法院作出以下判决:","原告特诉至人民法院,请求人民法院判决如下:","原告特诉至人民法院,请求人民法院判决如下:","原告特向法院提起诉讼,并当庭明确如下诉讼请求:","故请求人民法院依法判处被告:","原告提起诉讼,请求判令:","原告诉请法院判令被告:","故诉至本院,请求判令被告:","现诉至本院,要求判令两被告:","特提起诉讼,请求法院:","请求法院依法判令两被告:","据此,原告诉请判令两被告:","原告提起诉讼,请求判令:","特提起诉讼,请求法院:","据此,原告诉请判令两被告:","据此请求法院依法判决:","请求法院依法判令两被告:","故请求人民法院依法判处被告:","请求法院依法判令两被告:","故请求人民法院依法判处被告:","原告诉请法院判令被告:","特诉至本院,请求本院判令:","故诉至法院,诉讼请求:","特提起诉讼,请求法院:","特诉至本院,请求本院判令:","故诉至法院,诉讼请求:","故诉至本院,请求判令被告:","现诉至本院,要求判令两被告:","诉请人民法院依法判令1.","诉请人民法院依法判令1.","故诉至本院,请求判令被告:","现诉至本院,要求判令两被告:","据此,原告诉请判令两被告:","据此,原告诉请判令:","据此,原告诉请判令:","为此请求判令被告:","现起诉请求法院判令:","为此,诉请法院判令:","据此请求法院依法判决:","为此请求判令被告:","现起诉请求法院判令:","为此,诉请法院判令:","特诉至本院
,请求本院判令:","故诉至法院,诉讼请求:","诉请人民法院依法判令1.","诉请人民法院依法判令1.","原告诉请法院判令被告:","原告提起诉讼,请求判令:","原告请求法院判决:","请求本院判令:","故起诉要求判令:","综上,请求法院:","故请求判令被告:","请求法院判决:","请求法院判令:1、","诉请判令:1.","请求法院判令:1","故请求依法判令:1、","请求依法判令被告:","特提起诉讼,请求:","请求:判令被告","请求法院判决:","为此,请求法院:","请求法院判令:","故请求判令被告:","请求判令两被告:1、","请贵院依法判令:","原告诉请判决被告:","故恳请法院:","请求法院判决:","原告请求法院判决:","请求本院判令:","故起诉要求判令:","综上,请求法院:","请求法院判令:1、","请求法院判令:1","故请求依法判令:1、","请求判决:1.","请求依法判令被告:","特提起诉讼,请求:","请求:判令被告","为此,请求法院:","据此,原告诉请判令:","特请求本院:","据此请求法院依法判决:","为此请求判令被告:","现起诉请求法院判令:","为此,诉请法院判令:","请求法院判令:","故请求判令被告:","请求判令两被告:1、","请贵院依法判令:","原告诉请判决被告:","故恳请法院:","请求本院判令:","故起诉要求判令:","综上,请求法院:","请求法院判令:1、","诉请判令:1.","请求法院判令:1","故请求依法判令:1、","请求判决:1.","请求依法判令被告:","特提起诉讼,请求:","请求:判令被告","为此,请求法院:","特请求本院:","请求法院判令:","请求判令两被告:1、","请贵院依法判令:","原告诉请判决被告:","故恳请法院:","特请求本院:","故原告向法院提起诉讼,请求判令:","我公司现请求法院判令霖林木公司:","故请求依法判令:","要求:1、被告停止侵权","要求:1、判令被告","原告诉至法院:","请求法院判令三基公司:","请求法院判令三基公司:","故提起诉讼:","原告微软公司诉请本院判令:","为此,特诉请人民法院:","为此,洪太公司诉请:","特请求法院判令二被告:","请求依照我国《民法通则》、《商标法》、《反不正当竞争法》等相关法律判令:","请求判令熔盛公司:","请求人民法院:","诉至法院,请求判定被告:","请求法院判处被告:","请求法院判处:","特提起诉讼,请求:","请求人民法院:","原告起诉请求法院判令:","请求判令1、","请求判令特惠美公司:","故起诉请求法院判令:","起诉要求判决:","故诉请判决:","为此,原告诉至法院,请求被告","综上,请求法院判令永迎公司:","据此,亚希公司诉请本院判令安俊宇:","据此,罗重恩诉请本院判令郑若刚:","据此,鲜琦诉请本院判令张艺谋:","据此,请求法院判令新世界科技公司:","综上,请求法院判令各被告:","综上,李思敏请求人民法院依法查明事实,判令:","现诉至法院,要求搜狐公司:","汉王公司请求本院判令:","现原告特诉至法院,诉讼请求如下:","诉讼请求如下:","故起诉要求法院判令:","向本院提出诉讼请求:","提出诉讼请求:","综上,请求法院判令泰金联公司","请求法院判令窦琴芬:","遂起诉法院要求判决:","原告据此请求本院判决","我公司要求迈视传媒公司:","综上,师爱琴请求法院判令","综上,原告请求人民法院判决","现起诉要求法院依法判决:","请求判令四被告:","我要求云端公司","请求判令两名被告:","故起诉至法院要求:","请求人民法院判决:","故我要求华动飞天公司:","故我要求创盟音乐公司、华动飞天公司:","故提起诉讼,诉请被告:","现原告向法院提起诉讼,请求判令","故要求:","向本院起诉,请求判令:","特起诉至法院,要求判令两被告:","诉至法院,请求判令:","请求本院判令:","原告要求法院:","故诉诸法院,要求:","故起诉,要求:","故请求本院判令:","两原告请求判令:","故原告请求本院判令:","原告依法诉至法院,要求判令四被告:","故起诉,要求","原告诉至法院请求","原告起诉要求:","故原告提起诉讼,要求法院判令被告:","起诉要求:","特向贵院提起诉讼,请求:","请求:","故诉诸法院,请求判令被告","现起诉,要求","故原告双盾公司诉请:","请法院支持原告的诉讼请求,判令确认被告","故提起诉讼,要求二被告:","故原告诉至法院,要求被告","原告起诉要求,","故向法院起诉,请求判令
被告","要求判令:","现诉请:","故诉至法院,要求中国惠普","原告要求法院判令:","请求判令:","故请求法院判令被告光明日报社:","要求判令被告:","原告起诉至法院,要求","故原告诉至法院,请求判令被告","原告起诉要求被告","故请求法院求判令:","原告诉至本院,要求判令:","原告起诉至法院,要求:","故起诉至法院,要求判令:","诉至法院,要求","原告起诉,要求","据此,请求判决:","故原告起诉至法院,要求判令:","诉至法院,请求判令:","故原告请求判令被告:","东软公司请求本院判令:","同星公司请求本院判令:","请求人民法院:","维蒂亚公司请求本院判令:","原告请求本院判令:","故原告请求判令被告:","故原告起诉要求:","特提起诉讼,要求判令:","原告要求法院:","请求法院判令:","故起诉,要求判令两被告:","特请求法院判令二被告:","为此,原告依法向贵院起诉,","故诉讼法院判令:","故要求判令四被告:","原告依法诉至法院,要求法院判决:","故原告起诉要求:","现原告诉至本院,要求判令:","原告诉至法院,要求判令:","故原告诉至本院,要求判令:","原告起诉要求:","原告遂起诉,要求判令","故要求判令:","故起诉,要求:","故要求判令:","故原告诉至本院,要求判令:","请求法院:","故原告起诉要求:"," 请求判决:","原告神仆公司诉请判令","原告上海家化公司诉请判令:","诉至法院,请求:","故原告起诉要求:","故原告提起诉讼,要求:","故诉请本院判令","故起诉,要求三被告","故请求法院判令:","请求人民法院依法判令被告:","故原告诉至法院,请求判令被告","诉至人民法院,请求判令被告","故诉至法院,请求判令:","故我诉至法院请求判令解除双方之间达成的纪录片制作口头协议","现诉至法院要求:","请求人民法院依法判令","诉至法院,请求依法判令","请求人民法院依法判令:","特诉请法院判令两被告:","故诉诸法院,请求判令:","诉至法院,请求判令","为此,请求法院判令","请求依法判令三被告","因此请求法院依法判令","诉请法院依法判令被告:","诉请法院依法判令","诉至法院要求判令:","向法院提起诉讼,诉请:","请求判令交隧集团:","故我诉至法院,要求","现我协会提起诉讼","故我公司诉至法院","请求法院依法判令被告","请求人民法院判令","请贵院依法判决:","请求法院依法判令:","依法判令被告:","诉请人民法院判令","请求本院判令被告","故起诉,请求法院判令:","为此,原告起诉至法院要求:","请求人民法院判令三被告:","故诉至法院,请求判令书生公司:","原告请求本院判令两被告:","原告项百平起诉称:","原告康可公司起诉称:","故我公司诉至法院,请求判令","要求法院:","故原告诉至法院,请求判令被告","故起诉至法院,请求判令","我公司起诉至法院,请求判令","故我公司起诉至法院,请求判令","请求判令被告金三力公司:","向贵院提起诉讼,要求判令:","请求法院依法判令被告:","我要求网高网络公司","故我诉至法院,请求判令","故诉求法院判令被告","故我公司诉至法院,请求法院判令","故请求判令:","请求人民法院判令三被告","诉请判令被告泰格网吧","故原告诉请法院判令","故原告请求法院判令","故诉请法院要求判令被告:","原告请求法院判令被告:","原告诉请:","请求1、","为此向贵院提起诉讼","故诉至法院,请求判令","请求判令被告:","诉至法院,请求法院判令","诉请法院依法判令被告","请求法院判决:","故诉至法院请求判令:","原告请求判令:","请求法院判令被告:","诉请:","请求本院判令两被告","故起诉请求判令:","诉请本院判令:","请求法院判令:","提出诉讼请求:","故诉请法院判令:","原告请求法院判令:","故请求本院判令:","故起诉来院,要求两被告","原告请求法院判令","原告请求法院","诉请法院判令被告","故请求法院判令","诉至法院,请求判令 ","诉请判令被告:","请求法院判令被告","原告请求判令两被告","特向法院提起诉讼请求","现诉至法院,请求判令","故请求法院判令:","故诉至贵院要求判令","故诉请法院判令","请求法院判令二被告:","故提起诉讼,要求被告","据此,请求法院判令","故请求人民法院判令","请求人民法院判令:","请求法院判令二被告:","请求法院依法调解或判决", "诉至本院,请求判令被告: 
","诉至本院,请求判令:","故原告诉请判令:","故请求法院:","现起诉要求","原告请求判令被告","请求法院判令二被告","请求依法判令被告:","故原告起诉请求:","诉请法院判令","请求判令被告:","请求判令:","请求依法判令:","提出诉讼请求:","原告诉讼请求:","起诉请求判令:","诉讼请求:","诉请判令:","原告现诉请:","请求判令被告","要求判令:","故此,请求判令,","现请求:","综上,诉请:","依法向贵院提起诉讼,请求贵院依法判决","故原告要求法院:","故原告要求:","两原告请求本院判令:","请求本院:","请求法院判令:","请求判令两被告:1、","请贵院依法判令:","原告诉请判决被告:","故恳请法院:","特请求本院:","诉请判令:1.","请求判决:1.","请求贵院:","要求判令:","请求贵院:","起诉要求:","请求贵院:","请求判令:","要求判令:","要求判令:","起诉要求:","起诉要求:","请求判令:","特提起诉讼,请求法院","请求判令三被告",",请求判令两被告","请求法院依法判令被告","故诉至法院请求判令","现要求二被告","请求判令两被告:","据此,要求:","诉请法院判决:","判令路慧向","请法院依法判令","请求判决:","起诉要求:","请求法院判令:","诉至法院判令:","诉请法院判令:","故起诉请求:","请求判令五被告停止","起诉请求判令","请法院判令","故诉至法院","依法判决:","请求判令停止侵权","故我起诉要求","故起诉要求:","请求判令:","请求依法判令","原告请求法院:","要求判令被告","原告请求:","请求法院依法判令","请求法院:","故请求判令:","原告起诉要求:","提起诉讼请求:","请求法院判令三基公司:",",故起诉:","请求判决被告","请求法院判令三被告","故请求:","请求判令七被告","诉请判令被告赔偿","请求法院判决","请法院判决","请求判令三被告:","特请求法院判令","故诉请法院:","请求法院判令被告支付我","遂向本院提起诉讼,请求判令被告","故诉请判令","故请求法院判令:","我协会提起诉讼","特请求法院:","请贵院依法判决","依法判令被告","诉至法院要求","诉请法院判决","故起诉要求","起诉称:","要求被告:","请求:","请求判决:","现我公司要求","请求法院判令","判令:","请求本院判决","判令二被告","请求本院判令","故我公司要求","我社起诉要求","故原告诉请:","请求判令,二被告支付","请求判令两被告","请法院依法判决","请求判令两被告","请求判令美盛公司","要求法院判令","原告张小泉公司诉称,","原告网尚公司诉称:","请求判令两被告","判令高从武","据此,原告凯摩高公司诉求:","据此,原告农友公司诉求:","原告特诉至法院,请求法院确认","故原告要求四被告","故向人民法院提起诉讼,请求法院依法确认","请求法院.*判令","我协会要求","依法判令:","要求判令:","故起诉至法院,要求","故原告请求判令","请求我院判令两被告","请求:","故要求,","故原告为维护合法权益,诉请判令:","故我诉至法院请求判令中教星中心支付","故我公司请求法院判令易廷公司、中视泽一艺术中心和无限界公司:","起诉请求被告","故请求1、","故诉清法院判令被告","为此,诉请判令被告","判令被告:","请求判决","依法判决","请求判令停止","要求判令","起诉请求","故起诉:","故请求:","故我中心诉至法院,要求","据此,请求法院判令两被告","故诉清法院判令被告","故我们六人诉至法院,要求","故我社诉至法院,要求","故我工作室诉至法院,要求","特提起民事诉讼,要求新浪公司","现要求","请求判令,二被告支付","依法判令","判令被告","起诉要求","请求如下","诉请判令","请求判令","现起诉","原告请求","故起诉请求","故请求法院","故请求判令","起诉称","要求法院","判令:","要求被告","两被告:","两被告:","故原告诉请被告","现诉至法院,要求判决","请求法院确认原告","特诉至法院,请求","原告要求","故起诉","故我要求","故我社要求","故要求,","故诉至本院请求确认","故请求本院依法确认","原告兴发公司诉请:","请求确认","故我们二人诉至法院,要求","故我协会诉至法院,要求","我公司现诉至法院,要求","故我社诉至法院,要求","诉称:",
"故,我要求","现我再次起诉,要求",",故要求","判令:","故我诉至法院,请求",",我要求作家出版社","综上,天津神宁生物公司认为北京神宁科贸公司、北京圣安百草生物公司和陈如刚构成不正当竞争,要求","故,我公司要求","请求人民法院判决","故要求三七二一公司按照我公司的声明要求","故诉请法院,判令","因此我公司起诉,要求","因此我公司起诉,要求","故诉请:","故我公司起诉,要求","故请求被告支付","现我要求","请求刘长","判令马从凤","判令鑫陆玩具店","特诉至法院,请依法判处被告","请求依法判另1、","诉讼请求","并判令","诉请","故原告","诉称,"]
reg = "[零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九,\.兆亿億萬万仟千佰百十拾]{1,16}[元圆]{,1}[零0〇壹1一贰2两二貮叁3三肆4四伍5五陆6六柒7七捌8八玖9九角分里厘毫万]{0,9}[元角分里厘毫]{1}"
x=pre_regular(content)##前期的规范化处理
x=re.sub("[一二三四五六七八九十壹贰叁肆伍陆柒捌玖]{1}[元分厘]{1}公司","xx公司",x,flags=re.S)
reg0="(0\.[0-9]+)(万元)"
x=re.sub(reg0,lambda t :str(round(float(t.group(1))*10000,3))+"元",x)
x=re.sub("((人民币|经济损失|合理支出|合理费用|律师费)[0-9]+/.{,1}[0-9]*)([^0-9元]{1})",lambda t: t.group(1)+"元",x)
x=re.sub(reg,cn2dig,x)
reg0="(人民币[0-9/.]+)([^0-9/.元])"
x=re.sub(reg0,lambda t: t.group(1)+"元",x)
x=deal_money(x)###美元处理数据的问题
x=deal_dollars(x)###处理美元
x=Add_money(x)####添加元
x=cut_(x)
x1=match_multi_word(k,x)
x1.replace(":",":")
#print("match_multi_word:")
#x1=Remove(x1)
#print("Remove之后的"+x1)
#print("Remove数据的条数:"+x1)
x1=DeleteRepeat(x1)
x1=Deal_responsible(x1)
x=Deal_interest(x1)####除掉一些利息的混乱的赘余信息
x=ClaimStander(x)###规范文书中的一些问题
x=Deletesp(x)####除中括号的一些赘
x=Delsp(x) ####括号去重的问题
x=Trans(x)###小数点的恢复
money=Mycount_(x)
return money
content=i[2][2]+i[3][2]
money=Main_Money_law_suit(content)
return money
#?:?:: (要没有变更的。。。???这个怎么进行修改)诉讼请求为
#m= MainLawsuit1(L) | zcb-gz | /zcb_gz-1.1.tar.gz/zcb_gz-1.1/zcb_gz/ronghui/Money/Appealmoney1.py | Appealmoney1.py |
import re
def MainCut2(Content):
def Cut2_All_Judgment(x):
Rex="判决如下|判决:|改判如下:|判决[一-龥]*:"
x1=x.split("\n")
st=""
xm=""
flag=False
for j in x1:
xm+=j+"\n"
x2=re.search(Rex,j)
fx=re.search("判决[^。,;]*(附图|附页):",j)
if x2 and not fx:
st=j+"\n"
flag=True
else:
st+=j+"\n"
x=xm.replace(st,"")
return x,st,flag
"""
处理
"""
def Deal2_1(i):
    """Locate the first-instance judgment paragraph inside a judgment document.

    ``i`` is a tuple whose ``i[0]`` is the document id and ``i[1]`` the raw text.
    The text is normalized (Stander), the final verdict section is split off
    (Cut2_All_Judgment), and a battery of regexes then tries to isolate the
    paragraph that quotes the *first-instance* judgment.

    Returns ``(True, (id, head, first-instance judgment paragraph, remainder,
    final verdict section, branch-code))`` on success, otherwise
    ``(False, ('wrong',))``.

    Bug fix: the last branch contained ``l_m.append(8)`` where ``l_m`` was
    never defined, raising NameError whenever that branch was taken; the dead
    statement has been removed.
    """
    l_temp=[]
    temp=i[1]
    l_temp.append(i[0])
    x=Stander(temp)
    x1,temp,Flag=Cut2_All_Judgment(x)
    if Flag:
        # rex / rex_ recognise claim/defence phrases; a candidate paragraph that
        # also matches them is a party statement, not the first-instance verdict.
        rex="(诉讼请求为:|诉称:|辩称:|请求:|请求判决:|请求判决如下|请求判令:|请求法院(判令|判决)|^[\n]{,1}[^\n]{,20}不服)"
        rex_="(诉讼请求为:|^[一-龥,、]*原告[一-龥]*诉称|^[^。]*向[一-龥]+院提起上诉称:|^[^。,,]{,20}称:|^[^。]*辩称:|^[^。]*诉(讼|)称:|请求:|请求判决:|请求判决如下|^[^。]*反诉称:|请求判令:|请求法院判令[一-龥]+:|请求法院判令:)"
        x20=re.search("(^.*\n)((综上|关于赔偿数额)[^\n]*(判决:|判决如下|原审法院依照《最高人民法院)[^\n]*\n|原审法院依照《中华人民[^\n。]*(判决:|判决如下)[^\n]*\n)",x1,re.S)
        Rex20=re.search("综上[^\n]*作出\([0-9]+\)[^。,,\n]*行政判决[^。\n,,]*[,,](判决:|判决如下)",x1,re.S)
        x2=re.search("(^.*?\n)(((综上[^\n]*依[照据]{1}《中华人民共和国[^\n]*(受理费|诉讼请求不予支持|承担连带))|(综上|根据|依照|据此|[一-龥]+法院)[^\n]*(判决:|判决如下)|([原一]{1}审法院遂依照)[^\n]*《[^\n]*(判决:|判决如下)|[一原]{1}审法院(遂|)判决:|[一原]{1}审法院据此(判决:|判决如下))[^\n]*\n)",x1,re.S)
        x21=re.search("(^.*?\n)([^\n]*(判决:|判决如下|原审法院据此判决:|原审法院判决:|一审法院判决:)[^\n]*\n)([^\n。]*(不服|判决后|宣判后|原审判决后,|一审宣判后,|如果被告未按本判决|上诉称:|向本院提起上诉,))",x1,re.S)
        x3=re.search("""(^.*\n)((([一原]{1}审法院认为|本院认为)[^\n]*(判决:|判决如下)|[^\n]*[一原]{1}审法院(遂|)判决:|[一原]{1}审庭审中[^\n]*(判决:|判决如下)|(因此|)[.]{1}[原一]审法院(据此|)(依|根|跟)(照|据)[^\n]|[^0-9a-zA-Z\n。]*法院(跟|根|依)(照|据))[^\n]*[\n])""",x1,re.S)
        x4=re.search("(^.*?\n)(((综上|原审法院判决认为)[^\n]*[依根据]{1}[此照据]{1}《中华人民共和国[^\n]*(案件受理费|判决驳回)|[^\n]*(判决:|判决如下)|[一二三四五六七八九十]{1}、[^\n]*(判决:|判决如下)|[^\n]*(综上|据此|根据)[^\n]*(判决:|判决如下))[^\n]*\n)",x1,re.S)
        # x5 keys off the document's second paragraph instead of the verdict phrase.
        x5=re.search("(^.*?\n)([^\n]*(遂|,|,)判决[^\n]*\n)([^\n。]*不服|判决后|宣判后|上诉称:)",x1,re.S)
        rex4_=re.search("(^.*?\n)(([原一]{1}审法院判决认定:[^\n]*据此.原审法院依照《中华|[^\n]*判决如下[^\n]*|[^\n]*受理费[^\n]*|[^\n]*(综上,原审法院判决|原审法院判决|法院判决如下|综上,原审法院依照《中华人民共和[^。]*判决:|[^\n]*《中华人民共和国[^。]*,判决[^。]{1}|判决驳回|[^\n]*综上[^\n]*判决|综上,原审法院依据《中华人民共和国[^。]*(判令|判决)[^。]*|综上,依据《中华人民共和国[^\n]*判决|[,,]{1}判决[^\n]*)|[^。,,]{,10}法院判决认定[^\n]*(根|依|跟)(据|照)[^。]*判决[^\n]* |综上,原审法院判决:|[原一]审法院[^。]{,10}判决[^\n]*|判决:|判决如下|据此[^。]*(判决[^\n]*|判令上诉人向被上诉人赔偿经|据此判决:|依据《[^。]*判决:))[^\n]*[\n])",x1,re.S)
        rex_4=re.search("(^.*?\n)([^\n,,。、;:]*法院判决认定[^\n](判决|判决如下)[^\n]*\n)",x1,re.S)
        rex__=re.search("(^.*?\n)(据此,[^\n]*依照《中华[^\n]*原审法院判决[^\n]*\n)",x1,re.S)
        if x2 or x3 or x4 or rex4_:
            if x20 and not Rex20:
                l_temp.append(x20.group(1))
                l_temp.append(x20.group(2))
                x3=x1.replace(x20.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(0)
                return True,tuple(l_temp)
            elif x2 and not (re.search(rex_,x2.group(2),re.S)) and '政判决书,判决:' not in x2.group(2):
                l_temp.append(x2.group(1))
                l_temp.append(x2.group(2))
                x3=x1.replace(x2.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(1)
                return True,tuple(l_temp)
            elif x21 and not (re.search(rex_,x21.group(2),re.S)):
                l_temp.append(x21.group(1))
                l_temp.append(x21.group(2))
                x3=x1.replace(x21.group(1),"")
                x3=x3.replace(x21.group(2),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(2)
                return True,tuple(l_temp)
            elif x3 and not (re.search(rex_,x3.group(2),re.S)):
                l_temp.append(x3.group(1))
                l_temp.append(x3.group(2))
                x3=x1.replace(x3.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(3)
                return True,tuple(l_temp)
            elif x4 and not (re.search(rex_,x4.group(2),re.S)):
                l_temp.append(x4.group(1))
                l_temp.append(x4.group(2))
                x3=x1.replace(x4.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(4)
                return True,tuple(l_temp)
            elif rex4_ and not (re.search(rex_,rex4_.group(2),re.S)) and "原审法院判决查明:" not in rex4_.group(2):
                l_temp.append(rex4_.group(1))
                l_temp.append(rex4_.group(2))
                x3=x1.replace(rex4_.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(5)
                return True,tuple(l_temp)
            elif rex_4:
                l_temp.append(rex_4.group(1))
                l_temp.append(rex_4.group(2))
                x3=x1.replace(rex_4.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(6)
                return True,tuple(l_temp)
            ### branches for "不服" (appeal) phrasings
            elif x5 and not (re.search(rex,x5.group(2),re.S)):
                l_temp.append(x5.group(1))
                l_temp.append(x5.group(2))
                x3=x1.replace(x5.group(1),"")
                x3=x3.replace(x5.group(2),"")
                l_temp.append(x3)
                l_temp.append(temp)
                l_temp.append(7)
                return True,tuple(l_temp)
            elif rex__ and not (re.search(rex,rex__.group(),re.S)):
                l_temp.append(rex__.group(1))
                l_temp.append(rex__.group(2))
                x3=x1.replace(rex__.group(),"")
                l_temp.append(x3)
                l_temp.append(temp)
                # (removed dead ``l_m.append(8)``: l_m was undefined and raised
                # NameError whenever this branch ran)
                l_temp.append(8)
                return True,tuple(l_temp)
            else:
                return False,tuple(['wrong'])
        else:
            return False,tuple(['wrong'])
    else:
        return False,tuple(['wrong'])
    return False,tuple(['wrong'])
"""
1)文书落款
"""
def Cut2_Inscribe(x):
    """Split off the signature block (judges / clerk names) from the end.

    Returns (body text, signature block); the signature block is '' when no
    role keyword (审判长, 书记员, ...) is found.
    """
    pattern = "(^.*?\n)([^\n]*(审判长|审判员|陪审员|书记员|法官助理|速记员|速录员).*)"
    match = re.search(pattern, x, re.S)
    if match is None:
        return x, ""
    return match.group(1), match.group(2)
"""
1)判决依据
2)判决主文
3)判决尾部
"""
def Cut2_judgment(x):
    """Split the closing section into (verdict basis, operative part, cost/appeal tail).

    First tries the full three-part pattern; failing that, splits only at the
    verdict marker; failing both, returns the input unchanged with empty parts.
    """
    full = re.search("(^.*(依照《[^\n。]*规定:\n|判决如下|判决:)(:|))(.*?\n)([^\n]*(本案诉讼费|如未按(本|)判决|受理费|如不服|可以在判决书送达之日起|诉讼费[^\n]*由[^。\n]*[负承]{1}担).*)", x, re.S)
    if full:
        return full.group(1), full.group(4), full.group(5)
    partial = re.search("(^.*(依照《[^\n。]*规定:\n|判决如下|判决:)(:|))(.{3,})", x, re.S)
    if partial:
        head = partial.group(1)
        return head, x.replace(head, ""), ""
    return x, "", ""
###
def to_sub3(content):
    """Decompose the closing portion into [basis, operative part, tail, signature]."""
    body, signature = Cut2_Inscribe(content)
    basis, operative, tail = Cut2_judgment(body)
    return [basis, operative, tail, signature]
"""
再次切分
"""
def deal2_repeat1(x):
    """Separate the court-opinion part from the verdict that follows it.

    Returns [opinion, verdict] when a split point is found, otherwise [x].
    """
    opinion = re.search("(^[^。]*(本院认为|综上所述|综上).*。)([^。]*(判决:|判决如下).*)", x, re.S)
    if opinion:
        return [opinion.group(1), opinion.group(3)]
    basis = re.search("(^.*。)([^。]*(依照|依据|据此).*(判决|判决如下)[:]{,1}.*)", x, re.S)
    if basis:
        head = basis.group(1)
        return [head, x.replace(head, "")]
    return [x]
"""
一审判决结果
"""
def deal2_repeat2(x):
    """Split first-instance text at its verdict marker.

    Returns [text up to the marker, remainder] when a marker is found,
    otherwise [x].
    """
    direct = re.search("(^.*(判决:|判决如下)[:]{,1})(.*)", x, re.S)
    if direct:
        return [direct.group(1), direct.group(3)]
    # Fallbacks: split after a legal-basis phrase followed by 判决/判令.
    fallback_pats = (
        "(^.*(依照|据此|依据|根据)((?!(判决)).)*判决)(.*)",
        "(^.*(依照|据此|依据|根据)((?!(如下:|规定,判令)).)*(如下:|判令))(.*)",
    )
    for pat in fallback_pats:
        found = re.search(pat, x, re.S)
        if found:
            head = found.group(1)
            return [head, x.replace(head, "")]
    return [x]
"""
分段下
上诉人诉称、被上诉人辩称、二审查明、二审本院认为、判决依据、判决主文、判决尾部、落款
"""
def Deal_to_page2(i):
    """Split the second-instance portion of the document (``i[3]``) into sections.

    A line-wise state machine advances through four sections -- appellant
    claims (rex21), appellee replies (rex22), second-instance findings
    (rex23), second-instance court opinion (rex24/Rex24) -- padding '' for
    sections that were skipped, then appends the decomposed closing part of
    ``i[4]`` via to_sub3().  Normally returns a list of 9 strings.
    """
    # Section-start patterns: very large alternations of Chinese judgment phrases.
    rex21="(^[^。,、]{,5}不服,以专利复审委员会认|一审判决后,[^。]*提起上诉。|^[^。,]{,10}上诉理由为|^[^。]{2,10}公司以原审判决[^。]*向本院提起上诉,请求改判|^[^。,]{2,10}向本院提起上诉,请求改判|^[^。]*公司上诉认为,|^.{,5}公司不服,上诉认为|^法院提出上诉|^上诉人认为|^上诉人[^。,、]{,10}称|^[^。]{,10}{}不服,请求撤销原审判决|不服.*其上诉理由为:|^上诉请求[及和]{1}理由|二审(审理|)期间,上诉人提交了|上诉人[^。,,]*陈述为:|^判决后[^。]{,30}不服|提[起出]{1}上诉,请求依法改判|^[^。]{,20}公司向本院提起上诉|^[^。]{,30}公司上诉认为,|提起上诉,请求依法改判|向本院提起上诉,主要上诉理由是:|^上诉人[^。]*要求二审法院|上诉请求撤销.*上诉理由:|(向本院提起上诉|不服[原一]审判决).*(理由为:|理由如下:)|法院再审过程中,原审原告[^,,。]*称:|不服原审判决,上诉(至本院|请求)|上诉人[^。,,]*称:|不服原审判决,向本院上诉|(宣判后|一审判决宣告后).*(不服|提[出起]{1}上诉).*(其理由[为是]{1}:|)|提起上诉.*上诉理由为:|不服,上诉认为|^上诉人[^。]*提出|不服[^。]*提[出起]{1}上诉|向本院提出上诉.*其上诉请求是:|向本院提起上诉,请求改判|上诉提出:|不服[原一]{1}审判决[^。]*称:|二上诉人共同上诉请求|不服上述判决,向本院上诉.*理由是:|^原审法院判决后[^。]*不服提起上诉|不服原审法院判决.*上诉理由为:|上诉至本院.*理由为:|不服.*上诉理由是:|诉至一审法院称|宣判后[,,]{1}[一-龥]+提起上诉|^上诉请求和理由:|不服[^。]*向本院提起上诉|宣判后[^。]*其主要上诉理由为:|^[^。]*不服(原审|)判决,向本院提起上诉|请求判决:|^[^。]*不服判决,向本院提起上诉|^(原审|一审|)(判决后|宣判后|判决宣判后)[^。]*不服[^。]*(上诉|诉至本院|诉讼请求:)|上诉人[^。]*法院,请求判令.{,3}被告|^[^。]*上诉请求,撤销|向本院提出上诉:|上诉请求[^。]*.事实和理由:|^[^。]*诉至法院,请求判令|诉讼请求为,判令[一-龥]{,5}被告:|^上诉人[^。]*请求|向原审法院提起诉讼,请求:|请求法院:|上诉请求均为:|^[^。:;;:a-zA-Z]{,10}上诉请求.*(事实和理由|上诉理由为):|上诉认为:|起诉请求:|请求依法判令:|^[^。,:;;:a-zA-Z]*诉讼请求为,判令|^[^。,:;;:a-zA-Z]*不服一审判决|^[^。,,;:;]{,5}公司上诉请求|诉请法院判令:|^[^。,;:、?,]*公司上诉理由:|原审诉请判令:|提起诉讼,请求法院判决[一-龥、]+:|起诉,请求依法判令:|诉至[一-龥]+法院称:|诉至[原一]{1}审法院(,|)称:|^[^。]*提起诉讼请求判决|^[^。]*诉至原审法院,要求法院判令|^[^。]*请求原审法院判令|^[^。]*诉至原审法院,请求判令|^[^。]*向原审法院起诉,请求判令|^[^。]*向原审法院提起本案诉讼,请求判令|起诉称:|公司上诉请求:|上诉请求为:|上诉请求二审法院:|[^。]*上诉请求,如下:|[^。]*上诉请求撤销一审判决|^[^。]*不服[^。,,]*提起(行政|)诉讼|恳请人民法院查明事实,依法判决二被告|^[^。]*上诉请求:|^[^。]*,请求:|^[^。]*请求一审法院:|^[^。]*诉讼请求是:|^[^。]*诉至一审法院称|^[^。]*原审法院起诉[,,]请求:|^[^。]*起诉至[原一]{1}审法院称:|^[^。]*向[一原]{1}审法院起诉请求:|^[^。]*向[原一]{1}审法院提起诉讼|^原告诉请|请求原审法院判令:|^[^。]{,30}诉讼[.]{1}称|不服[^。]{,10}(裁|决)定[^。]{,10}向[^。]{,20}诉讼称|原告[^。]{,30}诉(讼|)称|诉讼请求为:|诉讼请求:|[^。,]{,10}向本院起诉要求:|原告[^\n。]{,30}(起诉认为|请求判令|诉称|诉讼请求|起诉请求)|诉称:|提出诉讼请求|请求判令:|请求.院判令|请求判令[^。,;]{,10}:|公司诉称|诉称|诉讼称|提出[^。,]{,5}诉讼请求})"
    rex22="(^[^。]*[^不。]{1}服从原审判决|^被上诉人[^。,,、]*服从|^[^。,]{,30}(专利复审|商标评审)[^。\n]*服从[^。,、]{,2}判决|^[^。,]*面答辩意见称|被上诉人[^。]*提交答辩|被上诉人[^。]*答辩:|被上诉人[^。,,]*意见认为:|.*据此,被上诉人[一-龥]+请求本院|被上诉人[^。]*主要理由|^[^。]*答辩认为:|被上诉人[^。]{,20}(坚持|认为)[^。]{,20}(认定|意见)|^[^。;]{,30}(辩称|辨称|答辩|答辨|反诉称)|被告[^。]{,30}对原告所诉事实|被告对原告主张|被上诉人[^。]{,30}(没有到到庭|拒不到庭)|(被告|公司)[^。,](放弃抗[辨辩]{1}|未答[辩辨]{1}|未提出答辩意见))"
    rex23="^二审第一次庭审中[^。]*证据:|^本院审理过程中[^。]*证据:|^[^。]*为支持其上诉主张,在二审中提交|^二审审理期间,上诉人提交|^[^。,]{,10}在二审期间提供如下证据:|原审法院判决后,双方当事人均未上诉|^二审庭审中|^二审审理期间,上诉人提交[^,。](材料|证据)|^本案二审中[^。]*提供了证据|^[^。]{,20}在二审期间向本院提交了如下证据:|^本院在二审期间[^。]*提交.{,1}证据|本院[^。]*予以确认|^本院.*查明如下事实:|经开庭审查,本院认可|本院[^。,、?]*证据予以确认|二审诉讼中均未向本院提交|二审诉讼中[^。]*均未向本院提交|二审[^,。、,]*均未提[交供]{1}|二审[^。]*均未提交|二审时均未提交证据|^[^。]*二审期间[^。]*双方当事人[^。,]*提交|双方当事人在二审期间均未提交新证据|^[^。]{,30}(二审庭审中,(原、被告|)当事人双方均未提交|本案二审期间,双方当事人提供以下证据:|本案二审期间.*质证|二审举证期限内|本院审核.*对原判认定的事实予以确认|质证意见:|当事人[^。,:;、]*送至本院|^在二审过程中[^。]*向本院提交了|^本案在二审审理期间|.*二审期间均未提供新证据|.*本院[^。,,;::、]*事实予以确认|在二审期间,向本院提交|本院经审查|二审经庭审查明|.*本院二审予以确认|.*本院对原审法院查明的事实予以确认|本案二审中.*提交新证据|二审确认一审查明的案件事实属实|本案在审理过程中|^二审诉讼过程|本院二审|.*经审查,一审判决认定的事实|本院经二审审理|.*本院予以确认|^对原审判决所查明|^经本院查明|^在二审(庭审|审理期间|期间)|^二审(中|期间)|本院经过审理|一审判决认定[^。]*本院依法予以确认|本院[^。]*对一审查明的事实|据上,本院二审认定:|本院认定的事实与[一原]{1}审认定的事实|.*经查,|.*其余查明事实与原审查明基本一致|经质证|.*二审查明|对原审判决所确认的法律事实[^。]*故本院[一-龥]+确认。|.*经本院审查|.*本院评判如下:|.*本院分析认定如下:|.*经庭审质证|经二审法院开庭审理,查明|二审所查明|.*另查明:|一审查明的事实属实,本院予以确认|.*本院对原判认定的事实予以确认|.*本院根据[^。]*认定:|^.{,1}本院[二庭]{1}审查明|.*二审查明的事实与原判认定一致|本案审理[^。]*本院予以确认|二审查明的事实与原判认定一致|.*本院审理查明的其他事实与原审判决认定事实基本一致。|根据原审及二审[^。]*本院确认原审法院认定的事实属实。|对原判认定的事实[^。]*本院予以确认。|本院经查|经[一-龥、]+质证|本院经审理后查明|本院认证如下|本院另查明|^本院经审查认为|^[^。]*经上述举证、质证,本院审查认为|^原审法院经审查查明|.*判决所认定的基本事实是:|原审法院审理查.{,2}:|[一原]{1}审判决查明:|[一原]{1}审查明[,,:]{1}|^[原一]{1}审法院经审查查明:|.*判决查明以下事实:|^.{,3}原判认定:|^原审主要查明,|原审法院认证认为:|^[一-龥]+提交证据查明:|原判确认以下案件事实:|一审查明事实如下:|^原审判决确认:|^[一-龥]*一审查明:|^原审法院根据对证据的分析认证,认定事实如下:|^一审判决查明法律事实如下:|^查明,|^原审判决查明[^。、?,a-zA-Z,;:;:]{,10}:|^原判决认定的基本事实是:|^原审法院经查明|^[一-龥]+法院/([一-龥]+/)审理查明|^原判决查明:|[原一]{1}审法院审理过程[^。]*经[一-龥]*质证:|[原一]{1}审法院根据[一-龥、]+,依法查明以下事实:|^原审判决查明:|^一审法院认定事实:|^[一-龥]法院认定:|^原审法院经查明:|^原审法院认定:|^原判确认的法律事实是:|^[一-龥]+法院\([0-9]+\)[一-龥]+第[0-9]+号[一-龥]+判决认定:|^[一原]{1}审判决查明,|^原审法院确认事实如下:|^原审法院根据[一-龥、]+提交的证据认定:|^原审法院确认如下法律事实:|^[一-龥\(\)]+审理后认定:|^北京市第一中级人民认定:|.*原审均认定:|^原初审认定|[^。]*,故二审予以确认:|^原审法院确认的法律事实是:|^[原一]{1}审法院[^,。,;]*以下事实无异议:|^原判查明|^[原一]{1}审认定|^[原一]{1}审查明|^[一-龥\(\)]+法院[一-龥\(\)]*判决查明|^[一-龥\(\)]+法院[一-龥\(\)]*审理认定|^[原一]{1}审法院判决查明|^[原一]{1}审法院经确认事实|^[原一]{1}审法院审理认定|^[原一]{1}审判决认定|^[原一]{1}审判决审查认定|[一原]{1}审期间[一-龥\(\),,]+原判予以认定:|^[一-龥\(\)]+法院查明|^[一-龥\(\)]+法院[一-龥\(\)]*判决认定|^[原一]{1}审法院查明|^原审法院根据[^。]*(举证|庭审质证|法庭认证)[^。]*认定以下事实:|^[一-龥]+审理查明|^查明以下案件事实|^经庭审质证|^还查明,|本院对案件事实认定如下|^本院确定以下与本案有关的事实|^本院对本案证据认证如下|^查明以下事实|^根据[^。]{,30}(陈述|证据),本院(确认如下事实|认定事实如下|^认定如下事实|确认事实如下)|^查明:|^本院经查|^本院依法认定本案事实如下|^本院确认本案事实如下|本院根据上述[^。]{,30}确认以下事实|经审理|^本院经查|^根据上述[^。]{,30}本院确认以下事实|^经审理查明|^经庭审[^。,]{,5}比对|^一审查明|^经审查(?!(,本院认为|,本院认为))+|^本院查明|^另查明|^本院审理查明|^本案相关事实|^[^。]{,30}本院(确认如下事实|认定事实如下|认定如下事实|确认事实如下))"
    Rex24="(焦点:|争议的焦点[,,]本院评析如下:|本案(二审|)(的|)(主要|)(争议|涉及)(的|)焦点|根据.*本案争议焦点|争议焦点.*分析认定如下:|本院认为本案争议的焦点|本案的争议焦点问题|争议(的|)焦点[^。,:;?]*:|^本院认为|焦点(为|是|如下|即为|问题|问题是|问题为):|本院认为:|主要争议问题是:)"
    rex24="^.{,30}(本案(二审|)(的|)(主要|)(争议|涉及)(的|以下)焦点|根据.*本案争议焦点|本院认为:|^本院认为|.*主要争议问题是:|.*双方当事人在二审中[^。,;:;:]*向本院提交|^根据.*认定事实正确|本案争议的关键问题是|本院经审理认为:|关于争议焦点|.*二审争议焦点即为:|.*本案的争议焦点在于|.*本院认为|.*二审的争议焦点即为:|本案的争议焦点[^。,]*:|.*焦点(在于|是|为|如下|归纳为:|问题是)[:,,]{1}|.*争议(的|)焦点(如下:|是|为|如)|本案的主要争议焦点|.*争议的焦点为:|.*本案诉讼争议的焦点是:|.*本案二审的争议焦点在于:|^本院再审认为|.*争议焦点(为|是|):|综合[一-龥]+上诉理由及答辩意见,[一-龥]+争议焦点为:|^本院认为:|^原审法院经审理认为:|^根据上述事实,一审法院认为:|^原初审认为|^[一-龥\(\)]+法院[一-龥\(\)]*判决认为|[^。]*法院经审理认为:|^[一原]{1}审法院审理认为|^[一原]{1}审判决认为|^[一-龥\(\),,]*一审法院经审理认为|^[一-龥\(\)]+法院判决认为|法院一审认为|本院认证认为|本院认为|[一原]{1}审认为|^[原一]{1}审法院认为|^原判认[定为]{1}|^[一-龥]+法院认[定为]{1})"
    # State codes for the line scanner.
    start=0
    one_pg=1
    two_pg=2
    thr_pg=3
    fou_pg=4
    K={"1":rex21,"2":rex22,"3":rex23,"4":rex24}
    x=i[3]
    temp=x.split("\n")
    state=start
    st=""
    l=[]
    num=0
    st=""
    num=1
    for j in temp:
        # NOTE(review): this lookup is recomputed inside every branch below and
        # num never changes, so x/x1 here appear to be dead code.
        x=K[str(num)]
        x1=re.search(x,j)
        if state==start:
            x1=re.search(rex21,j)
            x2=re.search(rex22,j)
            x3=re.search(rex23,j)
            x4=re.search(rex24,j)
            X4=re.search(Rex24,j)
            if x1:
                l.append(st)
                st=j+"\n"
                state=1
            elif x2 and not X4:
                l.append(st)
                st=j+"\n"
                state=2
                l.append('')
            elif x3 and not X4:
                l.append(st)
                st=j+"\n"
                state=thr_pg
                l.extend(['',''])
            elif x4 or X4:
                l.append(st)
                st=j+"\n"
                state=fou_pg
                l.extend(['','',''])
            else:
                st+=j+"\n"
        elif state==one_pg:
            x2=re.search(rex22,j)
            x3=re.search(rex23,j)
            x4=re.search(rex24,j)
            X4=re.search(Rex24,j)
            if x2 and not X4:
                l.append(st)
                st=j+"\n"
                state=two_pg
            elif x3 and not X4:
                l.append(st)
                st=j+"\n"
                state=thr_pg
                l.append('')
            elif x4 or X4:
                l.append(st)
                st=j+"\n"
                state=fou_pg
                l.append('')
                l.append('')
            else:
                st+=j+"\n"
        elif state==two_pg:
            Rex23=re.search("^上诉人[^。,]*(提交|证据|证明)",j)
            x3=re.search(rex23,j)
            x4=re.search(rex24,j)
            X4=re.search(Rex24,j)
            if (x3 or Rex23) and not X4:
                l.append(st)
                st=j+"\n"
                state=thr_pg
            elif x4 or X4:
                l.append(st)
                l.append('')
                st=j+"\n"
                state=fou_pg
            else:
                st+=j+"\n"
        elif state==thr_pg:
            x4=re.search(rex24,j)
            X4=re.search(Rex24,j)
            if x4 or X4:
                l.append(st)
                st=j+"\n"
                state=fou_pg
            else:
                st+=j+"\n"
        elif state==fou_pg:
            st+=j+"\n"
    l.append(st)
    # Append the decomposed closing section, padding '' for missing sections.
    if len(l)==5:
        l_fr=to_sub3(i[4])
        l.extend(l_fr)
    elif len(l)==4:
        l_fr=to_sub3(i[4])
        temp=l_fr[0]
        temp=deal2_repeat1(temp)
        ### list bookkeeping
        if len(temp)==2:
            l_fr[0]=temp[1]
            l.append(temp[0])
            l.extend(l_fr)
        else:
            l.append('')
            l.extend(l_fr)
    elif len(l)==3:
        l_fr=to_sub3(i[4])
        temp=l_fr[0]
        temp=deal2_repeat1(temp)
        ### list bookkeeping
        if len(temp)==2:
            l_fr[0]=temp[1]
            l.append('')
            l.append(temp[0])
            l.extend(l_fr)
        else:
            l.extend(['',''])
            l.extend(l_fr)
    else:
        l.append("")
    return l
"""
处理部分特殊文书
"""
def Deal2_TrialProcess(x):
    """Re-join hard-wrapped lines.

    A line longer than 5 characters that does not end with the full stop
    '。' is considered wrapped and glued to the following line; the result
    is guaranteed to end with a newline.
    """
    joined = ""
    for line in x.split("\n"):
        wrapped = len(line) > 5 and line[-1] != "。"
        if wrapped:
            joined += line
        else:
            joined += line + "\n"
    if joined == "":
        return x
    if joined[-1] != "\n":
        joined += "\n"
    return joined
"""
1)标题【文书号和法院】
2)当事人信息
3)审理经过
4)原告诉称
5)被告辩称
6)审理查明
7)本院认为
"""
def Deal2_to_page1(i):
    """Split the first-instance portion of the document (``i[1]``) into sections.

    A line-wise state machine collects: title, parties, trial procedure,
    plaintiff claims, defendant replies, first-instance findings,
    first-instance opinion; Deal_to_page2() supplies the second-instance
    sections.  When exactly 17 sections result, returns a list of
    (id, text, section-name, timestamp) tuples; otherwise returns the raw
    section list (or ["二审出错啦"] on a second-instance split failure).
    """
    import time
    now=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Section-start patterns for the first-instance part.
    rex1="([^0-9、]*[0-9Xx]{2,}[^0-9]{1}[^。,、]+[0-9-、Xx]+号|第[0-9]+号)"
    rex2="专利申请权纠纷一案|案由:|审理终结|审理完结|审理完毕|审理结|当庭宣告判决|依法组成合议庭|侵[害犯权]{1}[^\n。]{,30}纠纷一案|无正当理由拒不到庭|被告[^。,,]{,20}(拒|)不到庭|服务合同纠纷一案"
    rex3="(^[^。,、]{,5}不服|原告[^。]*法院,请求判令.{,3}被告|^[^。]*上诉请求,撤销|向本院提出上诉:|上诉请求[^。]*.事实和理由:|^[^。]*诉至法院,请求判令|诉讼请求为,判令.被告:|^上诉人[^。]*请求|向原审法院提起诉讼,请求:|请求法院:|上诉请求均为:|^[^。:;;:a-zA-Z]{,10}上诉请求.*(事实和理由|上诉理由为):|上诉认为:|起诉请求:|请求依法判令:|^[^。,:;;:a-zA-Z]*诉讼请求为,判令|^[^。,:;;:a-zA-Z]*不服一审判决|^[^。,,;:;]{,5}公司上诉请求|诉请法院判令:|^[^。,;:、?,]*公司上诉理由:|原审诉请判令:|提起诉讼,请求法院判决[一-龥、]+:|起诉,请求依法判令:|诉至[一-龥]+法院称:|诉至[原一]{1}审法院(,|)称:|^[^。]*提起诉讼请求判决|^[^。]*诉至原审法院,要求法院判令|^[^。]*请求原审法院判令|^[^。]*诉至原审法院,请求判令|^[^。]*向原审法院起诉,请求判令|^[^。]*向原审法院提起本案诉讼,请求判令|起诉称:|公司上诉请求:|上诉请求为:|上诉请求二审法院:|[^。]*上诉请求,如下:|[^。]*上诉请求撤销一审判决|^[^。]*不服[^。,,]*提起(行政|)诉讼|恳请人民法院查明事实,依法判决二被告|^[^。]*上诉请求:|^[^。]*,请求:|^[^。]*请求一审法院:|^[^。]*诉讼请求是:|^[^。]*诉至一审法院称|^[^。]*原审法院起诉[,,]请求:|^[^。]*起诉至[原一]{1}审法院称:|^[^。]*向[一原]{1}审法院起诉请求:|^[^。]*向[原一]{1}审法院提起诉讼|^原告诉请|请求原审法院判令:|^[^。]{,30}诉讼[.]{1}称|不服[^。]{,10}(裁|决)定[^。]{,10}向[^。]{,20}诉讼称|原告[^。]{,30}诉(讼|)称|诉讼请求为:|诉讼请求:|[^。,]{,10}向本院起诉要求:|原告[^\n。]{,30}(起诉认为|请求判令|诉称|诉讼请求|起诉请求)|诉称:|提出诉讼请求|请求判令:|请求.院判令|请求判令[^。,;]{,10}:|公司诉称|诉称|诉讼称|提出[^。,]{,5}诉讼请求})"
    rex4="(答辩认为:|被告[^。]{,20}(坚持|认为)[^。]{,20}(认定|意见)|^[^。;]{,30}(辩称|辨称|答辩|答辨|反诉称)|被告[^。]{,30}对原告所诉事实|被告对原告主张|被告[^。]{,30}(没有到到庭|拒不到庭)|(被告|公司)[^。,](放弃抗[辨辩]{1}|未答[辩辨]{1}|未提出答辩意见))"
    rex5="^[^。]{,30}(.*认定如下:|[一原]{1}审法院确认以下事实:|^一审法院对双|一审法院认定事实:|.*分析认定如下:|^原审法院经审查查明|.*判决所认定的基本事实是:|原审法院审理查.{,2}:|[一原]{1}审判决查明:|[一原]{1}审查明[,,:]{1}|^[原一]{1}审法院经审查查明:|.*判决查明以下事实:|^.{,3}原判认定:|^原审主要查明,|原审法院认证认为:|^[一-龥]+提交证据查明:|原判确认以下案件事实:|一审查明事实如下:|^原审判决确认:|^[一-龥]*一审查明:|.*认定事实如下:|^一审判决查明法律事实如下:|^查明,|^原审判决查明[^。、?,a-zA-Z,;:;:]{,10}:|^原判决认定的基本事实是:|^原审法院经查明|^[一-龥]+法院/([一-龥]+/)审理查明|^原判决查明:|[原一]{1}审法院审理过程[^。]*经[一-龥]+质证:|[原一]{1}审法院根据[一-龥、]+,依法查明以下事实:|^原审判决查明:|^一审法院认定事实:|^[一-龥]法院认定:|^.审法院经查明:|^原审法院认定:|^原判确认的法律事实是:| ^[一原]{1}审判决查明|.*确认事实如下:|^原审法院根据[一-龥、]+提交的证据认定:|^原审法院确认如下法律事实:|^[一-龥\(\)]+审理后认定:|^北京市第一中级人民认定:|.*原审均认定:|^原初审认定|[^。]*,故二审予以确认:|.*确认的法律事实是:|^[原一]{1}审法院[^,。,;]*以下事实无异议:|^原判查明|^[原一]{1}审认定|^[原一]{1}审查明|^[一-龥\(\)]+法院[一-龥\(\)]*判决查明|^[一-龥\(\)]+法院[一-龥\(\)]*审理认定|^[原一]{1}审法院判决查明|^[原一]{1}审法院经确认事实|^[原一]{1}审法院审理认定|^[原一]{1}审判决审查认定|[一原]{1}审期间[一-龥\(\),,]+原判予以认定:|^[一-龥\(\)]+法院查明|^[原一]{1}审法院查明|^原审法院根据[^。]*(举证|庭审质证|法庭认证)[^。]*认定以下事实:|^[一-龥]+审理查明|^查明以下案件事实|^经庭审质证|^还查明,|本院对案件事实认定如下|^本院确定以下与本案有关的事实|^本院对本案证据认证如下|^查明以下事实|^根据[^。]{,30}(陈述|证据),本院(确认如下事实|认定事实如下|^认定如下事实|确认事实如下)|^查明:|^本院经查|^本院依法认定本案事实如下|^本院确认本案事实如下|本院根据上述[^。]{,30}确认以下事实|经审理|^本院经查|^根据上述[^。]{,30}本院确认以下事实|^经审理查明|^经庭审[^。,]{,5}比对|^一审查明|^经审查((?!(,本院认为|,本院认为)).)+|^本院查明|^另查明|^本院审理查明|^本案相关事实|^[^。]{,30}本院(确认如下事实|认定事实如下|认定如下事实|确认事实如下))"
    rex6="^(.{,30})(原判认定:|判决认定|主要争议焦点在于|.*原审法院审查认为,|一审争议焦点为|.*争议(的|)焦点(在于|是|为|如下|归纳为:|问题是)|一审法院认为,|.*焦点(是|如下|在于|)[:,,]{1}|双方当事人争议的焦点是|争议的焦点(问题|)为:|原审法院经审理认为:|^[一-龥\(\)]+认为[:,,]{1}|原审法院经审理认为:|^[一-龥\(\)]+法院[一-龥\(\)]*判决认定|^[一-龥]+法院\([0-9]+\)[一-龥]+第[0-9]+号[一-龥]+判决认定:|[一-龥\(\)]+法院判决认定:|^[^。]*[原一]{1}审判决认定:|.*争议焦点(为|是|):|本院认为|^原审法院经审理认为:|.*一审法院认为:|^原初审认为|^[一-龥\(\)]+法院[一-龥\(\)]*判决认为|[^。]*法院经审理认为:|^[一原]{1}审法院审理认为|^[一原]{1}审判决认为|^[一-龥\(\),,]*一审法院经审理认为|^[一-龥\(\)]+法院判决认为|法院一审认为|本院认证认为|本院认为|[一原]{1}审认为|^[原一]{1}审法院认为|^原判认[定为]{1}|^[一-龥]+法院认[定为]{1})"
    #### priority ordering of the scanner states
    start=0
    first=1
    second=2
    three=3
    four=4
    four_1=5
    four_2=6
    x=i[1]
    temp=x.split("\n")
    num=0
    state=start
    st=""
    l=[]
    for j in temp:
        num+=1
        ### title
        if state==start:
            x1=re.search(rex1,j)
            if x1:
                st+=j+"\n"
                ## provisional
                l.append(st)
                st=""
                state=first
            else:
                st+=j+"\n"
        ### parties
        elif state==first:
            x1=re.search(rex2,j)
            if x1:
                l.append(st)
                st=j+"\n"
                state=second
            ### trial procedure
            else:
                st+=j+"\n"
        ### trial procedure
        elif state==second:
            x0=re.search(rex3,j)### plaintiff claims
            x01=re.search(rex4,j)#### defendant replies
            x1=re.search(rex5,j)### findings
            x2=re.search(rex6,j)### court opinion
            #### disambiguate 原判认定 vs 本院认为
            # 原审法院经审理查明
            Rex5=re.search("(^经审理查明|(一|原)审(经|)(审理|)查明|原判认定|(原|一)审(法院|)(经|)(审理|)查明|一审法院[^。,,]*事实如下:|原判查明)",j)
            Rex=re.search("(原判认定:|原审判决认定:|(原|一)审法院(经|)(审理|)认定:|^经审查.{,1}本院认为|^[一-龥\(\),,]+审理认为|认为:|[一-龥\(\),,]*原审法院认为|审理认为:)",j)
            Rex_n6=re.search("^第[0-9]+号判决认为:",j)
            if x0 and not Rex5 and not Rex:
                ### trial procedure ends here
                st=Deal2_TrialProcess(st)
                l.append(st)
                st=j+"\n"
                state=three
            elif x01 and not Rex5:
                l.append(st)
                st=''
                l.append(st)
                st=j+"\n"
                state=four
            elif (x1 or Rex5)and not Rex :
                l.append(st)
                st=''
                l.extend(['',''])
                st=j+"\n"
                state=four_1
            elif (x2 or Rex) and not Rex_n6:
                # NOTE(review): unlike the sibling branches, this one never seeds
                # st with the current line j -- the matched line is dropped from
                # the output.  Confirm this is intended.
                l.append(st)
                st=''
                l.extend(['','',''])
                state=four_2
            else:
                st+=j+"\n"
        ####
        elif state==three:
            x0=re.search(rex4,j)
            x1=re.search(rex5,j)
            x2=re.search(rex6,j)### court opinion
            Rex5=re.search("(经审理查明|原判认定|[原一]{1}审(法院|)(经|)(审理|)查明|一审法院[^。,,]*事实如下:|原判查明)",j)
            Rex=re.search("(原判认定:|原审判决认定:|(原|一)审法院(经|)(审理|)认定:|^经审查.{,1}本院认为|^[一-龥\(\),,]+审理认为|认为:|[一-龥\(\),,]*原审法院认为|审理认为:)",j)
            if x0 and not Rex5 and not Rex:
                ### plaintiff claims end here
                l.append(st)
                st=j+"\n"
                state=four
            elif (x1 or Rex5)and not Rex:
                l.append(st)
                st=''
                # NOTE(review): extending a list with the string '' is a no-op;
                # sibling branches use l.extend(['','']) -- likely a padding bug.
                l.extend(st)
                st=j+"\n"
                state=four_1
            elif x2 or Rex:
                l.append(st)
                st=''
                l.extend([st,st])
                st=j+"\n"
                state=four_2
            else:
                st+=j+"\n"
        ### defendant replies
        elif state==four:
            x0=re.search(rex5,j,re.S)## findings
            x1=re.search(rex6,j)### court opinion
            Rex=re.search("((原|一)审法院(经|)(审理|)认定:|^经审查.{,1}本院认为|^[一-龥\(\),,]+审理认为|认为:|[一-龥\(\),,]*原审法院认为|审理认为:)",j)
            if x0 and not Rex:
                l.append(st)
                x1=re.search("(^.*。)(原判认定.*)",j)
                if x1:
                    l.append(x1.group(1))
                    st=x1.group(2)+"\n"
                    state=four_2
                else:
                    st=j+"\n"
                    state=four_1
            elif x1 or Rex:##
                l.append(st)
                st=''
                l.append(st)
                state=four_2
                st=j+"\n"
            else:
                st+=j+'\n'
        # findings
        elif state==four_1:
            x0=re.search(rex6,j)### court opinion
            Rex=re.search("(原判认定:|(原|一)审法院(经|)(审理|)认定:|^经审查.{,1}本院认为|^[一-龥\(\),,]+审理认为|认为:|[一-龥\(\),,]*原审法院认为|审理认为:)",j)
            if x0 or Rex:
                l.append(st)
                st=j+'\n'
                state=four_2
            else:
                st+=j+"\n"
        elif state==four_2:
            st+=j+"\n"
    l.append(st)
    txt=Deal_to_page2(i)
    #################################################
    if len(l)==7:
        if len(txt)!=9:
            return ["二审出错啦"]
        ### title, parties, procedure, plaintiff claims, defendant replies, findings, opinion
        m=deal2_repeat2(i[2])
        if len(m)==1:
            l.append(m[0])
        else:
            txt[0]=m[1]+"\n"+txt[0]
            l.append(m[0])
        ##### first-instance verdict, appellant claims, appellee replies, 2nd-instance findings, 2nd-instance opinion, 2nd-instance verdict
        #Txt=zip()
        l_id=[]
        l_id.append(i[0])
        l_id=l_id*9
        l.extend(txt)
    else:
        if len(txt)==9:
            m=deal2_repeat1(i[2])
            if len(m)==2:
                k=len(l)
                while k<=6:
                    l.append('')
                    k+=1
                l.append(m[0])
                txt[0]=m[1]+"\n"+txt[0]
            else:
                k=len(l)
                while k<=7:
                    l.append('')
                    k+=1
            l.extend(txt)
        else:
            pass
    key=["标题","当事人信息","审理经过","原告诉称","被告辩称","审理查明","本院认为","判决依据","一审判决结果","上诉人诉称",'被上诉人辩称','二审审理查明','二审法院认为',"判决依据","判决主文","判决尾部","落款"]
    l_now=[]
    l_now.append(now)
    l_now=l_now*17
    l_id=[]
    l_id.append(str(i[0]))
    l_id=l_id*17
    # Zip sections with ids/labels/timestamps only when fully decomposed.
    if len(l)==17:
        l=list(zip(l_id,l,key,l_now))
    return l
def Stander(x):
    """Normalize raw judgment text.

    Strips HTML entities/tags, stray control characters and whitespace,
    canonicalizes colons and de-wraps split keywords ("判决如下", signature
    roles such as 审判长/书记员), and removes boilerplate (blank-page notes,
    leading clutter before the court name, appended statute text).
    """
    x=re.sub(">","",x)
    x=re.sub("×","×",x)
    x=x.replace("&temp;","")
    x=x.replace(""","")
    x=x.replace("{C}","")
    x=x.replace("&","")
    x=re.sub(" ","",x)
    x=re.sub("&ldqu0;","",x)
    x=re.sub("&lsqu0;","",x)
    x=re.sub("&rsqu0;","",x)
    x=x.replace("lt;","")
    x=x.replace("\xe3","")
    x=x.replace("\x80","")
    x=x.replace("\xc2","")
    x=x.replace("\xa0","")
    x=x.replace("\x7f","")
    x=x.replace("\u3000","")
    x=x.replace("当事人原审的意见\n","")
    x=x.replace("\t", "")
    x=x.replace("&rdqu0;","")
    x=re.sub("[ ]+","",x)
    x=re.sub("<[^<>]+>","",x)
    x=re.sub("\(此页无正文\)","",x)
    # De-wrap keywords that got split across lines or padded with '?'.
    x=re.sub("判([\n]*|[?]+|)决([\n]*|[?]+|)如([\n]*|[?]+|)下","判决如下",x)
    x=re.sub("判([\n]*)决([\n]*|):","判决:",x)
    x=re.sub("(|[\n]*)年([\n]*|)","年",x)
    x=re.sub("(\n|[\n]*)月(|[\n]*)","月",x)
    x=re.sub("[?]{3,}","\n",x)
    x=re.sub("[?]+","",x)
    x=re.sub("[‘’']","",x)
    x=re.sub("[zzZ]{1}[lLl]{1}","ZL",x)
    x=re.sub("[\r\n]+","\n",x)
    # NOTE(review): '...' is a regex wildcard here -- this deletes ANY three
    # characters preceding ': '; a literal dot sequence likely needs escaping.
    x=re.sub("...: ","",x)
    x=x.replace("\x0b","\n")
    x=re.sub("[\r\n]+","\n",x)
    x=re.sub("[:::::::]{1}",":",x)
    x=re.sub("^[\n]+","",x)
    x=re.sub("(本页无正文)","",x)
    x=re.sub("\(本页无正文\)","",x)
    x=re.sub("本判决为终审判决。","",x)
    x=re.sub("(\n)日","日",x)
    x=re.sub("审([\n]*|[?]+|)判([\n]*|[?]+|)长([\n]+|[?]+)","审判长 ",x)
    x=re.sub("代([\n]*|[?]+|)理([\n]*|[?]+|)审判长","代理审判长 ",x)
    x=re.sub("审([\n]*|[?]+|)判([\n]*|)员([\n]*|[?]+)","审判员 ",x)
    x=re.sub("代([\n]*|[?]+|)理([\n]*|[?]+|)审判员","代理审判员 ",x)
    x=re.sub("陪([\n]*|[?]+|)审([\n]*|[?]+|)员([\n]+|[?]+)","陪审员 ",x)
    x=re.sub("人([\n]*|[?]+|)民([\n]*|[?]+|)陪审员([\n]+|[?]+)","人民陪审员 ",x)
    x=re.sub("书([\n]*|[?]+|)记([\n]*|[?]+|)员([\n]+|[?]+)","书记员 ",x)
    x=re.sub("速([\n]*|[?]+)记([?]*|[\n]*)员","速记员",x)
    x=re.sub("速记员\n","速记员 ",x)
    x=re.sub("速([\n]*)录[\n]*员","速录员",x)
    x=re.sub("速录员\n","速录员",x)
    x=re.sub("法([\n]*|[?]+|)官([\n]*|[?]+|)助([\n]+|[?]+)理","法官助理 ",x)
    # Strip leading clutter before the court name.
    x1=re.search("(^签发.*?[\n])([^\n]{,30}法院)",x,re.S)
    if x1:
        x1=x1.group(1)
        x=x.replace(x1,"")
    # NOTE(review): the next search/replace is applied twice verbatim,
    # presumably to handle two occurrences of the phrase.
    x1=re.search("(^.*(已审理终结。|已审理完结。|已审理完毕。))([^\n]{1}.*)",x,re.S)
    if x1:
        x2=x1.group(3)
        x1=x1.group(1)
        x=x1+"\n"+x2
    x1=re.search("(^.*(已审理终结。|已审理完结。|已审理完毕。))([^\n]{1}.*)",x,re.S)
    if x1:
        x2=x1.group(3)
        x1=x1.group(1)
        x=x1+"\n"+x2
    ### basic normalization of the document tail
    x=re.sub("pt;''>","",x)
    x=re.sub("当事人二审的意见\n","",x)
    x=re.sub("\(原审判决附图一\)\(原审判决附图二\)","",x)
    x1=re.search("^((?!(法院|\n)).)*\n",x,re.S)
    # must be done on the whole text, not piecewise
    if x1:
        tx1=x1.group()
        x=x.replace(tx1,"")
        #x=re.sub(tx1,"",x)
    # appendix: drop the quoted statute text appended after the judgment
    x1=re.search("(附:本判决书所依据法律规定的具体条文:|附本判决书引用的主要法律条文:|附.{,1}本判决适用法律条文:|附.{,1}本判决适用法律条款:|附:本案适用的法律条款|附:本案适用的法律条款|附:本案适用的法律条款).+",x,re.S)
    if x1:
        xx=x1.group()
        x=x.replace(xx,"")
    return x
# Drive the pipeline: isolate the judgment paragraph, then paginate the
# document into its 17 named sections.
Flag,lcontent=Deal2_1(Content)
if Flag:
    l_content=Deal2_to_page1(lcontent)
    # 17 entries means the document was fully decomposed into sections.
    if len(l_content)==17:
        return l_content,True
    else:
        return l_content,False
else:
    # Deal2_1 could not locate a first-instance judgment paragraph.
    return ['wu'],False
# zcbe
[](https://travis-ci.com/myzhang1029/zcbe)

[](https://codecov.io/gh/myzhang1029/zcbe)
[](https://codeclimate.com/github/myzhang1029/zcbe/maintainability)
## Introduction
The Z cross build environment is a tool for managing cross-compile environments.
It comes with concurrent building, dependency tracking and other useful features.
## Usage
### Tutorial
TODO
### CLI Usage
```
zcbe [-h] [-w] [-W WARNING] [-B] [-C CHDIR] [-o FILE] [-e FILE]
[-f FILE] [-a] [-s] [-n] [-u] [-H ABOUT]
[PROJ [PROJ ...]]
The Z Cross Build Environment
positional arguments:
PROJ List of projects to build
optional arguments:
-h, --help show this help message and exit
-w Suppress all warnings
-W WARNING Modify warning behavior
-B, --rebuild, --always-make, --always-build
Force build requested projects and dependencies
-C CHDIR, --chdir CHDIR, --directory CHDIR
Change directory to
-o FILE, --stdout-to FILE
Redirect stdout to FILE ('{n}' expands to the name of
the project)
-e FILE, --stderr-to FILE
Redirect stderr to FILE ('{n}' expands to the name of
the project)
-f FILE, --file FILE, --build-toml FILE
Read FILE as build.toml
-a, --all Build all projects in mapping.toml
-s, --silent Silence make standard output(short for -o /dev/null)
-n, --dry-run, --just-print, --recon
Don't actually run any commands
-u, --show-unbuilt List unbuilt projects and exit
-H ABOUT, --about ABOUT
Help on a topic("topics" for a list of topics)
```
| zcbe | /zcbe-0.4.4.tar.gz/zcbe-0.4.4/README.md | README.md |
zcbor
=====
zcbor is a low footprint [CBOR](https://en.wikipedia.org/wiki/CBOR) library in the C language that comes with a schema-driven script tool that can validate your data, or even generate code for you.
Aside from the script, the CBOR library is a standalone library which is tailored for use in microcontrollers.
The validation/conversion part of the script works with YAML and JSON data, in addition to CBOR.
It can for example validate a YAML file against a schema and convert it into CBOR.
The schema language used by zcbor is CDDL (Concise Data Definition Language) which is a powerful human-readable data description language defined in [IETF RFC 8610](https://datatracker.ietf.org/doc/rfc8610/).
zcbor was previously called "cddl-gen".
Features
========
Here are some possible ways zcbor can be used:
- Python script and module:
- Validate a YAML/JSON file and translate it into CBOR e.g. for transmission.
- Validate a YAML/JSON/CBOR file before processing it with some other tool
- Decode and validate incoming CBOR data into human-readable YAML/JSON.
- As part of a python script that processes YAML/JSON/CBOR files. zcbor is compatible with PyYAML and can additionally provide validation and/or easier inspection via named tuples.
- C code:
- Generate C code for validating and decoding or encoding CBOR, for use in optimized or constrained environments, such as microcontrollers.
- Provide a low-footprint CBOR decoding/encoding library similar to TinyCBOR/QCBOR/NanoCBOR.
CBOR decoding/encoding library
==============================
The CBOR library found at [headers](include) and [source](src) is used by the generated code, but can also be used directly.
To use it, instantiate a `zcbor_state_t` object, which is most easily done using the `zcbor_new_*_state()` functions or the `ZCBOR_STATE_*()` macros.
The `elem_count` member refers to the number of encoded objects in the current list or map.
`elem_count` starts again when entering a nested list or map, and is restored when exiting.
`elem_count` is one reason for needing "backup" states (the other is to allow rollback of the payload).
You need a number of backups corresponding to the maximum number of nested levels in your data.
Backups are needed for encoding if you are using canonical encoding (`ZCBOR_CANONICAL`), or using the `bstrx_cbor_*` functions.
Backups are needed for decoding if there are any lists, maps, or CBOR-encoded strings in the data.
Note that the benefits of using the library directly is greater for encoding than for decoding.
For decoding, the code generation will provide a number of checks that are tedious to write manually, and easy to forget.
```c
/** The number of states must be at least equal to one more than the maximum
* nested depth of the data.
*/
zcbor_state_t states[n];
/** Initialize the states. After calling this, states[0] is ready to be used
* with the encoding/decoding APIs.
* elem_count must be the maximum expected number of top-level elements when
* decoding (1 if the data is wrapped in a list).
* When encoding, elem_count must be 0.
*/
zcbor_new_state(states, n, payload, payload_len, elem_count);
/** Alternatively, use one of the following convenience macros. */
ZCBOR_STATE_D(decode_state, n, payload, payload_len, elem_count);
ZCBOR_STATE_E(encode_state, n, payload, payload_len, 0);
```
The CBOR libraries assume little-endianness by default, but you can define ZCBOR_BIG_ENDIAN to change this.
Configuration
-------------
The C library has a few compile-time configuration options.
These configuration options can be enabled by adding them as compile definitions to the build.
Name | Description
------------------------- | -----------
`ZCBOR_CANONICAL` | When encoding lists and maps, do not use indefinite length encoding. Enabling `ZCBOR_CANONICAL` increases code size and makes the encoding library more often use state backups.
`ZCBOR_VERBOSE` | Print messages on encoding/decoding errors (`zcbor_print()`), and also a trace message (`zcbor_trace()`) for each decoded value, and in each generated function (when using code generation). Requires `printk` as found in Zephyr.
`ZCBOR_ASSERTS` | Enable asserts (`zcbor_assert()`). When they fail, the assert statements instruct the current function to return a `ZCBOR_ERR_ASSERTION` error. If `ZCBOR_VERBOSE` is enabled, a message is printed.
`ZCBOR_STOP_ON_ERROR` | Enable the `stop_on_error` functionality. This makes all functions abort their execution if called when an error has already happened.
`ZCBOR_BIG_ENDIAN` | All decoded values are returned as big-endian.
Python script and module
========================
Invoking zcbor.py from the command line
---------------------------------------
The zcbor.py script can directly read CBOR, YAML, or JSON data and validate it against a CDDL description.
It can also freely convert the data between CBOR/YAML/JSON.
It can also output the data to a C file formatted as a byte array.
Following are some generalized examples for validating, and for converting (which also validates) data from the command line.
The script infers the data format from the file extension, but the format can also be specified explicitly.
See `zcbor validate --help` and `zcbor convert --help` for more information.
```sh
python3 <zcbor base>/zcbor/zcbor.py validate -c <CDDL description file> -t <which CDDL type to expect> -i <input data file>
python3 <zcbor base>/zcbor/zcbor.py convert -c <CDDL description file> -t <which CDDL type to expect> -i <input data file> -o <output data file>
```
Or invoke its command line executable (if installed via `pip`):
```sh
zcbor validate -c <CDDL description file> -t <which CDDL type to expect> -i <input data file>
zcbor convert -c <CDDL description file> -t <which CDDL type to expect> -i <input data file> -o <output data file>
```
Note that since CBOR supports more data types than YAML and JSON, zcbor uses an idiomatic format when converting to/from YAML/JSON.
This is relevant when handling YAML/JSON conversions of data that uses the unsupported features.
The following data types are supported by CBOR, but not by YAML (and JSON which is a subset of YAML):
1. bytestrings: YAML supports only text strings. In YAML, bytestrings ('<bytestring>') are represented as {"bstr": "<hex-formatted bytestring>"}, or as {"bstr": <any type>} if the CBOR bytestring contains CBOR-formatted data, in which the data is decoded into <any type>.
2. map keys other than text string: In YAML, such key value pairs are represented as {"keyval<unique int>": {"key": <key, not text>, "val": <value>}}
3. tags: In cbor2, tags are represented by a special type, cbor2.CBORTag. In YAML, these are represented as {"tag": <tag number>, "val": <tagged data>}.
Importing zcbor in a Python script
----------------------------------
Importing zcbor gives access to the DataTranslator class which is used to implement the command line conversion features.
DataTranslator can be used to programmatically perform the translations, or to manipulate the data.
When accessing the data, you can choose between two internal formats:
1. The format provided by the cbor2, yaml (pyyaml), and json packages.
This is a format where the serialization types (map, list, string, number etc.) are mapped directly to the corresponding Python types.
This format is common between these packages, which makes translation very simple.
When returning this format, DataTranslator hides the idiomatic representations for bytestrings, tags, and non-text keys described above.
2. A custom format which allows accessing the data via the names from the CDDL description file.
This format is implemented using named tuples, and is immutable, meaning that it can be used for inspecting data, but not for changing or creating data.
Code generation
===============
The generated code consists of:
- A header file containing typedefs for the types defined in the CDDL, as well as declarations for decoding functions for some types (those specified as entry types). The typedefs are the same for both encoding and decoding.
- A C file containing all the encoding/decoding code.
The code is split across multiple functions, and each function contains a single `if` statement which "and"s and "or"s together calls into the cbor libraries or to other generated decoding functions.
CDDL allows placing restrictions on the members of your data structure.
Restrictions can be on type, on content (e.g. values/sizes of ints or strings), and repetition (e.g. the number of members in a list).
The generated code will validate the input (i.e. the structure if encoding, or the payload for decoding), which means that it will check all the restriction set in the CDDL description, and fail if a restriction is broken.
The cbor libraries do most of the actual translation and moving of bytes, and the validation of values.
There are tests for the code generation in [tests/](tests/).
The tests require [Zephyr](https://github.com/zephyrproject-rtos/zephyr) (if your shell is set up to build Zephyr samples, the tests should also build).
Build system
------------
When calling zcbor with the argument `--output-cmake <file path>`, a cmake file will be created at that location.
The cmake file creates a cmake target and adds the generated and non-generated source files, and the include directories to the header files.
This cmake file can then be included in your project's `CMakeLists.txt` file, and the target can be linked into your project.
This is demonstrated in the tests, e.g. at tests/decode/test3_simple/CMakeLists.txt.
zcbor can be instructed to copy the non-generated sources to the same location as the generated sources with `--copy-sources`.
Introduction to CDDL
====================
In CDDL you define types from other types.
Types can be defined from base types, or from other types you define.
Types are declared with '`=`', e.g. `Foo = int` which declares the type `Foo` to be an integer, analogous to `typedef int Foo;` in C.
CDDL defines the following base types (this is not an exhaustive list):
- `int`: Positive or negative integer
- `uint`: Positive integer
- `bstr`: Byte string
- `tstr`: Text string
- `bool`: Boolean
- `nil`: Nil/Null value
- `float`: Floating point value
- `any`: Any single element
CDDL allows creating aggregate types:
- `[]`: List. Elements don't need to have the same type.
- `{}`: Map. Key/value pairs as are declared as `<key> => <value>` or `<key>: <value>`. Note that `:` is also used for labels.
- `()`: Groups. Grouping with no enclosing type, which means that e.g. `Foo = [(int, bstr)]` is equivalent to `Foo = [int, bstr]`.
- `/`: Unions. Analogous to unions in C. E.g. `Foo = int/bstr/Bar` where Foo is either an int, a bstr, or Bar (some custom type).
Literals can be used instead of the base type names:
- Number: `Foo = 3`, where Foo is a uint with the additional requirement that it must have the value 3.
- Number range: `Foo = -100..100`, where Foo is an int with value between -100 and 100.
- Text string: `Foo = "hello"`, where Foo is a tstr with the requirement that it must be "hello".
- True/False: `Foo = false`, where Foo is a bool which is always false.
Base types can also be restricted in other ways:
- `.size`: Works for integers and strings. E.g. `Foo = uint .size 4` where Foo is a uint exactly 4 bytes long.
- `.cbor`/`.cborseq`: E.g. `Foo = bstr .cbor Bar` where Foo is a bstr whose contents must be CBOR data decodeable as the Bar type.
An element can be repeated:
- `?`: 0 or 1 time. E.g. `Foo = [int, ?bstr]`, where Foo is a list with an int possibly followed by a bstr.
- `*`: 0 or more times. E.g. `Foo = [*tstr]`, where Foo is a list containing 0 or more tstrs.
- `+`: 1 or more times. E.g. `Foo = [+Bar]`.
- `x*y`: Between x and y times, inclusive. E.g. `Foo = {4*8(int => bstr)}` where Foo is a map with 4 to 8 key/value pairs where each key is an int and each value is a bstr.
Note that in the zcbor script and its generated code, the number of entries supported via `*` and `+` is affected by the default_max_qty value.
Any element can be labeled with `:`.
The label is only for readability and does not impact the data structure in any way.
E.g. `Foo = [name: tstr, age: uint]` is equivalent to `Foo = [tstr, uint]`.
See [test3_simple](tests/decode/test3_simple/) for CDDL example code.
Introduction to CBOR
====================
CBOR's format is described well on [Wikipedia](https://en.wikipedia.org/wiki/CBOR), but here's a synopsis:
Encoded CBOR data elements look like this.
```
| Header | Value | Payload |
| 1 byte | 0, 1, 2, 4, or 8 bytes | 0 - 2^64-1 bytes/elements |
| 3 bits | 5 bits |
| Major Type | Additional Info |
```
The available major types can be seen in `zcbor_major_type_t`.
For all major types, Values 0-23 are encoded directly in the _Additional info_, meaning that the _Value_ field is 0 bytes long.
If _Additional info_ is 24, 25, 26, or 27, the _Value_ field is 1, 2, 4, or 8 bytes long, respectively.
Major types `pint`, `nint`, `tag`, and `prim` elements have no payload, only _Value_.
* `pint`: Interpret the _Value_ as a positive integer.
* `nint`: Interpret the _Value_ as a positive integer, then multiply by -1 and subtract 1.
* `tag`: The _Value_ says something about the next non-tag element.
See the [CBOR tag documentation](https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml) for details.
* `prim`: Different _Additional info_ mean different things:
* 20: `false`
* 21: `true`
* 22: `null`
* 23: `undefined`
* 25: Interpret the _Value_ as an IEEE 754 float16.
* 26: Interpret the _Value_ as an IEEE 754 float32.
* 27: Interpret the _Value_ as an IEEE 754 float64.
* 31: End of an indefinite-length `list` or `map`.
For `bstr`, `tstr`, `list`, and `map`, the _Value_ describes the length of the _Payload_.
For `bstr` and `tstr`, the length is in bytes, for `list`, the length is in number of elements, and for `map`, the length is in number of key/value element pairs.
For `list` and `map`, sub elements are regular CBOR elements with their own _Header_, _Value_ and _Payload_. `list`s and `map`s can be recursively encoded.
If a `list` or `map` has _Additional info_ 31, it is "indefinite-length", which means it has an "unknown" number of elements.
Instead, its end is marked by a `prim` with _Additional info_ 31 (byte value 0xFF).
Usage Example
=============
Code generation
---------------
This example is taken from [test3_simple](tests/decode/test3_simple/).
If your CDDL file contains the following code:
```cddl
Timestamp = bstr .size 8
; Comments are denoted with a semicolon
Pet = [
name: [ +tstr ],
birthday: Timestamp,
species: (cat: 1) / (dog: 2) / (other: 3),
]
```
Call the Python script:
```sh
python3 <zcbor base>/zcbor/zcbor.py code -c pet.cddl -d -t Pet --oc pet_decode.c --oh pet_decode.h
# or
zcbor code -c pet.cddl -d -t Pet --oc pet_decode.c --oh pet_decode.h
```
And use the generated code with
```c
#include <pet_decode.h> /* The name of the header file is taken from the name of
the cddl file, but can also be specified when calling
the script. */
/* ... */
/* The following type and function refer to the Pet type in the CDDL, which
* has been specified as an --entry-types (-t) when invoking zcbor. */
Pet_t pet;
size_t decode_len;
bool success = cbor_decode_Pet(input, sizeof(input), &pet, &decode_len);
```
The process is the same for encoding, except:
- Change `-d` to `-e` when invoking zcbor
- Input parameters become output parameters and vice versa in the code:
```c
#include <pet_encode.h> /* The name of the header file is taken from the name of
the cddl file, but can also be specified when calling
the script. */
/* ... */
/* The following type and function refer to the Pet type in the CDDL, which
* has been specified as an --entry-types (-t) when invoking zcbor. */
Pet_t pet = { /* Initialize with desired data. */ };
uint8_t output[100]; /* 100 is an example. Must be large enough for data to fit. */
size_t out_len;
bool success = cbor_encode_Pet(output, sizeof(output), &pet, &out_len);
```
CBOR decoding/encoding library
------------------------------
For encoding:
```c
#include <zcbor_encode.h>
uint8_t payload[100];
zcbor_state_t state;
zcbor_new_state(&state, 1, payload, sizeof(payload), 0);
res = res && zcbor_list_start_encode(&state, 0);
res = res && zcbor_tstr_put(&state, "first");
res = res && zcbor_tstr_put(&state, "second");
res = res && zcbor_list_end_encode(&state, 0);
uint8_t timestamp[8] = {1, 2, 3, 4, 5, 6, 7, 8};
struct zcbor_string timestamp_str = {
.value = timestamp,
.len = sizeof(timestamp),
};
res = res && zcbor_bstr_encode(&state, &timestamp_str);
res = res && zcbor_uint32_put(&state, 2 /* dog */);
res = res && zcbor_list_end_encode(&state, 0);
```
Converting
----------
Here is an example call for converting from YAML to CBOR:
```sh
python3 <zcbor base>/zcbor/zcbor.py convert -c pet.cddl -t Pet -i mypet.yaml -o mypet.cbor
# or
zcbor convert -c pet.cddl -t Pet -i mypet.yaml -o mypet.cbor
```
Which takes a yaml structure from mypet.yaml, validates it against the Pet type in the CDDL description in pet.cddl, and writes binary CBOR data to mypet.cbor.
See the tests in <zcbor base>/tests/ for examples of using the python module
Running tests
=============
The tests for the generated code are based on Zephyr ztests.
Tests for the conversion functions in the script are implemented with the unittest module.
There are also test.sh scripts to quickly run all tests.
[`tests/test.sh`](tests/test.sh) runs all tests, including python tests in [`tests/scripts`](tests/scripts).
These tests are dependent upon the `pycodestyle` package from `pip`.
Run these scripts with no arguments.
To set up the environment to run the ztest tests, follow [Zephyr's Getting Started Guide](https://docs.zephyrproject.org/latest/getting_started/index.html), or see the workflow in the [`.github`](.github) directory.
Command line documentation
==========================
Added via `add_helptext.py`
zcbor --help
------------
```
usage: zcbor [-h] {code,validate,convert} ...
Parse a CDDL file and validate/convert between YAML, JSON, and CBOR. Can also
generate C code for validation/encoding/decoding of CBOR.
positional arguments:
{code,validate,convert}
options:
-h, --help show this help message and exit
```
zcbor code --help
-----------------
```
usage: zcbor code [-h] [--version] -c CDDL [--no-prelude] [-v]
[--default-max-qty DEFAULT_MAX_QTY] [--output-c OUTPUT_C]
[--output-h OUTPUT_H] [--output-h-types OUTPUT_H_TYPES]
[--copy-sources] [--output-cmake OUTPUT_CMAKE] -t
ENTRY_TYPES [ENTRY_TYPES ...] [-d] [-e] [--time-header]
[--git-sha-header] [-b {32,64}]
[--include-prefix INCLUDE_PREFIX] [-s]
Parse a CDDL file and produce C code that validates and xcodes CBOR.
The output from this script is a C file and a header file. The header file
contains typedefs for all the types specified in the cddl input file, as well
as declarations to xcode functions for the types designated as entry types when
running the script. The c file contains all the code for decoding and validating
the types in the CDDL input file. All types are validated as they are xcoded.
Where a `bstr .cbor <Type>` is specified in the CDDL, AND the Type is an entry
type, the xcoder will not xcode the string, only provide a pointer into the
payload buffer. This is useful to reduce the size of typedefs, or to break up
decoding. Using this mechanism is necessary when the CDDL contains self-
referencing types, since the C type cannot be self referencing.
This script requires 'regex' for lookaround functionality not present in 're'.
options:
-h, --help show this help message and exit
--version show program's version number and exit
-c CDDL, --cddl CDDL Path to one or more input CDDL file(s). Passing
multiple files is equivalent to concatenating them.
--no-prelude Exclude the standard CDDL prelude from the build. The
prelude can be viewed at zcbor/cddl/prelude.cddl in
the repo, or together with the script.
-v, --verbose Print more information while parsing CDDL and
generating code.
--default-max-qty DEFAULT_MAX_QTY, --dq DEFAULT_MAX_QTY
Default maximum number of repetitions when no maximum
is specified. This is needed to construct complete C
types. The default_max_qty can usually be set to a
text symbol if desired, to allow it to be configurable
when building the code. This is not always possible,
as sometimes the value is needed for internal
computations. If so, the script will raise an
exception.
--output-c OUTPUT_C, --oc OUTPUT_C
Path to output C file. If both --decode and --encode
are specified, _decode and _encode will be appended to
the filename when creating the two files. If not
specified, the path and name will be based on the
--output-cmake file. A 'src' directory will be created
next to the cmake file, and the C file will be placed
there with the same name (except the file extension)
as the cmake file.
--output-h OUTPUT_H, --oh OUTPUT_H
Path to output header file. If both --decode and
--encode are specified, _decode and _encode will be
appended to the filename when creating the two files.
If not specified, the path and name will be based on
the --output-cmake file. An 'include' directory will
be created next to the cmake file, and the C file will
be placed there with the same name (except the file
extension) as the cmake file.
--output-h-types OUTPUT_H_TYPES, --oht OUTPUT_H_TYPES
Path to output header file with typedefs (shared
between decode and encode). If not specified, the path
and name will be taken from the output header file
(--output-h), with '_types' added to the file name.
--copy-sources Copy the non-generated source files (zcbor_*.c/h) into
the same directories as the generated files.
--output-cmake OUTPUT_CMAKE
Path to output CMake file. The filename of the CMake
file without '.cmake' is used as the name of the CMake
target in the file. The CMake file defines a CMake
target with the zcbor source files and the generated
file as sources, and the zcbor header files' and
generated header files' folders as
include_directories. Add it to your project via
include() in your CMakeLists.txt file, and link the
target to your program. This option works with or
without the --copy-sources option.
-t ENTRY_TYPES [ENTRY_TYPES ...], --entry-types ENTRY_TYPES [ENTRY_TYPES ...]
Names of the types which should have their xcode
functions exposed.
-d, --decode Generate decoding code. Either --decode or --encode or
both must be specified.
-e, --encode Generate encoding code. Either --decode or --encode or
both must be specified.
--time-header Put the current time in a comment in the generated
files.
--git-sha-header Put the current git sha of zcbor in a comment in the
generated files.
-b {32,64}, --default-bit-size {32,64}
Default bit size of integers in code. When integers
have no explicit bounds, assume they have this bit
width. Should follow the bit width of the architecture
the code will be running on.
--include-prefix INCLUDE_PREFIX
When #include'ing generated files, add this path
prefix to the filename.
-s, --short-names Attempt to make most generated struct member names
shorter. This might make some names identical which
will cause a compile error. If so, tweak the CDDL
labels or layout, or disable this option. This might
also make enum names different from the corresponding
union members.
```
zcbor validate --help
---------------------
```
usage: zcbor validate [-h] [--version] -c CDDL [--no-prelude] [-v]
[--default-max-qty DEFAULT_MAX_QTY] -i INPUT
[--input-as {yaml,json,cbor,cborhex}] -t ENTRY_TYPE
Read CBOR, YAML, or JSON data from file or stdin and validate it against a
CDDL schema file.
options:
-h, --help show this help message and exit
--version show program's version number and exit
-c CDDL, --cddl CDDL Path to one or more input CDDL file(s). Passing
multiple files is equivalent to concatenating them.
--no-prelude Exclude the standard CDDL prelude from the build. The
prelude can be viewed at zcbor/cddl/prelude.cddl in
the repo, or together with the script.
-v, --verbose Print more information while parsing CDDL and
generating code.
--default-max-qty DEFAULT_MAX_QTY, --dq DEFAULT_MAX_QTY
Default maximum number of repetitions when no maximum
is specified. It is only relevant when handling data
that will be decoded by generated code. If omitted, a
large number will be used.
-i INPUT, --input INPUT
Input data file. The option --input-as specifies how
to interpret the contents. Use "-" to indicate stdin.
--input-as {yaml,json,cbor,cborhex}
Which format to interpret the input file as. If
omitted, the format is inferred from the file name.
.yaml, .yml => YAML, .json => JSON, .cborhex => CBOR
as hex string, everything else => CBOR
-t ENTRY_TYPE, --entry-type ENTRY_TYPE
Name of the type (from the CDDL) to interpret the data
as.
```
zcbor convert --help
--------------------
```
usage: zcbor convert [-h] [--version] -c CDDL [--no-prelude] [-v]
[--default-max-qty DEFAULT_MAX_QTY] -i INPUT
[--input-as {yaml,json,cbor,cborhex}] -t ENTRY_TYPE -o
OUTPUT [--output-as {yaml,json,cbor,cborhex,c_code}]
[--c-code-var-name C_CODE_VAR_NAME]
Parse a CDDL file and validate/convert between CBOR and YAML/JSON. The script
decodes the CBOR/YAML/JSON data from a file or stdin and verifies that it
conforms to the CDDL description. The script fails if the data does not
conform. 'zcbor validate' can be used if only validate is needed. JSON and
YAML do not support all data types that CBOR/CDDL supports. bytestrings
(BSTR), tags, and maps with non-text keys need special handling: All strings
in JSON/YAML are text strings. If a BSTR is needed, use a dict with a single
entry, with "bstr" as the key, and the byte string (as a hex string) as the
value, e.g. {"bstr": "0123456789abcdef"}. The value can also be another type,
e.g. which will be interpreted as a BSTR with the given value as contents (in
cddl: 'bstr .cbor SomeType'). E.g. {"bstr": ["first element", 2, [3]]} Dicts
in JSON/YAML only support text strings for keys, so if a dict needs other
types of keys, encapsulate the key and value into a dict (n is an arbitrary
integer): e.g. {"name": "foo", "keyvaln": {"key": 123, "val": "bar"}} which
will conform to the CDDL {tstr => tstr, int => tstr}. Tags are specified by a
dict with two elements, e.g. {"tag": 1234, "value": ["tagged string within
list"]} 'undefined' is specified as a list with a single text entry:
"zcbor_undefined".
options:
-h, --help show this help message and exit
--version show program's version number and exit
-c CDDL, --cddl CDDL Path to one or more input CDDL file(s). Passing
multiple files is equivalent to concatenating them.
--no-prelude Exclude the standard CDDL prelude from the build. The
prelude can be viewed at zcbor/cddl/prelude.cddl in
the repo, or together with the script.
-v, --verbose Print more information while parsing CDDL and
generating code.
--default-max-qty DEFAULT_MAX_QTY, --dq DEFAULT_MAX_QTY
Default maximum number of repetitions when no maximum
is specified. It is only relevant when handling data
that will be decoded by generated code. If omitted, a
large number will be used.
-i INPUT, --input INPUT
Input data file. The option --input-as specifies how
to interpret the contents. Use "-" to indicate stdin.
--input-as {yaml,json,cbor,cborhex}
Which format to interpret the input file as. If
omitted, the format is inferred from the file name.
.yaml, .yml => YAML, .json => JSON, .cborhex => CBOR
as hex string, everything else => CBOR
-t ENTRY_TYPE, --entry-type ENTRY_TYPE
Name of the type (from the CDDL) to interpret the data
as.
-o OUTPUT, --output OUTPUT
Output data file. The option --output-as specifies how
to interpret the contents. Use "-" to indicate stdout.
--output-as {yaml,json,cbor,cborhex,c_code}
Which format to interpret the output file as. If
omitted, the format is inferred from the file name.
.yaml, .yml => YAML, .json => JSON, .c, .h => C code,
.cborhex => CBOR as hex string, everything else =>
CBOR
--c-code-var-name C_CODE_VAR_NAME
Only relevant together with '--output-as c_code' or .c
files.
```
| zcbor | /zcbor-0.6.0.tar.gz/zcbor-0.6.0/README.md | README.md |
from redis import Redis
from typing import Callable, Dict, Union, List
from celery import Celery, Task, group
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from .common.exceptions import BizException
from .common.utils import obj_to_ref, singleton
from .common.keys import get_result_key
from .model.callback import Callback
LOGGER = get_task_logger(__name__)
# @singleton
class CeleryClient(object):
    """
    Celery service client.

    Note: manual construction is discouraged. Obtain instances through
    CeleryClientHolder so the service classes can share a single client.
    """

    def __init__(self, celery_broker_url: str, celery_result_backend: str, monitor_redis_uri: str, app_code: str):
        self.broker_url = celery_broker_url
        self.backend_uri = celery_result_backend
        self.monitor_redis_uri = monitor_redis_uri
        self.app_code = app_code
        # TTL (seconds) for callback bindings stored in Redis
        self.default_expire_seconds = 12 * 3600
        self.celery_client = Celery(
            'zcbot-celery',
            broker=self.broker_url,
            backend=self.backend_uri,
            task_acks_late=True
        )
        self.rds_client = Redis.from_url(url=monitor_redis_uri, decode_responses=True)
        # Cache of Task objects keyed by task name (see get_task_by_name).
        self._task_cache: Dict[str, Task] = {}

    def apply_group(self, task_name: str, task_params_list: List[Dict] = None, options: Dict = None, callback: Callback = None, queue_name: str = None, timeout: float = None, **kwargs):
        """
        Invoke a group of tasks that share one task name.

        :param task_name: task name
        :param task_params_list: list of keyword-argument dicts, one per task
        :param options: optional signature options
        :param callback: optional callback; when given, the call is asynchronous
        :param queue_name: optional queue name, defaults to `task.{task_name}`
        :param timeout: optional timeout (seconds) for the synchronous wait
        :param kwargs: may carry a fallback `timeout` value
        :return: the group AsyncResult when asynchronous, otherwise the task results
        """
        try:
            # Propagate the calling application code via message headers.
            _headers = {'app_code': self.app_code}
            if callback and callback.app_code:
                _headers['app_code'] = callback.app_code or self.app_code
            # Build one signature per parameter dict (task object hoisted out of the loop).
            task = self.get_task_by_name(task_name)
            task_list = [task.signature(kwargs=task_params, options=options)
                         for task_params in task_params_list]
            task_group = group(task_list)
            _queue_name = queue_name or f'task.{task_name}'
            async_result = task_group.apply_async(queue=_queue_name, headers=_headers)
            if callback:
                # Asynchronous: bind the callback; the result monitor triggers it later.
                LOGGER.info(f'[服务组]异步调用 task={task_name}')
                self._bind_callback(task_name, async_result, callback)
                return async_result
            # Synchronous: block until the whole group finishes (or times out).
            LOGGER.info(f'[服务组]同步调用 task={task_name}')
            _timeout = timeout or 60
            if not timeout and kwargs and kwargs.get('timeout', None):
                _timeout = float(kwargs.get('timeout'))
            rs = async_result.get(timeout=_timeout)
            async_result.forget()
            return rs
        except Exception as e:
            LOGGER.error(f'处理异常: task_name={task_name}, kwargs={len(task_params_list)}, e={e}')
            raise

    def apply(self, task_name: str, task_params: Dict = None, callback: Callback = None, queue_name: str = None, timeout: float = None, **kwargs):
        """
        Invoke a single task.

        :param task_name: task name
        :param task_params: keyword arguments for the task
        :param callback: optional callback; when given, the call is asynchronous
        :param queue_name: optional queue name, defaults to `task.{task_name}`
        :param timeout: optional timeout (seconds) for the synchronous wait
        :param kwargs: may carry a fallback `timeout` value
        :return: the AsyncResult when asynchronous, otherwise the task result
        """
        try:
            # Propagate the calling application code via message headers.
            _headers = {'app_code': self.app_code}
            if callback and callback.app_code:
                _headers['app_code'] = callback.app_code or self.app_code
            _queue_name = queue_name or f'task.{task_name}'
            task = self.get_task_by_name(task_name)
            async_result = task.apply_async(kwargs=task_params, queue=_queue_name, headers=_headers)
            if callback:
                # Asynchronous: bind the callback; the result monitor triggers it later.
                LOGGER.info(f'[服务]异步调用 task={task_name}, client={task.app.conf}')
                self._bind_callback(task_name, async_result, callback)
                return async_result
            # Synchronous: block until the task finishes (or times out).
            LOGGER.info(f'[服务]同步调用 task={task_name}')
            _timeout = timeout or 60
            if not timeout and kwargs and kwargs.get('timeout', None):
                _timeout = float(kwargs.get('timeout'))
            rs = async_result.get(timeout=_timeout)
            async_result.forget()
            return rs
        except Exception as e:
            LOGGER.error(f'处理异常: task_name={task_name}, kwargs={task_params}, e={e}')
            raise

    def get_task_by_name(self, task_name: str) -> Task:
        """
        Return a Celery Task object bound to this client for *task_name*.

        Instances are cached per name (the previous implementation created a
        fresh Task on every call despite its "cache" comment).
        """
        task = self._task_cache.get(task_name)
        if task is None:
            task = Task()
            task.bind(self.celery_client)
            task.name = task_name
            self._task_cache[task_name] = task
        return task

    def _bind_callback(self, task_name: str, async_result: AsyncResult, callback: Callback):
        """Persist the callback binding in Redis so the result monitor can fire it."""
        rs_key = get_result_key(app_code=self.app_code, task_name=task_name, task_id=async_result.id)
        self.rds_client.set(rs_key, callback.json(), ex=self.default_expire_seconds)

    def build_callback(self, callback_func: Union[str, Callable] = None, callback_data: Union[str, Dict, List] = None, app_code: str = None, tenant_code: str = None):
        """
        Build a Callback object, converting a callable into its text reference.

        :param callback_func: a callable or an existing "module:qualname" reference
        :param callback_data: extra data handed back to the callback
        :param app_code: overrides the client's app_code when given
        :param tenant_code: optional tenant code
        """
        callback = Callback()
        callback.app_code = app_code or self.app_code
        callback.tenant_code = tenant_code or None
        callback.callback_data = callback_data or None
        if callback_func is None or isinstance(callback_func, str):
            # Already a reference string (or absent); previously None crashed in obj_to_ref.
            callback.callback_func = callback_func
        else:
            callback.callback_func = obj_to_ref(callback_func)
        return callback
class CeleryClientHolder(object):
    """Holds a process-wide default CeleryClient instance."""

    __default_instance = None

    @staticmethod
    def init_default_instance(celery_broker_url: str, celery_result_backend: str, monitor_redis_uri: str, app_code: str):
        """Create the default client once; subsequent calls are no-ops."""
        if not CeleryClientHolder.__default_instance:
            CeleryClientHolder.__default_instance = CeleryClient(celery_broker_url, celery_result_backend, monitor_redis_uri, app_code)
            # Informational message (was previously logged at ERROR level).
            LOGGER.info(f'CeleryClientHolder初始化默认实例: monitor_redis={monitor_redis_uri}, broker_url={celery_broker_url}, result_backend={celery_result_backend}')

    @staticmethod
    def get_default_instance():
        """Return the default client; raise if init_default_instance was never called."""
        if not CeleryClientHolder.__default_instance:
            raise BizException('默认实例尚未初始化,请先初始化实例!')
        return CeleryClientHolder.__default_instance
import json
import traceback
from threading import Thread
from celery import Celery, exceptions
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redis import Redis
from .common import thread_pool
from .common.keys import get_result_key_filter, get_task_id_from_key
from .common.utils import ref_to_obj, singleton
LOGGER = get_task_logger(__name__)
@singleton
class CeleryRedisResultMonitor(object):
    """
    Watches the Celery result backend for finished tasks and dispatches the
    callbacks registered in Redis.

    Only one instance may be started per application (enforced by @singleton).
    """

    def __init__(self, celery_broker_url: str, celery_result_backend: str, monitor_redis_uri: str, app_code: str):
        self.broker_url = celery_broker_url
        self.backend_uri = celery_result_backend
        self.monitor_redis_uri = monitor_redis_uri
        self.app_code = app_code
        # Number of times the watch loop recovered from an unexpected error.
        self.error_retry = 0
        self.celery_client = Celery(
            'zcbot-celery-monitor',
            broker=self.broker_url,
            backend=self.backend_uri,
            task_acks_late=True
        )
        self.rds_client = Redis.from_url(url=monitor_redis_uri, decode_responses=True)

    def start(self):
        """Run the watch loop in a background thread."""
        Thread(target=self._watch, name='celery-monitor').start()
        LOGGER.info(f'启动Celery结果监听服务...')

    def _watch(self):
        """
        Poll Redis for this app's callback bindings and dispatch finished tasks.

        Fixed: the original retried after an error by calling itself recursively
        from a `finally` block, growing the stack without bound, and polled
        Redis in a tight loop. Retries are now iterative and each pass sleeps
        briefly between scans.
        """
        import time  # local import: only this polling loop needs it
        while True:
            try:
                # One result monitor per app: keys are namespaced by app_code.
                filter_key = get_result_key_filter(app_code=self.app_code)
                keys = self.rds_client.keys(filter_key)
                for key in keys or []:
                    task_id = get_task_id_from_key(key)
                    if not task_id:
                        continue
                    async_result = AsyncResult(id=task_id, app=self.celery_client)
                    if async_result.successful():
                        # Task finished: run its callback, then clean up.
                        try:
                            result = async_result.get()
                            callback = json.loads(self.rds_client.get(key))
                            callback_func = callback.get('callback_func', None)
                            callback_data = callback.get('callback_data', None)
                            if callback_func:
                                # TODO: support coroutine callbacks as well as plain functions
                                func = ref_to_obj(callback_func)
                                thread_pool.submit(func, result, callback_data)
                                LOGGER.info(f'回调执行: callback_func={callback_func}, callback_data={callback_data}')
                            else:
                                LOGGER.warning(f'无回调: task={task_id}')
                            self._remove_task(key, async_result)
                        except exceptions.TimeoutError:
                            LOGGER.error(f'异常: 结果获取超时 task={task_id}, e={traceback.format_exc()}')
                        except LookupError:
                            LOGGER.error(f'异常: 回调函数反序列化异常 task={task_id}, e={traceback.format_exc()}')
                        except Exception:
                            LOGGER.error(f'异常: 结果处理异常 task={task_id}, e={traceback.format_exc()}')
                    elif async_result.failed():
                        # Task failed: nothing to dispatch, just clean up.
                        self._remove_task(key, async_result)
                        LOGGER.error(f'失败: task={task_id}')
                time.sleep(0.5)  # avoid hammering Redis with keys() scans
            except Exception:
                LOGGER.error(f'监控异常: {traceback.format_exc()}')
                self.error_retry += 1
                LOGGER.info(f'监控异常后重试: {self.error_retry}')

    def _remove_task(self, key, async_result):
        """Forget the Celery result and delete its Redis callback binding."""
        async_result.forget()
        self.rds_client.delete(key)
import asyncio
import functools
import typing
from concurrent.futures import ThreadPoolExecutor
T = typing.TypeVar('T')  # result type of functions run in the pool

# Process-wide worker pool, created lazily in the main process by
# get_worker_pool(); remains None until first use.
_CELERY_TASK_POOL: typing.Optional[ThreadPoolExecutor] = None
def get_worker_pool() -> ThreadPoolExecutor:
    """
    Return the process-wide worker pool, creating it on first use.

    Should be executed in the main process. The worker pool is held at
    background for the whole process lifetime, while individual task pools
    should be released once their tasks are finished.

    :return: worker pool instance
    """
    global _CELERY_TASK_POOL
    # Compare against None explicitly rather than relying on truthiness
    # of an executor object.
    if _CELERY_TASK_POOL is None:
        _CELERY_TASK_POOL = ThreadPoolExecutor(thread_name_prefix='consumer')
    return _CELERY_TASK_POOL
def get_pool_size() -> int:
    """
    Return the number of work items currently queued in the worker pool.

    :return: pending task count, or 0 if the pool has not been created yet
    """
    global _CELERY_TASK_POOL
    if _CELERY_TASK_POOL is not None:
        # NOTE(review): relies on ThreadPoolExecutor's private _work_queue;
        # there is no public pending-count API in concurrent.futures.
        return _CELERY_TASK_POOL._work_queue.qsize()
    return 0
def submit(func: typing.Callable[..., T],
           *args: typing.Any,
           **kwargs: typing.Any) -> T:
    """
    Submit a single task to the worker pool.

    NOTE(review): despite the ``-> T`` annotation this returns the
    ``concurrent.futures.Future`` wrapping the call, not the result itself;
    call ``.result()`` on the returned object to obtain the value.

    :param func: function to run
    :param args: positional args forwarded to *func*
    :param kwargs: keyword args forwarded to *func* (previously silently dropped)
    :return: Future for the scheduled call
    """
    future = get_worker_pool().submit(func, *args, **kwargs)
    return future
async def run_in_worker(func: typing.Callable[..., T],
                        *args: typing.Any,
                        **kwargs: typing.Any) -> T:
    """
    Run a single task in the worker pool and await its result.

    :param func: function to run
    :param args: positional args forwarded to *func*
    :param kwargs: keyword args forwarded to *func*
    :return: result of the function
    """
    # get_running_loop() is the supported way to obtain the loop from inside
    # a coroutine; get_event_loop() is deprecated for this use.
    loop = asyncio.get_running_loop()
    # run_in_executor only forwards positional args, so bind the kwargs first.
    bound = functools.partial(func, **kwargs)
    worker_pool = get_worker_pool()
    return await loop.run_in_executor(worker_pool, bound, *args)
import time
import six
from functools import wraps, partial
from inspect import isclass, ismethod
def singleton(cls):
    """
    Class decorator that turns *cls* into a singleton.

    The first instantiation creates and caches the instance; every later
    call returns the cached instance (constructor arguments of later calls
    are ignored).
    """
    _instances = {}

    @wraps(cls)
    def _singleton(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]

    return _singleton
def get_callable_name(func):
    """
    Return the best available display name for the given function/callable.

    :rtype: str
    :raises TypeError: if no name can be determined for *func*
    """
    # Python 3.3+: the qualified name is the best answer when present.
    if hasattr(func, '__qualname__'):
        return func.__qualname__

    # Class methods, bound and unbound methods (legacy paths).
    bound_to = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
    if bound_to and hasattr(func, '__name__'):
        owner = bound_to if isclass(bound_to) else bound_to.__class__
    else:
        owner = getattr(func, 'im_class', None)
    if owner and hasattr(func, '__name__'):
        return '%s.%s' % (owner.__name__, func.__name__)

    # A class, or an instance of a class with a __call__ method.
    if hasattr(func, '__call__'):
        if hasattr(func, '__name__'):
            return func.__name__
        return func.__class__.__name__

    raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
def obj_to_ref(obj):
    """
    Return the text reference ("module:qualname") for the given callable.

    :rtype: str
    :raises TypeError: if the given object is not callable
    :raises ValueError: if the given object is a :class:`~functools.partial`,
        lambda or a nested function
    """
    if isinstance(obj, partial):
        raise ValueError('Cannot create a reference to a partial()')

    name = get_callable_name(obj)
    if '<lambda>' in name:
        raise ValueError('Cannot create a reference to a lambda')
    if '<locals>' in name:
        raise ValueError('Cannot create a reference to a nested function')

    # Default to the callable's own module; legacy bound/unbound methods
    # (im_self / im_class) override it with their owner's module.
    module = obj.__module__
    if ismethod(obj):
        im_self = getattr(obj, 'im_self', None)
        im_class = getattr(obj, 'im_class', None)
        if im_self:
            module = im_self.__module__
        elif im_class:
            module = im_class.__module__

    return '%s:%s' % (module, name)
def ref_to_obj(ref):
"""
Returns the object pointed to by ``ref``.
:type ref: str
"""
if not isinstance(ref, six.string_types):
raise TypeError('References must be strings')
if ':' not in ref:
raise ValueError('Invalid reference')
modulename, rest = ref.split(':', 1)
try:
obj = __import__(modulename, fromlist=[rest])
except ImportError:
raise LookupError('Error resolving reference %s: could not import module' % ref)
try:
for name in rest.split('.'):
obj = getattr(obj, name)
return obj
except Exception:
raise LookupError('Error resolving reference %s: error looking up object' % ref)
def time_stat(func):
def inner():
print(f'开始计时')
start = time.time()
func()
end = time.time()
print(f'耗时: {end - start}秒')
return inner | zcbot-celery-sdk | /zcbot-celery-sdk-0.0.48.tar.gz/zcbot-celery-sdk-0.0.48/zcbot_celery_sdk/common/utils.py | utils.py |
from typing import Dict
from .base import BaseService
from ..model.callback import Callback
from ..model.param import SkuSearchParam
class SkuSearchService(BaseService):
"""
商品搜索服务
"""
def search(self, platform: str = None, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
return self.get_client().apply(task_name=f'sku_search.{platform}', task_params=task_params, callback=callback, **kwargs)
def search_jd_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】京东PC端
"""
return self.get_client().apply(task_name='sku_search.jd_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_sn_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】苏宁PC端
"""
return self.get_client().apply(task_name='sku_search.sn_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_mmb_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】慢慢买PC端
"""
return self.get_client().apply(task_name='sku_search.mmb_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_mmb_m(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】慢慢买手机端
"""
return self.get_client().apply(task_name='sku_search.mmb_m', task_params=task_params.dict(), callback=callback, **kwargs)
def search_ehsy_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】西域PC端口
"""
return self.get_client().apply(task_name='sku_search.ehsy_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_tmall_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】天猫PC端口
"""
return self.get_client().apply(task_name='sku_search.tmall_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_tmall_h5(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】天猫PC端口
"""
return self.get_client().apply(task_name='sku_search.tmall_h5', task_params=task_params.dict(), callback=callback, **kwargs)
def search_mymro_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】MYMRO PC端口
"""
return self.get_client().apply(task_name='sku_search.mymro_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_gome_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】国美PC端口
"""
return self.get_client().apply(task_name='sku_search.gome_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_zkh_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】震坤行PC端口
"""
return self.get_client().apply(task_name='sku_search.zkh_pc', task_params=task_params.dict(), callback=callback, **kwargs)
def search_xfs_pc(self, task_params: SkuSearchParam = None, callback: Callback = None, **kwargs):
"""
【商品搜索】鑫方盛PC端口
"""
return self.get_client().apply(task_name='sku_search.xfs_pc', task_params=task_params.dict(), callback=callback, **kwargs) | zcbot-celery-sdk | /zcbot-celery-sdk-0.0.48.tar.gz/zcbot-celery-sdk-0.0.48/zcbot_celery_sdk/service/sku_search.py | sku_search.py |
from typing import List
from ..client.mongo_client import Mongo
# 获取链接分拣规则配置
def get_url_parse_rule(host: str = None):
if host:
return Mongo().get('zcbot_url_parse_rule', {'_id': host})
return Mongo().list('zcbot_url_parse_rule')
# 获取支持网站平台
def get_platforms():
return Mongo().list('zcbot_platforms', sort=[('sort', 1)])
# 获取支持网站平台
def get_platforms_by_group(group_code: str, enable: int = None):
_query = {}
if group_code:
_query["group_code"] = group_code
if enable is not None:
_query["enable"] = enable
return Mongo().list('zcbot_batch_spider_group', query=_query)
# 根据爬虫组编号,获取可选的爬虫清单
def get_spiders_by_group(group_code: str = None, plat_codes: List[str] = None, enable: int = None):
_query = {}
if group_code:
_query["groupCode"] = group_code
if plat_codes and len(plat_codes):
_query["platCode"] = {"$in": plat_codes}
if enable is not None:
_query["enable"] = enable
return Mongo().list('zcbot_batch_spider_group', query=_query)
# 根据爬虫组编号,获取爬虫组清单
def get_spider_group_list(group_code: str = None, plat_codes: List[str] = None, enable: int = None):
_query = {}
if group_code:
_query["groupCode"] = group_code
if plat_codes and len(plat_codes):
_query["platCode"] = {"$in": plat_codes}
if enable is not None:
_query["enable"] = enable
return Mongo().list('zcbot_batch_spider_group', query=_query)
#
# def get_platforms_by_group_code(group_code):
# _query = {
# "group_code": group_code
# }
# _fields = {
# "spiders": 0,
# "_id": 0
# }
# # return mongo.list('zcbot_batch_spider_group', query=_query, fields=_fields)
# return mongo.aggregate('zcbot_platforms', [
# {
# '$lookup': {
# 'from': 'zcbot_batch_spider_group',
# 'localField': '_id',
# 'foreignField': 'plat_code',
# 'as': 'spider'
# }
# },
# {
# '$match': {'spider.group_code': group_code}
# },
# {
# '$project': {'spider': 0}
# },
# {'$sort': {'sort': 1}}
# ]) | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/dao/base.py | base.py |
import json
import requests
from ..util import logger
from ..util.decator import singleton
from ..model.entity import PortainerNode, BatchSpider
LOGGER = logger.get('容器')
@singleton
class PortainerClient(object):
"""
Portainer客户端简易封装
"""
def __init__(self):
self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3775.400 QQBrowser/10.6.4208.400'
# 客户端
self.create_api = '{}/api/endpoints/{}/docker/containers/create?name={}'
self.start_api = '{}/api/endpoints/{}/docker/containers/{}/start'
self.stop_api = '{}/api/endpoints/{}/docker/containers/{}/kill'
def create_container(self, node: PortainerNode, spider: BatchSpider, container_name: str, param_data):
container_conf = {
"Cmd": [
"/bin/sh",
"-c",
spider.param % param_data
],
"Env": spider.env,
"Image": spider.dockerImage
}
rs = requests.post(
url=self.create_api.format(node.apiBaseUrl, node.endpointId, container_name),
headers={
'X-API-Key': node.apiToken,
'User-Agent': self.user_agent,
'Content-Type': 'application/json;charset=UTF-8',
},
data=json.dumps(container_conf)
)
if rs:
data = json.loads(rs.text) or {}
if data.get('Id', None):
id = data.get('Id', '')
LOGGER.info(f'创建成功: id={id}, endpoint={node.endpointId} name={container_name}')
return id
LOGGER.error(f'创建失败: result={rs.text}, endpoint={node.endpointId} name={container_name}')
def start(self, node: PortainerNode, container_id: str):
rs = requests.post(
url=self.start_api.format(node.apiBaseUrl, node.endpointId, container_id),
headers={
'X-API-Key': node.apiToken,
'User-Agent': self.user_agent,
'Content-Type': 'application/json;charset=UTF-8',
}
)
if rs and rs.status_code in [204, 200]:
LOGGER.info(f'启动成功: id={container_id}')
return container_id
LOGGER.error(f'启动失败: id={container_id}')
def stop(self, node: PortainerNode, container_id: str):
rs = requests.post(
url=self.stop_api.format(node.apiBaseUrl, node.endpointId, container_id),
headers={
'X-API-Key': node.apiToken,
'User-Agent': self.user_agent,
'Content-Type': 'application/json;charset=UTF-8',
}
)
if rs and rs.status_code in [204, 200]:
LOGGER.info(f'停止成功: id={container_id}')
return container_id
LOGGER.error(f'停止失败: id={container_id}') | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/client/portainer_client.py | portainer_client.py |
import pytz
from pymongo import MongoClient
from ..util import cfg
from ..util.exceptions import NoConfigException
from ..util.decator import singleton
@singleton
class Mongo(object):
"""
Mongo基础客户端简易封装
"""
def __init__(self, mongo_uri: str = None, mongo_db: str = None, *args, **kwargs):
self.mongo_uri = mongo_uri or cfg.get('ZCBOT_CORE_MONGO_URL')
self.mongo_db = mongo_db or cfg.get('ZCBOT_CORE_MONGO_DB')
if not self.mongo_uri:
raise NoConfigException('mongodb uri not config!')
if not self.mongo_db:
raise NoConfigException('mongodb database not config!')
self.client = MongoClient(self.mongo_uri, tz_aware=True, tzinfo=pytz.timezone('Asia/Shanghai'))
self.db = self.client[self.mongo_db]
# 获取集合
def get_collection(self, coll):
return self.db.get_collection(coll)
# 查询对象
def get(self, collection, query={}):
result = self.db[collection].find_one(query)
return result
# 统计数量
def count(self, collection, query={}):
return self.db[collection].count(query)
# 查询列表
def list(self, collection, query={}, fields=None, sort=[]):
if fields:
cursor = self.db[collection].find(query, fields, sort=sort)
else:
cursor = self.db[collection].find(query, sort=sort)
return list(cursor)
# 查询去重列表
def distinct(self, collection, dist_key, query={}, fields=None):
return self.db[collection].find(query, fields).distinct(dist_key)
# 聚合查询
def aggregate(self, collection, pipeline=[]):
cursor = self.db[collection].aggregate(pipeline, session=None, allowDiskUse=True)
return list(cursor)
# 查询分页列表
def list_with_page(self, collection, query={}, page_size=10000, fields=None):
rows = list()
total = self.db[collection].count(query)
if total > 0 and page_size > 0:
total_page = round(len(total) / page_size)
for page in range(0, total_page):
if fields:
cursor = self.db[collection].find(query, fields).skip(page_size * page).limit(page)
else:
cursor = self.db[collection].find(query).skip(page_size * page).limit(page)
curr_batch = list(cursor)
if curr_batch:
rows.append(curr_batch)
return rows
# 插入或更新
def insert_or_update(self, collection, data, id_key='_id'):
return self.db[collection].update({id_key: data[id_key]}, {'$set': data}, upsert=True)
# 更新
def update(self, collection, filter, data, multi=False):
return self.db[collection].update(filter, {'$set': data}, multi=multi)
# 以主键更新
def update_by_pk(self, collection, pk_val, data):
return self.db[collection].update({'_id': pk_val}, {'$set': data}, multi=False)
# 批量更新
def batch_update(self, collection, filter, datas, multi=False):
return self.db[collection].update(filter, datas, multi=multi)
# 更新
def delete(self, collection, filter):
return self.db[collection].delete_many(filter)
# 插入或更新
def bulk_write(self, collection, bulk_list):
if bulk_list:
return self.db[collection].bulk_write(bulk_list, ordered=False, bypass_document_validation=True)
# 关闭链接
def close(self):
self.client.close()
# 销毁关闭链接
def __del__(self):
if self.client:
self.client.close() | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/client/mongo_client.py | mongo_client.py |
import datetime
import time
from typing import Optional
from dateutil import tz
def now() -> datetime.datetime:
"""
get datetime instance of time of now
:return: time of now
"""
return datetime.datetime.now(tz.gettz('Asia/Shanghai'))
def __get_t(t: Optional[datetime.datetime] = None) -> datetime.datetime:
"""
get datetime instance
:param t: optional datetime instance
:return: datetime instance
"""
return t if isinstance(t, datetime.datetime) else now()
def to_str(t: Optional[datetime.datetime] = None,
fmt: str = '%Y-%m-%d %H:%M:%S.%f') -> str:
"""
get string formatted time
:param t: optional datetime instance
:param fmt: string format
:return:
"""
return __get_t(t).strftime(fmt)
def to_seconds(t: Optional[datetime.datetime] = None) -> int:
"""
datetime to seconds
:param t: optional datetime instance
:return: timestamp in seconds
"""
return int(__get_t(t).timestamp())
def to_milliseconds(t: Optional[datetime.datetime] = None) -> int:
"""
datetime to milliseconds
:param t: datetime instance
:return: timestamp in seconds
"""
return int(__get_t(t).timestamp() * 10 ** 3)
def to_microseconds(t: Optional[datetime.datetime] = None) -> int:
"""
datetime to microseconds
:param t: datetime instance
:return: timestamp in seconds
"""
return int(__get_t(t).timestamp() * 10 ** 6)
def get_dt(start_t: datetime.datetime,
end_t: Optional[datetime.datetime] = None) -> datetime.timedelta:
"""
get delta time
:param start_t: start time
:param end_t: end time
:return: timedelta instance
"""
return __get_t(end_t) - start_t
def to_seconds_dt(dt: datetime.timedelta) -> int:
"""
delta time to seconds
:param dt: timedelta instance
:return: seconds elapsed
"""
return int(dt.total_seconds())
def to_milliseconds_dt(dt: datetime.timedelta) -> int:
"""
delta time to milliseconds
:param dt: timedelta instance
:return: milliseconds elapsed
"""
return int(dt.total_seconds() * 10 ** 3)
def to_microseconds_dt(dt: datetime.timedelta) -> int:
"""
delta time to microseconds
:param dt: timedelta instance
:return: microseconds elapsed
"""
return int(dt.total_seconds() * 10 ** 6)
def parse_timestamp(time_str, tz_str='Asia/Shanghai'):
"""
将时间戳解析成本地时间(自动截断毫秒)
:param time_str:
:param tz_str:
:return:
"""
if time_str:
# 1576839034000
if len(str(time_str)) > 10:
# 截取掉毫秒
time_str = str(time_str)[0:10]
return datetime.datetime.fromtimestamp(int(time_str)).astimezone(tz.gettz(tz_str))
def time_to_batch_id(dt, delta_hour=0, delta_day=0, err=''):
"""
# 将采购时间转换为批次编号(delta为0时,时间应为iso时间),delta为正则加为负则减
"""
try:
if dt and isinstance(dt, datetime.datetime):
if delta_hour or delta_day:
time_delta = datetime.timedelta(hours=int(delta_hour), days=int(delta_day))
dt = dt + time_delta
return int(dt.strftime("%Y%m%d"))
except ValueError:
return err
def current_timestamp13():
"""
13位当前时间时间戳(毫秒级时间戳)
:return:
"""
return int(round(time.time() * 1000))
def current_timestamp10():
"""
10位当前时间时间戳(秒级时间戳)
:return:
"""
return int(time.time())
if __name__ == '__main__':
print(parse_timestamp(str(int(now().timestamp())))) | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/util/time.py | time.py |
from typing import List, Union, Dict
from pydantic import BaseModel, Field
from .base import BaseData
from .enums import CrawlJobStatus
from ..util import time as time_lib
class BatchTaskItem(BaseData):
"""
【输入】任务明细模型
"""
# 【输入】对象唯一序列号(全局唯一,可用于主键,等于_id)
sn: str = None
# 【输入】用户指定编号
rowId: str = None
# 【输入】商品链接
url: str = None
# 扩展字段,可是任意内容,透传
callback: Union[str, Dict, List] = None
class BatchTask(BaseModel):
"""
【输入】批次采集数据接收模型
"""
# 【输入】任务爬虫编码清单
spiderId: Union[List[str], str] = None
# 【输入】任务链接明细清单
taskItems: List[BatchTaskItem] = None
# 【输入】启动容器数量
containerCount: int = 1
# 【输入】文件名称配置键(主详图采集命名使用,非必要)
fileNameConfig: str = 'default'
class BatchApiData(BaseModel):
"""
【输入】批次采集接口接收模型
"""
# 【输入】批次编号
batchId: str
# 【输入】应用编码
appCode: str
# 【输入】任务明细清单
taskList: List[BatchTask]
# 【输入】是否为补采任务
supplyTask: bool = False
class BatchJobInst(BaseData):
"""
【输出】任务明细模型
"""
# 【输出】对象唯一序列号(全局唯一,可用于主键,等于_id)
jobId: str = None
# 【输出】爬虫容器ID
containerId: str = None
# 【输出】爬虫实例指纹,用于爬虫内部标记结果出处
instSn: str = None
# 【输出】job运行节点
node: Dict = None
# 【输出】job状态
status: str = None
# 【输出】job状态描述
statusText: str = None
# 创建时间
genTime: int = Field(
default_factory=time_lib.current_timestamp10
)
# 更新时间
updateTime: int = Field(
default_factory=time_lib.current_timestamp10
)
def set_status(self, status: CrawlJobStatus):
self.status = status.name
self.statusText = status.value
class BatchTaskResp(BaseModel):
"""
【输出】批次采集数据接收模型
"""
# 【输出】爬虫编码
spiderId: str
# 【输出】批次编号
batchId: str
# 【输出】任务链接明细清单
nodes: List[BatchJobInst] = []
class BatchSpiderGroupQuery(BaseModel):
"""
【输出】支持平台
"""
# 【输入】平台编码
platCodes: List[str] = None
# 【输入】组编码
groupCode: str = None
# 【输入】是否可用
enable: int = 1 | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/model/batch.py | batch.py |
import json
import random
import uuid
from typing import List
from ..util import logger, cfg
from ..util import time as time_lib
from ..util import sequence as sequence_util
from ..util import redis_key as redis_key_util
from ..util.exceptions import BizException
from ..model.batch import BatchTaskItem
from ..client.redis_client import Redis
from ..client.portainer_client import PortainerClient
from ..model.entity import PortainerNode
from ..model.batch import BatchApiData, BatchJobInst
from ..model.enums import CommonStatus, CrawlJobStatus
from ..dao import spider as spider_dao
from ..dao import node as node_dao
LOGGER = logger.get("任务")
client = PortainerClient()
def publish(api_data: BatchApiData):
"""
发起批次采集
"""
# 如果上游传递则直接使用,否则本系统生成批次编号
batch_id = api_data.batchId or sequence_util.gen_batch_id()
app_code = api_data.appCode
LOGGER.info(f"[启动]开始启动 app_code={app_code}, batch_id={batch_id}")
rs_list = list()
# 任务数据与爬虫配置校验
for task in api_data.taskList:
task_items = task.taskItems
spider_id = task.spiderId
file_name_config = task.fileNameConfig
if not spider_id or not task_items:
LOGGER.error(f"[启动]存在异常任务 batch_id={batch_id}, spider_id={spider_id}, task_items={len(task_items)}, app_code={app_code}")
continue
spider = spider_dao.get_batch_spider(spider_id)
if not spider or spider.status != CommonStatus.ON:
LOGGER.error(f"[启动]爬虫不存在或已停用 spider_id={spider_id}, app_code={app_code}, batch_id={batch_id}, task_items={len(task_items)}")
continue
# 创建容器 + 初始化队列
plat_code = spider.platCode
task_mode = spider.taskMode
batch_size = spider.batchSize
inst_sn = sequence_util.short_uuid()
# 初始化容器
init_task_queue(task_items, batch_id, spider_id, batch_size, plat_code, task_mode)
# 选择一个节点
node = node_balance_choice(spider.nodes)
if not node:
LOGGER.error(f"[启动]爬虫暂无可用运行节点 spider_id={spider_id}, nodes={spider.nodes}, app_code={app_code}, batch_id={batch_id}, task_items={len(task_items)}")
continue
# 创建容器
container_name = f'{spider.taskType}-{spider.platCode}-{batch_id}-{inst_sn}'
container_id = client.create_container(
spider=spider,
node=node,
container_name=container_name,
param_data={"batch_id": batch_id, "inst_sn": inst_sn, "app_code": app_code, "file_name_config": file_name_config}
)
if container_id:
job_inst = BatchJobInst(
jobId=f'{node.endpointId}:{container_id}',
containerId=container_id,
instSn=inst_sn,
node=node,
status=CrawlJobStatus.ERROR.name,
statusText=CrawlJobStatus.ERROR.value,
)
# 启动容器
rs = client.start(node, container_id)
if rs:
# 启动成功
job_inst.status = CrawlJobStatus.RUNNING.name
job_inst.statusText = CrawlJobStatus.RUNNING.value
rs_list.append(job_inst)
return rs_list
def cancel_job(node_id: str, container_id: str):
"""
取消采集任务
"""
node = node_dao.get_node(node_id)
if not node:
raise BizException(f"[任务]节点不存在或已停用 node_id={node_id}, container_id={container_id}")
rs = client.stop(node, container_id)
LOGGER.info(f"[任务]取消完成 endpoint_id={node.endpointId}, container_id={container_id}, rs={rs}")
return rs
def init_task_queue(task_items, batch_id, spider_id, batch_size, plat_code, task_mode) -> int:
"""
初始化任务队列
:param task_items:
:param batch_id:
:param batch_size:
:param spider_id:
:param app_code:
:param task_mode:
:param expire_seconds:
:return:
"""
# 初始化队列
LOGGER.info(f"[队列]任务队列初始化开始 task_mode={task_mode}")
tmp_queue_expire = cfg.get_int("ZCBOT_CORE_REDIS_TMP_QUEUE_EXPIRE") or 3600
if task_mode and task_mode == "multi":
# 批量模式(如:京东价格)
return _init_redis_batch(
batch_id=batch_id,
batch_size=batch_size,
spider_id=spider_id,
plat_code=plat_code,
rows=task_items,
expire_seconds=tmp_queue_expire
)
else:
# 单条采集模式
return _init_redis(batch_id=batch_id, spider_id=spider_id, plat_code=plat_code, rows=task_items, expire_seconds=tmp_queue_expire)
def _init_redis(batch_id: str, spider_id: str, plat_code: str, rows: List[BatchTaskItem], expire_seconds: int):
redis_key = redis_key_util.get_task_queue_key(batch_id, spider_id)
Redis().client.delete(redis_key)
for row in rows:
request_id = str(uuid.uuid4())
task = row.dict()
if plat_code:
task["platCode"] = plat_code
# 任务入队
Redis().client.lpush(redis_key, json.dumps(task))
# 加入重试源数据集合
add_to_retry_source_mapper(request_id, task, redis_key)
# 设置任务队列过期时间:默认12小时自动清理
set_expire(batch_id=batch_id, spider_id=spider_id, expire_seconds=expire_seconds)
count = Redis().client.llen(redis_key)
LOGGER.info(f"[队列]任务队列初始化完成 -> 单条模式 key={redis_key}, row={count}, count={len(rows)}")
return count
def _init_redis_batch(batch_id: str, batch_size: int, spider_id: str, plat_code: str, rows: List[BatchTaskItem], expire_seconds: int):
redis_key = redis_key_util.get_task_queue_key(batch_id, spider_id)
Redis().client.delete(redis_key)
batch_list = []
random.shuffle(rows)
for row in rows:
task = row.dict()
if plat_code:
task["platCode"] = plat_code
batch_list.append(task)
if len(batch_list) >= batch_size:
request_id = str(uuid.uuid4())
batch_row_data = {
"requestId": request_id,
"data": batch_list
}
# 任务入队
Redis().client.lpush(redis_key, json.dumps(batch_row_data))
# 加入重试源数据集合
add_to_retry_source_mapper(request_id, batch_row_data, redis_key)
batch_list = []
if batch_list:
request_id = str(uuid.uuid4())
batch_row_data = {
"requestId": request_id,
"data": batch_list
}
# 任务入队
Redis().client.lpush(redis_key, json.dumps(batch_row_data))
# 加入重试源数据集合
add_to_retry_source_mapper(request_id, batch_row_data, redis_key)
# 设置任务队列过期时间:默认12小时自动清理
set_expire(batch_id, spider_id, expire_seconds)
count = Redis().client.llen(redis_key)
LOGGER.info(f"[队列]任务队列初始化完成 -> 批量模式 key={redis_key}, row={count}, count={len(rows)}")
return len(rows)
def set_expire(batch_id: str, spider_id: str, expire_seconds=None):
"""
设置任务队列过期时间
1、分拣后入库:默认1小时(job未启动则清理)
2、job启动:重置过期时间(限制采集时效)
:param batch_id:
:param spider_id:
:param expire_seconds:
:return:
"""
_expire_seconds = expire_seconds or cfg.get_int("ZCBOT_CORE_REDIS_QUEUE_EXPIRE") or 12 * 3600
redis_key = redis_key_util.get_task_queue_key(batch_id, spider_id)
Redis().client.expire(redis_key, _expire_seconds)
def add_to_retry_source_mapper(request_id, task_data, task_queue_key):
"""
加入任务映射队列,用于重试任务源数据
"""
_data = {
"queue": task_queue_key,
"source": task_data,
"genTime": time_lib.current_timestamp10()
}
_expire_seconds = cfg.get_int("ZCBOT_CORE_REDIS_QUEUE_EXPIRE") or 12 * 3600
Redis().client.set(redis_key_util.get_retry_request_source_key(request_id), json.dumps(_data), ex=_expire_seconds)
# 节点选择,这里未来可做节点负载均衡
def node_balance_choice(nodes: List[str]) -> PortainerNode:
node_list = node_dao.get_node_list({'_id': {'$in': nodes}})
node = random.choice(node_list)
return node | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/service/batch.py | batch.py |
import json
import random
import uuid
from typing import List, Dict
from zcbot_url_parser import parser as url_parser
from ..util import logger, cfg
from ..util import time as time_lib
from ..util.exceptions import BizException
from ..client.redis_client import Redis
from ..model.enums import TaskType
from ..util import redis_key as redis_key_util
from ..model.stream import StreamApiData, StreamTaskItem
from ..dao import spider as spider_service
LOGGER = logger.get('流采')
def publish(api_data: StreamApiData):
"""
准备批次采集任务物料
"""
# 如果上游传递则直接使用,否则本系统生成批次编号
app_code = api_data.app_code
task_items = api_data.task_items or []
task_type = api_data.task_type
if not app_code:
raise BizException(f'[分拣]app_code未指定 api_data={api_data}')
if not task_items:
raise BizException(f'[分拣]任务清单为空 api_data={api_data}')
LOGGER.info(f'[分拣]开始流采分拣 app_code={app_code}, task_type={task_type}, rows={len(task_items)}')
# 物料分拣
result_map, classify_result_list, classify_ignore_list = classify(api_data)
# 初始化任务队列
init_job(result_map, task_type)
LOGGER.info(f'[分拣]流采分拣完成 app_code={app_code}, task_type={task_type}, rows={len(task_items)}')
return result_map, classify_result_list, classify_ignore_list
def simple_publish(api_data: StreamApiData):
"""
准备批次采集任务物料
"""
# 如果上游传递则直接使用,否则本系统生成批次编号
app_code = api_data.app_code
task_items = api_data.task_items or []
task_type = api_data.task_type
if not app_code:
raise BizException(f'[分拣]app_code未指定 api_data={api_data}')
if not task_items:
raise BizException(f'[分拣]任务清单为空 api_data={api_data}')
LOGGER.info(f'[分拣]开始流采分拣 app_code={app_code}, task_type={task_type}, rows={len(task_items)}')
# 简易分拣
result_map = dict()
for idx, row in enumerate(task_items):
if row.sn and row.platCode:
# 额外字段
row.appCode = row.appCode or app_code
task_list = result_map.get(row.platCode)
if not task_list:
task_list = list()
result_map[row.platCode] = task_list
task_list.append(row)
# 初始化任务队列
init_job(result_map, task_type)
LOGGER.info(f'[分拣]流采分拣完成 app_code={app_code}, task_type={task_type}, rows={len(task_items)}')
return result_map, task_items
def classify(api_data: StreamApiData) -> (Dict[str, List[StreamTaskItem]], List[dict]):
"""
任务分拣
:param api_data: 待分拣任务清单,
:return:
"""
app_code = api_data.app_code
# batch_id = api_data.batch_id
todo_list = api_data.task_items
LOGGER.info('[分拣]开始流采分拣 total=%s' % len(todo_list))
result_map = dict()
classify_result_list = list()
classify_ignore_list = list()
for idx, row in enumerate(todo_list):
# 解析链接组装任务
url_model = url_parser.parse_url(row.url)
link_id, plat_code, plat_name, ec_sku_id = url_model.link_sn, url_model.plat_code, url_model.plat_name, url_model.ec_sku_id
if link_id and plat_code and ec_sku_id:
row.linkId = link_id
row.platCode = plat_code
row.platName = plat_name
row.ecSkuId = ec_sku_id
# 额外字段
row.appCode = row.appCode or app_code
row.rowId = row.rowId or row.sn
else:
LOGGER.warning(f'[分拣]未识别的流采任务 url={row.url}, url_sn={link_id}, plat_code={plat_code}, ec_sku_id={ec_sku_id}')
classify_ignore_list.append(row)
continue
# 分拣
if row.sn and plat_code:
task_list = result_map.get(plat_code)
if not task_list:
task_list = list()
result_map[plat_code] = task_list
task_list.append(row)
classify_result_list.append(row.to_classify_result())
# 统计
count_map = dict()
for key in result_map.keys():
count_map[key] = len(result_map.get(key))
LOGGER.info('[分拣]流采分拣完成 %s' % count_map)
return result_map, classify_result_list, classify_ignore_list
def init_job(result_map: Dict[str, List[StreamTaskItem]], task_type: TaskType) -> (Dict[str, List[StreamTaskItem]], List[dict]):
# 初始化任务队列
for plat_code in result_map.keys():
spider_conf = spider_service.get_stream_spider_redis_key(plat_code, task_type.value)
if not spider_conf or not spider_conf.get('redis_key', None):
LOGGER.error(f'[发布]流采爬虫参数未配置 plat_code={plat_code}, task_type={task_type}')
continue
plat_task_list = result_map.get(plat_code)
plat_init_count = init_task_queue(spider_conf, plat_task_list)
LOGGER.info(f'[发布]初始化流采队列 plat_code={plat_code}, plat_init_count={plat_init_count}')
def init_task_queue(spider_conf, rows) -> int:
"""
初始化任务队列
:param spider_conf:
:param rows:
:return:
"""
# 初始化队列
task_mode = spider_conf.get('task_mode', None)
redis_key = spider_conf.get('redis_key', None)
LOGGER.info(f'[队列]任务队列初始化开始 redis_key={redis_key}, task_mode={task_mode}')
if task_mode and redis_key and task_mode == 'batch':
# 批量模式(如:京东价格)
return _init_redis_batch(redis_key, rows)
else:
# 单条采集模式
return _init_redis(redis_key, rows)
def add_to_task_mapper(request_id, task_data, task_queue_key):
"""
加入任务映射队列,用于重试任务源数据
"""
rds = Redis()
_data = {
'queue': task_queue_key,
'source': task_data,
'genTime': time_lib.current_timestamp10()
}
_expire_seconds = cfg.get_int('ZCBOT_CORE_REDIS_QUEUE_EXPIRE') or 12 * 3600
rds.client.set(redis_key_util.get_retry_request_source_key(request_id), json.dumps(_data), ex=_expire_seconds)
def _init_redis(redis_key, rows: List[StreamTaskItem]):
rds = Redis()
for row in rows:
request_id = str(uuid.uuid4())
task = {
'requestId': request_id,
"sn": str(row.sn),
"url": row.url,
"platCode": row.platCode,
"rowId": str(row.rowId),
"appCode": row.appCode,
"callback": row.callback,
}
# 任务入队
rds.client.lpush(redis_key, json.dumps(task))
# 加入重试源数据集合
add_to_task_mapper(request_id, task, redis_key)
count = rds.client.llen(redis_key)
LOGGER.info(f'[队列]任务队列初始化完成 -> 单条模式 key={redis_key}, row={count}, count={len(rows)}')
return count
def _init_redis_batch(redis_key, rows: List[StreamTaskItem]):
_expire_seconds = cfg.get_int('ZCBOT_CORE_REDIS_QUEUE_EXPIRE') or 12 * 3600
rds = Redis()
batch_list = []
random.shuffle(rows)
for row in rows:
task = {
"sn": str(row.sn),
"url": row.url,
"platCode": row.platCode,
"rowId": str(row.rowId),
"appCode": row.appCode,
"callback": row.callback,
}
batch_list.append(task)
if len(batch_list) >= BATCH_SIZE:
request_id = str(uuid.uuid4())
batch_row_data = {
'requestId': request_id,
'data': batch_list
}
# 任务入队
rds.client.lpush(redis_key, json.dumps(batch_row_data))
# 加入重试源数据集合
add_to_task_mapper(request_id, batch_row_data, redis_key)
batch_list = []
if batch_list:
request_id = str(uuid.uuid4())
batch_row_data = {
'requestId': request_id,
'data': batch_list
}
# 任务入队
rds.client.lpush(redis_key, json.dumps(batch_row_data))
# 加入重试源数据集合
add_to_task_mapper(request_id, batch_row_data, redis_key)
count = rds.client.llen(redis_key)
LOGGER.info(f'[队列]任务队列初始化完成 -> 批量模式 key={redis_key}, row={count}, count={len(rows)}')
return len(rows) | zcbot-crawl-core | /zcbot-crawl-core-0.0.2.tar.gz/zcbot-crawl-core-0.0.2/zcbot_crawl_core/service/stream.py | stream.py |
import json
import logging
from typing import List
from .model import StreamApiData, BatchApiData, BatchSpiderGroupQuery
from . import http_client
from . import exceptions
LOGGER = logging.getLogger(__name__)
class _Base(object):
def __init__(self, auth, endpoint, session=None, app_name='', timeout=60):
self.auth = auth
self.session = session or http_client.Session()
self.endpoint = endpoint.strip().strip('/')
self.timeout = timeout
self.app_name = app_name
def _send_request(self, method, url, **kwargs):
req = http_client.Request(method, url, app_name=self.app_name, **kwargs)
# 加入鉴权参数
self.auth.sign_request(req)
resp = self.session.send_request(req, timeout=self.timeout)
if resp.status != 200:
raise exceptions.BizException(resp)
return resp
def _get(self, url, **kwargs):
return self._send_request('GET', url, **kwargs)
def _post(self, url, **kwargs):
return self._send_request('POST', url, **kwargs)
@staticmethod
def _parse_result(rs):
if not rs or not rs.response_text:
raise exceptions.BizException(rs, '响应内容为空')
js = json.loads(rs.response_text)
if not js or not js.get('success', False) or js.get('code', -1) < 0:
raise exceptions.BizException(rs, '业务操作失败')
return js.get('data', {})
class ZcbotApi(_Base):
    """HTTP client facade for the zcbot crawl service (batch + stream APIs)."""
    # ====================
    # Batch crawl APIs
    # ====================
    def batch_publish(self, api_data: BatchApiData):
        """
        Publish a batch crawl task (create + start).
        :param api_data: batch payload (batch id, app code, task list)
        :return: the `data` section of the service response
        """
        rs = self._post(
            url=f'{self.endpoint}/api/batch/publish',
            data=api_data.json()
        )
        return self._parse_result(rs)
    def batch_cancel(self, host_id: str, container_id: str):
        """
        Cancel a running batch crawl task on a given host/container.
        :return: the `data` section of the service response
        """
        rs = self._get(
            url=f'{self.endpoint}/api/batch/cancel',
            params={'hostId': host_id, 'containerId': container_id},
        )
        return self._parse_result(rs)
    # ====================
    # Stream crawl APIs
    # ====================
    def stream_publish(self, api_data: StreamApiData):
        """
        Publish a stream crawl task.
        :param api_data: stream payload
        :return: the `data` section of the service response
        """
        rs = self._post(
            url=f'{self.endpoint}/api/stream/publish',
            data=api_data.json()
        )
        return self._parse_result(rs)
    # ============================
    # List all supported platforms.
    def get_all_platforms(self):
        """Return all platforms known to the crawl service."""
        rs = self._get(
            url=f'{self.endpoint}/api/meta/platforms',
        )
        return self._parse_result(rs)
    # List selectable spiders for a spider group.
    def get_support_spiders_by_group(self, api_data: BatchSpiderGroupQuery):
        """Return the spiders available for the given group query."""
        rs = self._post(
            url=f'{self.endpoint}/api/meta/batch-spider/group/spiders',
            data=api_data.json()
        )
        return self._parse_result(rs)
    # List supported platforms for a spider group.
    def get_support_platforms_by_group(self, group_code: str = None):
        """Return the platforms supported by the given spider group."""
        rs = self._post(
            url=f'{self.endpoint}/api/meta/batch-spider/group/platforms',
            params={"groupCode": group_code}
        )
        return self._parse_result(rs)
from enum import Enum
from typing import List, Union, Dict
from pydantic import BaseModel, Field
from ..util import time as time_lib
# =============
# 基础模型
# =============
class BaseData(BaseModel):
    """
    Common base model for persisted documents (primary key + insert time).
    """
    # primary key
    # NOTE(review): pydantic treats underscore-prefixed names as private,
    # so `_id` is not a real model field — confirm this is intentional
    _id: str = None
    # insert time: 10-digit (second) unix timestamp, auto-filled on creation
    genTime: int = Field(
        default_factory=time_lib.current_timestamp10
    )
class TaskType(str, Enum):
    """
    Task type enumeration (batch and stream crawl variants).
    """
    # SKU basic info
    SKU_INFO = 'sku_info'
    # SKU price
    SKU_PRICE = 'sku_price'
    # SKU basic info plus price and other details
    SKU_FULL = 'sku_full'
    # SKU main/detail images
    SKU_IMAGE = 'sku_image'
    # stream crawl: SKU identifiers (e.g. item codes)
    STREAM_SKU_ID = 'stream_sku_id'
    # stream crawl: SKU price info
    STREAM_SKU_PRICE = 'stream_sku_price'
    # stream crawl: SKU basic info plus price and other details
    STREAM_SKU_FULL = 'stream_sku_full'
    # stream crawl: SKU main/detail images
    STREAM_SKU_IMAGE = 'stream_sku_image'
# =============
# 批次采集相关模型
# =============
class BatchTaskItem(BaseModel):
    """
    One task item (a single product URL) inside a batch task.
    """
    # [input] globally unique serial number (usable as primary key, equals _id)
    sn: str = None
    # [input] caller-supplied row id
    rowId: str = None
    # [input] product URL
    url: str = None
    # extension field; arbitrary content, passed through untouched
    callback: Union[str, Dict, List] = None
class BatchTask(BaseModel):
    """
    Batch task payload: one spider plus its task items.
    """
    # [input] spider id
    spiderId: str = None
    # [input] task item (URL) list
    taskItems: List[BatchTaskItem] = []
    # number of containers to start
    containerCount: int = 1
    # [input] file-name config key (used for image naming; optional)
    fileNameConfig: str = 'default'
class BatchApiData(BaseModel):
    """
    Request payload for the batch crawl API.
    """
    # [input] batch id
    batchId: str = None
    # [input] application code
    appCode: str = None
    # [input] task list (one entry per spider)
    taskList: List[BatchTask] = []
    # whether to start the batch immediately after creation
    autoStart: bool = True
class BatchTaskNodeResp(BaseModel):
    """
    Per-node status entry in a batch crawl response.
    """
    # [output] globally unique node serial number (usable as primary key, equals _id)
    nodeId: str = None
    # [output] node spider serial code, used to mark where results came from
    nodeSn: str = None
    # [output] container id
    containerId: str = None
    # [output] host (server) id
    hostId: str = None
    # [output] item status code
    status: str = None
    # [output] human-readable status description
    statusText: str = None
    # creation timestamp
    genTime: int = None
    # last-update timestamp
    updateTime: int = None
class BatchTaskResp(BaseModel):
    """
    Batch crawl result response: one spider's nodes within a batch.
    """
    # [output] spider id
    spiderId: str = None
    # [output] batch id
    batchId: str = None
    # [output] per-node task detail list
    nodes: List[BatchTaskNodeResp] = []
# =============
# 流式采集相关模型
# =============
class StreamTaskItem(BaseData):
    """
    One stream-crawl task item (material to collect).
    """
    # [input] globally unique serial number (usable as primary key, equals _id)
    sn: str = None
    # [input] product URL
    url: str = None
    # e-commerce platform code
    platCode: str = None
    # [input] source app (required in stream mode)
    appCode: str = None
    # task (batch) id
    batchId: str = None
    # extension field; arbitrary content, passed through untouched
    callback: Union[str, Dict, List] = None
class StreamApiData(BaseModel):
    """
    Generic request payload for the stream crawl API.
    """
    # [v2 input] spider id
    spiderId: str = None
    # [input] application code
    appCode: str = None
    # [v1 input] task type
    taskType: TaskType = None
    # [input] task item list
    taskItems: List[StreamTaskItem] = None
    # [input] file-name config key
    fileNameConfig: str = 'default'
    # [v2 input] task queue suffix (used to split app-dedicated task queues)
    taskQueueSuffix: str = None
# =============
# 批次采集基础数据相关模型
# =============
class BatchSpiderGroupQuery(BaseModel):
    """
    Query payload for spider-group metadata lookups (supported platforms/spiders).
    """
    # [input] platform codes to filter on
    platCodes: List[str] = None
    # [input] group code
    groupCode: str = None
    # [input] enabled flag (1 = enabled)
    enable: int = 1
import datetime
import time
from typing import Optional
from dateutil import tz
def now() -> datetime.datetime:
    """
    Return the current time as a timezone-aware datetime (Asia/Shanghai).
    :return: time of now
    """
    shanghai = tz.gettz('Asia/Shanghai')
    return datetime.datetime.now(shanghai)
def __get_t(t: Optional[datetime.datetime] = None) -> datetime.datetime:
    """
    Return *t* when it is already a datetime; otherwise fall back to now().
    :param t: optional datetime instance
    :return: datetime instance
    """
    if isinstance(t, datetime.datetime):
        return t
    return now()
def to_str(t: Optional[datetime.datetime] = None,
           fmt: str = '%Y-%m-%d %H:%M:%S.%f') -> str:
    """
    Format *t* (or the current time) using *fmt*.
    :param t: optional datetime instance
    :param fmt: strftime format string
    :return: formatted time string
    """
    target = __get_t(t)
    return target.strftime(fmt)
def to_seconds(t: Optional[datetime.datetime] = None) -> int:
    """
    Convert a datetime (default: now) to a unix timestamp in seconds.
    :param t: optional datetime instance
    :return: timestamp in seconds
    """
    ts = __get_t(t).timestamp()
    return int(ts)
def to_milliseconds(t: Optional[datetime.datetime] = None) -> int:
    """
    Convert a datetime (default: now) to a unix timestamp in milliseconds.
    :param t: optional datetime instance
    :return: timestamp in milliseconds
    """
    ts = __get_t(t).timestamp()
    return int(ts * 1000)
def to_microseconds(t: Optional[datetime.datetime] = None) -> int:
    """
    Convert a datetime (default: now) to a unix timestamp in microseconds.
    :param t: optional datetime instance
    :return: timestamp in microseconds
    """
    ts = __get_t(t).timestamp()
    return int(ts * 1000000)
def get_dt(start_t: datetime.datetime,
           end_t: Optional[datetime.datetime] = None) -> datetime.timedelta:
    """
    Return the elapsed time from *start_t* to *end_t* (default: now).
    :param start_t: start time
    :param end_t: end time
    :return: timedelta instance
    """
    end = __get_t(end_t)
    return end - start_t
def to_seconds_dt(dt: datetime.timedelta) -> int:
    """
    Convert a timedelta to whole seconds (fraction truncated toward zero).
    :param dt: timedelta instance
    :return: seconds elapsed
    """
    seconds = dt.total_seconds()
    return int(seconds)
def to_milliseconds_dt(dt: datetime.timedelta) -> int:
    """
    Convert a timedelta to whole milliseconds (fraction truncated toward zero).
    :param dt: timedelta instance
    :return: milliseconds elapsed
    """
    seconds = dt.total_seconds()
    return int(seconds * 1000)
def to_microseconds_dt(dt: datetime.timedelta) -> int:
    """
    Convert a timedelta to whole microseconds (fraction truncated toward zero).
    :param dt: timedelta instance
    :return: microseconds elapsed
    """
    seconds = dt.total_seconds()
    return int(seconds * 1000000)
def parse_timestamp(time_str, tz_str='Asia/Shanghai'):
    """
    Parse a 10/13-digit unix timestamp into a timezone-aware local datetime.

    Millisecond timestamps (more than 10 digits, e.g. 1576839034000) are
    truncated to seconds first. Returns None for falsy input.
    :param time_str: timestamp as str or int
    :param tz_str: target timezone name
    :return: aware datetime, or None when input is empty
    """
    if not time_str:
        return None
    raw = str(time_str)
    if len(raw) > 10:
        # drop the millisecond part
        raw = raw[0:10]
    parsed = datetime.datetime.fromtimestamp(int(raw))
    return parsed.astimezone(tz.gettz(tz_str))
def time_to_batch_id(dt, delta_hour=0, delta_day=0, err=''):
    """
    Convert a purchase datetime to a yyyymmdd batch id (int), optionally
    shifted by hour/day deltas (positive adds, negative subtracts).

    Returns None when *dt* is not a datetime, and *err* when the delta
    values cannot be converted to int.
    """
    try:
        if not dt or not isinstance(dt, datetime.datetime):
            return None
        if delta_hour or delta_day:
            shift = datetime.timedelta(hours=int(delta_hour), days=int(delta_day))
            dt = dt + shift
        return int(dt.strftime("%Y%m%d"))
    except ValueError:
        return err
def current_timestamp13():
    """
    Current unix timestamp in milliseconds (13 digits).
    :return: int millisecond timestamp
    """
    millis = time.time() * 1000
    return int(round(millis))
def current_timestamp10():
    """
    Current unix timestamp in seconds (10 digits).
    :return: int second timestamp
    """
    seconds = time.time()
    return int(seconds)
if __name__ == '__main__':
    # ad-hoc smoke test: round-trip the current time through parse_timestamp
    print(parse_timestamp(str(int(now().timestamp()))))
import json
import logging
import time
import pika
import traceback
from pika.exceptions import StreamLostError
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Union
from ..util.decator import singleton
from .rabbit_keys import RabbitKeys
from .processor import AbstractMessageProcess
from .processor import AbstractStreamMessageProcess
LOGGER = logging.getLogger(__name__)
@singleton
class BatchResultReceiver(object):
    """
    [Singleton] Asynchronous receiver for batch crawl results.

    1. A channel is created per batch and destroyed automatically when crawling completes.
    2. Queues/exchanges are auto-declared and auto-deleted.
    3. Channels are separated per batch.
    """
    # NOTE(review): `_biz_inited` is never read or written in this class — confirm it is still needed
    _biz_inited = False
    def __init__(self, processor: Union[AbstractMessageProcess, Callable], rabbit_uri: str, qos: int = None, queue_expires: int = None, inactivity_timeout: int = None, max_workers: int = None):
        # message handler: an AbstractMessageProcess subclass or a bare callable
        self.processor = processor
        self.rabbit_uri = rabbit_uri
        # NOTE(review): qos is stored but not applied to the channel in this class — confirm
        self.qos = qos or 10
        # queue TTL in seconds (passed to x-expires in milliseconds)
        self.queue_expires = queue_expires or 28800
        # consume() gives up after this many seconds without messages
        self.inactivity_timeout = inactivity_timeout or 1800
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        LOGGER.info(f'[采集结果]监听器初始化 rabbit_uri={self.rabbit_uri}')
    def submit_receiving_task(self, app_code: str, tenant_code: str, batch_id: str):
        """Start receiving one batch's results on a background thread."""
        try:
            self.executor.submit(self._receive_message, app_code, batch_id, tenant_code)
        except Exception:
            LOGGER.error(traceback.format_exc())
    def _receive_message(self, app_code: str, batch_id: str, tenant_code: str):
        """
        Connect to the batch's result queue and consume it until the queue
        goes quiet (blocking; must run on its own thread or background task).
        """
        LOGGER.info(f'[消息接收]开始接收 batch_id={batch_id}')
        if isinstance(self.processor, AbstractMessageProcess):
            _process_func = self.processor.process_message
        else:
            _process_func = self.processor
        _exchange_name = RabbitKeys.get_result_exchange_key(app_code)
        _routing_name = RabbitKeys.get_result_routing_key(app_code, batch_id)
        _queue_name = RabbitKeys.get_result_queue_key(app_code, batch_id)
        connection = pika.BlockingConnection(pika.URLParameters(self.rabbit_uri))
        channel = connection.channel()
        try:
            # declare queue/exchange and bind them
            channel.queue_declare(queue=_queue_name, auto_delete=True, arguments={'x-expires': self.queue_expires * 1000})
            channel.exchange_declare(exchange=_exchange_name)
            channel.queue_bind(queue=_queue_name, exchange=_exchange_name, routing_key=_routing_name)
            LOGGER.info(f'[消息接收]队列信息 queue={_queue_name}, exchange={_exchange_name}, routing={_routing_name}')
            # consume loop
            for method, properties, body in channel.consume(_queue_name, inactivity_timeout=self.inactivity_timeout, auto_ack=False):
                # after inactivity_timeout without messages, stop consuming (exit the loop)
                if not method and not properties:
                    break
                try:
                    headers = properties.headers
                    if not headers:
                        LOGGER.error(f'[消息接收]消息结构异常 properties={properties}, body={body}')
                        # NOTE(review): this `continue` skips basic_ack below, so
                        # the malformed message is never acknowledged — confirm
                        # whether redelivery is the intended behavior
                        continue
                    # parse and dispatch the message
                    msg_type = headers.get('msg_type', None)
                    body_json = json.loads(body.decode())
                    _process_func(msg_type, body_json, tenant_code, batch_id)
                except (StreamLostError, ConnectionAbortedError):
                    LOGGER.error(f'[消息接收]服务端关闭链接通道 batch_id={batch_id}')
                except Exception:
                    LOGGER.error(f'[消息接收]解析异常 batch_id={batch_id}, {traceback.format_exc()}')
                # acknowledge the message (even when processing above failed)
                channel.basic_ack(method.delivery_tag)
        except Exception:
            LOGGER.error(f'[消息接收]接收过程异常 queue={_queue_name}, {traceback.format_exc()}')
        finally:
            try:
                # close connection and channel (closing the connection also closes the channel)
                channel.close()
                connection.close()
                LOGGER.info(f'[消息接收]销毁队列 batch_id={batch_id}, queue={_queue_name}')
            except Exception:
                LOGGER.error(f'[消息接收]关闭链接异常 queue={_queue_name}, {traceback.format_exc()}')
        LOGGER.info(f'[消息接收]接收完成 batch_id={batch_id}')
@singleton
class StreamResultReceiver(object):
    """
    [Singleton] Asynchronous receiver for stream crawl results.

    Watches result queues on dedicated watcher threads and fans message
    processing out to a separate processor pool. On any connection failure
    the watcher cleans up and retries after a delay.

    Fix: the retry path previously re-entered `_receive_message` recursively
    from the `finally` block, growing the call stack on every reconnect; it
    now retries in an iterative loop. Cleanup is also safe when `_declare`
    itself fails (connection/channel may not exist yet).
    """
    def __init__(self, processor: Union[AbstractStreamMessageProcess, Callable], rabbit_uri: str, max_watcher_count: int = None, max_processor_count: int = None):
        # message handler: an AbstractStreamMessageProcess subclass or a bare callable
        self.processor = processor
        self.rabbit_uri = rabbit_uri
        # queue TTL in seconds; default must stay consistent with zcbot_spider
        self.queue_expires = 28800
        _max_watcher_count = max_watcher_count or 4
        _max_processor_count = max_processor_count or 24
        self.watcher_executor = ThreadPoolExecutor(max_workers=_max_watcher_count, thread_name_prefix='stream-watcher')
        self.processor_executor = ThreadPoolExecutor(max_workers=_max_processor_count, thread_name_prefix='stream-processor')
        LOGGER.info(f'[流采结果]监听器初始化 rabbit_uri={self.rabbit_uri}')
    def watch(self, exchange_name: str, routing_name: str, queue_name: str):
        """Start watching one queue on a background watcher thread."""
        self.watcher_executor.submit(self._receive_message, exchange_name, routing_name, queue_name)
    def _declare(self, exchange_name: str, routing_name: str, queue_name: str):
        """Open a connection/channel and declare + bind queue and exchange."""
        connection = pika.BlockingConnection(pika.URLParameters(self.rabbit_uri))
        channel = connection.channel()
        channel.queue_declare(queue=queue_name, arguments={'x-expires': self.queue_expires * 1000})
        channel.exchange_declare(exchange=exchange_name)
        channel.queue_bind(queue=queue_name, exchange=exchange_name, routing_key=routing_name)
        LOGGER.info(f'[流采结果]队列信息 queue={queue_name}, exchange={exchange_name}, routing={routing_name}')
        return connection, channel
    def _receive_message(self, exchange_name: str, routing_name: str, queue_name: str):
        """
        Connect to the queue and consume it (blocking; runs on a watcher
        thread). Reconnects forever with a 10-second back-off on any failure.
        """
        LOGGER.info(f'[流采结果]开始接收')
        _process_func = self.processor
        if isinstance(self.processor, AbstractStreamMessageProcess):
            _process_func = self.processor.process_message
        # iterative retry loop (was tail recursion, which grew the stack)
        while True:
            connection = None
            channel = None
            try:
                connection, channel = self._declare(exchange_name, routing_name, queue_name)
                for method, properties, body in channel.consume(queue_name, auto_ack=False):
                    # keep the channel alive; skip empty/invalid deliveries
                    if not method or not properties or not properties.headers:
                        LOGGER.error(f'[流采结果]无效消息 method={method}, properties={properties}, body={body}')
                        continue
                    # parse the message and hand it to the processor pool
                    msg_type = properties.headers.get('msg_type', None)
                    body_json = json.loads(body.decode())
                    self.processor_executor.submit(_process_func, msg_type, body_json)
                    # acknowledge after hand-off
                    channel.basic_ack(method.delivery_tag)
            except (StreamLostError, ConnectionAbortedError):
                LOGGER.error(f'[流采结果]服务端关闭链接通道StreamLostError,ConnectionAbortedError -> 重连 {traceback.format_exc()}')
            except pika.exceptions.ConnectionClosedByBroker:
                LOGGER.error(f'[流采结果]链接关闭异常ConnectionClosedByBroker -> 重连 {traceback.format_exc()}')
            except pika.exceptions.AMQPChannelError:
                LOGGER.error(f'[流采结果]通道关闭异常AMQPChannelError -> 重连 {traceback.format_exc()}')
            except pika.exceptions.AMQPConnectionError:
                LOGGER.error(f'[流采结果]链接关闭异常AMQPConnectionError -> 重连 {traceback.format_exc()}')
            except Exception:
                LOGGER.error(f'[流采结果]接收过程异常 -> 重连 {traceback.format_exc()}')
            finally:
                try:
                    # close channel and connection (closing the connection also closes its channels)
                    if channel is not None:
                        channel.close()
                    if connection is not None:
                        connection.close()
                    LOGGER.info(f'[流采结果]销毁队列')
                except Exception:
                    LOGGER.error(f'[流采结果]关闭链接异常 {traceback.format_exc()}')
            # back-off before reconnecting
            time.sleep(10)
            LOGGER.warning(f'[流采结果]异常重试中...')
import logging
import threading
import traceback
from typing import Union, Dict, List
from .model import MsgType
LOGGER = logging.getLogger(__name__)
class AbstractMessageProcess(object):
    """
    Base class for batch-result message processing.

    `process_message` routes a message to the matching hook by message type;
    subclasses override the hooks they care about. All processing errors are
    caught and logged so the receiving loop keeps running.
    """
    def process_message(self, msg_type: str, msg_data: Union[Dict, List] = None, tenant_code: str = None, batch_id: str = None):
        """Dispatch one message to its type-specific handler, swallowing errors."""
        try:
            dispatch = {
                MsgType.DATA_SKU_TEXT.name: self.process_sku_text_data,
                MsgType.DATA_SKU_IMAGES.name: self.process_sku_images_data,
                MsgType.ACT_CLOSED.name: self.process_closed_action,
                MsgType.ACT_OPENED.name: self.process_opened_action,
            }
            handler = dispatch.get(msg_type)
            if handler is not None:
                handler(tenant_code, batch_id, msg_data)
            else:
                self.process_others(tenant_code, batch_id, msg_data, msg_type)
        except Exception:
            LOGGER.error(f'[消息处理]解析异常 batch_id={batch_id}, msg_data={msg_data}, except={traceback.format_exc()}')
    def process_sku_text_data(self, tenant_code: str, batch_id: str, msg_data: Union[Dict, List]):
        """Hook: handle SKU text data. Default implementation just prints."""
        print(f'[{threading.current_thread().name}]process: msg_type=process_sku_text_data, msg_data={msg_data}, tenant_code={tenant_code}, batch_id={batch_id}')
    def process_sku_images_data(self, tenant_code: str, batch_id: str, msg_data: Union[Dict, List]):
        """Hook: handle SKU image data. Default implementation just prints."""
        print(f'[{threading.current_thread().name}]process: msg_type=process_sku_images_data, msg_data={msg_data}, tenant_code={tenant_code}, batch_id={batch_id}')
    def process_opened_action(self, tenant_code: str, batch_id: str, msg_data: Union[Dict, List]):
        """Hook: handle a batch-opened action. Default implementation just prints."""
        print(f'[{threading.current_thread().name}]process: msg_type=process_opened_action, msg_data={msg_data}, tenant_code={tenant_code}, batch_id={batch_id}')
    def process_closed_action(self, tenant_code: str, batch_id: str, msg_data: Union[Dict, List]):
        """Hook: handle a batch-closed action. Default implementation just prints."""
        print(f'[{threading.current_thread().name}]process: msg_type=process_closed_action, msg_data={msg_data}, tenant_code={tenant_code}, batch_id={batch_id}')
    def process_others(self, tenant_code: str, batch_id: str, msg_data: Union[Dict, List], msg_type: str):
        """Hook: called for unknown message types; logs an error by default."""
        LOGGER.error(f'[消息处理]未知类型消息 msg_type={msg_type}, msg_data={msg_data}, tenant_code={tenant_code}, batch_id={batch_id}')
class AbstractStreamMessageProcess(object):
    """
    Base class for stream-crawl message processing.

    `process_message` routes a message to the matching hook by message type;
    subclasses override the hooks they care about.

    Fix: the exception handler previously logged the same traceback twice;
    it now logs once with full context.
    """
    def process_message(self, msg_type: str, msg_data: Union[Dict, List] = None):
        """Dispatch one message to its type-specific handler, swallowing errors."""
        try:
            if msg_type == MsgType.STREAM_SKU_TEXT.name:
                self.process_stream_sku_text(msg_data)
            elif msg_type == MsgType.STREAM_SKU_IMAGES.name:
                self.process_stream_sku_images(msg_data)
            else:
                self.process_others(msg_data, msg_type)
        except Exception:
            LOGGER.error(f'[流采消息]解析异常 msg_data={msg_data}, except={traceback.format_exc()}')
    def process_stream_sku_text(self, msg_data: dict):
        """Hook: handle a stream SKU text message. Default implementation just prints."""
        print(msg_data)
    def process_stream_sku_images(self, msg_data: dict):
        """Hook: handle a stream SKU images message. Default implementation just prints."""
        print(msg_data)
    def process_others(self, msg_data: Union[Dict, List], msg_type: str):
        """Hook: called for unknown message types; logs an error by default."""
        LOGGER.error(f'[流采消息]未知类型消息 msg_type={msg_type}, msg_data={msg_data}')
from redis import Redis
from typing import Callable, Dict, Union, List
from celery import Celery, Task, group
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from .common.exceptions import BizException
from .common.utils import obj_to_ref, singleton
from .common.keys import get_result_key
from .model.callback import Callback
LOGGER = get_task_logger(__name__)
# @singleton
class PredictClient(object):
    """
    Celery client for dispatching predict tasks.

    Supports synchronous calls (block until the result arrives) and
    asynchronous calls (register a Callback; the result monitor picks up
    the finished task and invokes it).

    NOTE: do not instantiate manually; use PredictClientHolder so service
    classes can share a single instance.
    """
    def __init__(self, celery_broker_url: str, celery_result_backend: str, monitor_redis_uri: str, app_code: str):
        self.broker_url = celery_broker_url
        self.backend_uri = celery_result_backend
        self.monitor_redis_uri = monitor_redis_uri
        self.app_code = app_code
        # TTL for callback registrations in redis (12 hours)
        self.default_expire_seconds = 12 * 3600
        self.celery_client = Celery(
            'zcbot-predict',
            broker=self.broker_url,
            backend=self.backend_uri,
            task_acks_late=True
        )
        self.rds_client = Redis.from_url(url=monitor_redis_uri, decode_responses=True)
    def apply(self, task_name: str, task_params: Dict = None, callback: Callback = None, queue_name: str = None, timeout: float = None, **kwargs):
        """
        Invoke a single predict task.

        :param task_name: task name
        :param task_params: task keyword arguments
        :param callback: optional; when given the call is asynchronous and the
                         callback is registered for the result monitor
        :param queue_name: optional; defaults to `task.{task_name}`
        :param timeout: optional sync-wait timeout in seconds (default 60;
                        may also be supplied via kwargs['timeout'])
        :return: the task result (sync) or the Celery AsyncResult (async)
        """
        try:
            # propagate the calling app code in the message headers
            _headers = {'app_code': self.app_code}
            if callback and callback.app_code:
                _headers['app_code'] = callback.app_code or self.app_code
            # dispatch the task
            _queue_name = queue_name or f'task.{task_name}'
            task = self.get_task_by_name(task_name)
            async_result = task.apply_async(kwargs=task_params, queue=_queue_name, headers=_headers)
            # result handling
            if callback:
                # async: register the callback for the result monitor
                LOGGER.info(f'[服务]异步调用 task={task_name}, client={task.app.conf}')
                self._bind_callback(task_name, async_result, callback)
                return async_result
            else:
                # sync: wait for the result, then release it from the backend
                LOGGER.info(f'[服务]同步调用 task={task_name}')
                _timeout = timeout or 60
                if not timeout and kwargs and kwargs.get('timeout', None):
                    _timeout = float(kwargs.get('timeout'))
                rs = async_result.get(timeout=_timeout)
                async_result.forget()
                return rs
        except Exception as e:
            LOGGER.error(f'处理异常: task_name={task_name}, kwargs={task_params}, e={e}')
            raise e
    def get_task_by_name(self, task_name: str):
        """Build a Celery Task bound to this client for the given task name.

        NOTE(review): a fresh Task object is created on every call — there is
        no caching despite what the original comment claimed.
        """
        task = Task()
        task.bind(self.celery_client)
        task.name = task_name
        return task
    def _bind_callback(self, task_name: str, async_result: AsyncResult, callback: Callback):
        """Register the callback in redis so the result monitor can find it."""
        rs_key = get_result_key(app_code=self.app_code, task_name=task_name, task_id=async_result.id)
        self.rds_client.set(rs_key, callback.json(), ex=self.default_expire_seconds)
    def build_callback(self, callback_func: Union[str, Callable] = None, callback_data: Union[str, Dict, List] = None, app_code: str = None, tenant_code: str = None):
        """
        Build a Callback from a function (or its string reference) plus
        optional passthrough data.

        Fix: a None callback_func previously crashed inside obj_to_ref();
        it now simply yields callback_func=None.
        """
        callback = Callback()
        callback.app_code = app_code or self.app_code
        callback.tenant_code = tenant_code or None
        callback.callback_data = callback_data or None
        if callback_func is None:
            callback.callback_func = None
        elif isinstance(callback_func, str):
            callback.callback_func = callback_func
        else:
            callback.callback_func = obj_to_ref(callback_func)
        return callback
class PredictClientHolder(object):
    """Holds a lazily-initialized, process-wide default PredictClient."""
    __default_instance = None
    @staticmethod
    def init_default_instance(celery_broker_url: str, celery_result_backend: str, monitor_redis_uri: str, app_code: str):
        """Create the default client once; later calls are no-ops.

        Fix: the startup message was previously logged at ERROR level and on
        every call even when initialization was skipped.
        """
        if not PredictClientHolder.__default_instance:
            PredictClientHolder.__default_instance = PredictClient(celery_broker_url, celery_result_backend, monitor_redis_uri, app_code)
            LOGGER.info(f'PredictClientHolder初始化默认实例: monitor_redis={monitor_redis_uri}, broker_url={celery_broker_url}, result_backend={celery_result_backend}')
    @staticmethod
    def get_default_instance():
        """Return the default client.

        :raises BizException: when init_default_instance was never called
        """
        if not PredictClientHolder.__default_instance:
            raise BizException(f'默认实例尚未初始化,请先初始化实例!')
        return PredictClientHolder.__default_instance
import json
import traceback
from threading import Thread
from celery import Celery, exceptions
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redis import Redis
from .common import thread_pool
from .common.keys import get_result_key_filter, get_task_id_from_key
from .common.utils import ref_to_obj, singleton
LOGGER = get_task_logger(__name__)
@singleton
class PredictRedisResultMonitor(object):
    """
    [Singleton] Polls redis for finished Celery predict tasks and dispatches
    their registered callbacks. Only one instance should run per application.

    Fix: the retry path previously re-entered `_watch()` recursively from a
    `finally` block, growing the call stack on every failure; it now retries
    in an iterative loop. Unused exception bindings were also removed.
    """
    def __init__(self, celery_broker_url: str, celery_result_backend: str, monitor_redis_uri: str, app_code: str):
        self.broker_url = celery_broker_url
        self.backend_uri = celery_result_backend
        self.monitor_redis_uri = monitor_redis_uri
        self.app_code = app_code
        # count of watch-loop failures (logging only)
        self.error_retry = 0
        self.celery_client = Celery(
            'zcbot-predict-monitor',
            broker=self.broker_url,
            backend=self.backend_uri,
            task_acks_late=True
        )
        self.rds_client = Redis.from_url(url=monitor_redis_uri, decode_responses=True)
    def start(self):
        """Start the monitor loop on a background thread."""
        Thread(target=self._watch, name='predict-monitor').start()
        LOGGER.info(f'启动Predict结果监听服务...')
    def _watch(self):
        """Poll result keys for this app and fire callbacks for finished tasks."""
        # iterative retry loop (was tail recursion from a finally block)
        while True:
            try:
                # current design: one result monitor per app
                # NOTE(review): tight polling loop with no sleep — consider a
                # short delay; left unchanged to preserve behavior
                while True:
                    filter_key = get_result_key_filter(app_code=self.app_code)
                    keys = self.rds_client.keys(filter_key)
                    for key in keys or []:
                        task_id = get_task_id_from_key(key)
                        if not task_id:
                            continue
                        async_result = AsyncResult(id=task_id, app=self.celery_client)
                        if async_result.successful():
                            # finished: run the registered callback (if any) and clean up
                            self._dispatch_callback(key, task_id, async_result)
                        elif async_result.failed():
                            # failed: drop the task and its callback entry
                            self._remove_task(key, async_result)
                            LOGGER.error(f'失败: task={task_id}')
            except Exception:
                LOGGER.error(f'监控异常: {traceback.format_exc()}')
                self.error_retry = self.error_retry + 1
                LOGGER.info(f'监控异常后重试: {self.error_retry}')
    def _dispatch_callback(self, key, task_id, async_result):
        """Fetch a finished task's result and invoke its registered callback."""
        try:
            result = async_result.get()
            callback = json.loads(self.rds_client.get(key))
            callback_func = callback.get('callback_func', None)
            callback_data = callback.get('callback_data', None)
            if callback_func:
                # TODO 兼容线程与协程
                func = ref_to_obj(callback_func)
                thread_pool.submit(func, result, callback_data)
                LOGGER.info(f'回调执行: callback_func={callback_func}, callback_data={callback_data}')
            else:
                LOGGER.warning(f'无回调: task={task_id}')
            # clean up regardless of whether a callback existed
            self._remove_task(key, async_result)
        except exceptions.TimeoutError:
            LOGGER.error(f'异常: 结果获取超时 task={task_id}, e={traceback.format_exc()}')
        except LookupError:
            LOGGER.error(f'异常: 回调函数反序列化异常 task={task_id}, e={traceback.format_exc()}')
        except Exception:
            LOGGER.error(f'异常: 结果处理异常 task={task_id}, e={traceback.format_exc()}')
    def _remove_task(self, key, async_result):
        """Forget the Celery result and delete the redis callback entry."""
        async_result.forget()
        self.rds_client.delete(key)
import asyncio
import functools
import typing
from concurrent.futures import Future, ThreadPoolExecutor
# generic result type for the helpers below
T = typing.TypeVar('T')
# worker pool starts in main process
# module-level singleton; created lazily by get_worker_pool()
_PREDICT_TASK_POOL: typing.Optional[ThreadPoolExecutor] = None
def get_worker_pool() -> ThreadPoolExecutor:
    """
    Lazily create and return the module-wide worker pool.

    The pool is a process-level singleton: the first call creates it and
    every later call returns the same ThreadPoolExecutor. Intended to be
    called from the main process; tasks are then fanned out to the workers,
    and the pool itself is held at background for the process lifetime.

    :return: the shared worker pool instance
    """
    global _PREDICT_TASK_POOL
    if _PREDICT_TASK_POOL is None:
        _PREDICT_TASK_POOL = ThreadPoolExecutor(thread_name_prefix='consumer')
    return _PREDICT_TASK_POOL
def get_pool_size() -> int:
    """
    Return the number of work items currently queued (not yet running) in
    the worker pool, or 0 when the pool has not been created yet.

    NOTE(review): reads the executor's private `_work_queue`, so this counts
    pending tasks rather than worker threads — confirm callers expect that.
    :return: pending task count
    """
    global _PREDICT_TASK_POOL
    if _PREDICT_TASK_POOL:
        return _PREDICT_TASK_POOL._work_queue.qsize()
    return 0
def submit(func: typing.Callable[..., T],
           *args: typing.Any,
           **kwargs: typing.Any) -> Future:
    """
    Submit a single task onto the shared worker pool without waiting.

    Fix: keyword arguments were previously accepted but silently dropped;
    they are now forwarded to *func*.

    :param func: callable to run in a worker thread
    :param args: positional args for *func*
    :param kwargs: keyword args for *func*
    :return: a Future resolving to the function's result
    """
    future = get_worker_pool().submit(func, *args, **kwargs)
    return future
async def run_in_worker(func: typing.Callable[..., T],
                        *args: typing.Any,
                        **kwargs: typing.Any) -> T:
    """
    Run a single blocking task in the worker pool from async code.

    Uses asyncio.get_running_loop() instead of the deprecated
    get_event_loop() (deprecated inside coroutines since Python 3.10).
    Keyword args are bound via functools.partial because run_in_executor
    only forwards positional args.

    :param func: blocking callable
    :param args: positional args for *func*
    :param kwargs: keyword args for *func*
    :return: result of the function
    """
    loop = asyncio.get_running_loop()
    bound = functools.partial(func, **kwargs)
    worker_pool = get_worker_pool()
    return await loop.run_in_executor(worker_pool, bound, *args)
import time
import six
from functools import wraps, partial
from inspect import isclass, ismethod
def singleton(cls):
    """Class decorator: cache and reuse a single instance per decorated class.

    The first call constructs the instance with the given arguments; later
    calls ignore their arguments and return the cached instance.
    """
    cache = {}

    @wraps(cls)
    def wrapper(*args, **kargs):
        if cls not in cache:
            cache[cls] = cls(*args, **kargs)
        return cache[cls]
    return wrapper
def get_callable_name(func):
    """
    Returns the best available display name for the given function/callable.

    Resolution order: __qualname__ (Python 3.3+), then class-qualified names
    for bound/unbound methods, then the bare __name__, then the class name
    of a callable instance.
    :rtype: str
    :raises TypeError: when the object is not callable
    """
    # the easy case (on Python 3.3+)
    if hasattr(func, '__qualname__'):
        return func.__qualname__
    # class methods, bound and unbound methods
    # (im_self / im_class are Python 2 attributes, kept for legacy objects)
    f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
    if f_self and hasattr(func, '__name__'):
        f_class = f_self if isclass(f_self) else f_self.__class__
    else:
        f_class = getattr(func, 'im_class', None)
    if f_class and hasattr(func, '__name__'):
        return '%s.%s' % (f_class.__name__, func.__name__)
    # class or class instance
    if hasattr(func, '__call__'):
        # class
        if hasattr(func, '__name__'):
            return func.__name__
        # instance of a class with a __call__ method
        return func.__class__.__name__
    raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
def obj_to_ref(obj):
    """
    Returns a 'module:qualname' reference string for the given callable,
    suitable for later resolution by ref_to_obj().
    :rtype: str
    :raises TypeError: if the given object is not callable
    :raises ValueError: if the given object is a :class:`~functools.partial`,
        lambda or a nested function (none of which can be re-imported by name)
    """
    if isinstance(obj, partial):
        raise ValueError('Cannot create a reference to a partial()')
    name = get_callable_name(obj)
    if '<lambda>' in name:
        raise ValueError('Cannot create a reference to a lambda')
    if '<locals>' in name:
        raise ValueError('Cannot create a reference to a nested function')
    if ismethod(obj):
        # im_self / im_class are Python 2 attributes; on Python 3 methods
        # fall through to obj.__module__
        if hasattr(obj, 'im_self') and obj.im_self:
            # bound method
            module = obj.im_self.__module__
        elif hasattr(obj, 'im_class') and obj.im_class:
            # unbound method
            module = obj.im_class.__module__
        else:
            module = obj.__module__
    else:
        module = obj.__module__
    return '%s:%s' % (module, name)
def ref_to_obj(ref):
    """
    Returns the object pointed to by ``ref`` (a 'module:qualname' string
    as produced by obj_to_ref()).
    :type ref: str
    :raises TypeError: when ref is not a string
    :raises ValueError: when ref lacks the 'module:attr' separator
    :raises LookupError: when the module import or attribute lookup fails
    """
    if not isinstance(ref, six.string_types):
        raise TypeError('References must be strings')
    if ':' not in ref:
        raise ValueError('Invalid reference')
    modulename, rest = ref.split(':', 1)
    try:
        obj = __import__(modulename, fromlist=[rest])
    except ImportError:
        raise LookupError('Error resolving reference %s: could not import module' % ref)
    try:
        # walk dotted attribute path, e.g. 'Class.method'
        for name in rest.split('.'):
            obj = getattr(obj, name)
        return obj
    except Exception:
        raise LookupError('Error resolving reference %s: error looking up object' % ref)
def time_stat(func):
    """
    Decorator that prints the wall-clock duration of each call.

    Fixes: the wrapper now forwards *args/**kwargs and returns the wrapped
    function's result (both were previously dropped, so decorating any
    function with parameters crashed), and uses @wraps to preserve the
    function's metadata.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        print(f'开始计时')
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        print(f'耗时: {end - start}秒')
        return result
    return inner
from enum import IntEnum
from typing import Dict, Any, List
"""
Basic models
"""
from typing import Tuple, Any
from pydantic import BaseModel
# basic function call return value, with only success flag and err msg:
# a (success, error_message) pair returned by simple boolean-style calls
PCallRet = Tuple[bool, str]
class ExecRet(BaseModel):
    """
    basic ret value for executing a specific task: a success flag with an
    optional message and an optional payload
    """
    # whether the task succeeded
    success: bool = True
    # human-readable message (optional)
    msg: str = None
    # arbitrary payload (optional)
    data: Any = None
    @classmethod
    def ok(cls, **kwargs):
        """Build a success result; extra fields pass through as kwargs."""
        return cls(success=True, **kwargs)
    @classmethod
    def err(cls, **kwargs):
        """Build a failure result; extra fields pass through as kwargs."""
        return cls(success=False, **kwargs)
class ErrCode(IntEnum):
    """
    Business status codes carried in response bodies.
    """
    # operation succeeded
    SUCCESS = 0
    # generic failure
    ERROR = -1
class Resp(ExecRet):
    """
    Standard response body envelope: success flag, message, payload and a
    business status code (HTTP status stays 200; `code` carries the result).
    """
    code: IntEnum = ErrCode.SUCCESS
    @classmethod
    def ok(cls, data: Any = None, msg: str = None) -> Dict[str, Any]:
        """
        generate success response body
        :param data: data
        :param msg: msg
        :return: json dict resp body (None fields omitted)
        """
        return cls(
            success=True,
            data=data,
            msg=msg,
            code=ErrCode.SUCCESS
        ).dict(exclude_none=True)
    @classmethod
    def err(cls, data: Any = None, msg: str = None, code: IntEnum = ErrCode.ERROR) -> Dict[str, Any]:
        """
        generate error response body with external status code 200
        :param data: data
        :param msg: msg
        :param code: user defined (not http) status code; SUCCESS is coerced to ERROR
        :return: json dict resp body
        """
        if code == ErrCode.SUCCESS:
            code = ErrCode.ERROR
        return cls(
            success=False,
            data=data,
            msg=msg,
            code=code
        ).dict(exclude_none=True)
    @classmethod
    def page(cls, rows: List = None, total_count: int = 0, msg: str = None) -> Dict[str, Any]:
        """
        generate page response body

        Fix: the mutable default argument `rows=[]` was replaced with None
        (shared-mutable-default pitfall); omitted rows still serialize as [].
        :param rows: page records
        :param total_count: total record count
        :param msg: msg
        :return: json dict resp body
        """
        return cls(
            success=True,
            data={'list': rows if rows is not None else [], 'total': total_count},
            msg=msg,
            code=ErrCode.SUCCESS
        ).dict(exclude_none=True)
    @staticmethod
    def is_success(result: Dict) -> bool:
        """
        Whether *result* represents a successful response.
        :param result: response dict
        :return: True when result is truthy and its `success` flag is set
        """
        if not result or not result.get('success', False):
            return False
        return True
    @staticmethod
    def get_data(result: dict, force: bool = False) -> Any:
        """
        Extract the `data` field from a response dict.
        :param result: response dict
        :param force: when True, skip the success check
        :return: data payload, or None when the response is unsuccessful
        """
        if not force:
            if not Resp.is_success(result):
                return None
        return result.get('data', None)
from typing import Dict, List, Union
from .base import BaseService
from ..model.callback import Callback
from ..model.param import TextParam
def _params_convert(task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]]):
    """
    Normalize prediction inputs into a list of plain dicts for the task payload.

    Accepts a single item or a list of items; each item may be a raw string,
    a TextParam model, or an already-shaped dict. Strings are wrapped as
    {'text': value}, TextParam instances are serialized via .dict(), and
    dicts pass through unchanged.

    Fix: inside the list branch, the element type was previously tested
    against the whole `task_params` list instead of the element itself, so
    string items were appended raw instead of being wrapped as {'text': ...}.
    """
    def _normalize(item):
        # normalize one item into its dict form
        if isinstance(item, TextParam):
            return item.dict()
        if isinstance(item, str):
            return {'text': item}
        return item

    if isinstance(task_params, list):
        return [_normalize(item) for item in task_params]
    return [_normalize(task_params)]
class StaplesSkuTagService(BaseService):
    """Predict services for Staples SKU tagging (catalog levels and brand).

    Each method normalizes the input texts, then delegates to the shared
    client; passing a `callback` makes the call asynchronous.
    """
    def predict_catalog1(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        # level-1 catalog classification
        return self.get_client().apply(task_name='sku_tag.staples.catalog1', task_params={'text_list': _params_convert(task_params), 'threshold': threshold}, callback=callback, **kwargs)
    def predict_catalog4(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        # level-4 catalog classification
        return self.get_client().apply(task_name='sku_tag.staples.catalog4', task_params={'text_list': _params_convert(task_params), 'threshold': threshold}, callback=callback, **kwargs)
    def predict_catalog6(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        # level-6 catalog classification
        return self.get_client().apply(task_name='sku_tag.staples.catalog6', task_params={'text_list': _params_convert(task_params), 'threshold': threshold}, callback=callback, **kwargs)
    def predict_brand(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        # brand recognition
        return self.get_client().apply(task_name='sku_tag.staples.brand', task_params={'text_list': _params_convert(task_params), 'threshold': threshold}, callback=callback, **kwargs)
class JslinkSkuTagService(BaseService):
    """Prediction tasks for the Jslink catalogue/brand tagging models."""

    def predict_catalog1(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the level-1 catalogue for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.jslink.catalog1', task_params=payload, callback=callback, **kwargs)

    def predict_catalog4(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the level-4 catalogue for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.jslink.catalog4', task_params=payload, callback=callback, **kwargs)

    def predict_brand(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the brand for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.jslink.brand', task_params=payload, callback=callback, **kwargs)
class ZjmiSkuTagService(BaseService):
    """Prediction tasks for the Zjmi catalogue/brand tagging models."""

    def predict_catalog1(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the level-1 catalogue for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.zjmi.catalog1', task_params=payload, callback=callback, **kwargs)

    def predict_catalog3(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the level-3 catalogue for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.zjmi.catalog3', task_params=payload, callback=callback, **kwargs)

    def predict_brand(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the brand for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.zjmi.brand', task_params=payload, callback=callback, **kwargs)
class XhgjSkuTagService(BaseService):
    """Prediction tasks for the Xhgj catalogue tagging model."""

    def predict_catalog4(self, task_params: Union[str, Dict, TextParam, List[Union[str, TextParam, Dict]]] = None, threshold: float = 0.7, callback: Callback = None, **kwargs):
        """Predict the level-4 catalogue for the given texts."""
        payload = {'text_list': _params_convert(task_params), 'threshold': threshold}
        return self.get_client().apply(task_name='sku_tag.xhgj.catalog4', task_params=payload, callback=callback, **kwargs)
import asyncio
import functools
import typing
from concurrent.futures import ThreadPoolExecutor
# Generic return type used by run_in_worker().
T = typing.TypeVar('T')
# Process-wide worker pool; created lazily in the main process and kept alive.
_WORKER_POOL: typing.Optional[ThreadPoolExecutor] = None
# Task pool launched inside workers; released once its tasks are finished.
_TASK_POOL: typing.Optional[ThreadPoolExecutor] = None
def get_worker_pool() -> ThreadPoolExecutor:
    """
    Return the process-wide worker pool, creating it lazily on first use.

    Should be executed in the main process.  Each worker may in turn run
    several tasks through a task pool (see get_task_pool):

        main process -> worker 1 -> task 1..3
                        worker 2 -> task 1..3
                        worker 3 -> task 1..3

    The task pool should be released once its tasks are finished, while the
    worker pool is always held in the background.

    :return: worker pool instance
    """
    global _WORKER_POOL
    if _WORKER_POOL is None:
        _WORKER_POOL = ThreadPoolExecutor()
    return _WORKER_POOL
def get_task_pool() -> ThreadPoolExecutor:
    """
    Return the task pool, creating it lazily on first use.

    Should be executed inside worker logic; release it with
    release_task_pool() once the tasks are finished.

    :return: task pool instance
    """
    global _TASK_POOL
    if _TASK_POOL is None:
        _TASK_POOL = ThreadPoolExecutor()
    return _TASK_POOL
def release_task_pool():
    """
    Shut down and discard the task pool so worker memory is reclaimed.

    :return: None
    """
    global _TASK_POOL
    if _TASK_POOL is not None:
        _TASK_POOL.shutdown()
        _TASK_POOL = None
async def run_in_worker(func: typing.Callable[..., T],
                        *args: typing.Any,
                        **kwargs: typing.Any) -> T:
    """
    Run a single blocking task in the worker pool.

    :param func: function
    :param args: positional args
    :param kwargs: keyword args
    :return: result of the function
    """
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() here has been deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    # Bind keyword args up front: run_in_executor forwards positionals only.
    bound = functools.partial(func, **kwargs)
    return await loop.run_in_executor(get_worker_pool(), bound, *args)
import logging
from typing import Callable, Dict, List, Union
from celery import Celery, Task
from celery.result import AsyncResult
from redis import Redis
from .exceptions import BizException
from .callback import Callback
from .keys import get_result_key
LOGGER = logging.getLogger(__name__)
class CeleryClient(object):
    """Thin wrapper around a Celery application for dispatching named tasks.

    Results of asynchronous calls are matched back to callers through a
    callback record stored in Redis under a per-task result key.
    """

    def __init__(self, celery_broker_url: str, celery_result_backend: str, client_redis_uri: str, app_code: str):
        """
        :param celery_broker_url: Celery broker URL
        :param celery_result_backend: Celery result-backend URI
        :param client_redis_uri: Redis URI used to store callback records
        :param app_code: application code used to namespace result keys
        """
        self.broker_url = celery_broker_url
        self.backend_uri = celery_result_backend
        self.client_redis_uri = client_redis_uri
        self.app_code = app_code
        # Callback records expire after 12 hours.
        self.default_expire_seconds = 12 * 3600
        self.celery_client = Celery(
            'zcbot-service',
            broker=self.broker_url,
            backend=self.backend_uri,
            task_acks_late=True
        )
        self.rds_client = Redis.from_url(url=client_redis_uri, decode_responses=True)
        # Cache of Task stubs keyed by task name.
        self.task_map = dict()

    def _get_task(self, task_name: str) -> Task:
        """Return a cached Celery Task stub bound to this client, creating it
        on first use (deduplicates the stub set-up shared by apply_async and
        sync_get)."""
        task = self.task_map.get(task_name)
        if not task:
            task = Task()
            task.bind(self.celery_client)
            task.name = task_name
            self.task_map[task_name] = task
        return task

    # Register the asynchronous-result handler for a dispatched task.
    def bind_callback(self, task_name: str, async_result: AsyncResult, callback_func: Callable = None, callback_data: dict = None):
        rs_key = get_result_key(app_code=self.app_code, task_name=task_name, task_id=async_result.id)
        callback = Callback(callback_func, callback_data)
        self.rds_client.set(rs_key, callback.as_json(), ex=self.default_expire_seconds)

    # Asynchronous call: dispatch the task and register the callback.
    def apply_async(self, task_name: str, args: list = None, callback_func: Callable = None, callback_data: dict = None):
        LOGGER.info(f'[服务]异步调用 task={task_name}')
        task = self._get_task(task_name)
        _args = args or []
        if not isinstance(_args, list):
            _args = [_args]
        try:
            async_result = task.apply_async(_args)
            self.bind_callback(task_name, async_result, callback_func, callback_data)
        except Exception as e:
            LOGGER.error(f'处理异常:task_name={task_name}, args={args}, callback={callback_data}, e={e}')
            raise e

    # Synchronous call: dispatch the task and block for its result.
    def sync_get(self, task_name: str, args: list = None, timeout: float = None):
        LOGGER.info(f'[服务]同步调用 task={task_name}')
        task = self._get_task(task_name)
        _args = args or []
        if not isinstance(_args, list):
            _args = [_args]
        try:
            async_result = task.apply_async(_args)
            return async_result.get(timeout=timeout)
        except Exception as e:
            LOGGER.error(f'处理异常:task_name={task_name}, args={args}, e={e}')
            raise e

    def call(self, task_name: str, args: Union[List, Dict], is_async: bool = True, callback_func: Callable = None, callback_data: dict = None) -> dict:
        """
        Compatibility entry point: dispatch asynchronously (callback required)
        or synchronously (result returned).
        """
        _args = [args]
        if is_async:
            # 异步
            if not callback_func:
                # BUG FIX: the message previously labelled the flag 'is_sync'.
                raise BizException(f'异步调用必须指定回调方法 task_name={task_name}, is_async={is_async}')
            result = self.apply_async(task_name=task_name, args=_args, callback_func=callback_func, callback_data=callback_data)
        else:
            # 同步
            result = self.sync_get(task_name=task_name, args=_args)
        return result
class CeleryApi(CeleryClient):
    """Convenience wrappers for the concrete zcbot service tasks."""

    def get_tax_by_baiwang(self, args: list = None, callback_func: Callable = None, callback_data: dict = None):
        """Asynchronously query tax information via the Baiwang service."""
        return self.apply_async('tax.baiwang', args, callback_func, callback_data)

    def search_same_sku(self, keyword: str = None, platform: str = None, page: int = 1) -> dict:
        """Search same-style SKUs on an arbitrary platform."""
        return self.sync_get(task_name=f"same_sku.{platform}", args=[keyword, page])

    def search_same_sku_jd(self, keyword: str, page: int = 1) -> dict:
        """
        Same-style SKU search on the JD PC site.
        """
        return self.sync_get(task_name="same_sku.jd_pc", args=[keyword, page])

    def search_same_sku_sn(self, keyword: str, page: int = 1) -> dict:
        """
        Same-style SKU search on the Suning PC site.
        """
        return self.sync_get(task_name="same_sku.sn_pc", args=[keyword, page])

    def search_same_sku_mmbpc(self, keyword: str, page: int = 1) -> dict:
        """
        Same-style SKU search on the manmanbuy PC site.
        """
        return self.sync_get(task_name="same_sku.mmb_pc", args=[keyword, page])

    def search_same_sku_mmbm(self, keyword: str, page: int = 1) -> dict:
        """
        Same-style SKU search on the manmanbuy mobile site.
        """
        return self.sync_get(task_name="same_sku.mmb_m", args=[keyword, page])

    def sku_extract_by_chatgpt(self, item: Dict, is_async: bool = False, callback_func: Callable = None, callback_data: dict = None):
        """
        Extract SKU information from a single item via ChatGPT.
        """
        return self.call(task_name="sku_extract.chat_gpt", args=[item], is_async=is_async, callback_func=callback_func, callback_data=callback_data)

    def batch_sku_extract_by_chatgpt(self, item_list: List[Dict], is_async: bool = False, callback_func: Callable = None, callback_data: dict = None):
        """
        Extract SKU information from a batch of items via ChatGPT.
        """
        return self.call(task_name="sku_extract.chat_gpt_batch", args=[item_list], is_async=is_async, callback_func=callback_func, callback_data=callback_data)
from functools import wraps, partial
from inspect import isclass, ismethod
import six
def singleton(cls):
    """Class decorator that caches and reuses a single instance per class.

    The constructor arguments of the first call win; later calls ignore
    theirs and return the cached instance.
    """
    _instances = {}

    @wraps(cls)
    def _wrapper(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]
    return _wrapper
def get_callable_name(func):
    """
    Return the best available display name for the given function/callable.

    :rtype: str
    :raises TypeError: if no name can be determined
    """
    # Easy case: every function/method has a qualified name on Python 3.3+.
    if hasattr(func, '__qualname__'):
        return func.__qualname__
    # Bound/unbound methods (legacy Python 2 attributes kept for parity).
    bound_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
    if bound_self and hasattr(func, '__name__'):
        owner = bound_self if isclass(bound_self) else bound_self.__class__
    else:
        owner = getattr(func, 'im_class', None)
    if owner and hasattr(func, '__name__'):
        return '%s.%s' % (owner.__name__, func.__name__)
    # Plain callables: a class, or an instance with a __call__ method.
    if hasattr(func, '__call__'):
        if hasattr(func, '__name__'):
            return func.__name__
        return func.__class__.__name__
    raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
def obj_to_ref(obj):
    """
    Return the ``module:qualname`` path to the given callable.

    :rtype: str
    :raises TypeError: if the given object is not callable
    :raises ValueError: for a :class:`~functools.partial`, lambda or nested
        function, none of which can be referenced by path
    """
    if isinstance(obj, partial):
        raise ValueError('Cannot create a reference to a partial()')
    name = get_callable_name(obj)
    if '<lambda>' in name:
        raise ValueError('Cannot create a reference to a lambda')
    if '<locals>' in name:
        raise ValueError('Cannot create a reference to a nested function')
    if not ismethod(obj):
        module = obj.__module__
    elif getattr(obj, 'im_self', None):
        # Legacy Python 2 bound method.
        module = obj.im_self.__module__
    elif getattr(obj, 'im_class', None):
        # Legacy Python 2 unbound method.
        module = obj.im_class.__module__
    else:
        module = obj.__module__
    return '%s:%s' % (module, name)
def ref_to_obj(ref):
    """
    Return the object pointed to by ``ref`` (a ``module:attr.path`` string).

    :type ref: str
    :raises TypeError: if ``ref`` is not a string
    :raises ValueError: if ``ref`` lacks the ``module:attr`` separator
    :raises LookupError: if the module or attribute cannot be resolved
    """
    # six.string_types is unnecessary on Python 3 -- plain str suffices
    # (the package already uses Python-3-only f-strings elsewhere).
    if not isinstance(ref, str):
        raise TypeError('References must be strings')
    if ':' not in ref:
        raise ValueError('Invalid reference')
    modulename, rest = ref.split(':', 1)
    try:
        obj = __import__(modulename, fromlist=[rest])
    except ImportError:
        raise LookupError('Error resolving reference %s: could not import module' % ref)
    try:
        for name in rest.split('.'):
            obj = getattr(obj, name)
        return obj
    except Exception:
        raise LookupError('Error resolving reference %s: error looking up object' % ref)
import re
import logging
import urllib.parse as parser
from typing import List, Union
from pymongo import MongoClient
from .constant import ZCBOT_PUBLIC_MONGO_URL, ZCBOT_PUBLIC_MONGO_DATABASE, ZCBOT_PUBLIC_MONGO_COLLECTION_SPIDERS, ZCBOT_PUBLIC_MONGO_COLLECTION_RULES
from .parser import _parse_sku_by_param, _parse_sku_by_path
from .utils import singleton, clean_url
LOGGER = logging.getLogger(__name__)
@singleton
class SpiderUrlParser(object):
    """
    Parses product SKU codes from URLs of common e-commerce platforms,
    using rules loaded from MongoDB for the configured spider(s).
    """

    def __init__(self, spider_id: Union[str, List], mongo_url: str = None, mongo_database: str = None, mongo_collection_spiders: str = None, mongo_collection_rules: str = None):
        """
        :param spider_id: spider id (or list of ids) whose rules to load
        :param mongo_url: MongoDB connection URL (defaults to the public one)
        :param mongo_database: database name
        :param mongo_collection_spiders: spiders collection name
        :param mongo_collection_rules: rules collection name
        """
        self.spider_id = spider_id
        self.mongo_url = mongo_url or ZCBOT_PUBLIC_MONGO_URL
        self.mongo_database = mongo_database or ZCBOT_PUBLIC_MONGO_DATABASE
        self.mongo_collection_spiders = mongo_collection_spiders or ZCBOT_PUBLIC_MONGO_COLLECTION_SPIDERS
        self.mongo_collection_rules = mongo_collection_rules or ZCBOT_PUBLIC_MONGO_COLLECTION_RULES
        # host -> {'plat_code', 'sku_param', 'patterns'}
        self.rule_map = dict()
        self._init_rule_map()
        LOGGER.info(f'===================')
        LOGGER.info(f'有效链接解析规则: {len(self.rule_map)}条')
        LOGGER.info(f'===================')

    def parse(self, url) -> Union[str, tuple]:
        """Extract the SKU id from *url*; returns '' when no rule matches."""
        ec_sku_id = ''
        _url = clean_url(url)
        if _url:
            host = parser.urlparse(_url).hostname
            rule = self.rule_map.get(host)
            if host and rule:
                # Try query-parameter rules first, then path regex rules.
                sku_param = rule.get('sku_param', [])
                if sku_param:
                    ec_sku_id = _parse_sku_by_param(_url, sku_param, None)
                patterns = rule.get('patterns', [])
                if patterns and not ec_sku_id:
                    ec_sku_id = _parse_sku_by_path(_url, patterns, None)
        return ec_sku_id

    def _init_rule_map(self):
        """Load parsing rules for the configured spider(s) from MongoDB."""
        client = MongoClient(self.mongo_url)
        try:
            if isinstance(self.spider_id, list):
                match = {'$in': self.spider_id}
            else:
                match = self.spider_id
            rs = client.get_database(self.mongo_database).get_collection(self.mongo_collection_spiders).aggregate([
                {'$lookup': {'from': self.mongo_collection_rules, 'localField': 'patterns', 'foreignField': '_id', 'as': 'rules'}},
                {'$match': {'_id': match}},
                {'$project': {'rules': 1}}
            ])
            rs = list(rs)
            if rs:
                rows = rs[0].get('rules')
                for row in rows:
                    # Compile each regex defensively so a single bad rule does
                    # not break the whole rule set.
                    patterns = []
                    for regex in row.get('regex', []):
                        try:
                            patterns.append(re.compile(regex))
                        except re.error:
                            LOGGER.error(f'错误规则: regex={regex}, row={row}')
                    # BUG FIX: reuse the defensively compiled patterns instead
                    # of recompiling them via a comprehension without error
                    # handling, which discarded the try/except above and could
                    # raise on a malformed rule.
                    self.rule_map[row.get('_id')] = {
                        'plat_code': row.get('plat_code'),
                        'sku_param': row.get('params', []),
                        'patterns': patterns,
                    }
        except Exception as e:
            # Log through the module logger instead of a bare print().
            LOGGER.error(f'加载链接解析规则失败: {e}')
        finally:
            client.close()
import logging
import re
from pymongo import MongoClient
from .constant import ZCBOT_PUBLIC_MONGO_URL, ZCBOT_PUBLIC_MONGO_DATABASE, ZCBOT_PUBLIC_MONGO_COLLECTION_PLATFORMS, ZCBOT_PUBLIC_MONGO_COLLECTION_RULES
from .utils import singleton
LOGGER = logging.getLogger(__name__)
@singleton
class RuleHolder(object):
    """
    Holds SKU-parsing rules for common e-commerce platforms, loaded from
    MongoDB and refreshable at runtime via :meth:`reload`.
    """
    # host -> {'plat_code', 'plat_name', 'sku_param', 'patterns'}
    rule_map = {}

    def __init__(self, mongo_url: str = None, mongo_database: str = None, mongo_collection_platforms: str = None, mongo_collection_rules: str = None):
        """
        :param mongo_url: MongoDB connection URL (defaults to the public one)
        :param mongo_database: database name
        :param mongo_collection_platforms: platforms collection name
        :param mongo_collection_rules: rules collection name
        """
        self.mongo_url = mongo_url or ZCBOT_PUBLIC_MONGO_URL
        self.mongo_database = mongo_database or ZCBOT_PUBLIC_MONGO_DATABASE
        self.mongo_collection_platforms = mongo_collection_platforms or ZCBOT_PUBLIC_MONGO_COLLECTION_PLATFORMS
        self.mongo_collection_rules = mongo_collection_rules or ZCBOT_PUBLIC_MONGO_COLLECTION_RULES
        self.reload()

    def get_rule(self, host):
        """Return the rule dict for *host*, or None when unknown."""
        return self.rule_map.get(host)

    def reload(self):
        """
        Reload the latest rules; may be hooked to a periodic job.

        :return: None
        """
        plats = self._fetch_platforms()
        plat_map = {}
        for plat in plats:
            plat_map[plat.get('_id')] = plat.get('name')
        rows = self._fetch_url_rules()
        for row in rows:
            # Compile each regex defensively so a single bad rule does not
            # break the whole rule set.
            patterns = []
            for regex in row.get('regex', []):
                try:
                    patterns.append(re.compile(regex))
                except re.error:
                    LOGGER.error(f'错误规则: regex={regex}, row={row}')
            plat_code = row.get('plat_code')
            plat_name = row.get('plat_name', None) or plat_map.get(plat_code) or plat_code
            # BUG FIX: reuse the defensively compiled patterns instead of
            # recompiling them via a comprehension without error handling,
            # which discarded the try/except above and could raise on a
            # malformed rule.
            self.rule_map[row.get('_id')] = {
                'plat_code': plat_code,
                'plat_name': plat_name,
                'sku_param': row.get('params', []),
                'patterns': patterns,
            }
        LOGGER.info(f'更新链接分拣规则: {len(self.rule_map)}条')

    def _fetch_platforms(self):
        """Fetch platform documents; returns [] on any error."""
        try:
            client = MongoClient(self.mongo_url)
            rs = client.get_database(self.mongo_database).get_collection(self.mongo_collection_platforms).find()
            rows = list(rs)
            client.close()
            return rows
        except Exception as e:
            # Log through the module logger instead of a bare print().
            LOGGER.error(f'加载平台信息失败: {e}')
            return []

    def _fetch_url_rules(self):
        """Fetch URL-rule documents; returns [] on any error."""
        try:
            client = MongoClient(self.mongo_url)
            rs = client.get_database(self.mongo_database).get_collection(self.mongo_collection_rules).find()
            rows = list(rs)
            client.close()
            return rows
        except Exception as e:
            # Log through the module logger instead of a bare print().
            LOGGER.error(f'加载链接规则失败: {e}')
            return []
import logging
import urllib.parse as parser
from typing import Union
from pydantic import BaseModel
from .rule import RuleHolder
from .utils import clean_url
LOGGER = logging.getLogger(__name__)
# Module-level singleton; NOTE: RuleHolder() loads its rules from MongoDB in
# __init__, so importing this module performs network I/O.
rule_holder = RuleHolder()
class UrlModel(BaseModel):
    """
    Common base data model for a parsed product URL.
    """
    # Globally unique link serial number,
    # e.g. "jd:5129155" or "tmall:576748721316,3985068128611"
    link_sn: str = None
    # Platform code, e.g. "jd"
    plat_code: str = None
    # Platform name, e.g. "京东" (JD)
    plat_name: str = None
    # SKU id(s) parsed from the link (comma separated for multi-sku links),
    # e.g. "5129155" or "576748721316,3985068128611"
    ec_sku_id: Union[str, tuple] = None
def parse_url(url) -> Union[UrlModel, None]:
    """
    Parse a product URL.

    Returns a populated UrlModel for a recognised link, otherwise None.
    """
    plat_code, plat_name, ec_sku_id = _match_url(url)
    if not plat_code or not ec_sku_id:
        return None
    return UrlModel(
        link_sn=_build_link_sn(plat_code, ec_sku_id),
        plat_code=plat_code,
        plat_name=plat_name,
        ec_sku_id=ec_sku_id,
    )
def _build_link_sn(plat_code, ec_sku_id):
"""
构建link_sn编码规则
"""
if plat_code and ec_sku_id:
return f'{plat_code}:{ec_sku_id}'
return None
def _match_url(url):
    """Match *url* against the loaded rules.

    Returns a (plat_code, plat_name, ec_sku_id) tuple; the fields are empty
    strings when the url is not recognised.
    """
    plat_code, plat_name, ec_sku_id = '', '', ''
    _url = clean_url(url)
    if _url:
        host = parser.urlparse(_url).hostname
        rule = rule_holder.get_rule(host)
        if host and rule:
            plat_code = rule.get('plat_code')
            plat_name = rule.get('plat_name')
            # Prefer query-parameter rules, fall back to path regex rules.
            sku_param = rule.get('sku_param', [])
            if sku_param:
                ec_sku_id = _parse_sku_by_param(_url, sku_param)
            patterns = rule.get('patterns', [])
            if patterns and not ec_sku_id:
                ec_sku_id = _parse_sku_by_path(_url, patterns)
    return plat_code, plat_name, ec_sku_id
def _match_plat_code(url, default='*'):
    """Return the platform code for *url*, or *default* when unknown."""
    if not url:
        return default
    host = parser.urlparse(url).hostname
    rule = rule_holder.get_rule(host)
    if host and rule:
        return rule.get('plat_code')
    return default
def _parse_sku_by_path(url, patterns, token=','):
"""
根据url中path部分解析skuId
:param url:
:param patterns:
:return:
"""
try:
for ptn in patterns:
arr = ptn.findall(url.strip())
if len(arr):
rs = arr[0]
if token and isinstance(rs, tuple):
return token.join(rs)
else:
return rs
except Exception as e:
LOGGER.error('match sku error[parse_sku_by_path]: url=%s, ex=%s' % (url, e))
return ''
def _parse_sku_by_param(url, sku_param, token=','):
"""
根据url中param部分解析skuId
:param url:
:param sku_param:
:return:
"""
try:
params = parser.parse_qs(parser.urlparse(url).query)
rs_list = []
if len(params) and sku_param:
for param in sku_param:
if param in params and params.get(param)[0].strip():
rs_list.append(str(params.get(param)[0].strip()))
if token:
return token.join(rs_list)
else:
if len(rs_list) == 1:
return rs_list[0]
else:
return tuple(rs_list)
except Exception as e:
LOGGER.error('match sku error[parse_sku_by_param]: url=%s, ex=%s' % (url, e))
return '' | zcbot-url-parser | /zcbot-url-parser-1.0.5.tar.gz/zcbot-url-parser-1.0.5/zcbot_url_parser/parser.py | parser.py |
# ZCC-HELPER
The ZIMI library is a basic python API and command line tool that supports the zimi Cloud Connect device to manage Powermesh home network equipment.
## Installation
You can install `zcc-helper` from PyPI:
```
pip install zcc-helper
```
The module is only supported in Python 3.
## How to use
The module can be used both as part of an embedded python program and as a command line tool.
### Embedded Program
In order to control the zimi Cloud Connect (ZCC) and associated devices your program should create an instance of a ControlPoint object which will be used to manipulate the associated devices. There is a multi-step process to do so described below.
#### Step One - discover details of the Zimi Controller and create a ControlPointDescription object
If you are connected to the local LAN with the Zimi Controller, then you can auto discover the ZCC otherwise you need to know the IP address and port number of the ZCC.
To discover ZCC and devices on the local LAN use the ControlPointDiscoveryService.discover() async method to obtain a ControlPointDescription object with details of host, port etc as per the code snippet below:
```python
import asyncio
from zcc import ControlPoint, ControlPointDescription, ControlPointDiscoveryService
async def discover():
return await ControlPointDiscoveryService().discover()
async def main():
controller_description = await discover()
print(controller_description)
asyncio.run(main())
```
When this is run it produces output like:
```python
ControlPointDescription(brand='zimi', product='zcc', mac='c4ffbc90bf73', host='192.168.1.105', port=5003, available_tcps=6)
```
#### Step Two - Create a ControlPoint object and connect to the controller
Once you have discovered details of the ZIMI controller your program should create a ControlPoint instance and use the async connect() method to authorise and start a session with the ZIMI controller as well as build a catalogue of all associated devices.
Use some code as per the snippet below:
```python
import asyncio
from zcc import ControlPoint, ControlPointDescription, ControlPointDiscoveryService
async def discover():
return await ControlPointDiscoveryService().discover()
async def main():
description = await discover()
controller = ControlPoint(description=description)
await controller.connect()
controller.print_description()
asyncio.run(main())
```
When this is run it produces output like:
```text
+----------------------------------------------------------------------------------------------------------------------------------+
| ControlPoint: c4ffbc90bf73 zcc zimi 59 devices 192.168.1.105:5003 6 Tcps |
+----------------------------------------------------------------------------------------------------------------------------------+
bddf0500-4d15-4457-b063-c12ed208a0b0_3 Study Pendant/Upstairs switch Off { TurnOn TurnOff }
bddf0500-4d15-4457-b063-c12ed208a0b0_4 Lounge/Upstairs switch Off { TurnOn TurnOff }
37bd164e-d867-4ba7-b64c-e7d4c4d0f418_1 Kitchen Downlights/Kitchen switch Off { TurnOn TurnOff }
```
It is also possible to connect to a known ZCC host with ip address and port number wrapped in a ControlPointDescription object:
```python
controller = ControlPoint(description=ControlPointDescription(host='192.168.1.105', port=5003))
```
#### Step Three - Control devices connected to the controller
Once the device ID is known then it can be used to control a particular device by using the controller.devices[device_id] instance that represents an individual device.
```python
>>> dev = zcc.devices['bddf0500-4d15-4457-b063-c12ed208a0b0_3']
>>> print(dev)
{'actions': {'actions': {'TurnOff': {'actionParams': {}},
'TurnOn': {'actionParams': {}}}},
'controller': <zcc.controller.ControlPoint object at 0x7f70a9f117f0>,
'identifier': 'bddf0500-4d15-4457-b063-c12ed208a0b0_3',
'properties': {'controlPointType': 'switch',
'name': 'Study Pendant',
'roomId': 5,
'roomName': 'Up Stairs Passage'},
'states': {'controlState': {'switch': {'isOn': False}}, 'isConnected': True}}
>>> dev.turn_on()
>>> print(dev)
{'actions': {'actions': {'TurnOff': {'actionParams': {}},
'TurnOn': {'actionParams': {}}}},
'controller': <zcc.controller.ControlPoint object at 0x7f70a9f117f0>,
'identifier': 'bddf0500-4d15-4457-b063-c12ed208a0b0_3',
'properties': {'controlPointType': 'switch',
'name': 'Study Pendant',
'roomId': 5,
'roomName': 'Up Stairs Passage'},
'states': {'controlState': {'switch': {'isOn': True}}, 'isConnected': True}}
>>> dev.turn_off()
```
Depending upon the type of device it will support various actions as defined in ControlPointDevice.
Available actions include:
```python
async def close_door(self):
'''CloseDoor if the action is supported'''
async def fade(self, brightness, timeperiod):
'''SetBrightness if the action is supported'''
async def open_door(self):
'''OpenDoor if the action is supported'''
async def open_to_percentage(self, percentage):
'''OpenToPercentage if the action is supported'''
async def set_brightness(self, brightness):
'''SetBrightness if the action is supported'''
async def set_fanspeed(self, fanspeed):
'''SetFanSpeed if the action is supported'''
async def turn_on(self):
'''TurnOn the device if the action is supported'''
async def turn_off(self):
'''TurnOff the device if the action is supported'''
```
Available properties include:
```python
def battery_level(self) -> int | None:
'''Return the battery level of an attached sensor.'''
def brightness(self) -> int | None:
'''Returns brightness from 0 to 100 or None.'''
def door_temp(self) -> int | None:
'''Return the external temperature of an attached sensor.'''
def fanspeed(self) -> int | None:
'''Returns fanspeed from 0 to 7 or None.'''
def garage_humidity(self) -> int | None:
'''Return the internal garage humidity of an attached sensor.'''
def garage_temp(self) -> int | None:
'''Return the internal garage temperature of an attached sensor.'''
def is_closing(self) -> bool:
'''Returns True if door is closing.'''
def is_closed(self) -> bool:
'''Returns True if door is closed.'''
def is_connected(self) -> bool:
'''Returns True if connected is on.
When a device has been disconnected from the mesh it show False.'''
def is_off(self) -> bool:
'''Returns True if status is off.'''
def is_on(self) -> bool:
'''Returns True if status is on.'''
def is_opening(self) -> bool:
'''Returns True if door is opening.'''
    def is_open(self) -> bool:
'''Returns True if door is open.'''
def location(self) -> str:
'''Gets a descriptive string of the device location'''
def name(self) -> str:
'''Gets a descriptive string of the device name'''
def percentage(self) -> int | None:
'''Return the open to percentage'''
def room(self) -> str:
'''Gets a descriptive string of the device room'''
def type(self) -> str:
'''Gets a descriptive string of the device type'''
```
In addition, you can subscribe to a notification for changes to the device state by using the following methods of the device object.
```python
def subscribe(self, observer):
'''Subscribe an observer object for state changes.
Observer object must include notify(self, observable, *args, **kwargs) method.'''
def unsubscribe(self, observer):
'''Unsubscribe an observer object.'''
```
The observer object must have a notify(observable) method.
Finally, you can initiate a watchdog function that will periodically refresh the device states from the ZCC. This can be useful for long lived connections that may time-out as it will trigger a re-connection if needed.
```python
def start_watchdog(self, timer: int):
'''Start a periodic timeout that resets every time a status update is received.'''
def stop_watchdog(self):
'''Stop the periodic timeout.'''
```
### Command Line Program
ZCC can also be used as a command line tool to discover ZCC devices and/or execute actions upon them.
```
$ python3 -m zcc
usage: zcc [-h] [--verbosity VERBOSITY] (--discover | --execute) [--host HOST] [--port PORT] [--timeout TIMEOUT] [--device DEVICE]
[--action {CloseDoor,OpenDoor,TurnOn,TurnOff,OpenToPercentage,SetBrightness,SetFanSpeed}] [--value VALUE]
zcc: error: one of the arguments --discover --execute is required
```
To discover devices use:
```
$ python -m zcc --discover
+-----------------------------------------------------------------------------------------------------------------+
| ControlPoint: c4ffbc90bf73 zcc zimi 34 devices 192.168.1.105:5003 6 Tcps |
+-----------------------------------------------------------------------------------------------------------------+
0a872922-73e0-4699-89c5-29156f0686f8_1 LED strip/Lounge dimmer Off { TurnOn TurnOff SetBrightness }
0da922e4-1f04-4a80-b267-ade8529194c9_1 Water Feature Pu switch { TurnOn TurnOff }
```
To execute an action use:
```
python -m zcc --execute --device 'bddf0500-4d15-4457-b063-c12ed208a0b0_3' --action 'TurnOn'
```
This version of the command is relatively slow as it first of all discovers the ZCC on the local LAN, builds a device inventory and then executes the action.
| zcc-helper | /zcc-helper-3.2rc1.tar.gz/zcc-helper-3.2rc1/README.md | README.md |
from __future__ import annotations
import asyncio
import json
from json.decoder import JSONDecodeError
import logging
import socket
from typing import Tuple
from zcc.constants import LEVEL_BY_VERBOSITY
from zcc.description import ControlPointDescription
from zcc.errors import ControlPointError
from zcc.protocol import ControlPointProtocol
class ControlPointDiscoveryProtocol(asyncio.DatagramProtocol):
    '''Listens for ZCC announcements on the defined UDP port.'''

    def __init__(self, discovery_complete: asyncio.Future, discovery_result: object) -> None:
        super().__init__()
        # Future resolved once a controller announcement has been parsed.
        self.discovery_complete: asyncio.Future = discovery_complete
        # Shared ControlPointDescription populated from the announcement.
        self.discovery_result = discovery_result
        self.transport: asyncio.transports.DatagramTransport = None
        self.logger = logging.getLogger('ControlPointDiscoveryService')

    def connection_lost(self, exc) -> None:
        self.transport.close()
        return super().connection_lost(exc)

    def connection_made(self, transport: asyncio.transports.DatagramTransport) -> None:
        self.transport = transport
        return super().connection_made(transport)

    def datagram_received(self, data: bytes, addr: Tuple[str, int]) -> None:
        '''Parse a ZCC announcement datagram and resolve the discovery future.'''
        text = str(data.decode('UTF-8'))
        self.logger.debug(
            'datagram_received() from %s\n%s', str(addr), text)
        for line in text.split('\n'):
            try:
                response = json.loads(line)
            except JSONDecodeError:
                # Stop at the first non-JSON line of the datagram.
                break
            if response:
                self.discovery_result.brand = response['brand']
                self.discovery_result.product = response['product']
                self.discovery_result.mac = response['mac']
                self.discovery_result.host = addr[0]
                self.discovery_result.port = response['tcp']
                self.discovery_result.available_tcps = response['availableTcps']
                if api_version := response.get('apiVersion'):
                    self.discovery_result.api_version = api_version
                if firmware_version := response.get('firmwareVersion'):
                    self.discovery_result.firmware_version = firmware_version
                # BUG FIX: guard against a second announcement arriving --
                # calling set_result() twice raises InvalidStateError.
                if not self.discovery_complete.done():
                    self.discovery_complete.set_result(True)
        return super().datagram_received(data, addr)
class ControlPointDiscoveryService:
    '''Provides a ZCC discovery service to discover ZIMI controllers on the local LAN.'''

    def __init__(self, verbosity: int = 0):
        '''
        :param verbosity: logging verbosity between 0 and 2 (clamped to 2).
        '''
        self.logger = logging.getLogger('ControlPointDiscoveryService')
        if verbosity > 2:
            verbosity = 2
        self.logger.setLevel(LEVEL_BY_VERBOSITY[verbosity])
        self.loop = asyncio.get_event_loop()
        self.discovery_complete = self.loop.create_future()
        self.discovery_result = ControlPointDescription()

    async def discover(self) -> ControlPointDescription:
        '''Discover local ZIMI controllers on LAN and return their description.

        :raises ControlPointError: when no controller answers within 10s.
        '''
        transport, _ = await self.loop.create_datagram_endpoint(
            lambda: ControlPointDiscoveryProtocol(
                self.discovery_complete, self.discovery_result),
            local_addr=('0.0.0.0', ControlPointProtocol.UDP_RECV_PORT))
        # Broadcast the discovery request on the LAN.
        send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            send_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            send_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            server_address = ('255.255.255.255',
                              ControlPointProtocol.UDP_SEND_PORT)
            message = ControlPointProtocol.discover()
            send_socket.sendto(message.encode(), server_address)
            self.logger.info('Sending discovery message on local network')
        finally:
            # BUG FIX: the send socket was previously never closed.
            send_socket.close()
        try:
            await asyncio.wait_for(self.discovery_complete, timeout=10)
            self.logger.info("Success - discovered ZIMI controller at %s port %d\n%s",
                             self.discovery_result.host,
                             self.discovery_result.port,
                             self.discovery_result)
            return self.discovery_result
        except asyncio.exceptions.TimeoutError as error:
            raise ControlPointError(
                "Failure - Unable to discover ZCC by UDP broadcast.") from error
        finally:
            # BUG FIX: close the listening transport on both the success and
            # the timeout path (it previously leaked on timeout).
            transport.close()
import asyncio
import argparse
import sys
from zcc import ControlPoint, ControlPointDescription, ControlPointDiscoveryService, ControlPointError
def __options(args):
    '''Parse *args* (argv without the program name) into CLI options.
    Exactly one of --discover / --execute is required; host/device groups
    are validated later in main().
    '''
    parser = argparse.ArgumentParser(prog='zcc',
                                     description='''
        A command line interface to a ZIMI home controller.
        Operates in two basic modes: (a) discovery mode where
        the local network is scanned for a ZCC and the details
        are printed, or (b) an execute mode where an action is
        sent to a device''')
    parser.add_argument("--verbosity", action="store", type=int, default=0,
                        help="verbosity level between 0 and 2")
    command_group = parser.add_argument_group('command')
    # Mutually exclusive: the CLI either discovers or executes, never both.
    cxg = command_group.add_mutually_exclusive_group(required=True)
    cxg.add_argument("--discover", action="store_true",
                     help="discover ZCC and print devices")
    cxg.add_argument("--execute", action="store_true",
                     help="execute an action on a device")
    host_group = parser.add_argument_group('host')
    host_group.add_argument("--host", action="store",
                            help="zcc host name|address")
    host_group.add_argument("--port", action="store", type=int,
                            help="zcc port")
    host_group.add_argument("--timeout", action="store", type=int, default=3,
                            help="zcc timeout value")
    device_group = parser.add_argument_group('device')
    device_group.add_argument("--device", action="store",
                              help="device identifier")
    device_group.add_argument("--action", action="store", type=str,
                              choices=["CloseDoor", "OpenDoor", "TurnOn", "TurnOff",
                                       "OpenToPercentage", "SetBrightness", "SetFanSpeed"])
    device_group.add_argument("--value", action="store",
                              help="device action value (for actions that require)")
    return parser.parse_args(args)
async def main(args):
    '''Main function.
    Builds a controller description (explicit --host/--port, or UDP
    discovery otherwise), connects, then either prints the device table
    (--discover) or dispatches a single action to a device (--execute).
    Raises ControlPointError for any invalid option combination.
    '''
    options = __options(args)
    if options.host and options.port:
        description = ControlPointDescription(
            host=options.host, port=options.port)
    else:
        description = await ControlPointDiscoveryService(
            verbosity=options.verbosity).discover()
    controller = ControlPoint(
        description=description, verbosity=options.verbosity)
    await controller.connect()
    if options.discover:
        controller.print_description()
    if options.execute:
        if options.action and options.device:
            device = controller.devices.get(options.device, None)
            if device:
                # Substring dispatch is safe here: no argparse action choice
                # is a substring of another.
                if 'CloseDoor' in options.action:
                    await device.close_door()
                elif 'OpenDoor' in options.action:
                    await device.open_door()
                elif 'TurnOn' in options.action:
                    await device.turn_on()
                elif 'TurnOff' in options.action:
                    await device.turn_off()
                elif 'OpenToPercentage' in options.action:
                    if options.value:
                        # NOTE(review): options.value is a str here -
                        # presumably converted downstream; confirm.
                        await device.open_to_percentage(options.value)
                    else:
                        raise ControlPointError(
                            'OpenToPercentage requires --value')
                elif 'SetBrightness' in options.action:
                    if options.value:
                        await device.set_brightness(options.value)
                    else:
                        raise ControlPointError(
                            'SetBrightness requires --value')
                elif 'SetFanSpeed' in options.action:
                    if options.value:
                        await device.set_fanspeed(options.value)
                    else:
                        raise ControlPointError(
                            'SetFanSpeed requires --value')
                else:
                    raise ControlPointError(
                        'No valid --device and --action')
            else:
                raise ControlPointError(f'No device {options.device} exists')
        else:
            raise ControlPointError(
                '--execute needs --device and --action options')
    if controller:
        controller.disconnect()
if __name__ == '__main__':
    # CLI entry point: run the async main() and report controller errors as
    # a one-line message instead of a traceback.
    try:
        asyncio.run(main(sys.argv[1:]))
    except ControlPointError as error:
        print(f"zcc {' '.join(sys.argv[1:])}: {error}")
from __future__ import annotations
import asyncio
from pprint import pformat
import logging
import socket
from typing import Dict, List
import uuid
from zcc.constants import LEVEL_BY_VERBOSITY, NAME, VERSION
from zcc.description import ControlPointDescription
from zcc.device import ControlPointDevice
from zcc.errors import ControlPointError
from zcc.socket import ControlPointSocket
from zcc.protocol import ControlPointProtocol
from zcc.watchdog import ControlPointWatchdog
import time
class ControlPoint:
    '''Represents the ZCC controller which connects to individual devices'''
    def __init__(self, description: ControlPointDescription = None,
                 timeout: int = 2, verbosity: int = 0):
        '''Initialise controller state from a discovery description.
        Args:
            description: must carry at least host and port; the remaining
                fields are optional metadata copied from discovery.
            timeout: socket timeout in seconds.
            verbosity: 0-2, mapped through LEVEL_BY_VERBOSITY.
        Raises:
            ControlPointError: if host or port is missing.
        '''
        self.logger = logging.getLogger(f'ControlPoint@{id(self)}')
        if verbosity > 2:
            verbosity = 2
        self.logger.setLevel(LEVEL_BY_VERBOSITY[verbosity])
        self.host = description.host
        self.port = description.port
        if not (self.host and self.port):
            raise ControlPointError(
                'ControlPoint initialisation failed - must provide at least host and port.')
        self.logger.info(
            'Setting up %s version %s', NAME, VERSION)
        self.brand = description.brand
        self.product = description.product
        self.mac = description.mac
        self.available_tcps = description.available_tcps
        self.api_version = description.api_version
        self.firmware_version = description.firmware_version
        # Gateway-level properties filled in by __update_properties().
        self.num_devices = None
        self.num_control_points = None
        self.network_name = None
        self.uptime = None
        self.timeout = timeout
        self.verbosity = verbosity
        self.devices: Dict[ControlPointDevice] = {}
        # Counters of per-device updates received so far.
        self.actions_received = 0
        self.properties_received = 0
        self.states_received = 0
        # This host's MAC, used to identify the app to the controller.
        self.device_mac = format(uuid.getnode(), '=012x')
        self.access_token = None
        # NOTE(review): these two booleans are immediately shadowed by the
        # asyncio.Future assignments a few lines below - confirm intent.
        self.session_authorised = False
        self.session_started = False
        self.socket = None
        # Holds the previous socket during re_connect() so unsent messages
        # can be replayed.
        self.closed_socket = None
        self.ready = False
        self.loop = asyncio.get_event_loop()
        self.session_authorised: asyncio.Future = None
        self.session_started: asyncio.Future = None
        self.properties_ready: asyncio.Future = None
        self.actions_ready: asyncio.Future = None
        self.states_ready: asyncio.Future = None
        self.connected = asyncio.Event()
        self.watchdog_timer: ControlPointWatchdog = None
    @ property
    def doors(self) -> List[ControlPointDevice]:
        '''Return an array with all doors'''
        return list(filter(lambda device: device.type == 'garagedoor', self.devices.values()))
    @ property
    def fans(self) -> List[ControlPointDevice]:
        '''Return an array with all fans'''
        return list(filter(lambda device: device.type == 'fan', self.devices.values()))
    @ property
    def lights(self) -> List[ControlPointDevice]:
        '''Return an array with all lights (i.e. switch or dimmer type)'''
        return list(filter(lambda device:
                           device.type == 'light' or
                           device.type == 'switch' or
                           device.type == 'dimmer', self.devices.values()))
    @ property
    def outlets(self) -> List[ControlPointDevice]:
        '''Return an array with all outlets'''
        return list(filter(lambda device: device.type == 'outlet', self.devices.values()))
    @ property
    def sensors(self) -> List[ControlPointDevice]:
        '''Return an array with all sensors'''
        # NOTE(review): filters on type 'garagedoor', identical to `doors` -
        # presumably sensors ride on garage-door devices; confirm.
        return list(filter(lambda device: device.type == 'garagedoor', self.devices.values()))
    async def connect(self, fast: bool = False) -> bool:
        '''Connect to ZCC, build device table and subscribe to updates
        Sequence: open socket -> authorise -> start session -> (unless
        `fast`) fetch properties/actions/states -> subscribe to events.
        Raises ControlPointError on connection, authorisation or session
        failures.
        '''
        self.logger.info('Connecting to ZCC %s:%d', self.host, self.port)
        self.connected.clear()
        # Replace any stale futures from a previous connect attempt.
        if self.session_authorised:
            self.session_authorised.cancel()
        self.session_authorised = self.loop.create_future()
        if self.session_started:
            self.session_started.cancel()
        self.session_started = self.loop.create_future()
        if not self.socket:
            self.socket = ControlPointSocket(
                self.host, self.port, timeout=self.timeout, verbosity=self.verbosity)
        try:
            await self.socket.connect()
        except ConnectionRefusedError as error:
            description = f'Connection refused when connecting to ZCC {self.host}:{self.port}'
            self.logger.error(description)
            raise ControlPointError(description) from error
        except socket.error as error:
            description = f'Socket error when connecting to ZCC {self.host}:{self.port}'
            self.logger.error(description)
            raise ControlPointError(description) from error
        except Exception as error:
            # NOTE(review): `description` is unbound on this path (NameError
            # would mask the original error) - this log call needs its own
            # message.
            self.logger.error(description)
            raise ControlPointError(
                'Unknown error when connecting to ZCC') from error
        self.socket.subscribe(self)
        await self.socket.sendall(ControlPointProtocol.authorise(self.device_mac),
                                  response_expected=False)
        try:
            self.logger.info('Waiting for authorisation')
            await asyncio.wait_for(self.session_authorised,
                                   timeout=ControlPointProtocol.AUTH_TIMEOUT)
        except asyncio.exceptions.TimeoutError as error:
            self.logger.error('Timeout waiting for authorisation')
            raise ControlPointError(
                "Unable to authorise connection to ZCC.") from error
        await self.socket.sendall(ControlPointProtocol.start(
            self.device_mac, self.access_token), response_expected=False)
        try:
            self.logger.info('Waiting for session start')
            await asyncio.wait_for(self.session_started,
                                   timeout=ControlPointProtocol.START_SESSION_TIMEOUT)
        except asyncio.exceptions.TimeoutError as error:
            self.logger.error('Timeout waiting for session start')
            raise ControlPointError(
                "Unable to start session.") from error
        await self.request_gateway_properties()
        if not fast:
            await self.__get_devices()
        await self.socket.sendall(ControlPointProtocol.subscribe(), response_expected=False)
        await asyncio.sleep(ControlPointProtocol.SUBSCRIBE_TIMEOUT)
        self.ready = True
        self.connected.set()
        # NOTE(review): label says actions/properties/states but the args
        # are actions, states, properties - order mismatch in the log only.
        self.logger.info('Connected to ZCC %s:%d with %d/%d/%d actions/properties/states',
                         self.host, self.port,
                         self.actions_received, self.states_received, self.properties_received)
    def __del__(self):
        # Best-effort cleanup when the controller object is garbage collected.
        self.disconnect()
    def describe(self) -> str:
        '''Return a string representation of ZCC including devices'''
        header = '+' + '-' * 130 + '+'
        if self.host:
            description = header + '\n'
            description += '| Device mac: %35s Brand: %8s Product: %8s |\n' % (
                self.mac if self.mac else "n/a",
                self.brand if self.brand else "n/a",
                self.product if self.product else "n/a")
            # NOTE(review): %8d with the "n/a" fallback would raise TypeError
            # if port were unset; unreachable today since __init__ requires
            # a port, but fragile.
            description += '| Host: %35s Port: %8d API: %8s |\n' % (
                self.host if self.host else "n/a",
                self.port if self.port else "n/a",
                self.api_version if self.api_version else "n/a")
            description += '| Firmware: %35s Num Devices: %8s Num Control Points: %8s |\n' % (
                self.firmware_version if self.firmware_version else "n/a",
                self.num_devices if self.num_devices else "n/a",
                self.num_control_points if self.num_control_points else "n/a")
            description += '| Network Name: %35s Uptime: %8ss Tcps: %8s |\n' % (
                self.network_name if self.network_name else "n/a",
                self.uptime if self.uptime else "n/a",
                str(self.available_tcps) if self.available_tcps else "n/a")
            description += header + '\n'
            for key in self.devices:
                description += self.devices[key].describe()
            return description
        else:
            return 'ControlPoint: not found'
    def disconnect(self):
        '''Disconnect from zimi controller'''
        self.ready = False
        if self.socket:
            self.socket.close()
            self.socket = None
    async def __get_devices(self):
        '''Get initial device data from controller.'''
        # Each GET is followed by a wait on the corresponding future (set in
        # __update_devices) plus a fixed settling sleep.
        self.properties_ready = self.loop.create_future()
        self.actions_ready = self.loop.create_future()
        self.states_ready = self.loop.create_future()
        self.logger.info('Getting initial device properties')
        await self.socket.sendall(ControlPointProtocol.get('properties'), response_expected=False)
        try:
            await asyncio.wait_for(self.properties_ready,
                                   timeout=ControlPointProtocol.DEVICE_GET_TIMEOUT)
        except asyncio.exceptions.TimeoutError as error:
            raise ControlPointError(
                "ZCC connection failed - didn't receive any properties.") from error
        await asyncio.sleep(ControlPointProtocol.DEVICE_GET_TIMEOUT)
        self.logger.info('Getting initial device actions')
        await self.socket.sendall(ControlPointProtocol.get('actions'), response_expected=False)
        try:
            await asyncio.wait_for(self.actions_ready,
                                   timeout=ControlPointProtocol.DEVICE_GET_TIMEOUT)
        except asyncio.exceptions.TimeoutError as error:
            raise ControlPointError(
                "ZCC connection failed - didn't receive any actions.") from error
        await asyncio.sleep(ControlPointProtocol.DEVICE_GET_TIMEOUT)
        self.logger.info('Getting initial device states')
        await self.socket.sendall(ControlPointProtocol.get('states'), response_expected=False)
        try:
            await asyncio.wait_for(self.states_ready,
                                   timeout=ControlPointProtocol.DEVICE_GET_TIMEOUT)
        except asyncio.exceptions.TimeoutError as error:
            raise ControlPointError(
                "ZCC connection failed - didn't receive any states.") from error
        await asyncio.sleep(ControlPointProtocol.DEVICE_GET_TIMEOUT)
    async def __get_states(self):
        '''Get latest state data from controller and reset watchdog.'''
        self.states_ready = self.loop.create_future()
        self.logger.info('Refreshing device states')
        await self.socket.sendall(ControlPointProtocol.get('states'))
        try:
            await asyncio.wait_for(self.states_ready,
                                   timeout=ControlPointProtocol.DEVICE_GET_TIMEOUT)
        except asyncio.exceptions.TimeoutError as error:
            raise ControlPointError(
                "ZCC connection failed - didn't receive updated states.") from error
        if self.watchdog_timer:
            self.watchdog_timer.reset()
    def print_description(self):
        '''Print description of the ZCC controller.'''
        print(self.describe())
    async def notify(self, notifier):
        '''Receive a notification of an updated object.
        Pull the data off the queue. If None is received then
        assume the socket has been closed and needs re-opening.'''
        response = notifier.get()
        if not response:
            # Empty queue notify => socket closed (see ControlPointSocket);
            # only reconnect if we had previously completed a connect.
            if self.ready:
                await self.re_connect()
            return
        self.logger.debug('notify() received:\n%s', response)
        if response.get(ControlPointProtocol.AUTH_APP_FAILED, None):
            self.logger.error("Authorisation failed\n%s", pformat(response))
        if response.get(ControlPointProtocol.AUTH_APP_SUCCESS, None):
            self.logger.info("Authorisation success\n%s", pformat(response))
            self.access_token = response[ControlPointProtocol.AUTH_APP_SUCCESS]['accessToken']
            self.session_authorised.set_result(True)
        if response.get(ControlPointProtocol.START_SESSION_FAILED, None):
            self.logger.error("Start session failed\n%s", pformat(response))
        if response.get(ControlPointProtocol.START_SESSION_SUCCESS, None):
            self.logger.info("Start session success\n%s", pformat(response))
            self.session_started.set_result(True)
        if response.get(ControlPointProtocol.CONTROLPOINT_ACTIONS, None):
            self.__update_devices(response.get(
                ControlPointProtocol.CONTROLPOINT_ACTIONS, None), 'actions')
        if response.get(ControlPointProtocol.CONTROLPOINT_PROPERTIES, None):
            self.__update_devices(response.get(
                ControlPointProtocol.CONTROLPOINT_PROPERTIES, None), 'properties')
        if response.get(ControlPointProtocol.CONTROLPOINT_STATES, None):
            self.__update_devices(response.get(
                ControlPointProtocol.CONTROLPOINT_STATES, None), 'states')
        if response.get(ControlPointProtocol.CONTROLPOINT_STATES_EVENTS, None):
            self.__update_devices(response.get(
                ControlPointProtocol.CONTROLPOINT_STATES_EVENTS, None), 'states')
        if response.get(ControlPointProtocol.GATEWAY_PROPERTIES, None):
            self.__update_properties(response.get(
                ControlPointProtocol.GATEWAY_PROPERTIES, None))
        if response.get(ControlPointProtocol.ZCC_STATUS, None):
            # Controller announced a reset; re-connect on a fresh socket.
            self.logger.error("ZCC status message\n%s", pformat(response))
            await self.re_connect(reset=True)
    async def re_connect(self, reset: bool = False):
        '''Re-connect to a new socket and resend any queued messages.'''
        if reset:
            self.logger.error('Preparing re-connect to new socket after ZCC intiated reset')
        else:
            self.logger.error('Preparing re-connect to new socket as existing socket closed')
        if self.watchdog_timer:
            self.logger.debug('Pausing existing watchdog timer during re-connection')
            self.watchdog_timer.pause()
        # Keep the old socket around so its unsent() queue can be replayed.
        self.socket.unsubscribe(self)
        self.socket.close()
        self.closed_socket = self.socket
        self.ready = False
        while not self.ready:
            self.socket = None
            try:
                self.logger.info('Re-connecting to ZCC with new socket')
                await self.connect(fast=True)
                # Replay every message that never received a response.
                while True:
                    message = self.closed_socket.unsent()
                    if message:
                        self.logger.error(
                            'Re-sending message:\n%s', message)
                        await self.socket.sendall(message)
                    else:
                        break
                self.closed_socket = None
            except ControlPointError as error:
                self.logger.error(
                    "Re-connection to ZCC failed with ControlPointError: %s - will retry in %d",
                    error, ControlPointProtocol.RETRY_TIMEOUT)
                await asyncio.sleep(ControlPointProtocol.RETRY_TIMEOUT)
        if self.watchdog_timer:
            self.logger.debug('Re-starting existing watchdog timer after re-connection')
            self.watchdog_timer.reset()
    async def set(self, identifier: str, action: str, params: object = None):
        '''Sends an action for a device.
        Blocks (retrying re-connects) until the controller is connected and
        the message is accepted by the socket.
        '''
        while not self.connected.is_set():
            self.logger.error(
                'Controller not ready to accept commands - attempting to re-connect')
            try:
                await self.re_connect()
            except ControlPointError as error:
                # NOTE(review): `error` is unused; the loop just retries.
                self.logger.error(
                    'Connection failed when attempting to re-connect - trying again')
        message = ControlPointProtocol.set(identifier, action, params)
        success = False
        while not success:
            success = await self.socket.sendall(message)
        self.logger.info("Sending %s request to %s (%s)",
                         action, self.devices[identifier].location, identifier)
    def __str__(self):
        return pformat(vars(self)) + '\n'
    def start_watchdog(self, timer: int):
        '''Start a periodic timeout that resets every time a status update is received.'''
        if self.watchdog_timer:
            self.stop_watchdog()
        self.logger.info("Starting Watchdog for %s seconds", timer)
        self.watchdog_timer = ControlPointWatchdog(
            timer, self.trigger_watchdog)
        self.watchdog_timer.start()
    def stop_watchdog(self):
        '''Stop the periodic timeout.'''
        self.logger.info("Stopping existing Watchdog")
        if self.watchdog_timer:
            self.watchdog_timer.cancel()
            self.watchdog_timer = None
    async def trigger_watchdog(self):
        '''Trigger the watchdog function - which will reset the connection.'''
        self.logger.error(
            "Triggering the watchdog timer - will fetch new states to refresh connection")
        await self.__get_states()
    def __update_devices(self, devices, target):
        '''Update device target with JSON data for all devices.'''
        # `target` is one of 'actions'/'properties'/'states'; devices are
        # created lazily on first sight of their id.
        for device in devices:
            updates_made = False
            identifier = device['id']
            if not self.devices.get(identifier):
                self.devices[identifier] = ControlPointDevice(self, identifier)
            if 'actions' in target and self.devices[identifier].actions != device[target]:
                self.devices[identifier].actions = device[target]
                self.actions_received += 1
                updates_made = True
            if 'properties' in target and self.devices[identifier].properties != device[target]:
                self.devices[identifier].properties = device[target]
                self.properties_received += 1
                updates_made = True
            if 'states' in target and self.devices[identifier].states != device[target]:
                self.devices[identifier].states = device[target]
                self.states_received += 1
                updates_made = True
            if updates_made:
                if self.watchdog_timer:
                    self.watchdog_timer.reset()
                if self.ready:
                    self.logger.info(
                        'Received %s update for %s (%s):\n%s',
                        target, self.devices[identifier].location, identifier, device[target])
                else:
                    self.logger.debug(
                        'Received %s update for %s (%s):\n%s',
                        target, self.devices[identifier].location, identifier, device[target])
                self.devices[identifier].notify_observers()
        # Resolve the one-shot futures that __get_devices() is waiting on.
        if self.actions_received > 0 and not self.actions_ready.done():
            self.actions_ready.set_result(True)
        if self.properties_received > 0 and not self.properties_ready.done():
            self.properties_ready.set_result(True)
        if self.states_received > 0 and not self.states_ready.done():
            self.states_ready.set_result(True)
    def __update_properties(self, properties):
        '''Updates zcc gateway properties'''
        properties = properties.get("properties")
        if properties:
            self.brand = properties['brand']
            self.product = properties['product']
            self.mac = properties['mac']
            self.available_tcps = properties['availableTcps']
            if api_version := properties.get('apiVersion'):
                self.api_version = api_version
            if firmware_version := properties.get('firmwareVersion'):
                self.firmware_version = firmware_version
            self.num_devices = properties['numberOfDevices']
            self.num_control_points = properties['numberOfControlPoints']
            self.network_name = properties['networkName']
            # NOTE(review): now - uptime is the boot timestamp, not an
            # uptime, yet it is displayed as "Uptime" in describe(); confirm.
            self.uptime = int(time.time()) - properties['uptime']
    async def request_gateway_properties(self):
        '''Sends GET request to fetch zcc gateway level properties'''
        self.logger.info('Requesting zcc gateway level properties')
        await self.socket.sendall(ControlPointProtocol.inforequest())
import asyncio
from codecs import StreamReader, StreamWriter
import json
from json.decoder import JSONDecodeError
import logging
import queue
import socket
from zcc.constants import LEVEL_BY_VERBOSITY
class ControlPointSocket:
    '''A TCP/IP socket that includes recvall method with timeout'''
    def __init__(self, host: str, port: int, timeout: int = None, verbosity: int = 0):
        '''Create new TCP/IP socket to host and port with optional timeout.
        Update objects are made available via a FIFO queue.'''
        self.host = host
        self.port = port
        # Observers implementing async notify(self, observable).
        self._observers = []
        self.listen_task: asyncio.Task = None
        # Inbound responses waiting to be consumed via get().
        self.listen_queu = queue.Queue()
        # Outbound messages still awaiting a response; replayable via unsent().
        self.send_queu = queue.Queue()
        self.loop = asyncio.get_event_loop()
        self.logger = logging.getLogger(f'ControlPointSocket@{id(self)}')
        self.logger.setLevel(LEVEL_BY_VERBOSITY[verbosity])
        self.sock: socket.socket = None
        self.reader: StreamReader = None
        self.writer: StreamWriter = None
        self.timeout = timeout
    async def connect(self):
        '''Connect to an OS socket and begin listening.
        Raises ConnectionRefusedError or socket.error on failure.'''
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.sock and self.timeout:
            self.sock.settimeout(self.timeout)
        try:
            self.sock.connect((self.host, self.port))
            # Wrap the connected OS socket in asyncio streams and start the
            # background listener.
            self.reader, self.writer = await asyncio.open_connection(sock=self.sock)
            self.listen_task = asyncio.create_task(self.__listen())
        except asyncio.CancelledError:
            self.logger.error("Connection cancelled")
            await self.listen_task
        except ConnectionRefusedError as error:
            self.logger.error(
                'Connection refused %s error from %s:%s',
                error, self.host, str(self.port))
            raise error
        except socket.error as error:
            self.logger.error(
                'Connection received socket %s error from %s:%s',
                error, self.host, str(self.port))
            raise error
    async def __listen(self):
        '''Worker thread to monitor state change events and update objects.
        Sends a notify to observers when new items are put in the queue OR the
        underlying socket is closed.'''
        while True:
            try:
                # Short timeout so socket health is re-checked even when the
                # controller is quiet.
                line = await asyncio.wait_for(self.reader.readline(), timeout=1)
                if line == b'':
                    continue
                response = json.loads(line).get('response')
                if response:
                    self.listen_queu.put(response)
                    for obs in self._observers:
                        await obs.notify(self)
                # Treat any response as acknowledging the oldest pending
                # sent message (queue.Empty is caught below).
                self.send_queu.get(block=False)
            except asyncio.CancelledError:
                self.logger.debug("Listening connection cancelled")
                break
            except asyncio.TimeoutError:
                # Timeout is normal; only exit if the socket has died.
                if self.sock and self.sock.fileno() != -1 and self.reader:
                    pass
                else:
                    self.logger.debug(
                        'Listening connection socket failure after timeout')
                    break
            except AttributeError:
                # reader/sock set to None by close() during the await.
                self.logger.debug(
                    "Listening connection cancelled during timeout")
                break
            except (ConnectionResetError, ConnectionAbortedError) as error:
                self.logger.debug(
                    "Llstening connection reset error %s", error)
                break
            except JSONDecodeError as error:
                # NOTE(review): a single malformed line terminates the whole
                # listen loop and closes the socket - confirm intended.
                self.logger.debug(
                    "Listening connection JSON decode error %s:\n%s", error, line)
                break
            except queue.Empty:
                pass
        self.logger.debug("Listening connection socket closed")
        self.listen_task = None  # Remove reference before closing
        self.close()
        # Final notify with an empty queue signals observers that the socket
        # closed (get() will return None).
        for obs in self._observers:
            await obs.notify(self)
    def close(self) -> None:
        '''Close socket'''
        if self.listen_task:
            self.listen_task.cancel()
        if self.sock:
            self.sock.close()
        self.reader = None
        self.writer = None
        self.sock = None
    def get(self):
        '''Fetch an item from the queue. Returns None if the queue is empty.
        Notify for an empty queue should have only been sent if the socket has
        been closed and an extra notify() was sent.'''
        try:
            response = self.listen_queu.get(block=False)
        except queue.Empty:
            response = None
        return response
    async def sendall(self, message: str, response_expected: bool = True) -> bool:
        '''Send all bytes of string to socket end-point. If the socket is closed
        send a None notify to prompt re-connect. If response_expected is set
        the message is saved in a queu incase it needs to be retrieved to send again.'''
        if response_expected:
            self.send_queu.put(message)
        if self.sock and self.sock.fileno() != -1 and self.writer and not self.writer.is_closing():
            self.logger.debug('sendall()\n%s', message)
            self.writer.write(message.encode())
            await self.writer.drain()
            return True
        else:
            self.logger.error('Send failed with no socket:\n%s', message)
            self.close()
            for obs in self._observers:
                await obs.notify(self)
            return False
    def subscribe(self, observer):
        '''Subscribe an observer object for state changes.
        Observer object must include an async notify(self, observable, *args, **kwargs) method.'''
        self._observers.append(observer)
    def unsubscribe(self, observer):
        '''Unsubscribe an observer object.'''
        try:
            self._observers.remove(observer)
        except ValueError as error:
            self.logger.debug(
                "Unsubscribe failed with value error: %s for %s", error, observer)
    def unsent(self):
        '''Fetch an item from the unsent queue. Returns None if the queue is empty.'''
        try:
            response = self.send_queu.get(block=False)
        except queue.Empty:
            response = None
        return response
import json
from .constants import APP_ID, APP_TOKEN
class ControlPointProtocol:
    '''Static methods for the Zimi Controller protocol.'''
    # Response keys emitted by the controller.
    AUTH_APP_FAILED = 'auth_app_failed'
    AUTH_APP_SUCCESS = 'auth_app_success'
    AUTH_TIMEOUT = 30
    START_SESSION_FAILED = 'start_session_failed'
    START_SESSION_SUCCESS = 'start_session_success'
    START_SESSION_TIMEOUT = 30
    DEVICE_GET_TIMEOUT = 10
    CONTROLPOINT_PROPERTIES = 'controlpoint_properties'
    CONTROLPOINT_STATES = 'controlpoint_states'
    CONTROLPOINT_ACTIONS = 'controlpoint_actions'
    CONTROLPOINT_SETACTIONS = 'controlpoint_setactions'
    CONTROLPOINT_STATES_EVENTS = 'controlpoint_states_events'
    ZCC_STATUS = 'zcc_status'
    SUBSCRIBE_TIMEOUT = 5
    GATEWAY_PROPERTIES = 'gateway_properties'
    GATEWAY_PROPERTIES_TIMOUT = 10
    RETRY_TIMEOUT = 90
    RESET_TIMEOUT = 90
    UDP_SEND_PORT = 5001
    UDP_RECV_PORT = 5002
    # Device actions that carry no parameters vs. those requiring params.
    _PLAIN_ACTIONS = ("TurnOn", "TurnOff", "OpenDoor", "CloseDoor")
    _PARAM_ACTIONS = ("Fade", "OpenToPercentage", "SetBrightness", "SetFanSpeed")
    @classmethod
    def authorise(cls, device_mac: str) -> str:
        '''Returns the authorise message.'''
        return json.dumps({
            "request": {
                "type": "auth_app",
                "params": {
                    "appId": APP_ID,
                    "appToken": APP_TOKEN,
                    "deviceMac": device_mac,
                },
            }
        })
    @classmethod
    def discover(cls) -> str:
        '''Return the discover message to be broadcast on LAN.'''
        return 'ZIMI'
    @classmethod
    def get(cls, target: str) -> str:
        '''Returns the get messages assocated with a target.
        Raises KeyError for any target other than actions/properties/states.
        '''
        if target not in ('actions', 'properties', 'states'):
            raise KeyError(
                'Only "actions", "properties" and "states" are supported')
        return json.dumps(
            {"request": {"path": f"api/v1/controlpoint/{target}", "method": "GET"}})
    @classmethod
    def set(cls, identifier: str, action: str, params: object) -> str:
        '''Returns the set messages assocated with an action.
        Parameterless actions ignore `params`; parameterised actions require
        a truthy `params`.  Raises KeyError otherwise.
        '''
        if action in cls._PLAIN_ACTIONS:
            entry = {"id": identifier, "action": action}
        elif action in cls._PARAM_ACTIONS and params:
            entry = {"id": identifier,
                     "action": action,
                     "actionParams": params}
        else:
            raise KeyError('Combination of input parameters are not supported')
        return json.dumps({
            "request": {
                "path": "api/v1/controlpoint/actions",
                "method": "POST",
                "body": {"actions": [entry]},
            }
        })
    @classmethod
    def start(cls, device_mac: str, access_token: str) -> str:
        '''Returns the start session message.'''
        return json.dumps({
            "request": {
                "type": "start_session",
                "params": {
                    "appId": APP_ID,
                    "deviceMac": device_mac,
                    "accessToken": access_token,
                },
            }
        })
    @classmethod
    def subscribe(cls) -> str:
        '''Returns the message assocated with state subscribe'''
        return json.dumps({
            "request": {"path": "api/v1/subscribe/controlpoint/states",
                        "method": "POST"}})
    @classmethod
    def unsubscribe(cls) -> str:
        '''Returns the message assocated with state subscribe'''
        return json.dumps({
            "request": {"path": "api/v1/unsubscribe/controlpoint/states",
                        "method": "POST"}})
    @classmethod
    def inforequest(cls) -> str:
        '''Returns the message assocated with gateway properties request'''
        return json.dumps(
            {"request": {"path": "api/v1/gateway/properties", "method": "GET"}})
from __future__ import annotations
from pprint import pformat
from zcc.errors import ControlPointError
class ControlPointDevice:
'''Represents any ControlPointDevice.'''
def __init__(self, controller, identifier: str):
'''Create a new device associated with a controller and identifed by identifier'''
self.controller = controller
self.identifier = identifier
self.actions = {}
self.properties = {}
self.states = {}
self._target_percentage = None
self._observers = []
def __str__(self):
return pformat(vars(self))
async def __action(self, action: str, params: object = None) -> bool:
try:
if self.actions['actions'][action]:
await self.controller.set(self.identifier, action, params=params)
except Exception as exception_info:
raise ControlPointError(
action + " is not supported for " + self.identifier) from exception_info
@property
def __actions(self) -> str:
'''Gets a descriptive string of what actions are supported'''
description = '{ '
try:
if self.actions['actions'].get("TurnOn", None):
description += "TurnOn TurnOff "
if self.actions['actions'].get("SetBrightness", None):
description += "SetBrightness "
if self.actions['actions'].get("OpenDoor", None):
description += "OpenDoor CloseDoor "
if self.actions['actions'].get("OpenToPercentage", None):
description += "OpenToPercentage "
if self.actions['actions'].get("SetFanSpeed", None):
description += "SetFanSpeed "
except KeyError:
pass
description += '}'
if description == '{ }':
description = ''
return description
@property
def __states(self) -> str:
'''Gets a descriptive string of the device state'''
description = ''
try:
key = list(self.states['controlState'].keys())[0]
state = self.states['controlState'][key]
description += "On" if state.get('isOn', False) else "Off"
brightness = state.get('brightness', None)
if brightness:
description += '/' + str(brightness)
fan_speed = state.get('fanspeed', None)
if fan_speed:
description += '/' + str(fan_speed)
except IndexError:
pass
except KeyError:
pass
return description
@property
def battery_level(self) -> int | None:
'''Return the battery level of an attached sensor.'''
try:
state = self.states['controlState']['sensor']
return state.get('batterylevel', 0)
except KeyError:
return None
@property
def brightness(self) -> int | None:
'''Returns brightness from 0 to 100 or None.'''
try:
key = list(self.states['controlState'].keys())[0]
state = self.states['controlState'][key]
brightness = state.get('brightness', None)
if brightness:
return brightness
except KeyError:
return False
async def close_door(self):
'''CloseDoor if the action is supported'''
self._target_percentage = 0
await self.__action("CloseDoor")
def describe(self) -> str:
'''Returns a description of a device'''
return '%-40s %-40.40s %-8.8s %-8s %s\n' % \
(self.identifier,
self.location,
self.type,
self.__states,
self.__actions)
@property
def door_temp(self) -> int | None:
'''Return the external temperature of an attached sensor.'''
try:
state = self.states['controlState']['sensor']
return state.get('doortemp', 0)
except KeyError:
return None
    async def fade(self, brightness, timeperiod):
        '''SetBrightness if the action is supported.

        brightness: target brightness (coerced to int).
        timeperiod: fade duration (coerced to int; units set by the
        device API - presumably seconds, TODO confirm).
        '''
        await self.__action("SetBrightness", params={
            "brightness": int(brightness),
            "timeperiod": int(timeperiod)})
@ property
def fanspeed(self) -> int | None:
'''Returns fanspeed from 0 to 7 or None.'''
try:
key = list(self.states['controlState'].keys())[0]
state = self.states['controlState'][key]
fanspeed = state.get('fanspeed', None)
if fanspeed:
return fanspeed
except KeyError:
return False
@property
def garage_temp(self) -> int | None:
'''Return the internal garage temperature of an attached sensor.'''
try:
state = self.states['controlState']['sensor']
return state.get('garagetemp', 0)
except KeyError:
return None
@property
def garage_humidity(self) -> int | None:
'''Return the internal garage humidity of an attached sensor.'''
try:
state = self.states['controlState']['sensor']
return state.get('garagehumidity', 0)
except KeyError:
return None
@ property
def is_closing(self) -> bool:
'''Returns True if door is closing.'''
if self.percentage:
if self.percentage > self._target_percentage:
return True
else:
return False
return False
@ property
def is_closed(self) -> bool:
'''Returns True if door is closed.'''
if self.percentage:
if self.percentage == 0:
return True
else:
return False
return True
@ property
def is_connected(self) -> bool:
'''Returns True if connected is on.
When a device has been disconnected from the mesh it show False.'''
try:
return self.states['isConnected']
except KeyError:
return False
    @ property
    def is_off(self) -> bool:
        '''Returns True if status is off (logical negation of is_on).'''
        return not self.is_on
@ property
def is_on(self) -> bool:
'''Returns True if status is on.'''
try:
key = list(self.states['controlState'].keys())[0]
state = self.states['controlState'][key]
return state.get('isOn', False)
except KeyError:
return False
@ property
def is_opening(self) -> bool:
'''Returns True if door is opening.'''
if self.percentage:
if self.percentage < self._target_percentage:
return True
else:
return False
return False
@ property
def is_open(self) -> bool:
'''Returns True if door is open.'''
if self.percentage:
if self.percentage > 0:
return True
else:
return False
return False
@ property
def location(self) -> str:
'''Gets a descriptive string of the device location'''
description = self.properties.get('name', '-') + '/'
description += self.properties.get('roomName', '-')
return description
    @ property
    def name(self) -> str:
        '''Gets a descriptive string of the device name ("-" if unset).'''
        return self.properties.get('name', '-')
def notify_observers(self):
'''Notify all observers that a state change has occurred.'''
for obs in self._observers:
obs.notify(self)
    async def open_door(self):
        '''OpenDoor if the action is supported.

        Records the requested target (100 %) first so is_opening/is_open
        can compare it against the reported open percentage.
        '''
        self._target_percentage = 100
        await self.__action("OpenDoor")
    async def open_to_percentage(self, percentage):
        '''OpenToPercentage if the action is supported.

        percentage: target open percentage (coerced to int before
        sending); stored so is_opening/is_closing can track progress.
        '''
        self._target_percentage = percentage
        await self.__action("OpenToPercentage", params={
            "openpercentage": int(percentage)})
@ property
def percentage(self) -> int | None:
'''Return the open to percentage'''
try:
key = list(self.states['controlState'].keys())[0]
state = self.states['controlState'][key]
return state.get('openpercentage', 0)
except KeyError:
return None
    def print_description(self):
        '''Print the one-line device description to stdout.'''
        print(self.describe())
    @ property
    def room(self) -> str:
        '''Gets a descriptive string of the device room ("-" if unset).'''
        return self.properties.get('roomName', '-')
    async def set_brightness(self, brightness):
        '''SetBrightness if the action is supported (value coerced to int).'''
        await self.__action("SetBrightness", params={"brightness": int(brightness)})
    async def set_fanspeed(self, fanspeed):
        '''SetFanSpeed if the action is supported (value coerced to int).'''
        await self.__action("SetFanSpeed", params={"fanspeed": int(fanspeed)})
    def subscribe(self, observer):
        '''Subscribe an observer object for state changes.
        Observer object must include notify(self, observable, *args, **kwargs) method.'''
        self._observers.append(observer)
    async def turn_on(self):
        '''TurnOn the device if the action is supported'''
        await self.__action("TurnOn")
    async def turn_off(self):
        '''TurnOff the device if the action is supported'''
        await self.__action("TurnOff")
    @ property
    def type(self) -> str:
        '''Gets the device controlPointType string ("-" if unset).'''
        return self.properties.get('controlPointType', '-')
    def unsubscribe(self, observer):
        '''Unsubscribe an observer object.

        Raises ValueError if the observer was never subscribed
        (list.remove semantics).
        '''
        self._observers.remove(observer)
from colorama import Fore
from voluptuous import Schema, ALLOW_EXTRA, All, Any, Required, Length, Object, Boolean, Range, Number, Coerce, IsFile, truth, error
from zcfd.utils import config
@truth
def iscallable(f):
    # voluptuous @truth wraps this predicate so schema validation raises
    # Invalid when the value is not callable.
    return callable(f)
def validate(d):
    """Validate and normalise a zCFD control dictionary.

    Applies a voluptuous base schema (filling defaults), then validates
    the material, 'time marching', equation-set, and every IC_*, BC_*
    and FZ_* sub-section against its own schema.  Returns the validated
    dictionary; raises voluptuous error.MultipleInvalid on failure, with
    errors annotated (in red) with the offending top-level key.
    """
    # A 3-vector: a list or tuple of exactly three numbers.
    Vector3d = All(Any([Number(), Number(), Number()], (Number(), Number(), Number())), Length(min=3, max=3))
    # Top-level schema.  extra=ALLOW_EXTRA lets the IC_*/BC_*/FZ_* keys
    # (and anything else) pass through for the per-key validation below.
    base_schema = Schema(
        {
            'units': 'SI',
            'scale': Any(Vector3d, iscallable),
            Required('reference'): All(str, Length(min=1)),
            Required('partitioner', default='metis'): Any('metis', 'high order load balancing'),
            Required('safe', default=False): bool,
            'initial': Any(str, {Required('name'): str, 'func': iscallable}),
            Required('restart', default=False): bool,
            'restart casename': All(str, Length(min=1)),
            'restart ignore history': bool,
            'preconditioner': {Required('minimum mach number'): Number()},
            Required('equations'):
            Any('euler', 'RANS', 'viscous', 'LES',
                'DGviscous', 'DGRANS', 'DGeuler', 'DGLES'),
            'report': {Required('frequency'): All(Coerce(int), Range(min=1)),
                       'monitor': dict,
                       'forces': dict,
                       Required('Scale residuals by volume', default=False): bool},
            'time marching': dict,
            'cell order': list,
            'Nodal Locations': {
                'Line': dict,
                'Tetrahedron': dict,
                'Tri': dict
            },
            Required('material', default='air'): All(str, Length(min=1)),
            'write output': {Required('frequency'): All(Coerce(int), Range(min=1)),
                             Required('format'): Any('none', 'vtk', 'ensight', 'native'),
                             Required('no volume vtk', default=False): bool,
                             'surface variables': list,
                             'volume variables': list,
                             'surface interpolate': list,
                             'volume interpolate': list,
                             'start output real time cycle':
                             All(Coerce(int), Range(min=0)),
                             'output real time cycle frequency':
                             All(Coerce(int), Range(min=1)),
                             'variable_name_alias': dict,
                             'unsteady restart file output frequency': All(Coerce(int), Range(min=1))
                             },
        }, extra=ALLOW_EXTRA)
    d = base_schema(d)
    # The names of the material/reference/equation sections are
    # themselves values in the dictionary (indirect keys).
    material_key = d['material']
    reference_key = d['reference']
    equations_key = d['equations']
    # Collect the prefixed sub-sections for per-key validation.
    ic_keys = [key for key in d.keys() if key.startswith('IC_')]
    bc_keys = [key for key in d.keys() if key.startswith('BC_')]
    fz_keys = [key for key in d.keys() if key.startswith('FZ_')]
    # Material properties (defaults are for air in SI units).
    material_schema = Schema(
        {
            Required('gamma', default=1.4): Number(),
            Required('gas constant', default=287.0): Number(),
            Required('Sutherlands const', default=110.4): Number(),
            Required('Prandtl No', default=0.72): Number(),
            Required('Turbulent Prandtl No', default=0.9): Number(),
            'gravity': Vector3d,
            'latitude': Number()
        })
    # Initial-condition (IC_*) blocks.
    ic_schema = Schema(
        {
            'pressure': Number(),
            'temperature': Number(),
            'V': {
                'vector': Vector3d,
                'Mach': Number(),
            },
            'Reference Length': Number(),
            'Reynolds No': Number(),
            'turbulence intensity': Number(),
            'eddy viscosity ratio': Number(),
            'ambient turbulence intensity': Number(),
            'ambient eddy viscosity ratio': Number(),
            'location': Vector3d,
            'profile': {
                'ABL': {
                    'roughness length': Number(),
                    'friction velocity': Number(),
                    'surface layer height': Number(),
                    'Monin-Obukhov length': Number(),
                    'TKE': Number(),
                    'z0': Number(),
                },
                'field': All(str, IsFile()),
                'local profile': bool
            },
            'static pressure ratio': Number(),
            'total pressure ratio': Number(),
            'total temperature ratio': Number(),
            'reference': str,
            'viscosity': Number()
        }, extra=ALLOW_EXTRA)
    # 'time marching' section: explicit/implicit scheme, CFL controls,
    # multigrid and polynomial-multigrid settings.
    timemarching_schema = Schema(
        {
            'unsteady': {
                'total time': Number(),
                'time step': Number(),
                'order': Any('first', 'second', 1, 2),
                'start': Coerce(int)
            },
            Required('scheme'): {
                'name': Any('euler', 'runge kutta',
                            'lu-sgs'),
                'stage': Any(1, 'rk third order tvd', 4, 5),
                'class': Object,
                'kind': Any('local timestepping', 'global timestepping'),
                'linear gradients': bool
            },
            Required('lu-sgs', default={}): {
                Required('Include Backward Sweep', default=True): bool,
                Required('Number Of SGS Cycles', default=8): All(Coerce(int), Range(min=1)),
                Required('Jacobian Epsilon', default=1.0e-8): Number(),
                Required('Include Relaxation', default=True): bool,
                Required('Jacobian Update Frequency', default=1): All(Coerce(int), Range(min=1)),
                Required('Finite Difference Jacobian', default=False): bool,
                Required('Use Rusanov Flux For Jacobian', default=True): bool
            },
            Required('cfl'): Number(),
            'cfl transport': Number(),
            'cfl coarse': Number(),
            'cfl ramp factor': {
                Required('growth'): Number(),
                Required('initial'): Number()
            },
            'cfl transport for pmg levels': list,
            'cfl for pmg levels': list,
            'ramp func': iscallable,
            Required('cycles'): All(Coerce(int), Range(min=1)),
            'multigrid': All(Coerce(int), Range(min=1)),
            'multigrid cycles': All(Coerce(int), Range(min=1)),
            'multigrid ramp': Number(),
            'prolong factor': Number(),
            'prolong transport factor': Number(),
            Required('multipoly', default=False): bool,
            'multipoly cycle pattern': list,
            'multipoly convect only': bool,
            'multipoly relaxation': Number(),
            'High Order Filter Frequency': Coerce(int),
            'number of time step smoothing iterations': Coerce(int),
            Required('cfl viscous factor', default=1.0): Number()
        })
    # Equation-set schemas: finite-volume Euler is the base; viscous and
    # RANS extend it; the DG variants have their own base + extensions.
    fv_euler_schema = Schema(
        {
            Required('order'): Any('first', 'second', 'euler_second'),
            Required('limiter', default='vanalbada'): 'vanalbada',
            Required('precondition', default=False): bool
        })
    viscous_schema = fv_euler_schema.extend(
        {
            Required('turbulence', default={}): {
                Required('les', default='none'): Any('none', 'WALE'),
            }
        })
    rans_schema = fv_euler_schema.extend(
        {
            Required('turbulence', default={}): {
                Required('model'): Any('sst', 'sas', 'sa-neg'),
                Required('les', default='none'): Any('none', 'DES', 'DDES', 'IDDES', 'SAS'),
                Required('betastar', default=0.09): Number(),
                'limit mut': bool,
                'CDES_kw': Number(),
                'CDES_keps': Number(),
                'production': Coerce(int),
                'rotation correction': bool,
                'CDES': Number()
            }
        })
    dg_euler_schema = Schema(
        {
            Required('order'): Any(0, 1, 2, 3, 4),
            Required('precondition', default=False): bool,
            Required('c11 stability parameter', default=0.0): Number(),
            Required('c11 stability parameter transport', default=0.0): Number(),
            Required('LDG upwind parameter', default=0.5): Number(),
            'LDG upwind parameter aux': Number(),
            Required('Use MUSCL Reconstruction', default=False): bool,
            'Approximate curved boundaries': bool,
            'Filtering Cut-on Order': Coerce(int),
            'Filtering Epsilon': Coerce(int),
            'Filtering Strength': Coerce(int),
            'Inviscid Flux Scheme': Any('HLLC', 'Rusanov')
        })
    dg_viscous_schema = dg_euler_schema.extend(
        {
            Required('BR2 Diffusive Flux Scheme', default=False): bool,
            'Shock Sensing': bool,
            'Shock Sensing k': Number(),
            'Shock Sensing Viscosity Scale': Number(),
            'Shock Sensing Variable': Any('density', 'temperature', 'mach', 'turbulence')
        })
    dg_rans_schema = dg_euler_schema.extend(
        {
            Required('turbulence', default={}): {
                Required('model'): Any('sst', 'sas', 'sa-neg'),
                Required('les', default='none'): Any('none', 'DES', 'DDES', 'IDDES', 'SAS'),
                Required('betastar', default=0.09): Number(),
                'limit mut': bool,
                'CDES_kw': Number(),
                'CDES_keps': Number(),
                'production': Coerce(int),
                'rotation correction': bool,
                'CDES': Number(),
                'Cw': Number()
            },
            Required('BR2 Diffusive Flux Scheme', default=False): bool,
            Required('Use Rusanov for turbulence equations', default=False): bool,
            'Shock Sensing': bool,
            'Shock Sensing k': Number(),
            'Shock Sensing Viscosity Scale': Number(),
            'Shock Sensing Variable': Any('density', 'temperature', 'mach', 'turbulence')
        })
    # Boundary-condition (BC_*) schemas, one per BC 'type'.
    wall_schema = Schema(
        {
            'ref': 3,
            Required('type'): 'wall',
            'kind': Any('slip', 'noslip', 'wallfunction'),
            'zone': list,
            'roughness': {
                'type': Any('height', 'length'),
                'scalar': Number(),
                'field': All(str, IsFile())
            },
            'V': Any({
                Required('linear'): {
                    Required('vector'): Vector3d,
                    Required('Mach'): Number()
                }
            },
            ),
            'temperature': Any({
                'scalar': Number()
            },
                {
                'field': All(str, IsFile())
            })
        })
    farfield_schema = Schema(
        {
            'ref': 9,
            'zone': list,
            Required('type'): 'farfield',
            Required('kind'): Any('riemann', 'pressure', 'supersonic', 'preconditioned'),
            'condition': str,
            'profile': {
                'ABL': {
                    'roughness length': Number(),
                    'friction velocity': Number(),
                    'surface layer height': Number(),
                    'Monin-Obukhov length': Number(),
                    'TKE': Number(),
                    'z0': Number()
                }
            },
            'turbulence': {
                'length scale': All(str, IsFile()),
                'reynolds tensor': All(str, IsFile())
            }
        })
    inflow_schema = Schema(
        {
            'ref': 4,
            'zone': list,
            Required('type'): 'inflow',
            Required('kind'): 'default',
            'condition': str
        }
    )
    outflow_schema = Schema(
        {
            'ref': 5,
            'zone': list,
            Required('type'): 'outflow',
            Required('kind'): Any('pressure', 'massflow', 'radial pressure gradient'),
            'reference radius': Number(),
            'condition': str
        }
    )
    symmetry_schema = Schema(
        {
            'ref': 7,
            'zone': list,
            Required('type'): 'symmetry',
        })
    periodic_schema = Schema(
        {
            'zone': list,
            Required('type'): 'periodic',
            Required('kind'): Any({
                Required('rotated'): {
                    Required('theta'): Number(),
                    Required('axis'): Vector3d,
                    Required('origin'): Vector3d
                }
            },
                {
                Required('linear'): {
                    Required('vector'): Vector3d
                }
            })
        }
    )
    # First-pass BC validation: accept a block that matches any BC schema.
    bc_schema = Any(lambda x: periodic_schema(x), lambda x: symmetry_schema(x),
                    lambda x: outflow_schema(x), lambda x: inflow_schema(x),
                    lambda x: farfield_schema(x), lambda x: wall_schema(x))
    # Second-pass lookup used to produce a precise error message.
    bc_to_schema = {
        'wall': wall_schema,
        'periodic': periodic_schema,
        'symmetry': symmetry_schema,
        'inflow': inflow_schema,
        'outflow': outflow_schema,
        'farfield': farfield_schema,
    }
    equations_to_schema = {
        'euler': fv_euler_schema,
        'RANS': rans_schema,
        'viscous': viscous_schema,
        'LES': viscous_schema,
        'DGviscous': dg_viscous_schema,
        'DGRANS': dg_rans_schema,
        'DGeuler': dg_euler_schema,
        'DGLES': dg_rans_schema,
    }
    # Validate the indirectly-named sections in place (filling defaults).
    d[material_key] = material_schema(d.get(material_key, {}))
    d['time marching'] = timemarching_schema(d['time marching'])
    d[equations_key] = equations_to_schema[equations_key](d[equations_key])
    # Validate each IC_* block, annotating errors with the offending key.
    for k in ic_keys:
        try:
            d[k] = ic_schema(d[k])
        except error.MultipleInvalid as e:
            e.add(error.Invalid(Fore.RED + "Error in IC configuration for key %s: field %s did not validate (either missing or invalid value)" % (k, e.path) + Fore.RESET,path=[k]))
            raise e
    # Validate each BC_* block: if the catch-all Any() fails, re-validate
    # against the schema for the declared 'type' to get a useful error.
    # NOTE(review): a BC block with no 'type' key raises a bare KeyError
    # here rather than a voluptuous error - confirm whether intended.
    for k in bc_keys:
        try:
            d[k] = bc_schema(d[k])
        except Exception:
            try:
                bc_to_schema[d[k]['type']](d[k])
            except error.MultipleInvalid as e:
                e.add(error.Invalid(Fore.RED + "Error in BC configuration for key %s: field %s did not validate (either missing or invalid value)" % (k, e.path) + Fore.RESET, path=[k]))
                raise e
    # Fluid zones: moving (rotating/translating) zones are rejected.
    for k in fz_keys:
        if d[k]['type'] in ('rotating', 'translating'):
            raise error.MultipleInvalid(errors=[error.Invalid(Fore.RED + "Rotating fluid zones are not supported at the moment" + Fore.RESET, path=[k])])
    return d
import os
import sys
import os.path
import pkgutil
import json
import datetime
from colorama import Fore
import yaml
from . import solvers, MPI
from .utils import (
config,
commandline,
Parameters,
Logger,
md5sum
)
class zCFDSolver:
    """Worlds Fastest CFD Solver.

    Top-level driver: initialises the dispatch library and MPI, sets up
    logging/output directories, reads the control file and mesh, loads
    the appropriate solver class and runs it.
    """

    def main(self):
        """Run the full solve sequence."""
        self.initialise()
        self.read_commandline()
        self.init_logger()
        self.show_banner()
        self.list_solvers()
        if self.read_controlfile() and self.load_solver():
            self.initialise_solver()
            self.start_solver()
        # self.terminate()

    def __del__(self):
        # NOTE(review): finalising MPI from a destructor is fragile
        # (interpreter-shutdown ordering) - confirm this is intentional.
        self.terminate()

    def initialise(self):
        """Load the dispatch library (CUDA or Intel) and initialise MPI."""
        import importlib
        # Load dispatch library to initialise cuda context
        options = commandline.ZOption().parse()
        module_name = 'libzCFDDispatch'
        if options.device == "gpu":
            module_name += '_CUDA'
        else:
            module_name += '_INTEL'
        try:
            dispatchlib = importlib.import_module(module_name)
        except Exception as e:
            # Failed to find library - fall back to the Intel build when a
            # GPU build was requested but is unavailable.
            if options.device == "gpu":
                module_name = 'libzCFDDispatch_INTEL'
                dispatchlib = importlib.import_module(module_name)
            else:
                raise e
        dispatchlib.initialise_dispatch()
        MPI.Init_thread(MPI.THREAD_MULTIPLE)
        if MPI.Query_thread() != MPI.THREAD_MULTIPLE:
            # Bug fix: this was a Python 2 print statement, which is a
            # SyntaxError under Python 3 (the rest of the file uses
            # print() calls).
            print('ERROR: make sure MPI is configured with thread support')
            self.terminate()

    def show_banner(self):
        """Log the ASCII-art product banner."""
        # Show copyright banner
        # Show product banner (see http://patorjk.com/software/taag/)
        config.logger.info(Fore.BLUE + r" _______________________/\\\\\\\\\___/\\\\\\\\\\\\\\\___/\\\\\\\\\\\\____" + "\n" +
                           r" ____________________/\\\////////___\/\\\///////////___\/\\\////////\\\__" + "\n" +
                           r" __________________/\\\/____________\/\\\______________\/\\\______\//\\\_" + "\n" +
                           r" __/\\\\\\\\\\\___/\\\______________\/\\\\\\\\\\\______\/\\\_______\/\\\_" + "\n" +
                           r" _\///////\\\/___\/\\\______________\/\\\///////_______\/\\\_______\/\\\_" + "\n" +
                           r" ______/\\\/_____\//\\\_____________\/\\\______________\/\\\_______\/\\\_" + "\n" +
                           r" ____/\\\/________\///\\\___________\/\\\______________\/\\\_______/\\\__" + "\n" +
                           r" __/\\\\\\\\\\\_____\////\\\\\\\\\__\/\\\______________\/\\\\\\\\\\\\/___" + "\n" +
                           r" _\///////////_________\/////////___\///_______________\////////////_____" + "\n\n" + Fore.RESET)
        config.logger.info(Fore.BLUE + r"                                          _____                    ______          __            ____                  __           __  " + "\n" +
                           r"  ____ _   /__  /  ___  ____  ____/_  __/___  _____/ /_   / __ \_________  ____/ /___  _______/ /_" + "\n" +
                           r" / __ `/     / /  / _ \/ __ \/ __ \/ / / _ \/ ___/ __ \ / /_/ / ___/ __ \/ __  // / / / ___/ __/" + "\n" +
                           r"/ /_/ /     / /__/  __/ / / / /_/ / / /  __/ /__/ / / / / ____/ /  / /_/ / /_/ // /_/ / /__/ /_  " + "\n" +
                           r"\__,_/     /____/\___/_/ /_/\____/_/  \___/\___/_/ /_/ /_/   /_/   \____/\__,_/ \__,_/\___/\__/  " + Fore.RESET)

    def terminate(self):
        """Stop the message-queue thread (if any) and finalise MPI."""
        # time.sleep(1)
        # MPI.finalize()
        if config.zmq != 0:
            config.zmq.stop()
            config.zmq.ts.join()
        MPI.Finalize()

    def message_wait(self):
        """Exit immediately when running under a message queue."""
        if config.options.mq:
            # Wait for start command
            exit(-1)

    def init_compute_device(self):
        """Placeholder - device initialisation happens in initialise()."""
        pass

    def init_logger(self):
        """Create output/log directories, write the status file and build
        the per-rank logger."""
        rank = MPI.COMM_WORLD.Get_rank()
        nparts = MPI.COMM_WORLD.Get_size()
        directory = str(config.options.case_name) + \
            "_P" + str(nparts) + "_OUTPUT"
        log_path = directory + '/LOGGING'
        config.output_dir = directory
        if rank == 0:
            # Only the master rank creates directories and the status file.
            self.ensure_dir(directory)
            self.ensure_dir(log_path)
            vis_path = directory + '/VISUALISATION'
            self.ensure_dir(vis_path)
            import libzCFDVersion as zversion
            # initialise status file
            with open(str(config.options.case_name) + '_status.txt', 'w') as f:
                json.dump({'num processor': nparts,
                           'case': config.options.case_name,
                           'problem': config.options.problem_name,
                           'version': zversion.get_project_version(),
                           'date': datetime.datetime.now().strftime("%H:%M:%S %d-%m-%Y"),
                           'case md5': md5sum(config.options.case_name + '.py'),
                           },
                          f, indent=4)
        # All ranks wait until the directories exist before opening logs.
        MPI.COMM_WORLD.Barrier()
        config.logger = Logger.Logger(rank, filename=os.path.join(
            log_path, config.options.case_name + "." + str(rank) + ".log"), connector=config.zmq)

    def list_solvers(self):
        """Discover the solver classes available in the solvers package."""
        config.logger.debug("Listing available Solvers")
        pkgpath = os.path.dirname(solvers.__file__)
        config.solver_names = [name for _, name,
                               _ in pkgutil.iter_modules([pkgpath])]
        config.logger.info("Solvers Available: " +
                           ", ".join(map(str, config.solver_names)))

    def read_commandline(self):
        """Parse command-line options and check the case/mesh files exist.

        Writes a default control file and terminates when either file is
        missing.
        """
        options = commandline.ZOption()
        config.options = options.parse(MPI.COMM_WORLD.Get_rank() > 0)
        case_filename = None
        problem_filename = None
        # Remove .py/.h5 extension from case name
        if config.options.case_name.endswith('.py') or config.options.case_name.endswith('.h5'):
            case_filename = config.options.case_name
            config.options.case_name = config.options.case_name[:-3]
        else:
            case_filename = config.options.case_name + '.py'
        if config.options.problem_name.endswith('.h5'):
            problem_filename = config.options.problem_name
            config.options.problem_name = config.options.problem_name[:-3]
        else:
            problem_filename = config.options.problem_name + '.h5'
        # Check if files exist
        ok = True
        if not os.path.isfile(case_filename):
            ok = False
            if MPI.COMM_WORLD.Get_rank() == 0:
                print(" Control file: %s not found" % (case_filename))
                Parameters.Parameters().write(case_filename)
        if not os.path.isfile(problem_filename):
            ok = False
            if MPI.COMM_WORLD.Get_rank() == 0:
                print(" Mesh file: %s not found" % (problem_filename))
        MPI.COMM_WORLD.Barrier()
        if not ok:
            self.terminate()

    def read_controlfile(self):
        """Read the case control file into config.parameters.

        Returns True on success; writes a default control file and
        returns False when the file is missing.
        """
        config.logger.debug("Reading controlfile")
        cfilename = config.options.case_name
        if cfilename is None:
            cfilename = config.options.problem_name
        config.controlfile = os.path.abspath(cfilename) + ".py"
        if os.path.isfile(config.controlfile):
            config.logger.info("Control file: " + config.controlfile)
            p = Parameters.Parameters()
            p.read(cfilename)
            config.logger.info("Parameters: " + str(config.parameters))
            return True
        else:
            config.logger.info("Creating missing control file with defaults")
            Parameters.Parameters().write(config.controlfile)
            config.logger.error("Control file: " +
                                config.controlfile + " not found")
            return False

    def load_solver(self):
        """Instantiate the solver class matching the 'equations' setting.

        Returns True on success, False when no matching solver is found.
        """
        config.logger.debug("Loading Solver")
        equations = config.parameters['equations']
        solver_name_map = {
            'euler': 'EulerSolver',
            'viscous': 'ViscousSolver',
            'RANS': 'TurbulentSolver',
            'LES': 'ViscousSolver',
            'DGeuler': 'DGExplicitSolver',
            'DGviscous': 'DGExplicitSolver',
            'DGLES': 'DGExplicitSolver',
            'DGRANS': 'DGExplicitSolver'
        }
        solver_name = solver_name_map.get(equations, None)
        if solver_name and config.solver_names.count(solver_name):
            indx = config.solver_names.index(solver_name)
            solver = 'zcfd.solvers.' + config.solver_names[indx]
            __import__(solver)
            config.solver = getattr(
                sys.modules[solver], config.solver_names[indx])(equations)
            return True
        else:
            config.logger.error("Failed to load solver")
            return False

    def initialise_solver(self):
        """Delegate solver initialisation (mesh read, storage setup)."""
        config.logger.debug("Initialising Solver")
        config.solver.initialise()

    def start_solver(self):
        """Run the solve and release the solver afterwards."""
        config.logger.debug("Starting Solver")
        config.solver.solve()
        config.logger.debug("Terminating Solver")
        config.solver = 0

    def ensure_dir(self, d):
        """Create directory d (and parents) if it does not exist."""
        if not os.path.exists(d):
            os.makedirs(d)
if __name__ == "__main__":
    # Script entry point: build the driver and run the full solve.
    solver = zCFDSolver()
    solver.main()
from zcfd.utils import config
from zcfd.solvers.ExplicitSolver import ExplicitSolver
from zcfd.solvers.utils.RuntimeLoader import load_solver_runtime
class ViscousSolver(ExplicitSolver):
    """Explicit finite-volume solver for the viscous (laminar/LES)
    equations.

    Thin wrapper that loads the native VISCOUS solver at runtime and
    delegates all numerical work to it.
    """

    def initialise(self):
        """Load the native solver, read the mesh and set up storage."""
        # Consistency fix: use .get() with a default (as EulerSolver does)
        # instead of a bare lookup that raises KeyError when absent.
        precondition = config.parameters[self.equations].get(
            'precondition', False)
        # Bug fix: the base name previously included "Solver", producing
        # the duplicated log message "Viscous Solver Solver Initialise".
        solver_name = "Viscous"
        if precondition:
            solver_name += " (Low M Preconditioned)"
        solver_name += " Solver Initialise"
        config.logger.info(solver_name)
        config.solver_native = 0
        solver_type = "VISCOUS"
        self.parameter_update()
        self.solver = load_solver_runtime({"dg": False,
                                           "type": solver_type,
                                           "medium": "air",
                                           "device": config.options.device
                                           },
                                          config.parameters)
        num_mesh = config.parameters['time marching']['multigrid']
        self.solver.read_mesh(config.options.problem_name,
                              config.options.case_name, num_mesh)
        # The native solver may reduce the number of multigrid meshes.
        num_mesh = self.solver.init_storage()
        config.parameters['time marching']['multigrid'] = num_mesh
        config.cycle_info = self.solver.init_solution(config.options.case_name)

    def parameter_update(self):
        """Refresh cached parameters (spatial order for 'viscous')."""
        super(ViscousSolver, self).parameter_update()
        self.space_order = config.get_space_order('viscous')

    def march(self, rk_index, rk_coeff, rk_stage_scaling, rk_time_level_scaling, cfl, cfl_transport,
              real_time_step, time_order, safe_mode, use_rusanov):
        """Advance one Runge-Kutta stage; returns the native validity flag."""
        config.logger.debug("Explicit March")
        valid = self.solver.march(rk_index, rk_coeff,
                                  rk_stage_scaling, rk_time_level_scaling,
                                  cfl, cfl_transport,
                                  real_time_step, time_order, self.space_order, safe_mode, use_rusanov)
        return valid

    def copy_solution(self):
        """Copy the working solution in the native solver."""
        config.logger.debug("Copy solution")
        self.solver.copy_solution()

    def copy_time_history(self):
        """Shift the unsteady time-history levels."""
        self.solver.copy_time_history()

    def sync(self):
        """Synchronise the native solver (device-side)."""
        self.solver.sync()

    def output(self, case_dir, case_name, surface_variable_list, volume_variable_list, real_time_cycle, solve_cycle, real_time, results_only):
        """Write solution output (case_dir is unused by the native call)."""
        self.solver.output(case_name, surface_variable_list, volume_variable_list,
                           real_time_cycle, solve_cycle, real_time, results_only)

    def host_sync(self):
        """Synchronise device data back to the host."""
        self.solver.host_sync()

    def report(self, residual_only=False):
        """Return the native solver's report (residuals, optionally forces)."""
        return self.solver.report(residual_only)

    def calculate_rhs(self, real_time_step, time_order):
        """Evaluate the spatial residual at the current solution."""
        self.solver.calculate_rhs(real_time_step, time_order, self.space_order)

    def add_stored_residual(self):
        """Add the stored residual (multigrid forcing) to the RHS."""
        self.solver.add_stored_residual()

    def store_residual(self):
        """Store the current residual (multigrid forcing term)."""
        self.solver.store_residual()

    def add_const_time_derivative(self, real_time_step, time_order):
        """Add the constant dual-time-stepping source term."""
        self.solver.add_const_time_derivative(real_time_step, time_order)

    def restrict(self):
        """Restrict the solution to the next coarser multigrid level."""
        self.solver.restrict()

    def update_halos(self):
        """Exchange halo data between partitions."""
        self.solver.update_halos(True)

    def prolongate(self, prolongation_factor, prolongation_transport_factor):
        """Prolongate corrections to the next finer multigrid level."""
        self.solver.prolongate(prolongation_factor,
                               prolongation_transport_factor)

    def set_mesh_level(self, mesh_level):
        """Select the active multigrid mesh level."""
        self.solver.set_mesh_level(mesh_level)
from zcfd.utils import config
from zcfd.solvers.ExplicitSolver import ExplicitSolver
from zcfd.solvers.utils.RuntimeLoader import load_solver_runtime
class EulerSolver(ExplicitSolver):
    """Explicit finite-volume solver for the Euler equations.

    Thin wrapper that loads the native EULER solver at runtime and
    delegates all numerical work to it.
    """

    def initialise(self):
        """Load the native solver, read the mesh and set up storage."""
        precondition = config.parameters['euler'].get('precondition', False)
        # Bug fix: the base name previously included "Solver", producing
        # the duplicated log message "Euler Solver Solver Initialise".
        solver_name = "Euler"
        if precondition:
            solver_name += " (Low M Preconditioned)"
        solver_name += " Solver Initialise"
        config.logger.info(solver_name)
        config.solver_native = 0
        self.parameter_update()
        solver_type = "EULER"
        self.solver = load_solver_runtime({"dg": False,
                                           "type": solver_type,
                                           "medium": "air",
                                           "device": config.options.device
                                           },
                                          config.parameters)
        num_mesh = config.parameters['time marching']['multigrid']
        self.solver.read_mesh(config.options.problem_name,
                              config.options.case_name, num_mesh)
        # The native solver may reduce the number of multigrid meshes.
        num_mesh = self.solver.init_storage()
        config.parameters['time marching']['multigrid'] = num_mesh
        config.cycle_info = self.solver.init_solution(config.options.case_name)

    def parameter_update(self):
        """Refresh cached parameters (spatial order for 'euler')."""
        super(EulerSolver, self).parameter_update()
        self.space_order = config.get_space_order('euler')

    def march(self, rk_index, rk_coeff, rk_stage_scaling, rk_time_level_scaling, cfl, cfl_transport,
              real_time_step, time_order, safe_mode, use_rusanov):
        """Advance one Runge-Kutta stage; returns the native validity flag."""
        config.logger.debug("Explicit March")
        valid = self.solver.march(rk_index, rk_coeff, rk_stage_scaling, rk_time_level_scaling,
                                  cfl, cfl_transport,
                                  real_time_step,
                                  time_order, self.space_order, safe_mode,
                                  use_rusanov)
        return valid

    def copy_solution(self):
        """Copy the working solution in the native solver."""
        config.logger.debug("Copy solution")
        self.solver.copy_solution()

    def copy_time_history(self):
        """Shift the unsteady time-history levels."""
        self.solver.copy_time_history()

    def sync(self):
        """Synchronise the native solver (device-side)."""
        self.solver.sync()

    def output(self, case_dir, case_name, surface_variable_list, volume_variable_list, real_time_cycle, solve_cycle, real_time, results_only):
        """Write solution output (case_dir is unused by the native call)."""
        self.solver.output(case_name, surface_variable_list, volume_variable_list,
                           real_time_cycle, solve_cycle, real_time, results_only)

    def host_sync(self):
        """Synchronise device data back to the host."""
        self.solver.host_sync()

    def report(self, residual_only=False):
        """Return the native solver's report (residuals, optionally forces)."""
        return self.solver.report(residual_only)

    def calculate_rhs(self, real_time_step, time_order):
        """Evaluate the spatial residual at the current solution."""
        self.solver.calculate_rhs(real_time_step, time_order, self.space_order)

    def add_stored_residual(self):
        """Add the stored residual (multigrid forcing) to the RHS."""
        self.solver.add_stored_residual()

    def store_residual(self):
        """Store the current residual (multigrid forcing term)."""
        self.solver.store_residual()

    def add_const_time_derivative(self, real_time_step, time_order):
        """Add the constant dual-time-stepping source term."""
        self.solver.add_const_time_derivative(real_time_step, time_order)

    def restrict(self):
        """Restrict the solution to the next coarser multigrid level."""
        self.solver.restrict()

    def update_halos(self):
        """Exchange halo data between partitions."""
        self.solver.update_halos(True)

    def prolongate(self, prolongation_factor, prolongation_transport_factor):
        """Prolongate corrections to the next finer multigrid level."""
        self.solver.prolongate(prolongation_factor,
                               prolongation_transport_factor)

    def set_mesh_level(self, mesh_level):
        """Select the active multigrid mesh level."""
        self.solver.set_mesh_level(mesh_level)
from zcfd.utils import config
from zcfd.solvers.ExplicitSolver import ExplicitSolver
from zcfd.solvers.utils.RuntimeLoader import load_solver_runtime
class TurbulentSolver(ExplicitSolver):
"""Turbulent Solver"""
def initialise(self):
config.solver_native = 0
# Read in mesh
# from libzCFDMeshEULERAIR import Mesh
# m = Mesh()
# m.read_mesh(config.options.problem_name)
# Initialise solution (need ability for user to specify initialiser)
# Initialise outputs
transition = config.parameters['RANS'].get('transition', False)
precondition = config.parameters['RANS'].get('precondition', False)
sas = False
sas = (config.parameters['RANS']['turbulence']['model'] == 'sas')
sst = False
sst = (config.parameters['RANS']['turbulence']['model'] == 'sst')
saneg = False
saneg = (config.parameters['RANS']['turbulence']['model'] == 'sa-neg')
solver_name = "Turbulent"
if sas:
solver_name += " Menter SAS"
solver_type = "MENTER"
if sst:
solver_name += " Menter SST"
solver_type = "MENTER"
if saneg:
solver_name += " Spalart Allmaras Neg"
solver_type = "SANEG"
if transition:
solver_name += " Menter SST Transition"
solver_type = "MENTERTRANS"
if precondition:
solver_name += " (Low M Preconditioned)"
solver_name += " Solver Initialise"
config.logger.info(solver_name)
self.parameter_update()
self.solver = load_solver_runtime({"dg": False,
"type": solver_type,
"medium": "air",
"device": config.options.device
},
config.parameters)
num_mesh = config.parameters['time marching']['multigrid']
self.solver.read_mesh(config.options.problem_name,
config.options.case_name, num_mesh)
num_mesh = self.solver.init_storage()
config.parameters['time marching']['multigrid'] = num_mesh
config.cycle_info = self.solver.init_solution(config.options.case_name)
def parameter_update(self):
super(TurbulentSolver, self).parameter_update()
self.space_order = config.get_space_order('RANS')
def march(self, rk_index, rk_coeff, rk_stage_scaling, rk_time_level_scaling, cfl, cfl_transport, real_time_step,
time_order, safe_mode, use_rusanov):
config.logger.debug("Explicit March")
valid = self.solver.march(rk_index, rk_coeff, rk_stage_scaling, rk_time_level_scaling,
cfl, cfl_transport,
real_time_step,
time_order, self.space_order, safe_mode, use_rusanov)
return valid
def copy_solution(self):
config.logger.debug("Copy solution")
self.solver.copy_solution()
def copy_time_history(self):
self.solver.copy_time_history()
def sync(self):
self.solver.sync()
def output(self, case_dir, case_name, surface_variable_list, volume_variable_list, real_time_cycle, solve_cycle, real_time, results_only):
self.solver.output(case_name, surface_variable_list, volume_variable_list,
real_time_cycle, solve_cycle, real_time, results_only)
def host_sync(self):
self.solver.host_sync()
def report(self, residual_only=False):
"""
"""
return self.solver.report(residual_only)
def calculate_rhs(self, real_time_step, time_order):
self.solver.calculate_rhs(real_time_step, time_order, self.space_order)
def add_stored_residual(self):
    """Add the stored (forcing) residual to the freshly computed one
    (multigrid correction term)."""
    self.solver.add_stored_residual()
def store_residual(self):
    """Store the current residual for later use as a multigrid forcing
    term."""
    self.solver.store_residual()
def add_const_time_derivative(self, real_time_step, time_order):
    """Add the constant real-time derivative contribution (dual
    time-stepping) via the native solver."""
    self.solver.add_const_time_derivative(real_time_step, time_order)
def restrict(self):
    """Restrict solution and residual to the next coarser mesh level."""
    self.solver.restrict()
def update_halos(self):
    """Exchange halo (ghost) data with neighbouring partitions.
    The True argument's meaning is defined by the native solver -- TODO
    confirm (the DG variant passes a level plus False here)."""
    self.solver.update_halos(True)
def prolongate(self, prolongation_factor, prolongation_transport_factor):
    """Prolongate coarse-mesh corrections to the finer mesh, scaled by the
    given factors (transport equations use their own factor)."""
    self.solver.prolongate(prolongation_factor,
                           prolongation_transport_factor)
def set_mesh_level(self, mesh_level):
    """Select the active multigrid mesh level in the native solver."""
    self.solver.set_mesh_level(mesh_level)
from zcfd.utils import config
from zcfd.solvers.ExplicitSolver import ExplicitSolver
from zcfd.solvers.ExplicitSolver import InvalidSolution
from zcfd.solvers.utils.RuntimeLoader import load_solver_runtime
from mpi4py import MPI
import math
class DGExplicitSolver(ExplicitSolver):
    """Discontinuous Galerkin (DG) explicit solver.

    Specialises ExplicitSolver for the DG equation sets (DGeuler,
    DGviscous, DGLES, DGRANS) and adds optional polynomial multigrid (PMG)
    cycling in advance_solution.  Python 2 era code (uses xrange).
    """

    def additional_checks(self):
        """Validate the control dictionary against unsupported DG option
        combinations; raises ValueError on an invalid configuration."""
        if config.parameters[self.equations].get('BR2 Diffusive Flux Scheme', False):
            #
            # Currently don't support wall functions with BR2 scheme
            #
            for key in config.parameters:
                if key[:3] == 'BC_':
                    if config.parameters[key].get('kind', '') == 'wallfunction':
                        raise ValueError("Wall functions are not currently supported with the BR2 scheme")
            #
            # Currently don't support MUSCL with BR2 scheme
            #
            if config.parameters[self.equations].get('Use MUSCL Reconstruction', False):
                raise ValueError("MUSCL reconstruction not currently supported with BR2 scheme")
        #
        # Currently don't support MUSCL with Euler solvers
        #
        if self.equations == 'DGeuler':
            if config.parameters[self.equations].get('Use MUSCL Reconstruction', False):
                raise ValueError("Gradients are only computed for viscous solvers, therefore MUSCL reconstruction is only supported with viscous solvers")
        #
        # GPU not yet supported
        #
        if config.options.device == 'gpu':
            raise ValueError("DG Code running on GPU is not currently supported. Please contact [email protected] if you are interested in this functionality")
        #
        # Warn user if using curvature approximation in parallel
        #
        if config.parameters[self.equations].get('Approximate curved boundaries', False):
            comm = MPI.COMM_WORLD
            size = comm.Get_size()
            if size > 1:
                config.logger.info("\n\n WARNING: Curvature approximation is computed locally within each partition and does not take into account curved boundaries on neighbouring partitions\n\n\n")

    def initialise(self):
        """Map the equation set to a native DG solver type, load the runtime
        solver, read the mesh and initialise solution storage."""
        # NOTE(review): value is unused, but the lookup also asserts the
        # 'precondition' key exists -- confirm before removing.
        precond = config.parameters[self.equations]['precondition']
        if self.equations == 'DGeuler':
            config.logger.info('DG Euler Solver Initialise')
            config.solver_native = 0
            solver_type = "DGEULER"
        if self.equations == 'DGviscous':
            config.logger.info("DG Viscous Solver Initialise")
            config.solver_native = 0
            solver_type = "DGVISCOUS"
        if self.equations == 'DGLES':
            config.logger.info("DG LES Solver Initialise")
            config.solver_native = 0
            solver_type = "DGLESWALE"
        if self.equations == 'DGRANS':
            # Turbulence model selects between the SST and SA-neg variants
            if config.parameters['DGRANS']['turbulence']['model'] == 'sst':
                config.logger.info("DG RANS SST Solver Initialise")
                config.solver_native = 0
                solver_type = "DGMENTER"
            if config.parameters['DGRANS']['turbulence']['model'] == 'sa-neg':
                config.logger.info("DG RANS SA-Neg Solver Initialise")
                config.solver_native = 0
                solver_type = "DGSANEG"
        self.additional_checks()
        self.parameter_update()
        self.solver = load_solver_runtime({"dg": True,
                                           "space_order": self.space_order,
                                           "type": solver_type,
                                           "medium": "air",
                                           "device": config.options.device,
                                           },
                                          config.parameters)
        # Fall back to the default nodal point distribution if none supplied
        if 'Nodal Locations' not in config.parameters:
            from zcfd.solvers.utils.DGNodalLocations import nodal_locations_default
            config.parameters.update(nodal_locations_default)
        num_mesh = config.parameters['time marching']['multigrid']
        self.solver.read_mesh(config.options.problem_name,
                              config.options.case_name, num_mesh)
        # init_storage reports how many mesh levels are actually available;
        # write that back so the rest of the run agrees with the solver.
        num_mesh = self.solver.init_storage()
        config.parameters['time marching']['multigrid'] = num_mesh
        config.cycle_info = self.solver.init_solution(config.options.case_name)

    def parameter_update(self):
        """Refresh base-class settings, then the DG-specific ones: spatial
        order, global timestepping, and the PMG cycle configuration."""
        super(DGExplicitSolver, self).parameter_update()
        self.space_order = config.get_space_order(self.equations)
        if 'kind' in config.parameters['time marching']['scheme']:
            if config.parameters['time marching']['scheme']['kind'] == 'global timestepping':
                self.local_timestepping = False
        # Polynomial multigrid is incompatible with LU-SGS here
        if 'multipoly' in config.parameters['time marching']:
            if config.parameters['time marching']['multipoly'] and not self.lusgs:
                self.PMG = True
        # Optional per-PMG-level CFL numbers (need one entry per level)
        if 'cfl for pmg levels' in config.parameters['time marching']:
            tmp_list = config.parameters[
                'time marching']['cfl for pmg levels']
            if len(tmp_list) > self.space_order:
                self.cfl.cfl_pmg = tmp_list
                self.cfl.max_cfl = self.cfl.cfl_pmg[0]
        if 'cfl transport for pmg levels' in config.parameters['time marching']:
            tmp_list = config.parameters[
                'time marching']['cfl transport for pmg levels']
            if len(tmp_list) > self.space_order:
                self.cfl.transport_cfl_pmg = tmp_list
                self.cfl.transport_cfl = self.cfl.transport_cfl_pmg[0]
        if 'multipoly cycle pattern' in config.parameters['time marching']:
            # User-supplied cycle: entries are polynomial levels, 0 = finest
            self.pmg_pattern = config.parameters['time marching']['multipoly cycle pattern']
            for cycle in self.pmg_pattern:
                if cycle > self.space_order:
                    raise ValueError("Cycle pattern defined by \'multipoly cycle pattern\' incompatible with \'order\' - Please correct")
            if len(self.pmg_pattern) > 1:
                if self.pmg_pattern[0] > self.pmg_pattern[1]:
                    raise ValueError("\'multipoly cycle pattern\' does not appear to be correctly defined. E.g. for a V-cycle with P2 as finest level use [0,1,2,1,0]")
        else:
            # Default is a V-cycle
            self.pmg_pattern = []
            for index in xrange(self.space_order + 1):
                self.pmg_pattern.append(index)
            for index in xrange(self.space_order - 1, -1, -1):
                self.pmg_pattern.append(index)
        self.finite_volume_solver = False

    def advance_solution(self, cfl, real_time_step, time_order, mesh_level):
        """Advance one pseudo-time step.

        While PMG is active (cycle count within multigrid_cycles) walk the
        pmg_pattern, restricting/prolonging between polynomial levels and
        Runge-Kutta marching at each level; otherwise do a plain RK step at
        the finest level.  Raises InvalidSolution under safe_mode.
        """
        if (self.PMG and self.solve_cycle <= self.multigrid_cycles):
            # Perform time step at highest order (1)
            self.copy_solution()
            for rk_index in xrange(len(self.rk.coeff())):
                # Runge-Kutta loop
                valid = self.march(rk_index, self.rk.coeff()[rk_index], self.rk.rkstage_scaling()[rk_index], self.rk.time_leveln_scaling()[
                    rk_index], cfl.pmg_cfl(0), cfl.pmg_transport(0), real_time_step, time_order, self.safe_mode, 0)
                if self.safe_mode and not valid:
                    raise InvalidSolution
            computeForcing = True
            for pmg_cycle in xrange(len(self.pmg_pattern) - 1):
                if self.pmg_pattern[pmg_cycle] < self.pmg_pattern[pmg_cycle + 1] and computeForcing:
                    # Restrict solution and residual to poly level below
                    # Compute force terms
                    self.solver.restrictToPolynomialLevel(self.pmg_pattern[pmg_cycle], self.pmg_pattern[pmg_cycle + 1], real_time_step, time_order, cfl.pmg_cfl(0))
                    # March lower level (4)
                    self.copy_solution()
                    for rk_index in xrange(len(self.rk.coeff())):
                        # Runge-Kutta loop
                        valid = self.march(rk_index, self.rk.coeff()[rk_index], self.rk.rkstage_scaling()[rk_index], self.rk.time_leveln_scaling()[rk_index], cfl.pmg_cfl(pmg_cycle), cfl.pmg_transport(pmg_cycle), real_time_step, time_order, self.safe_mode, self.pmg_pattern[pmg_cycle + 1])
                        if self.safe_mode and not valid:
                            raise InvalidSolution
                elif self.pmg_pattern[pmg_cycle] < self.pmg_pattern[pmg_cycle + 1] and not computeForcing:
                    # Going down without recomputing forcing terms
                    self.solver.restrictSolutionOnlyToPolynomialLevel(self.pmg_pattern[pmg_cycle], self.pmg_pattern[pmg_cycle + 1])
                    # March lower level (4)
                    self.copy_solution()
                    self.update_halos(self.pmg_pattern[pmg_cycle + 1])
                    for rk_index in xrange(len(self.rk.coeff())):
                        # Runge-Kutta loop
                        valid = self.march(rk_index, self.rk.coeff()[rk_index], self.rk.rkstage_scaling()[rk_index], self.rk.time_leveln_scaling()[rk_index], cfl.pmg_cfl(pmg_cycle), cfl.pmg_transport(pmg_cycle), real_time_step, time_order, self.safe_mode, self.pmg_pattern[pmg_cycle + 1])
                        if self.safe_mode and not valid:
                            raise InvalidSolution
                elif self.pmg_pattern[pmg_cycle] > self.pmg_pattern[pmg_cycle + 1]:
                    # Going up: add the coarse-level corrections
                    self.solver.addPolyCorrections(self.pmg_pattern[pmg_cycle], self.pmg_pattern[pmg_cycle + 1])
                    if self.pmg_pattern[pmg_cycle + 1] != 0:
                        computeForcing = False
                    # March lower level (4)
                    self.copy_solution()
                    self.update_halos(self.pmg_pattern[pmg_cycle + 1])
                    for rk_index in xrange(len(self.rk.coeff())):
                        # Runge-Kutta loop
                        valid = self.march(rk_index, self.rk.coeff()[rk_index], self.rk.rkstage_scaling()[rk_index], self.rk.time_leveln_scaling()[rk_index], cfl.pmg_cfl(pmg_cycle), cfl.pmg_transport(pmg_cycle), real_time_step, time_order, self.safe_mode, self.pmg_pattern[pmg_cycle + 1])
                        if self.safe_mode and not valid:
                            raise InvalidSolution
                elif self.pmg_pattern[pmg_cycle] == self.pmg_pattern[pmg_cycle + 1]:
                    # Same level twice: just re-march
                    self.copy_solution()
                    self.update_halos(self.pmg_pattern[pmg_cycle + 1])
                    for rk_index in xrange(len(self.rk.coeff())):
                        # Runge-Kutta loop
                        valid = self.march(rk_index, self.rk.coeff()[rk_index], self.rk.rkstage_scaling()[rk_index], self.rk.time_leveln_scaling()[rk_index], cfl.pmg_cfl(pmg_cycle), cfl.pmg_transport(pmg_cycle), real_time_step, time_order, self.safe_mode, self.pmg_pattern[pmg_cycle + 1])
                        if self.safe_mode and not valid:
                            raise InvalidSolution
        else:
            # Plain RK step at the finest polynomial level
            self.copy_solution()
            for rk_index in xrange(len(self.rk.coeff())):
                # Runge-Kutta loop
                valid = self.march(rk_index, self.rk.coeff()[rk_index], self.rk.rkstage_scaling()[rk_index], self.rk.time_leveln_scaling()[
                    rk_index], cfl.cfl, cfl.transport, real_time_step, time_order, self.safe_mode, 0)
                if self.safe_mode and not valid:
                    raise InvalidSolution

    def march(self, rk_index, rk_coeff, tk_scale, tn_scale, cfl, cfl_transport, real_time_step, time_order, safe_mode, polyLevel):
        """Perform one RK stage at the given polynomial level; returns the
        native solver's validity flag."""
        config.logger.debug("Explicit March")
        valid = self.solver.march(rk_index, rk_coeff, tk_scale, tn_scale, cfl,
                                  cfl_transport,
                                  real_time_step, time_order, self.space_order, polyLevel, safe_mode)
        return valid

    def copy_solution(self):
        """Snapshot the current solution in the native solver."""
        config.logger.debug("Copy solution")
        self.solver.copy_solution()

    def copy_time_history(self):
        """Advance the stored real-time history levels."""
        self.solver.copy_time_history()

    def sync(self):
        """Synchronise with the native solver."""
        self.solver.sync()

    def output(self, case_dir, case_name, surface_variable_list, volume_variable_list, real_time_cycle, solve_cycle, real_time, results_only):
        """Write surface/volume results via the native solver.
        NOTE(review): `case_dir` is accepted but not forwarded -- confirm."""
        self.solver.output(case_name, surface_variable_list, volume_variable_list,
                           real_time_cycle, solve_cycle, real_time, results_only)

    def host_sync(self):
        """Copy solver state back to the host."""
        self.solver.host_sync()

    def report(self):
        """Return the native solver's report data."""
        return self.solver.report()

    def calculate_rhs(self, real_time_step, time_order):
        """Evaluate the residual (RHS) at the cached spatial order."""
        self.solver.calculate_rhs(real_time_step, time_order, self.space_order)

    def add_stored_residual(self):
        """Add the stored (forcing) residual to the computed one."""
        self.solver.add_stored_residual()

    def store_residual(self):
        """Store the current residual for use as a forcing term."""
        self.solver.store_residual()

    def add_const_time_derivative(self, real_time_step, time_order):
        """Add the constant real-time derivative term (dual time-stepping)."""
        self.solver.add_const_time_derivative(real_time_step, time_order)

    def restrict(self):
        """Restrict solution and residual to the next coarser mesh level."""
        self.solver.restrict()

    def update_halos(self, level):
        """Exchange halo data for the given polynomial level."""
        self.solver.update_halos(level, False)

    def prolongate(self, prolongation_factor, prolongation_transport_factor):
        """Prolongate coarse corrections to the finer mesh, scaled by the
        given factors."""
        self.solver.prolongate(prolongation_factor,
                               prolongation_transport_factor)

    def set_mesh_level(self, mesh_level):
        """Select the active multigrid mesh level in the native solver."""
        self.solver.set_mesh_level(mesh_level)
import os
import threading
import time
import pandas as pd
from mpi4py import MPI
from colorama import Fore
import numpy
from . import utils
from .cfl import CFL
from zcfd.utils import config, TimeSpent, Parameters
class InvalidSolution(Exception):
    """Raised when a marching step produces an invalid (e.g. non-physical)
    solution state while safe-mode checking is enabled."""
class ExplicitSolver(object):
def __init__(self, equations):
    """Set up default solver state for the given equation set; real values
    are filled in later by parameter_update() from the control dictionary."""
    # Runge-Kutta scheme object (replaced by parameter_update)
    self.rk = []
    # CFL controller
    self.cfl = CFL(0.0)
    self.cfl_ramp_func = None
    self.cycles = 0
    # Clamped to the control-dictionary value in parameter_update
    self.num_mesh_levels = 1000
    self.multigrid_cycles = 0
    self.multigrid_ramp = 1.0
    self.prolongation_factor = 0.0
    self.prolongation_transport_factor = 0.0
    # Real (physical) time-stepping state
    self.total_time = 0.0
    self.real_time_step = 0.0
    self.real_time_cycle = 0.0
    self.time_order = 0
    self.unsteady_start = 0.0
    # NOTE(review): these three hold lists/ints once parameter_update runs;
    # the 0.0 defaults are placeholders only.
    self.surface_variable_list = 0.0
    self.volume_variable_list = 0.0
    self.output_frequency = 0.0
    self.unsteady_restart_output_frequency = 1
    self.report_frequency = 0.0
    # Validity checking during marching (raises InvalidSolution when set)
    self.safe_mode = False
    self.local_timestepping = True
    # Implicit/PMG flags toggled by subclasses and parameter_update
    self.lusgs = False
    self.PMG = False
    self.equations = equations
    self.finite_volume_solver = True
    # LU-SGS tuning defaults
    self.include_backward_sweep = True
    self.num_sgs_sweeps = 8
    self.lusgs_epsilon = 1.0E-08
    self.lusgs_add_relaxation = True
    self.lusgs_jacobian_update_frequency = 1
    self.filter_freq = 1000000
    self.lusgs_fd_jacobian = False
    self.lusgs_use_rusanov_jacobian = True
    # Output throttling across real-time cycles
    self.start_output_real_time_cycle = 0
    self.output_real_time_cycle_freq = 1
def parameter_update(self):
    """Read (or re-read) all solver settings from config.parameters.

    Called at start-up and again whenever the control dictionary changes on
    disk, so every assignment here must be idempotent.
    """
    # CFL numbers; transport/coarse default to the main CFL
    self.cfl.max_cfl = config.parameters['time marching']['cfl']
    self.cfl.min_cfl = self.cfl.max_cfl
    self.cfl.transport_cfl = config.parameters[
        'time marching'].get('cfl transport', self.cfl.max_cfl)
    self.cfl.coarse_cfl = config.parameters[
        'time marching'].get('cfl coarse', self.cfl.max_cfl)
    # Optional CFL ramping from an initial value
    if 'cfl ramp factor' in config.parameters['time marching']:
        self.cfl.cfl_ramp = config.parameters[
            'time marching']['cfl ramp factor'].get('growth', 1.0)
        self.cfl.min_cfl = min(self.cfl.max_cfl, config.parameters[
            'time marching']['cfl ramp factor']['initial'])
    if 'ramp func' in config.parameters['time marching']:
        self.cfl_ramp_func = config.parameters[
            'time marching']['ramp func']
    # Default to 5 stage RK
    rk_scheme_data = '5'
    # Check for overrides
    if 'scheme' in config.parameters['time marching']:
        if 'name' in config.parameters['time marching']['scheme']:
            if config.parameters['time marching']['scheme']['name'] == 'euler':
                rk_scheme_data = 'euler'
            else:
                # Either an explicit stage count or a named scheme class
                if 'stage' in config.parameters['time marching']['scheme']:
                    rk_scheme_data = str(
                        config.parameters['time marching']['scheme']['stage'])
                elif 'class' in config.parameters['time marching']['scheme']:
                    rk_scheme_data = config.parameters[
                        'time marching']['scheme']['class']
            self.lusgs = config.parameters['time marching']['scheme']['name'] == 'lu-sgs'
        if 'kind' in config.parameters['time marching']['scheme']:
            if config.parameters['time marching']['scheme']['kind'] == 'global timestepping':
                self.local_timestepping = False
    self.rk = utils.getRungeKutta(rk_scheme_data)
    self.cycles = config.parameters['time marching']['cycles']
    # Never exceed the number of levels the solver reported available
    self.num_mesh_levels = min(self.num_mesh_levels, config.parameters[
        'time marching']['multigrid'])
    self.multigrid_cycles = self.cycles
    if 'multigrid cycles' in config.parameters['time marching']:
        self.multigrid_cycles = config.parameters[
            'time marching']['multigrid cycles']
    if 'multigrid ramp' in config.parameters['time marching']:
        self.multigrid_ramp = config.parameters[
            'time marching']['multigrid ramp']
    self.prolongation_factor = config.parameters[
        'time marching'].get('prolong factor', 0.75)
    self.prolongation_transport_factor = config.parameters[
        'time marching'].get('prolong transport factor', 0.3)
    # Real-time (unsteady) settings
    if 'unsteady' in config.parameters['time marching']:
        self.total_time = config.parameters[
            'time marching']['unsteady']['total time']
        self.real_time_step = config.parameters[
            'time marching']['unsteady']['time step']
        if 'order' in config.parameters['time marching']['unsteady']:
            self.time_order = config.get_time_order(
                config.parameters['time marching']['unsteady']['order'])
        self.unsteady_start = config.parameters[
            'time marching']['unsteady'].get('start', 0)
    self.surface_variable_list = config.parameters[
        'write output']['surface variables']
    self.volume_variable_list = config.parameters[
        'write output']['volume variables']
    # Steady runs (time step == total time) clamp frequencies to the cycle
    # count so at least one output/report happens.
    if self.real_time_step < self.total_time:
        self.output_frequency = config.parameters['write output']['frequency']
    else:
        self.output_frequency = min(config.parameters['write output'][
            'frequency'], max(self.cycles, 1))
    if 'unsteady restart file output frequency' in config.parameters['write output']:
        self.unsteady_restart_output_frequency = config.parameters['write output']['unsteady restart file output frequency']
    if self.real_time_step < self.total_time:
        self.report_frequency = config.parameters['report']['frequency']
    else:
        self.report_frequency = min(config.parameters['report'][
            'frequency'], max(self.cycles, 1))
    if 'start output real time cycle' in config.parameters['write output']:
        self.start_output_real_time_cycle = config.parameters[
            'write output']['start output real time cycle']
    if 'output real time cycle frequency' in config.parameters['write output']:
        self.output_real_time_cycle_freq = config.parameters[
            'write output']['output real time cycle frequency']
    if 'safe' in config.parameters:
        self.safe_mode = config.parameters['safe']
    # LU-SGS tuning block (only meaningful when the scheme is lu-sgs)
    if 'scheme' in config.parameters['time marching'] and 'name' in config.parameters['time marching']['scheme']:
        if config.parameters['time marching']['scheme']['name'] == 'lu-sgs':
            if 'lu-sgs' in config.parameters['time marching']:
                if 'Include Backward Sweep' in config.parameters['time marching']['lu-sgs']:
                    self.include_backward_sweep = config.parameters[
                        'time marching']['lu-sgs']['Include Backward Sweep']
                if 'Number Of SGS Cycles' in config.parameters['time marching']['lu-sgs']:
                    self.num_sgs_sweeps = config.parameters[
                        'time marching']['lu-sgs']['Number Of SGS Cycles']
                if 'Jacobian Epsilon' in config.parameters['time marching']['lu-sgs']:
                    self.lusgs_epsilon = config.parameters[
                        'time marching']['lu-sgs']['Jacobian Epsilon']
                if 'Include Relaxation' in config.parameters['time marching']['lu-sgs']:
                    self.lusgs_add_relaxation = config.parameters[
                        'time marching']['lu-sgs']['Include Relaxation']
                if 'Jacobian Update Frequency' in config.parameters['time marching']['lu-sgs']:
                    self.lusgs_jacobian_update_frequency = config.parameters[
                        'time marching']['lu-sgs']['Jacobian Update Frequency']
                if 'Finite Difference Jacobian' in config.parameters['time marching']['lu-sgs']:
                    self.lusgs_fd_jacobian = config.parameters['time marching'][
                        'lu-sgs']['Finite Difference Jacobian']
                if 'Use Rusanov Flux For Jacobian' in config.parameters['time marching']['lu-sgs']:
                    self.lusgs_use_rusanov_jacobian = config.parameters['time marching'][
                        'lu-sgs']['Use Rusanov Flux For Jacobian']
    if 'High Order Filter Frequency' in config.parameters['time marching']:
        self.filter_freq = config.parameters[
            'time marching']['High Order Filter Frequency']
    # Ensure global timestepping state
    if not self.local_timestepping:
        self.num_real_time_cycle = 0
        self.unsteady_start = 0
        self.cycles = int(self.total_time / self.real_time_step)
        self.num_mesh_levels = 1
def solve(self):
    """
    Explicit Solver loop.

    Outer loop over real (physical) time cycles; inner loop over pseudo-time
    cycles.  Handles restarting from a previous run, CFL ramping, multigrid
    advance, periodic output/reporting (reporting on a background thread),
    and live re-parsing of the control dictionary.
    """
    config.logger.debug("Solver solve")
    self.report_list = [[]]
    # Update parameters from control dictionary
    self.parameter_update()
    # Placeholder; becomes a threading.Thread once reporting starts
    self.report_thread = 0
    output_name = config.options.case_name
    # Restart position recorded by the mesh/solution initialisation
    restart_realtime_cycle = config.cycle_info[0]
    restart_solve_cycle = config.cycle_info[1]
    total_cycles = restart_solve_cycle
    # if restart_realtime_cycle > 0:
    #    total_cycles = self.unsteady_start + restart_realtime_cycle*self.cycles + restart_solve_cycle
    # if restarting make sure the report file is appropriately truncated
    self.report_initialise(output_name, total_cycles,
                           restart_realtime_cycle)
    self.multigrid_cycles = max(self.multigrid_cycles, self.unsteady_start)
    # Disable unsteady start if not on first real time step
    if restart_realtime_cycle > 0:
        self.unsteady_start = 0
    config.logger.info("Starting Cycle info " +
                       str(restart_realtime_cycle) + " " + str(restart_solve_cycle))
    # Check if we are restarting from a solution at the end of a real time
    # step
    if restart_solve_cycle >= max(self.cycles, self.unsteady_start):
        restart_solve_cycle = 0
        restart_realtime_cycle = restart_realtime_cycle + 1
        config.logger.info(
            Fore.RED + "Starting real time cycle: %s" % (restart_realtime_cycle))
        self.copy_time_history()
    config.start_time = time.time()
    timeSpent = TimeSpent.TimeSpent()
    # Real time step loop
    self.real_time_cycle = restart_realtime_cycle
    self.num_real_time_cycle = int(self.total_time / self.real_time_step)
    # Check for steady run
    if self.total_time == self.real_time_step:
        self.num_real_time_cycle = 0
        self.unsteady_start = 0
    # Check for global timestep unsteady
    if not self.local_timestepping:
        self.num_real_time_cycle = 0
        self.unsteady_start = 0
        self.cycles = int(self.total_time / self.real_time_step)
        self.num_mesh_levels = 1
    while self.real_time_cycle < self.num_real_time_cycle + 1:
        # for self.real_time_cycle in xrange(config.cycle_info[0],int(self.total_time/self.real_time_step)):
        # Set volume for time step
        # Reset unsteady start
        if self.real_time_cycle > 0:
            self.unsteady_start = 0
        # Pseudo time step
        self.solve_cycle = restart_solve_cycle + 1
        while self.solve_cycle < max(self.cycles + 1, self.unsteady_start + 1):
            # Multigrid only runs for the first multigrid_cycles cycles
            if self.solve_cycle > self.multigrid_cycles:
                self.num_mesh_levels = 1
            if self.local_timestepping:
                config.logger.info(Fore.RED + "Cycle %s (real time cycle: %s time: %s)" % (self.solve_cycle,
                                                                                           self.real_time_cycle,
                                                                                           self.real_time_cycle * self.real_time_step) + Fore.RESET)
            else:
                config.logger.info(Fore.RED + "Cycle %s (time: %s)" % (self.solve_cycle,
                                                                       self.solve_cycle * self.real_time_step) + Fore.RESET)
            self.cfl.update(self.solve_cycle, self.real_time_cycle, self.cfl_ramp_func)
            config.logger.info(Fore.GREEN + "CFL %s (%s) - MG %s (coarse mesh: %s)" % (self.cfl.cfl, self.cfl.transport,
                                                                                       self.cfl.coarse,
                                                                                       self.num_mesh_levels - 1) + Fore.RESET)
            timeSpent.start("solving")
            try:
                # Time order is clamped so early real-time cycles use a
                # lower-order history than requested.
                self.advance_multigrid(self.cfl, self.real_time_step,
                                       min(self.time_order,
                                           self.real_time_cycle), self.num_mesh_levels,
                                       self.prolongation_factor, self.prolongation_transport_factor)
            except InvalidSolution:
                # Dump a tagged failure solution and shut down cleanly
                config.logger.info(
                    "Invalid solution detected: Writing out solution and terminating")
                self.host_sync()
                output_name = config.options.case_name + "_fail"
                self.volume_variable_list.append('FailCell')
                self.output(config.output_dir, output_name, self.surface_variable_list,
                            self.volume_variable_list, self.real_time_cycle, total_cycles,
                            self.local_timestepping and self.real_time_cycle * self.real_time_step or float(self.solve_cycle), False)
                if isinstance(self.report_thread, threading.Thread):
                    self.report_thread.join()
                self.sync()
                config.logger.info("Terminating")
                return
            timeSpent.stop("solving")
            total_cycles += 1
            timeSpent.start("update source terms")
            # Update source terms
            self.update_source_terms(self.solve_cycle, (self.solve_cycle >= max(self.cycles, self.unsteady_start)))
            timeSpent.stop("update source terms")
            timeSpent.start("output")
            # Sync to host for output and or reporting
            if self.solve_cycle == 1 or self.solve_cycle >= max(self.cycles, self.unsteady_start) or (self.solve_cycle % self.output_frequency == 0 or self.solve_cycle % self.report_frequency == 0):
                self.host_sync()
            # Output restart results file
            if self.solve_cycle % self.output_frequency == 0 or self.solve_cycle >= max(self.cycles, self.unsteady_start):
                if self.real_time_cycle % self.unsteady_restart_output_frequency == 0:
                    results_only = True
                    output_name = config.options.case_name
                    self.output(config.output_dir, output_name, self.surface_variable_list, self.volume_variable_list, self.real_time_cycle, total_cycles, self.local_timestepping and self.real_time_cycle * self.real_time_step or float(self.solve_cycle), results_only)
                else:
                    config.logger.info(Fore.RED + "Restart file not written this real time step. Reduce \'unsteady restart file output frequency\' if necessary. " + Fore.RESET)
            # Output
            if self.solve_cycle % self.output_frequency == 0 or self.solve_cycle >= max(self.cycles, self.unsteady_start):
                # if self.solve_cycle > output_frequency:
                #    self.output_thread.join()
                output_name = config.options.case_name
                # self.output_thread = threading.Thread(name='output', target=self.output,args=(output_name,surface_variable_list,volume_variable_list,real_time_cycle,))
                # self.output_thread.start()
                if self.real_time_cycle >= self.start_output_real_time_cycle and self.real_time_cycle % self.output_real_time_cycle_freq == 0:
                    results_only = False
                    self.output(config.output_dir, output_name, self.surface_variable_list, self.volume_variable_list, self.real_time_cycle, total_cycles, self.local_timestepping and self.real_time_cycle * self.real_time_step or float(self.solve_cycle), results_only)
            timeSpent.stop("output")
            # Reporting
            timeSpent.start("reporting")
            if ((self.real_time_cycle == 0) and (self.solve_cycle == 1)) or self.solve_cycle >= max(self.cycles, self.unsteady_start) or self.solve_cycle % self.report_frequency == 0 or self.solve_cycle == 1:
                # Wait for the previous background report before starting one
                if (self.solve_cycle > 1 or self.real_time_cycle > 0) and isinstance(self.report_thread, threading.Thread):
                    self.report_thread.join()
                self.report_list.append(self.report())
                output_name = config.options.case_name
                self.report_thread = threading.Thread(name='report', target=self.report_output, args=(
                    output_name, total_cycles, self.real_time_cycle))
                self.report_thread.start()
            # If user has updated the control dictionary
            # if self.solve_cycle >= max(self.cycles, self.unsteady_start):
            #    break
            # Parse control dictionary if it has change
            param = Parameters.Parameters()
            if param.read_if_changed(config.options.case_name, config.controlfile):
                config.logger.info(
                    Fore.MAGENTA + 'Control dictionary changed - parsing' + Fore.RESET)
                self.parameter_update()
            timeSpent.stop("reporting")
            # Increment
            self.solve_cycle += 1
            total, report = timeSpent.generateReport()
            config.logger.info(Fore.GREEN + "Timer: %s" %
                               total + Fore.RESET + report)
        # self.host_sync()
        # Time history
        self.copy_time_history()
        # Volume history
        # If user has updated control dictionary
        if self.real_time_cycle >= int(self.total_time / self.real_time_step):
            break
        # Ensure output freq clamped to inner cycles
        self.output_frequency = min(config.parameters['write output'][
            'frequency'], max(self.cycles, 1))
        self.report_frequency = min(config.parameters['report'][
            'frequency'], max(self.cycles, 1))
        # Increment
        self.real_time_cycle += 1
        restart_solve_cycle = 0
    # self.output_thread.join()
    config.end_time = time.time()
    if isinstance(self.report_thread, threading.Thread):
        self.report_thread.join()
    self.sync()
    config.logger.info("Total Time: %s" % str(
        config.end_time - config.start_time))
    config.logger.info("Solver loop finished")
def advance_solution(self, cfl, real_time_step, time_order, mesh_level):
    """
    Dispatch one pseudo-time step to the appropriate scheme.

    Finite-volume LU-SGS runs on every mesh level (with the coarse CFL on
    coarse levels); otherwise explicit RK is used, with the coarse CFL
    (capped by the transport CFL) on coarse levels.
    """
    if self.lusgs and self.finite_volume_solver:
        if mesh_level == 0:
            self.advance_lusgs(cfl.cfl, cfl.transport, real_time_step,
                               time_order, self.solve_cycle, mesh_level)
        else:
            self.advance_lusgs(cfl.coarse, cfl.transport, real_time_step,
                               time_order, self.solve_cycle, mesh_level)
    elif self.lusgs and mesh_level != 0:
        # LU-SGS on a non-FV (DG) solver: coarse levels fall back to RK.
        # NOTE(review): this passes four positional args where the FV
        # advance_rk below passes four with different meaning -- cfl.coarse
        # lands in the real_time_step slot; confirm intended.
        self.advance_rk(cfl.cfl, cfl.transport,
                        cfl.coarse, real_time_step, time_order)
    else:
        if mesh_level == 0:
            self.advance_rk(cfl.cfl, cfl.transport, real_time_step, time_order)
        else:
            self.advance_rk(cfl.coarse, min(
                cfl.transport, cfl.coarse), real_time_step, time_order)
def advance_rk(self, cfl, cfl_transport, real_time_step, time_order):
    """Advance the solution one pseudo-time step with explicit Runge-Kutta.

    Snapshots the current solution, then performs one march per RK
    coefficient.  Raises InvalidSolution if safe_mode is enabled and a
    stage reports an invalid state.
    """
    self.copy_solution()
    # Hoist the per-scheme lookups out of the stage loop -- they are
    # invariant across stages (also avoids Py2-only xrange).
    coeffs = self.rk.coeff()
    stage_scaling = self.rk.rkstage_scaling()
    time_leveln_scaling = self.rk.time_leveln_scaling()
    for rk_index, rk_coeff in enumerate(coeffs):
        # Runge-Kutta stage
        valid = self.march(rk_index, rk_coeff,
                           stage_scaling[rk_index],
                           time_leveln_scaling[rk_index],
                           cfl, cfl_transport,
                           real_time_step, time_order, self.safe_mode, False)
        if self.safe_mode and not valid:
            raise InvalidSolution
def set_mesh_level(self, mesh_level):
    """Abstract: subclasses select the active multigrid mesh level in
    their native solver."""
    raise NotImplementedError
def advance_point_implicit(self, cfl, cfl_transport, real_time_step, time_order, solve_cycle):
    """Point-implicit advance: periodically rebuild the (approximate or
    finite-difference) Jacobian, then perform a point-implicit march.

    NOTE(review): `calculate_viscous` is assigned but never used here --
    confirm before removing; `self.update_halos()` is called with no
    arguments, but both known subclasses require/ignore differing
    signatures -- verify against the intended subclass.
    """
    calculate_viscous = True
    fd_jacobian = self.lusgs_fd_jacobian
    use_rusanov = self.lusgs_use_rusanov_jacobian
    # Jacobian refresh is throttled by the configured update frequency
    updateJacobian = False
    if (solve_cycle - 1) % self.lusgs_jacobian_update_frequency == 0:
        updateJacobian = True
    num_cell_colours = self.solver.get_number_cell_colours()
    self.copy_solution()
    if updateJacobian:
        config.logger.info("Updating Jacobian")
        self.solver.set_cell_colour(-1)
        if not fd_jacobian:
            # Analytic/approximate Jacobian over all cells at once
            self.solver.update_jacobian_LUSGS(True,
                                              real_time_step, time_order,
                                              1,  # self.space_order,
                                              cfl, cfl_transport,
                                              self.lusgs_epsilon,
                                              False, False,
                                              use_rusanov)
        else:
            # Finite-difference Jacobian must be built colour by colour
            for current_colour in xrange(num_cell_colours):
                config.logger.debug(
                    "Updating jacobian matrix for colour %s" % current_colour)
                self.solver.set_cell_colour(current_colour)
                self.solver.update_jacobian_LUSGS(True,
                                                  real_time_step, time_order,
                                                  1,  # self.space_order,
                                                  cfl, cfl_transport,
                                                  self.lusgs_epsilon,
                                                  False, True, use_rusanov)
    # Reset colour selection before the implicit march
    self.solver.set_cell_colour(-1)
    self.solver.march_point_implicit(real_time_step,
                                     cfl, cfl_transport,
                                     self.safe_mode)
    self.update_halos()
def advance_gmres(self, cfl, cfl_transport, real_time_step, time_order, solve_cycle):
    """GMRES advance is not implemented in this solver."""
    raise NotImplementedError
def advance_lusgs(self, cfl, cfl_transport, real_time_step, time_order, solve_cycle, mesh_level=0):
    """LU-SGS advance: optional Jacobian rebuild, then coloured forward
    (and optionally backward) Gauss-Seidel sweeps until the residuals
    stagnate or num_sgs_sweeps is reached.

    Convergence is judged on rank 0 from the 'rho*' report entries and
    broadcast to all ranks; raises InvalidSolution under safe_mode.
    """
    calculate_viscous = True
    fd_jacobian = self.lusgs_fd_jacobian
    use_rusanov = self.lusgs_use_rusanov_jacobian
    # Always calculate viscous fluxes if using fd jacobian
    if fd_jacobian:
        calculate_viscous = True
    # Jacobian refresh throttled by the configured update frequency
    updateJacobian = False
    if (solve_cycle - 1) % self.lusgs_jacobian_update_frequency == 0:
        updateJacobian = True
    num_cell_colours = self.solver.get_number_cell_colours()
    self.copy_solution()
    if updateJacobian:
        if mesh_level == 0:
            config.logger.info("Updating Jacobian")
        self.solver.set_cell_colour(-1)
        self.update_halos()
        if not fd_jacobian:
            # Analytic/approximate Jacobian over all cells at once
            self.solver.update_jacobian_LUSGS(True,
                                              real_time_step, time_order,
                                              1,  # self.space_order,
                                              cfl, cfl_transport,
                                              self.lusgs_epsilon,
                                              False, False,
                                              use_rusanov)
        else:
            # Finite-difference Jacobian built colour by colour
            for current_colour in xrange(num_cell_colours):
                config.logger.debug(
                    "Updating jacobian matrix for colour %s" % current_colour)
                self.solver.set_cell_colour(current_colour)
                self.solver.update_jacobian_LUSGS(True,
                                                  real_time_step, time_order,
                                                  1,  # self.space_order,
                                                  cfl, cfl_transport,
                                                  self.lusgs_epsilon,
                                                  False, True, use_rusanov)
    # self.set_mesh_level(mesh_number)
    # self.solver.set_cell_colour(-1)
    # self.solver.calculate_rhs(real_time_step,time_order,self.space_order)
    # calculate_viscous = False
    converged = numpy.array(0, 'i')
    for sweep in xrange(self.num_sgs_sweeps):
        if mesh_level == 0:
            config.logger.info("Starting Sweep %s" % sweep)
        self.solver.set_cell_colour(-1)
        self.update_halos()
        # Forward sweep
        for current_colour in xrange(num_cell_colours):
            config.logger.debug("Marching colour %s" % current_colour)
            self.solver.set_cell_colour(current_colour)
            valid = self.solver.march_colour_set_LUSGS(calculate_viscous,
                                                       real_time_step, time_order,
                                                       self.space_order,
                                                       cfl, cfl_transport,
                                                       self.lusgs_add_relaxation,
                                                       self.safe_mode,
                                                       0,
                                                       # (current_colour == 0) and (sweep > 0)
                                                       False,
                                                       )
            if self.safe_mode and not valid:
                raise InvalidSolution
        if self.include_backward_sweep:
            self.solver.set_cell_colour(-1)
            self.update_halos()
            # Backward sweep
            for current_colour in reversed(xrange(num_cell_colours)):
                config.logger.debug("Marching colour %s" % current_colour)
                self.solver.set_cell_colour(current_colour)
                valid = self.solver.march_colour_set_LUSGS(calculate_viscous,
                                                           real_time_step, time_order,
                                                           self.space_order,
                                                           cfl, cfl_transport,
                                                           self.lusgs_add_relaxation,
                                                           self.safe_mode,
                                                           1,
                                                           # (current_colour == num_cell_colours-1)
                                                           False,
                                                           )
                if self.safe_mode and not valid:
                    raise InvalidSolution
        self.solver.set_cell_colour(-1)
        if mesh_level == 0:
            # Convergence test: compare the 'rho*' residual entries of this
            # sweep's report against the previous/first sweep on rank 0 and
            # broadcast the verdict to all ranks.
            self.host_sync()
            new_report = self.report(True)
            from mpi4py import MPI
            rank = MPI.COMM_WORLD.Get_rank()
            converged = numpy.array(0, 'i')
            if rank == 0:
                if sweep == 0:
                    first_report = new_report
                if sweep > 0:
                    # Compare reports
                    count = 0
                    nvar = 0
                    for i in xrange(min(7, len(new_report))):
                        v = new_report[i][0]
                        if v.startswith('rho'):
                            nvar += 1
                            v1 = new_report[i][1]
                            v2 = old_report[i][1]
                            v3 = first_report[i][1]
                            # Converged when the residual change is small
                            # relative to the first sweep, or absolutely tiny
                            if abs(v1 - v2) / (v3 + 1.0e-8) < 0.1 or v1 < 1.0e-8:
                                count += 1
                    if count == nvar:
                        converged = numpy.array(1, 'i')
                old_report = new_report
            # NOTE(review): numpy.asscalar is deprecated in modern NumPy
            # (use converged.item()) -- relevant if NumPy is upgraded.
            MPI.COMM_WORLD.Bcast([converged, MPI.INT], root=0)
            if numpy.asscalar(converged) == 1:
                break
    def advance_multigrid(self, cfl, real_time_step, time_order, num_mesh_levels,
                          prolongation_factor, prolongation_transport_factor):
        """
        V-cycle multigrid with first solve on coarse mesh.

        Restriction leg: for each level, the residual is computed, the stored
        forcing term is added on intermediate levels, the problem is
        restricted to the next coarser mesh, the coarse forcing term
        [RR - CR] is stored and the solution advanced on the coarse mesh.
        Prolongation leg: corrections are interpolated back towards the fine
        mesh with a solution advance after each transfer.  With
        num_mesh_levels == 1 this reduces to a single solve on level 0.
        """
        for mesh_level in xrange(num_mesh_levels - 1):
            config.logger.debug("Solving on mesh level: %s" %
                                str(mesh_level + 1))
            self.set_mesh_level(mesh_level)
            # calculate residual on fine mesh
            self.calculate_rhs(real_time_step, time_order)
            # add stored residual to computed residual
            if mesh_level != 0:  # [CRrk + [RR - CR]
                self.add_stored_residual()
            self.set_mesh_level(mesh_level + 1)
            # restrict to coarse mesh
            self.restrict()
            # update halos on coarse mesh
            self.update_halos()
            # calculate residual on coarse mesh using restricted flow
            self.calculate_rhs(real_time_step, time_order)
            # Store restricted residual - computed residual from restricted
            # flow [RR - CR]
            self.store_residual()
            # solve on coarse mesh
            self.advance_solution(cfl,
                                  real_time_step, time_order,
                                  mesh_level + 1)
        if num_mesh_levels == 1:
            self.set_mesh_level(0)
            self.advance_solution(cfl,
                                  real_time_step, time_order, 0)
            # NOTE(review): the spectral filter is only applied on this
            # single-level path, never after the multigrid V-cycle below --
            # confirm this is intended.
            if not self.finite_volume_solver and self.solve_cycle % self.filter_freq == 0:
                self.solver.filter_solution()
        for mesh_level in xrange(num_mesh_levels - 1, 0, -1):
            config.logger.debug("Prolonging on mesh level: from %s to %s" % (
                str(mesh_level), str(mesh_level - 1)))
            self.set_mesh_level(mesh_level)
            # prolongate the solution from coarse to fine
            self.prolongate(prolongation_factor, prolongation_transport_factor)
            self.set_mesh_level(mesh_level - 1)
            # update halos on fine mesh
            self.update_halos()
            # advance solution on the (finer) level just prolonged onto
            self.advance_solution(cfl,
                                  real_time_step, time_order,
                                  mesh_level - 1)
def report_output(self, output_name, total_cycles, real_time_cycle):
"""
"""
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
new_file = os.path.isfile(output_name + '_report.csv')
f = open(output_name + '_report.csv', 'a')
if not new_file:
f.write('RealTimeStep Cycle' + ' '.join(' %s ' %
x[0] for x in self.report_list[-1]) + '\n')
f.write(str(real_time_cycle) + ' ' + str(total_cycles) +
' '.join(' %.8E ' % x[1] for x in self.report_list[-1]) + '\n')
f.close()
    def report_initialise(self, output_name, total_cycles, real_time_cycle):
        """
        Prepare the report CSV files at start-up (rank 0 only).

        On a restart (total_cycles > 0): the previous run's report history,
        truncated at 'total_cycles', is merged into
        <output_name>_report.restart.csv (honouring an optional
        'restart casename' parameter).  On a fresh run any stale restart
        file is removed.  In both cases <output_name>_report.csv is deleted
        so report_output starts a clean file.
        """
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        if total_cycles > 0:
            if rank == 0:
                #
                # Read the current report file, up to total_cycles
                # and write to a restart file, then remove the file
                # to start a new clean file
                #
                restart_casename = output_name
                if 'restart casename' in config.parameters:
                    restart_casename = config.parameters['restart casename']
                if os.path.isfile(restart_casename + '_report.restart.csv'):
                    from shutil import copyfile
                    copyfile(restart_casename + '_report.restart.csv',
                             output_name + '_report.restart.csv')
                try:
                    if os.path.isfile(output_name + '_report.restart.csv'):
                        report_file = restart_casename + '_report.csv'
                        restart_report_file = output_name + '_report.restart.csv'
                        report_data = pd.read_csv(
                            report_file, sep=' ').dropna(axis=1)
                        # truncate to the restart cycle
                        report_data = report_data.query(
                            'Cycle <= ' + str(total_cycles))
                        # read existing restart history
                        restart_data = pd.read_csv(
                            restart_report_file, sep=' ').dropna(axis=1)
                        # truncate
                        restart_data = restart_data.query(
                            'Cycle <= ' + str(total_cycles))
                        # concat
                        # report_data = pd.concat([restart_data, report_data], ignore_index=True)
                        # Set column order for append. Careful to deal with case
                        # of user adding or removing reporting on restart
                        b = report_data.columns.tolist()
                        a = restart_data.columns.tolist()
                        col_list = a + [x for x in b if x not in a]
                        # NOTE(review): DataFrame.append was removed in pandas
                        # 2.0; this needs pd.concat on modern pandas.
                        report_data = restart_data.append(
                            report_data, ignore_index=True)[col_list]
                        # write merged, truncated history back out
                        report_data.to_csv(
                            restart_report_file, sep=' ', index=False,
                            float_format='%.8E', na_rep='NaN')
                        # The block below is a bare string literal: disabled
                        # legacy merge code kept for reference only.
                        """
                        f_report = open(restart_casename + '_report.csv', 'r')
                        lines = f_report.readlines()
                        f_report.close()
                        f_restart = open(restart_casename + '_report.restart.csv', 'r')
                        restart_lines = f_restart.readlines()
                        f_restart.close()
                        old_length = len(restart_lines[0].split())
                        new_length = len(lines[0].split())
                        if new_length > old_length:
                            f = open(output_name + '_report.restart.csv', 'w')
                            f.write(lines[0])
                            for line in restart_lines:
                                words = line.split()
                                if words[0] != 'RealTimeStep':
                                    for i in range(old_length,new_length):
                                        words.append(0.0)
                                    f.write(" ".join(words))
                            for line in lines:
                                words = line.split()
                                if words[0] != 'RealTimeStep' and int(words[1]) <= total_cycles:
                                    f.write(line)
                            f.close()
                        elif new_length < old_length:
                            f = open(output_name + '_report.restart.csv', 'a')
                            for line in lines:
                                words = line.split()
                                if words[0] != 'RealTimeStep' and int(words[1]) <= total_cycles:
                                    for i in range(new_length,old_length):
                                        words.append(0.0)
                                    f.write(" ".join(words))
                            f.close()
                        else:
                            f = open(output_name + '_report.restart.csv', 'a')
                            for line in lines:
                                words = line.split()
                                if words[0] != 'RealTimeStep' and int(words[1]) <= total_cycles:
                                    f.write(line)
                            f.close()
                        """
                    else:
                        from shutil import copyfile
                        copyfile(restart_casename + '_report.csv',
                                 output_name + '_report.restart.csv')
                except Exception as e:
                    # best-effort: a missing/malformed report history must not
                    # abort the restart (errors are deliberately swallowed)
                    pass
        else:
            if rank == 0:
                # delete .restart if not restarting
                try:
                    os.remove(output_name + '_report.restart.csv')
                except Exception as e:
                    pass
        if rank == 0:
            # always start the live report file from scratch
            try:
                os.remove(output_name + '_report.csv')
            except Exception as e:
                pass
    # March solution in time
    def march(self, rk_index, rk_coeff, rk_stage_scaling, rk_time_level_scaling, cfl, cfl_transport, real_time_step, time_order, safe_mode):
        """Advance the solution one Runge-Kutta stage; abstract."""
        raise NotImplementedError
    # Sync data from device to host
    def sync(self):
        """Synchronise solver data between device and host; abstract."""
        raise NotImplementedError
    def report(self, residual_only=False):
        """Return the monitor/residual report entries for this cycle; abstract."""
        raise NotImplementedError
    def output(self, case_dir, case_name, surface_variable_list, volume_variable_list, real_time_cycle,
               solve_cycle,
               real_time, results_only):
        """Write surface/volume solution output for this cycle; abstract."""
        raise NotImplementedError
    def host_sync(self):
        """Copy solver state from device to host; abstract."""
        raise NotImplementedError
    def copy_solution(self):
        """Copy the current solution (e.g. for time history); abstract."""
        raise NotImplementedError
    def copy_time_history(self):
        """Shift solution time levels for unsteady marching; abstract."""
        raise NotImplementedError
    def calculate_rhs(self, real_time_step, time_order):
        """Evaluate the residual (right-hand side); abstract."""
        raise NotImplementedError
    def add_stored_residual(self):
        """Add the stored multigrid forcing term to the residual; abstract."""
        raise NotImplementedError
    def store_residual(self):
        """Store the multigrid forcing term [RR - CR]; abstract."""
        raise NotImplementedError
    def add_const_time_derivative(self, real_time_step, time_order):
        """Add the constant real-time derivative term; abstract."""
        raise NotImplementedError
    def restrict(self):
        """Restrict solution/residual to the next coarser mesh; abstract."""
        raise NotImplementedError
    def update_halos(self):
        """Exchange halo data between partitions; abstract."""
        raise NotImplementedError
    def prolongate(self, prolongation_factor, prolongation_transport_factor):
        """Prolong corrections from coarse to fine mesh; abstract."""
        raise NotImplementedError
    def update_source_terms(self, cycle, force):
        """Delegate source-term refresh for this cycle to the native solver."""
        # update source terms
        self.solver.update_source_terms(cycle, force)
class RungeKutta:
    """Abstract interface for Runge-Kutta time-integration schemes.

    Concrete schemes supply the stage count plus the per-stage coefficient
    tables used when marching the solution.
    """

    def num_stages(self):
        """Number of Runge-Kutta stages."""
        raise NotImplementedError

    def coeff(self):
        """Per-stage update coefficients."""
        raise NotImplementedError

    def rkstage_scaling(self):
        """Per-stage scaling applied to the previous RK stage solution."""
        raise NotImplementedError

    def time_leveln_scaling(self):
        """Per-stage scaling applied to the time level n solution."""
        raise NotImplementedError
class SingleStageRungeKutta(RungeKutta):
    """Single-stage (forward-Euler style) Runge-Kutta scheme."""

    NUM_STAGES = 1
    COEFFICIENTS = [1.0]
    LEVELN_COEFFICIENTS = [1.0]
    LEVELK_COEFFICIENTS = [0.0]

    def num_stages(self):
        """Return the number of stages (1)."""
        return self.NUM_STAGES

    def coeff(self):
        """Return the per-stage update coefficients."""
        return self.COEFFICIENTS

    def rkstage_scaling(self):
        """Return the previous-stage solution scalings."""
        return self.LEVELK_COEFFICIENTS

    def time_leveln_scaling(self):
        """Return the level-n solution scalings."""
        return self.LEVELN_COEFFICIENTS
class FourStageRungeKutta(RungeKutta):
    """Classic four-stage low-storage Runge-Kutta scheme."""

    NUM_STAGES = 4
    COEFFICIENTS = [0.25, 0.5, 0.55, 1.0]
    LEVELN_COEFFICIENTS = [1.0, 1.0, 1.0, 1.0]
    LEVELK_COEFFICIENTS = [0.0, 0.0, 0.0, 0.0]

    def num_stages(self):
        """Return the number of stages (4)."""
        return self.NUM_STAGES

    def coeff(self):
        """Return the per-stage update coefficients."""
        return self.COEFFICIENTS

    def rkstage_scaling(self):
        """Return the previous-stage solution scalings."""
        return self.LEVELK_COEFFICIENTS

    def time_leveln_scaling(self):
        """Return the level-n solution scalings."""
        return self.LEVELN_COEFFICIENTS
class FiveStageRungeKutta(RungeKutta):
    """Five-stage low-storage Runge-Kutta scheme."""

    NUM_STAGES = 5
    COEFFICIENTS = [0.0695, 0.1602, 0.2898, 0.506, 1.0]
    LEVELN_COEFFICIENTS = [1.0, 1.0, 1.0, 1.0, 1.0]
    LEVELK_COEFFICIENTS = [0.0, 0.0, 0.0, 0.0, 0.0]

    def num_stages(self):
        """Return the number of stages (5)."""
        return self.NUM_STAGES

    def coeff(self):
        """Return the per-stage update coefficients."""
        return self.COEFFICIENTS

    def rkstage_scaling(self):
        """Return the previous-stage solution scalings."""
        return self.LEVELK_COEFFICIENTS

    def time_leveln_scaling(self):
        """Return the level-n solution scalings."""
        return self.LEVELN_COEFFICIENTS
class ThreeStageThirdOrderTVDRungeKutta(RungeKutta):
    """Three-stage, third-order TVD (SSP) Runge-Kutta scheme."""

    NUM_STAGES = 3
    COEFFICIENTS = [1.0, 0.25, 0.6666667]
    LEVELN_COEFFICIENTS = [1.0, 0.75, 0.3333333]
    LEVELK_COEFFICIENTS = [0.0, 0.25, 0.6666667]

    def num_stages(self):
        """Return the number of stages (3)."""
        return self.NUM_STAGES

    def coeff(self):
        """Return the per-stage update coefficients."""
        return self.COEFFICIENTS

    def rkstage_scaling(self):
        """Return the previous-stage solution scalings."""
        return self.LEVELK_COEFFICIENTS

    def time_leveln_scaling(self):
        """Return the level-n solution scalings."""
        return self.LEVELN_COEFFICIENTS
import importlib
from zcfd.utils import config
from ctypes import cdll
def load_solver_runtime(params, solver_params):
    """
    Import and construct the native solver library selected by 'params'.

    params must contain 'device' ('gpu' selects the CUDA build, anything
    else the INTEL build), 'medium' ('air' or 'water'), 'type' (appended to
    the library name) and 'dg' (bool); DG solvers additionally read
    'space_order'.  solver_params is forwarded to the library's
    set_parameters entry point.  If the CUDA build fails to load, the INTEL
    build of the same library is tried before giving up.

    Returns the constructed (DG)ExplicitSolver instance.
    Raises ValueError for missing parameters or an unknown medium.
    """
    required_params = ("device", "medium", "type", "dg")
    if not all(k in params for k in required_params):
        raise ValueError(
            "All required parameters not passed to load_solver_runtime")
    if params['dg']:
        module_name = "libzCFDDGSolver"
    else:
        module_name = "libzCFDSolver"
    module_name += params["type"]
    if params["medium"] == "air":
        module_name += "AIR"
    elif params['medium'] == "water":
        module_name += "WATER"
    else:
        raise ValueError("Unknown medium: %s" % params['medium'])
    # For CUDA we have a separate library
    # The MIC Offload is compiled into the INTEL build
    module_name_stub = module_name
    if params['device'] == "gpu":
        module_name += '_CUDA'
    else:
        module_name += '_INTEL'
    config.logger.info(" Loading Library: " + module_name)
    try:
        # Fix for Linux TLS issue on certain machines: pre-load libmetis so
        # the solver module resolves it.  The handle is intentionally unused.
        metislib = cdll.LoadLibrary("libmetis.so")
        solverlib = importlib.import_module(module_name)
    except Exception as e:
        config.logger.info(" Unable to load " + module_name)
        config.logger.info(" Error: " + str(e))
        # Failed to find library - try intel fallback for GPU requests
        if params['device'] == "gpu":
            module_name = module_name_stub
            module_name += '_INTEL'
            config.logger.info(" Loading Library: " + module_name)
            solverlib = importlib.import_module(module_name)
        else:
            raise e
    # Push the validated parameter dictionary into the native library
    set_parameters = getattr(solverlib, "set_parameters")
    set_parameters(solver_params)
    if params['dg']:
        solver_type = "DGExplicitSolver"
        DGExplicitSolver = getattr(solverlib, solver_type)
        return DGExplicitSolver(params['space_order'], (params["device"] == "cpu"))
    else:
        solver_type = "ExplicitSolver"
        ExplicitSolver = getattr(solverlib, solver_type)
        return ExplicitSolver((params["device"] == "cpu"))
# Nodal point coordinate tables for DG reference elements, keyed by
# polynomial order 'P1'..'P4' (tets/tris/pyramids stop at 'P3').  Each value
# is a flat list of reference-element coordinates in [-1, 1]; for 2-D/3-D
# elements the entries are interleaved per node -- presumably (x, y) or
# (x, y, z) -- TODO confirm against the solver's element definitions.

# 1-D line: evenly spaced nodes
line_evenly_spaced = {
    'P1': [-1.0, 1.0],
    'P2': [-1.0, 0.0, 1.0],
    'P3': [-1.0, -0.33333333, 0.33333333, 1.0],
    'P4': [-1.0, -0.5, 0.0, 0.5, 1.0]
}
# 1-D line: Gauss-Lobatto points (interior, endpoints excluded)
line_gauss_lobatto = {
    'P1': [-0.57735027, 0.57735027],
    'P2': [-0.77459667, 0.0, 0.77459667],
    'P3': [-0.86113631, -0.33998104, 0.33998104, 0.86113631],
    'P4': [-0.90617985, -0.53846931, 0.0, 0.53846931, 0.90617985]
}
# 1-D line: Gauss-Legendre-Lobatto points (endpoints included)
line_gauss_legendre_lobatto = {
    'P1': [-1.0, 1.0],
    'P2': [-1.0, 0.0, 1.0],
    'P3': [-1.0, -0.44721360, 0.44721360, 1.0],
    'P4': [-1.0, -0.65465367, 0.0, 0.65465367, 1.0]
}
# Triangle: Shunn-Ham point sets
tri_shunn_ham = {
    'P1': [-0.66666666, 0.33333333, 0.33333333, -0.66666666, -0.66666666, -0.66666666],
    'P2': [-0.81684757, 0.63369515, 0.63369515, -0.81684757, -0.81684757, -0.81684757, -0.10810302, -0.78379396, -0.78379396, -0.10810302, -0.10810302, -0.10810302],
    'P3': [-0.33333333, -0.33333333, -0.88887190, 0.77774379, 0.77774379, -0.88887190, -0.88887190, -0.88887190, 0.26842150, -0.40893258, -0.85948892, -0.40893258, -0.40893258, -0.85948892, 0.26842150, -0.85948892, -0.85948892, 0.26842150, -0.40893258, 0.26842150]
}
# Triangle: evenly spaced nodes
tri_evenly_spaced = {
    'P1': [-1.0, -1.0, 1.0, -1.0, -1.0, 1.0],
    'P2': [-1.0, -1.0, 0.0, -1.0, 1.0, -1.0, -1.0, 0.0, 0.0, 0.0, -1.0, 1.0],
    'P3': [-1, -1, -0.33333334, -1, 0.33333334, -1, 1, -1, -1, -0.33333334, -0.33333334, -0.33333334, 0.33333334, -0.33333334, -1, 0.33333334, -0.33333334, 0.33333334, -1, 1]
}
# Tetrahedron: Shunn-Ham point sets
tet_shunn_ham = {
    'P1': [-0.72360680, -0.72360680, 0.17082039, 0.17082039, -0.72360680, -0.72360680, -0.72360680, 0.17082039, -0.72360680, -0.72360680, -0.72360680, -0.72360680],
    'P2': [-0.85233020, -0.85233020, 0.55699059, 0.55699059, -0.85233020, -0.85233020, -0.85233020, 0.55699059, -0.85233020, -0.85233020, -0.85233020, -0.85233020, -0.18751131, -0.81248869, -0.18751131, -0.81248869, -0.18751131, -0.18751131, -0.18751131, -0.81248869, -0.81248869, -0.81248869, -0.18751131, -0.81248869, -0.81248869, -0.81248869, -0.18751131, -0.18751131, -0.18751131, -0.81248869],
    'P3': [-0.93529481, -0.93529481, 0.80588443, 0.80588443, -0.93529481, -0.93529481, -0.93529481, 0.80588443, -0.93529481, -0.93529481, -0.93529481, -0.93529481, -0.38046139, -0.38046139, -0.85861583, -0.85861583, -0.38046139, -0.38046139, -0.38046139, -0.85861583, -0.38046139, -0.38046139, -0.38046139, -0.38046139, 0.23319307, -0.87927912, -0.47463483, 0.23319307, -0.87927912, -0.87927912, -0.87927912, -0.87927912, 0.23319307, -0.47463483, 0.23319307, -0.87927912, -0.87927912, -0.47463483, 0.23319307, -0.87927912, 0.23319307, -0.87927912, -0.47463483, -0.87927912, 0.23319307, -0.87927912, -0.47463483, -0.87927912, -0.87927912, -0.87927912, -0.47463483, -0.87927912, 0.23319307, -0.47463483, -0.47463483, -0.87927912, -0.87927912, 0.23319307, -0.47463483, -0.87927912]
}
# Tetrahedron: evenly spaced nodes
tet_evenly_spaced = {
    'P1': [-1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 1.0],
    'P2': [-1.0, -1.0, -1.0, 0.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 0.0, -1.0, 0.0, 0.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 0.0, 0.0, -1.0, 0.0, -1.0, 0.0, 0.0, -1.0, -1.0, 1.0],
    'P3': [-1, -1, -1, -0.33333334, -1, -1, 0.33333334, -1, -1, 1, -1, -1, -1, -0.33333334, -1, -0.33333334, -0.33333334, -1, 0.33333334, -0.33333334, -1, -1, 0.33333334, -1, -0.33333334, 0.33333334, -1, -1, 1, -1, -1, -1, -0.33333334, -0.33333334, -1, -0.33333334, 0.33333334, -1, -0.33333334, -1, -0.33333334, -0.33333334, -0.33333334, -0.33333334, -0.33333334, -1, 0.33333334, -0.33333334, -1, -1, 0.33333334, -0.33333334, -1, 0.33333334, -1, -0.33333334, 0.33333334, -1, -1, 1]
}
# Pyramid: evenly spaced nodes ('P3' table not populated)
pyramid_evenly_spaced = {
    'P1': [0.0, 0.0, 1.0, -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0],
    'P2': [0.0, 0.0, 1.0, -1.0, -1.0, -1.0, 0.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 0.0, -1.0, 0.0, 0.0, -1.0, 1.0, 0.0, -1.0, -1.0, 1.0, -1.0, 0.0, 1.0, -1.0, 1.0, 1.0, -1.0, -0.5, -0.5, 0.0, 0.5, -0.5, 0.0, -0.5, 0.5, 0.0, 0.5, 0.5, 0.0],
    'P3': []
}
# Pyramid: Gauss-Legendre points ('P3' table not populated)
pyramid_gauss_legendre = {
    'P1': [-0.45534180126147954892124105691766, -0.45534180126147954892124105691766, -0.577350269189625764509148780502, 0.45534180126147954892124105691766, -0.45534180126147954892124105691766, -0.577350269189625764509148780502, -0.45534180126147954892124105691766, 0.45534180126147954892124105691766, -0.577350269189625764509148780502, 0.45534180126147954892124105691766, 0.45534180126147954892124105691766, -0.577350269189625764509148780502, 0.0, 0.0, 0.577350269189625764509148780502],
    'P2': [-0.68729833462074168851792653997805, -0.68729833462074168851792653997805, -0.774596669241483377035853079956, 0.0, -0.68729833462074168851792653997805, -0.774596669241483377035853079956, 0.68729833462074168851792653997805, -0.68729833462074168851792653997805, -0.774596669241483377035853079956, -0.68729833462074168851792653997805, 0.0, -0.774596669241483377035853079956, 0.0, 0.0, -0.774596669241483377035853079956, 0.68729833462074168851792653997805, 0.0, -0.774596669241483377035853079956, -0.68729833462074168851792653997805, 0.68729833462074168851792653997805, -0.774596669241483377035853079956, 0.0, 0.68729833462074168851792653997805, -0.774596669241483377035853079956, 0.68729833462074168851792653997805, 0.68729833462074168851792653997805, -0.774596669241483377035853079956, -0.28867513459481288225457439025098, -0.28867513459481288225457439025098, 0.0, 0.28867513459481288225457439025098, -0.28867513459481288225457439025098, 0.0, -0.28867513459481288225457439025098, 0.28867513459481288225457439025098, 0.0, 0.28867513459481288225457439025098, 0.28867513459481288225457439025098, 0.0, 0.0, 0.0, 0.774596669241483377035853079956],
    'P3': []
}
# Pyramid: Gauss-Legendre-Lobatto points
pyramid_gauss_legendre_lobatto = {
    'P1': [0.0, 0.0, 1.0, -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0],
    'P2': [0.0, 0.0, 1.0, -1.0, -1.0, -1.0, 0.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 0.0, -1.0, 0.0, 0.0, -1.0, 1.0, 0.0, -1.0, -1.0, 1.0, -1.0, 0.0, 1.0, -1.0, 1.0, 1.0, -1.0, -0.5, -0.5, 0.0, 0.5, -0.5, 0.0, -0.5, 0.5, 0.0, 0.5, 0.5, 0.0],
    'P3': [0.0, 0.0, 1.0, -1.0, -1.0, -1.0, -0.44721359549995793928183473374626, -1.0, -1.0, 0.44721359549995793928183473374626, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -0.44721359549995793928183473374626, -1.0, -0.44721359549995793928183473374626, -0.44721359549995793928183473374626, -1.0, 0.44721359549995793928183473374626, -0.44721359549995793928183473374626, -1.0, 1.0, -0.44721359549995793928183473374626, -1.0, -1.0, 0.44721359549995793928183473374626, -1.0, -0.44721359549995793928183473374626, 0.44721359549995793928183473374626, -1.0, 0.44721359549995793928183473374626, 0.44721359549995793928183473374626, -1.0, 1.0, 0.44721359549995793928183473374626, -1.0, -1.0, 1.0, -1.0, -0.44721359549995793928183473374626, 1.0, -1.0, 0.44721359549995793928183473374626, 1.0, -1.0, 1.0, 1.0, -1.0, -0.723606797749978969640917366873, -0.723606797749978969640917366873, -0.447213595499957939281834733746, 0.0, -0.723606797749978969640917366873, -0.447213595499957939281834733746, 0.723606797749978969640917366873, -0.723606797749978969640917366873, -0.447213595499957939281834733746, -0.723606797749978969640917366873, 0.0, -0.447213595499957939281834733746, 0.0, 0.0, -0.447213595499957939281834733746, 0.723606797749978969640917366873, 0.0, -0.447213595499957939281834733746, -0.723606797749978969640917366873, 0.723606797749978969640917366873, -0.447213595499957939281834733746, 0.0, 0.723606797749978969640917366873, -0.447213595499957939281834733746, 0.723606797749978969640917366873, 0.723606797749978969640917366873, -0.447213595499957939281834733746, -0.276393202250021030359082633127, -0.276393202250021030359082633127, 0.447213595499957939281834733746, 0.276393202250021030359082633127, -0.276393202250021030359082633127, 0.447213595499957939281834733746, -0.276393202250021030359082633127, 0.276393202250021030359082633127, 0.447213595499957939281834733746, 0.276393202250021030359082633127, 0.276393202250021030359082633127, 0.447213595499957939281834733746],
}
# Default nodal point selection per element type
nodal_locations_default = {
    'Nodal Locations': {
        'Line': line_gauss_lobatto,
        'Tetrahedron': tet_shunn_ham,
        'Tri': tri_shunn_ham,
        'Pyramid': pyramid_gauss_legendre_lobatto
    }
}
import sys
import os
import yaml
from zcfd.utils import config
from zcfd_validate import validate
from voluptuous import error
from colorama import Fore
# mtime (seconds since epoch) of the control file when it was last read;
# used by Parameters.read_if_changed to detect edits at runtime
last_timestamp = 0
class Parameters:
    """Reads, validates and writes the zCFD control (parameter) file."""

    # Template control-file contents written verbatim by write(); this is
    # runtime data (Python source for a fresh case), not documentation.
    default = '''
import zutil

parameters = {
# units for dimensional quantities
'units' : 'SI',
# reference state
'reference' : 'IC_1',
# restart from previous solution
'restart' : False,
'partitioner' : 'metis',
# time marching properties
'time marching' : {
'unsteady' : {
'total time' : 1.0,
'time step' : 1.0,
'order' : 'second',
'start' : 3000,
},
'scheme' : {
'name' : 'runge kutta',
'stage': 5,
'kind' : 'local timestepping',
},
# multigrid levels including fine mesh
'multigrid' : 4,
'cfl': 2.0,
'cycles' : 1000,
},
'equations' : 'euler',
'euler' : {
'order' : 'second',
'limiter' : 'vanalbada',
'precondition' : True,
},
'viscous' : {
'order' : 'second',
'limiter' : 'vanalbada',
'precondition' : True,
},
'RANS' : {
'order' : 'second',
'limiter' : 'vanalbada',
'precondition' : True,
'turbulence' : {
'model' : 'sst',
},
},
'IC_1' : {
'temperature':273.15,
'pressure':101325.0,
'V': {
'vector' : [1.0,0.0,0.0],
# Used to define velocity mag if specified
'Mach' : 0.5,
},
#'viscosity' : 0.0,
'Reynolds No' : 1.0e6,
'Reference Length' : 1.0,
'turbulence intensity': 0.01,
'eddy viscosity ratio': 0.1,
},
'IC_2' : {
'reference' : 'IC_1',
# total pressure/reference static pressure
'total pressure ratio' : 1.0,
# total temperature/reference static temperature
'total temperature ratio' : 1.0,
},
'IC_3' : {
'reference' : 'IC_1',
# static pressure/reference static pressure
'static pressure ratio' : 1.0,
},
'BC_1' : {
'ref' : 7,
'type' : 'symmetry',
},
'BC_2' : {
'ref' : 3,
'type' : 'wall',
'kind' : 'slip',
},
'BC_3' : {
'ref' : 9,
'type' : 'farfield',
'condition' : 'IC_1',
'kind' : 'riemann',
},
'BC_4' : {
'ref' : 4,
'type' : 'inflow',
'kind' : 'default',
'condition' : 'IC_2',
},
'BC_5' : {
'ref' : 5,
'type' : 'outflow',
'kind' : 'pressure',
'condition' : 'IC_3',
},
'write output' : {
'format' : 'vtk',
'surface variables': ['V','p','T','rho','cp'],
'volume variables': ['V','p','T','rho'],
'frequency' : 100,
},
'report' : {
'frequency' : 10,
},
}

############################
#
# Variable list
#
# var_1 to var_n
# p,pressure
# T, temperature
#
############################
'''
    def read(self, configfile):
        """
        Import the control file as a Python module and validate it.

        'configfile' is a module name (the control file is Python source,
        found via the current working directory which is pushed onto
        sys.path).  The module's 'parameters' dict is merged over any
        existing config.parameters, validated with zcfd_validate, and the
        control file's mtime is recorded for read_if_changed.  The process
        exits on validation failure.
        """
        config.logger.debug(__file__ + " " + __name__ + ":read")
        sys.path.insert(0, os.getcwd())
        configmodule = __import__(configfile)
        old_dict = configmodule.__dict__.copy()
        try:
            # NOTE(review): 'reload' is the Python 2 builtin; Python 3
            # requires importlib.reload.
            configmodule = reload(configmodule)
        except Exception as e:
            # keep the previously-loaded configuration on reload failure
            config.logger.error(Fore.RED + "Error updating configuration:" + str(e) + Fore.RESET)
            configmodule.__dict__.update(old_dict)
        parameters = getattr(sys.modules[configfile], 'parameters')
        # TODO Need to improve this
        unvalidated_parameters = dict(config.parameters, **parameters)
        # Validate
        try:
            config.parameters = validate(unvalidated_parameters)
        except Exception as e:
            config.logger.error(Fore.RED + "Error validating parameters dictionary" + Fore.RESET)
            if isinstance(e, error.MultipleInvalid):
                for err in reversed(e.errors):
                    if err.path[0] in unvalidated_parameters.keys():
                        config.logger.error("Parameters dictionary key" + Fore.CYAN + " \'%s\' " % err.path[0] + Fore.RESET +
                                            "has value" + Fore.CYAN + " %s:" % unvalidated_parameters[err.path[0]] + Fore.RED + " %s" % err.error_message + Fore.RESET)
                    else:
                        config.logger.error("Key" + Fore.CYAN + " \'%s\': " % err.path[0] + Fore.RED +
                                            " %s" % err.error_message + Fore.RESET)
            else:
                config.logger.error(e)
            exit(1)
        # remember the control file's mtime so read_if_changed can detect edits
        props = os.stat(config.controlfile)
        global last_timestamp
        last_timestamp = props.st_mtime
        config.logger.debug("Parameters: \n " + '\n '.join(
            ['%s: %s' % (key, value) for (key, value) in config.parameters.items()]))
def read_if_changed(self, casename, configfile):
global last_timestamp
props = os.stat(configfile)
if props.st_mtime > last_timestamp:
self.read(casename)
return True
return False
def write(self, configfile):
if config.logger != 0:
config.logger.debug(__file__ + " " + __name__ + ":write")
# Open control file for writing
fp = open(configfile, "w")
# Write control file in yaml format
fp.write(self.default)
def read_yaml(self):
config.logger.debug(__file__ + " " + __name__ + ":read_yaml")
# Open control file for reading
fp = open(config.controlfile, "r")
config.parameters = yaml.load(fp)
# print config.parameters['velocity'][0]
def write_yaml(self):
if config.logger != 0:
config.logger.debug(__file__ + " " + __name__ + ":write_yaml")
# Open control file for writing
fp = open(config.controlfile, "w")
# Write control file in yaml format
yaml.dump(config.parameters, fp, indent=2)
    def create_native(self):
        """
        Experimental: generate, nvcc-compile (via codepy) and load a native
        module that mirrors the parameter dictionary as a C struct.

        Only int, float and homogeneous int/float list parameters are
        translated; other entries are silently skipped.  Requires cgen,
        jinja2, codepy, Boost.Python and a CUDA toolchain.  This is
        Python 2-only code (print statements, dict.iteritems).
        """
        from cgen import (
            ArrayOf, POD,
            Block,
            For, Statement, Struct)
        from cgen import dtype_to_ctype
        import numpy
        members = []   # C struct members, one per translatable parameter
        code = []      # extraction statements filling the struct from a dict
        for pk, pv in config.parameters.iteritems():
            if isinstance(pv, int):
                members.append(POD(numpy.int, pk))
                code.append(Statement("params.%s = extract<%s>(cppdict[\"%s\"])" % (
                    pk, dtype_to_ctype(numpy.int), pk)))
            elif isinstance(pv, float):
                members.append(POD(numpy.float64, pk))
                code.append(Statement("params.%s = extract<%s>(cppdict[\"%s\"])" % (
                    pk, dtype_to_ctype(numpy.float64), pk)))
            elif isinstance(pv, list):
                # list element type decided from the first element only
                if isinstance(pv[0], int):
                    members.append(ArrayOf(POD(numpy.int, pk), len(pv)))
                    code.append(Block([Statement("list v = extract<%s>(cppdict[\"%s\"])" % (list.__name__, pk)),
                                       For("unsigned int i = 0",
                                           "i<len(v)",
                                           "++i",
                                           Statement("params.%s[i] = extract<%s>(v[i])" % (
                                               pk, dtype_to_ctype(numpy.int)))
                                           ),
                                       ]))
                elif isinstance(pv[0], float):
                    members.append(ArrayOf(POD(numpy.float64, pk), len(pv)))
                    code.append(Block([Statement("list v = extract<%s>(cppdict[\"%s\"])" % (list.__name__, pk)),
                                       For("unsigned int i = 0",
                                           "i < len(v)",
                                           "++i",
                                           Block([Statement("params.%s[i] = extract<%s>(v[i])" % (pk, dtype_to_ctype(numpy.float64))),
                                                  Statement(
                                               "//std::cout << params.%s[i] << std::endl" % (pk))
                                           ])
                                           ),
                                       ]))
        mystruct = Struct('Parameters', members)
        mycode = Block(code)
        # print mystruct
        # print mycode
        from jinja2 import Template
        # Boost.Python wrapper source; the struct and extractor code blocks
        # generated above are interpolated into this template.
        tpl = Template("""
#include <boost/python.hpp>
#include <boost/python/object.hpp>
#include <boost/python/extract.hpp>
#include <boost/python/list.hpp>
#include <boost/python/dict.hpp>
#include <boost/python/str.hpp>
#include <stdexcept>
#include <iostream>

{{my_struct}}

Parameters params;

void CopyDictionary(boost::python::object pydict)
{
    using namespace boost::python;
    extract< dict > cppdict_ext(pydict);
    if(!cppdict_ext.check()){
        throw std::runtime_error(
                  "PassObj::pass_dict: type error: not a python dict.");
    }

    dict cppdict = cppdict_ext();
    list keylist = cppdict.keys();

    {{my_extractor}}


}

BOOST_PYTHON_MODULE({{my_module}})
{
    boost::python::def("copy_dict", &CopyDictionary);
}
""")
        rendered_tpl = tpl.render(
            my_module="NativeParameters", my_extractor=mycode, my_struct=mystruct)
        # print rendered_tpl
        from codepy.toolchain import NVCCToolchain
        import codepy.toolchain
        kwargs = codepy.toolchain._guess_toolchain_kwargs_from_python_config()
        # print kwargs
        kwargs["cc"] = "nvcc"
        # kwargs["cflags"]=["-m64","-x","cu","-Xcompiler","-fPIC","-ccbin","/opt/local/bin/g++-mp-4.4"]
        kwargs["cflags"] = ["-m64", "-x", "cu", "-Xcompiler", "-fPIC"]
        kwargs["include_dirs"].append("/usr/local/cuda/include")
        kwargs["defines"] = []
        kwargs["ldflags"] = ["-shared"]
        # kwargs["libraries"]=["python2.7"]
        kwargs["libraries"] = ["python2.6"]
        # NOTE(review): Python 2 print statement; this module predates Python 3
        print kwargs
        toolchain = NVCCToolchain(**kwargs)
        from codepy.libraries import add_boost_python
        add_boost_python(toolchain)
        from codepy.jit import extension_from_string
        mymod = extension_from_string(
            toolchain, "NativeParameters", rendered_tpl)
        # push the current parameter dictionary into the freshly-built module
        mymod.copy_dict(config.parameters)
from zcfd.utils.Logger import Logger
# Shared runtime state for the zCFD driver.  Most slots are initialised to 0
# as a "not yet set" sentinel and populated during start-up.
zmq = 0  # Message Q connector
logger = 0  # main logger; e.g. Logger(0)
filelogger = 0  # file-backed log handler
streamlogger = 0  # console log handler
options = 0  # Output from argparser
controlfile = 0  # path of the active control file
parameters = {}  # validated parameter dictionary from the control file
solver = 0
solver_handle = 0
solver_names = 0
device = 0  # compute device selection (e.g. cpu/gpu)
output_dir = 0
cycle_info = 0
start_time = 0
end_time = 0
def get_space_order(equations):
    """Map the 'order' entry of parameters[equations] to an integer order.

    DG equation sets accept 'zero'..'eighth' (or the integers 0..8) and
    default to 0 for anything unrecognised.  Finite-volume sets map
    'first' -> 1 and 'euler_second' -> 3, defaulting to second order (2).
    """
    global parameters
    order = parameters[equations]['order']
    if equations.startswith('DG'):
        word_orders = {
            'zero': 0, 'first': 1, 'second': 2, 'third': 3, 'fourth': 4,
            'fifth': 5, 'sixth': 6, 'seventh': 7, 'eighth': 8,
        }
        if order in word_orders:
            return word_orders[order]
        if order in (0, 1, 2, 3, 4, 5, 6, 7, 8):
            return order
        return 0
    # finite-volume solvers: second order unless explicitly overridden
    fv_orders = {'first': 1, 'euler_second': 3}
    return fv_orders.get(order, 2)
def get_time_order(time_order):
    """Normalise a time-accuracy specifier to an integer order.

    Integers pass through unchanged; 'first' -> 1, 'second' -> 2, and any
    other value falls back to first order.
    """
    if isinstance(time_order, int):
        return time_order
    if time_order == 'second':
        return 2
    # 'first' and anything unrecognised both mean first order
    return 1
from __future__ import division
import time
class Stopwatch(object):
    """
    A restartable timer that accumulates elapsed wall-clock time.

    The watch can be started and stopped repeatedly; 'count' records how
    many times it was started.  Normally used indirectly through the
    TimeSpent class, which manages named Stopwatch instances.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # startTime is None whenever the watch is not running
        self.count = 0
        self.startTime = None
        self.elapsedMillis = 0

    def isRunning(self):
        return self.startTime is not None

    def start(self):
        # starting an already-running watch is a no-op
        if self.isRunning():
            return
        self.count += 1
        self.startTime = time.time()

    def stop(self):
        # fold the current interval into the accumulated total
        self.elapsedMillis = self.getElapsedMillis()
        self.startTime = None

    def getElapsedMillis(self):
        total = self.elapsedMillis
        if self.isRunning():
            total += (time.time() - self.startTime) * 1000
        return total

    def getAvgElapsedMillis(self):
        if not self.count:
            return 0.0
        return self.getElapsedMillis() / self.count

    def getElapsedTimeDayHourMinSec(self):
        remaining = self.getElapsedMillis() / 1000
        days, remaining = divmod(remaining, 24 * 60 * 60)
        hours, remaining = divmod(remaining, 60 * 60)
        minutes, seconds = divmod(remaining, 60)
        return days, hours, minutes, seconds
# -----------------------------------------------------------------------------
class TimeSpent(object):
"""
The TimeSpent class provides a simple way of intrumenting Python code
to find out how much (wallclock) time is used by various parts of the code.
To use it, you first create an instance of this class and then bracket the
sections of code you want to measure with calls to the 'start' and 'stop'
methods, supplying a name of your choice as the argument to these methods.
For each distinct name, a Stopwatch instance is automatically created and it
keeps track of the accumulated time between the calls to 'start' and 'stop'.
At some suitable point in your program, call the 'generateReport' or
'generateReportAndReset' method to get a string with a report of the times
for each of the stopwatches. Each stopwatch also keeps track of how many
times it was started and this count is part of the report.
Example of use:
timeSpent = TimeSpent()
timeSpent.start("A")
func1()
func2()
timeSpent.stop("A")
boringFunc()
timeSpent.start("B")
func3()
timeSpent.stop("B")
timeSpent.start("A")
func4()
func5()
timeSpent.stop("A")
print timeSpent.generateReportAndReset()
"""
_globalInstance = None
@staticmethod
def getGlobalInstance():
"""
This method can be used to obtain a global instance of TimeSpent
that is shared across all parts of your program.
It is an alternative to creating your own TimeSpent instances as needed.
Example of use:
timeSpent = TimeSpent.getGlobalInstance()
timeSpent.start("A")
... etc
"""
if TimeSpent._globalInstance is None:
TimeSpent._globalInstance = TimeSpent()
return TimeSpent._globalInstance
def __init__(self):
self.stopwatches = dict() # indexed by name
self.reportStopwatch = Stopwatch()
self.reportStopwatch.start()
def _getStopwatch(self, name, createIfNeeded=True):
"""
Return the stopwatch with the specified name.
If there is no stopwatch with that name and 'createIfNeeded' is True,
a new stopwatch with that name is created (and returned).
NB: this is intended only for internal use.
"""
if createIfNeeded and name not in self.stopwatches:
self.stopwatches[name] = Stopwatch()
return self.stopwatches.get(name, None)
def start(self, name):
"""
Start the stopwatch with the specified name.
If there is no stopwatch with that name, one is created (and started).
"""
stopwatch = self._getStopwatch(name)
if stopwatch.isRunning():
print "WARNING: start(%s) called when stopwatch is already running" % name
stopwatch.start()
def stop(self, name):
    """
    Stop the stopwatch with the specified name.
    An unknown name is reported with a warning instead of raising.
    """
    stopwatch = self._getStopwatch(name, False)
    if stopwatch is not None:
        stopwatch.stop()
    else:
        # print() call form keeps this file importable under Python 3 as well
        # as Python 2 (the original print statement is Python-2-only syntax).
        print("WARNING: unknown stopwatch name passed to 'stop' (%s)" % name)
def getElapsedMillis(self, name):
    # Total accumulated milliseconds for the named stopwatch.
    return self._getStopwatch(name).getElapsedMillis()

def getAvgElapsedMillis(self, name):
    # Average milliseconds per start/stop cycle for the named stopwatch.
    return self._getStopwatch(name).getAvgElapsedMillis()

def getCount(self, name):
    # Number of times the named stopwatch has been started.
    return self._getStopwatch(name).count
def reset(self, name):
    """Forget the named stopwatch; it is recreated on the next use."""
    # pop() with a default makes this a no-op for unknown names.
    self.stopwatches.pop(name, None)
def resetAll(self):
    """Discard every stopwatch."""
    # remove the stopwatches since they will get recreated if needed
    self.stopwatches.clear()
def generateReport(self):
    """
    Build the timing report.

    Returns a (total, report) pair of strings: 'total' holds the wall-clock
    seconds elapsed since the previous report, 'report' holds one line per
    stopwatch (sorted by name) with its accumulated time, cycle count and
    average cycle duration in milliseconds.
    """
    # Read and restart the inter-report stopwatch so the next report measures
    # from this point.
    self.reportStopwatch.stop()
    secondsSinceReport = self.reportStopwatch.getElapsedMillis() / 1000.0
    self.reportStopwatch.reset()
    self.reportStopwatch.start()
    total = "Elapsed seconds: %.1f\n" % secondsSinceReport
    lines = ["Breakdown of time spent:"]
    for name in sorted(self.stopwatches):
        stopwatch = self.stopwatches[name]
        elapsed = stopwatch.getElapsedTimeDayHourMinSec()
        lines.append(
            "%s: %.0f days %.0f hrs %.0f mins %.0f s (count: %d cycles, avg: %.1f ms)"
            % (name,
               elapsed[0], elapsed[1], elapsed[2], elapsed[3],
               stopwatch.count,
               stopwatch.getAvgElapsedMillis()))
    report = "\n".join(lines) + "\n"
    return total, report
def generateReportAndReset(self):
    """Return the full report string and discard every stopwatch."""
    parts = self.generateReport()
    self.resetAll()
    return "".join(parts)
# ----------------------------------------------------------------------------- | zcfd-validate | /zcfd-validate-2018.7.60.post3.tar.gz/zcfd-validate-2018.7.60.post3/zcfd/utils/TimeSpent.py | TimeSpent.py |
import argparse
class ZOption:
    """Parses the zCFD command-line arguments."""

    def parse(self, suppress=False):
        """
        Parse sys.argv and return the argparse namespace.

        When 'suppress' is True, the usage text and the automatic -h/--help
        option are disabled (useful when another tool owns the help output).
        Unless --mq is given, a positional problem name is required; the case
        name defaults to the problem name when -c is not supplied.
        """
        if suppress:
            parser = argparse.ArgumentParser(usage=argparse.SUPPRESS,
                                             add_help=False)
        else:
            parser = argparse.ArgumentParser(
                usage="usage: %(prog)s [options] PROBLEM-NAME",
                description="zCFD command line arguments", add_help=True)
        parser.add_argument('problem_name', nargs='?', default=None)
        parser.add_argument('--version', action='version', version='1.0')
        parser.add_argument("--mq", dest='mq', action='store_const',
                            const=True, default=False, help='Use message queue')
        parser.add_argument("-c", "--case-name", dest="case_name",
                            metavar="CASE-NAME", default=None, help="Case name")
        parser.add_argument("-d", "--device", dest="device",
                            metavar="DEVICE", default="cpu",
                            help="Execution mode: cpu or gpu [default: %(default)s]")
        args = parser.parse_args()
        # In message-queue mode the problem name may arrive later, so it is
        # only mandatory for direct runs.
        if not args.mq:
            if args.problem_name is None:
                parser.error('Problem name not defined')
        if args.case_name is None:
            args.case_name = args.problem_name
        return args
if __name__ == "__main__":
    # Manual smoke test: parse the real command line and discard the result.
    opt = ZOption()
    opt.parse()
zcheck
===========
zcheck is a command-line utility to check the configuration of a
production Zenko deployment and diagnose problems in it.
Pre-Requisites
--------------
zcheck requires a `Helm <https://github.com/kubernetes/helm>`__
installation that is configured to access Tiller running inside
Kubernetes.
Installation
------------
zcheck can be installed directly from PyPi using Pip:
::
pip install zcheck
A Docker image is also provided for convenience.
::
docker pull zenko/zcheck:latest
docker run -it zenko/zcheck help
Syntax
------
zcheck commands conform to the following syntax:
::
zcheck <global option> <subcommand> <-flag or --verbose_option> <optional target>
Global Options
~~~~~~~~~~~~~~
::
--mongo Override the default Mongo connection string (host:port)
-r, --helm-release The Helm release name under which Zenko was installed.
Subcommands
~~~~~~~~~~~
checkup
^^^^^^^
Run all checks and tests (may take a while).
k8s
^^^
Check Kubernetes-related configuration.
::
-c, --check-services Attempt to connect to defined services and report their status.
orbit
^^^^^
Check overlay configuration applied via Orbit.
backends
^^^^^^^^
Check existence and configuration of backend buckets.
::
-d, --deep Enable deep checking. Check every Zenko bucket for its backing bucket
(same as zcheck buckets)
buckets
^^^^^^^
Check every Zenko bucket for its backend bucket.
| zcheck | /zcheck-0.1.5.tar.gz/zcheck-0.1.5/README.rst | README.rst |
# zcheck
zcheck is a command-line utility to check the configuration of a
production Zenko deployment and diagnose problems in it.
## Pre-Requisites
zcheck requires a [Helm](https://github.com/kubernetes/helm) installation
that is configured to access Tiller running inside Kubernetes.
## Installation
zcheck can be installed directly from PyPi using Pip:
```
pip install zcheck
```
A Docker image is also provided for convenience.
```
docker pull zenko/zcheck:latest
docker run -it zenko/zcheck help
```
## Syntax
zcheck commands conform to the following syntax:
```
zcheck <global option> <subcommand> <-flag or --verbose_option> <optional target>
```
### Global Options
```
--mongo Override the default Mongo connection string (host:port)
-r, --helm-release The Helm release name under which Zenko was installed.
```
### Subcommands
#### checkup
Run all checks and tests (may take a while).
#### k8s
Check Kubernetes-related configuration.
```
-c, --check-services Attempt to connect to defined services and report their status.
```
#### orbit
Check overlay configuration applied via Orbit.
#### backends
Check existence and configuration of backend buckets.
```
-d, --deep Enable deep checking. Check every Zenko bucket for its backing bucket
(same as zcheck buckets)
```
#### buckets
Check every Zenko bucket for its backend bucket.
| zcheck | /zcheck-0.1.5.tar.gz/zcheck-0.1.5/README.md | README.md |
<div align="right">
Language:
🇺🇸
<a title="Chinese" href="./README.zh-CN.md">🇨🇳</a>
</div>
<div align="center"><a title="" href="https://github.com/ZJCV/ZCls"><img align="center" src="./imgs/ZCls.png"></a></div>
<p align="center">
«ZCls» is a classification model training/inferring framework
<br>
<br>
<a href="https://github.com/RichardLitt/standard-readme"><img src="https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square"></a>
<a href="https://conventionalcommits.org"><img src="https://img.shields.io/badge/Conventional%20Commits-1.0.0-yellow.svg"></a>
<a href="http://commitizen.github.io/cz-cli/"><img src="https://img.shields.io/badge/commitizen-friendly-brightgreen.svg"></a>
<a href="https://pypi.org/project/zcls/"><img src="https://img.shields.io/badge/PYPI-zcls-brightgreen"></a>
<a href='https://zcls.readthedocs.io/en/latest/?badge=latest'>
<img src='https://readthedocs.org/projects/zcls/badge/?version=latest' alt='Documentation Status' />
</a>
</p>
Supported Recognizers:
<p align="center">
<img align="center" src="./imgs/roadmap.svg">
</p>
*Refer to [roadmap](https://zcls.readthedocs.io/en/latest/roadmap/) for details*
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Background](#background)
- [Installation](#installation)
- [Usage](#usage)
- [Maintainers](#maintainers)
- [Thanks](#thanks)
- [Contributing](#contributing)
- [License](#license)
## Background
In the fields of object detection/object segmentation/action recognition, there have been many training frameworks with high integration and perfect process, such as [facebookresearch/detectron2](https://github.com/facebookresearch/detectron2), [open-mmlab/mmaction2](https://github.com/open-mmlab/mmaction2) ...
Object classification is the most developed and theoretically fundamental field in deep learning. Referring to the existing training frameworks, a training/inference framework for object classification models is implemented here. I hope ZCls can bring you a better experience.
## Installation
See [INSTALL](https://zcls.readthedocs.io/en/latest/install/)
## Usage
How to train, see [Get Started with ZCls](https://zcls.readthedocs.io/en/latest/get-started/)
Use builtin datasets, see [Use Builtin Datasets](https://zcls.readthedocs.io/en/latest/builtin-datasets/)
Use custom datasets, see [Use Custom Datasets](https://zcls.readthedocs.io/en/latest/)
Use pretrained model, see [Use Pretrained Model](https://zcls.readthedocs.io/en/latest/pretrained-model/)
## Maintainers
* zhujian - *Initial work* - [zjykzj](https://github.com/zjykzj)
## Thanks
```
@misc{ding2021diverse,
title={Diverse Branch Block: Building a Convolution as an Inception-like Unit},
author={Xiaohan Ding and Xiangyu Zhang and Jungong Han and Guiguang Ding},
year={2021},
eprint={2103.13425},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{ding2021repvgg,
title={RepVGG: Making VGG-style ConvNets Great Again},
author={Xiaohan Ding and Xiangyu Zhang and Ningning Ma and Jungong Han and Guiguang Ding and Jian Sun},
year={2021},
eprint={2101.03697},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{fan2020pyslowfast,
author = {Haoqi Fan and Yanghao Li and Bo Xiong and Wan-Yen Lo and
Christoph Feichtenhofer},
title = {PySlowFast},
howpublished = {\url{https://github.com/facebookresearch/slowfast}},
year = {2020}
}
@misc{zhang2020resnest,
title={ResNeSt: Split-Attention Networks},
author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola},
year={2020},
eprint={2004.08955},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{han2020ghostnet,
title={GhostNet: More Features from Cheap Operations},
author={Kai Han and Yunhe Wang and Qi Tian and Jianyuan Guo and Chunjing Xu and Chang Xu},
year={2020},
eprint={1911.11907},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
*For more thanks, check [THANKS](./THANKS)*
## Contributing
Anyone's participation is welcome! Open an [issue](https://github.com/ZJCV/ZCls/issues) or submit PRs.
Small note:
* Git submission specifications should be complied
with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0-beta.4/)
* If versioned, please conform to the [Semantic Versioning 2.0.0](https://semver.org) specification
* If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme)
specification.
## License
[Apache License 2.0](LICENSE) © 2020 zjykzj | zcls | /zcls-0.15.2.tar.gz/zcls-0.15.2/README.md | README.md |
<div align="right">
Language:
🇺🇸
<a title="Chinese" href="./README.zh-CN.md">🇨🇳</a>
</div>
<div align="center"><a title="" href="https://github.com/ZJCV/ZCls2"><img align="center" src="./imgs/ZCls2.png" alt=""></a></div>
<p align="center">
    «ZCls2» is a faster classification model training framework
<br>
<br>
<a href="https://github.com/RichardLitt/standard-readme"><img src="https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square" alt=""></a>
<a href="https://conventionalcommits.org"><img src="https://img.shields.io/badge/Conventional%20Commits-1.0.0-yellow.svg" alt=""></a>
<a href="http://commitizen.github.io/cz-cli/"><img src="https://img.shields.io/badge/commitizen-friendly-brightgreen.svg" alt=""></a>
<a href="https://libraries.io/pypi/zcls2"><img src="https://img.shields.io/librariesio/github/ZJCV/ZCls2" alt=""></a>
<br>
<a href="https://pypi.org/project/zcls2/"><img src="https://img.shields.io/badge/PYPI-zcls2-brightgreen" alt=""></a>
<a href="https://pypi.org/project/zcls2/"><img src="https://img.shields.io/pypi/pyversions/zcls2" alt=""></a>
<a href="https://pypi.org/project/zcls2/"><img src="https://img.shields.io/pypi/v/zcls2" alt=""></a>
<a href="https://pypi.org/project/zcls2/"><img src="https://img.shields.io/pypi/l/zcls2" alt=""></a>
<a href="https://pypi.org/project/zcls2/"><img src="https://img.shields.io/pypi/dd/zcls2?style=plastic" alt=""></a>
<br>
<a href='https://zcls2.readthedocs.io/en/latest/?badge=latest'>
<img src='https://readthedocs.org/projects/zcls2/badge/?version=latest' alt='Documentation Status' />
</a>
<a href="https://github.com/ZJCV/ZCls2"><img src="https://img.shields.io/github/v/tag/ZJCV/ZCls2" alt=""></a>
<a href="https://github.com/ZJCV/ZCls2"><img src="https://img.shields.io/github/repo-size/ZJCV/ZCls2" alt=""></a>
<a href="https://github.com/ZJCV/ZCls2"><img src="https://img.shields.io/github/forks/ZJCV/ZCls2?style=social" alt=""></a>
<a href="https://github.com/ZJCV/ZCls2"><img src="https://img.shields.io/github/stars/ZJCV/ZCls2?style=social" alt=""></a>
<a href="https://github.com/ZJCV/ZCls2"><img src="https://img.shields.io/github/downloads/ZJCV/ZCls2/total" alt=""></a>
<a href="https://github.com/ZJCV/ZCls2"><img src="https://img.shields.io/github/commit-activity/y/ZJCV/ZCls2" alt=""></a>
</p>
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Background](#background)
- [Installation](#installation)
- [Usage](#usage)
- [Maintainers](#maintainers)
- [Thanks](#thanks)
- [Contributing](#contributing)
- [License](#license)
## Background
After nearly one and a half years of development, [ZCls](https://github.com/ZJCV/ZCls) has integrated many training features, includes configuration module, register module, training module, and many model implementations (`resnet/mobilenet/senet-sknet-resnest/acbnet-repvgg-dbbnet/ghostnet/gcnet...`) and so on. In the development process, it is found that compared with the current excellent classification training framework, such as [apex](https://github.com/NVIDIA/apex/tree/master/examples/imagenet), the training speed of [ZCls](https://github.com/ZJCV/ZCls) is not outstanding.
In order to better improve the training speed, we decided to develop a new training framework [ZCls2](https://github.com/ZJCV/ZCls2), which is implemented based on [apex](https://github.com/NVIDIA/apex/tree/master/examples/imagenet) and provides more friendly and powerful functions. In the preliminary implementation, it can be found that [ZCls2](https://github.com/ZJCV/ZCls2) improves the training speed by at least 50% compared with [ZCls](https://github.com/ZJCV/ZCls). More functions are being added.
## Installation
See [Install](https://zcls2.readthedocs.io/en/latest/install/)
## Usage
See [Get started](https://zcls2.readthedocs.io/en/latest/get-started/)
## Maintainers
* zhujian - *Initial work* - [zjykzj](https://github.com/zjykzj)
## Thanks
* [NVIDIA/apex](https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
* [ZJCV/ZCls](https://github.com/ZJCV/ZCls)
## Contributing
Anyone's participation is welcome! Open an [issue](https://github.com/ZJCV/ZCls2/issues) or submit PRs.
Small note:
* Git submission specifications should be complied
with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0-beta.4/)
* If versioned, please conform to the [Semantic Versioning 2.0.0](https://semver.org) specification
* If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme)
specification.
## License
[Apache License 2.0](LICENSE) © 2022 zjykzj | zcls2 | /zcls2-0.5.3.tar.gz/zcls2-0.5.3/README.md | README.md |
ZeroMQ Component Model (ZCM) in Python
======================================
Overview
---------
* ZCM is a lightweight component model using ZeroMQ (http://zeromq.org/)
* Components are the building blocks of an application
* Components are characterized by ports and timers.
* Timers are bound to an *operation* and fire periodically
* There are four basic types of ports in ZCM: publisher, subscriber, client and server
* Publishers publish messages and Subscribers receive messages
* Clients request the services of a server by sending a request message; Servers receive such requests, process the requests, and respond back to the Client. Until the Server responds, the Client port blocks
* A Component can be instantiated multiple times in an application with different port configurations
* A component has a single operation queue that handles timer triggers and receives messages
* A component has an executor thread that processes this operation queue
* Components register functionality e.g. timer_operations, subscribers_operations etc.
* Component instances are grouped together into a process, called *Actor*
* An actor receives a configuration (.JSON) file, that contains information regarding the components to instantiate
* This configuration file also contains properties of all timers and ports contained by the component instances
| zcm | /zcm-1.0.0.tar.gz/zcm-1.0.0/README.rst | README.rst |
# zcmds_win32
[](https://github.com/zackees/zcmds_win32/actions/workflows/lint.yml)
[](https://github.com/zackees/zcmds_win32/actions/workflows/push_win.yml)
Optional zcmds package for win32 to make it feel more like a linux distribution. This is a great package to use if you want to
use things like `tee`, `grep` and unix commands and have it work on windows.
# Commands
* cat
* cp
* du
* git-bash
* grep
* home
* false
* id
* ls
* md5sum
* mv
* nano
* pico
* ps
* open
* rm
* true
* test
* tee
* touch
* unzip
* which
* wc
* xargs
* uniq
* uname
* fixvmmem
* If CPU consumption for vmmem high, run this command to fix it.
* yes
# Install (normal)
* `python -m pip install zcmds`
# Install (dev):
* `git clone https://github.com/zackees/zcmds_win32`
* `cd zcmds_win32`
* `python -m pip install -e .`
# Release Notes
* 1.2.1: Adds tool `dig`
* 1.0.26: When sublime is opened via `open` it now opens in it's own window.
* 1.0.25: Fix `open` for python 3.9
* 1.0.24: Add `sed`
* 1.0.23: Yank 1.0.21/22
* 1.0.20: Adds `uniq` and `uname`
* 1.0.19: Change default text editor to sublime over textpad
* 1.0.18: Adds `true` and `false` and `timeout`
* 1.0.17: Minor fixes.
* 1.0.16: Adds `xargs`, `ps`, `id`, `wc`, `md5sum`, `tee`
* 1.0.15: fixed 'no' command, which doesn't exist.
* 1.0.13: Adds `yes`
* 1.0.12: open tries to find a text editor.
* 1.0.11: Adds `sudo_win32[sudo]`
* 1.0.10: Fixes `fixvmmem` which now uses elevated_exec
* 1.0.9: Fixes `open` when using forward slashes
* 1.0.8: Fixes `open` when using `open .`
* 1.0.7: Fixes missing `fixvmmem`
* 1.0.5: `open` now assumes current directory if no path is given
* 1.0.4: `fixvmmem` now runs in elevated privledges
* 1.0.3: Adds `fixvmmem`
* 1.0.2: Adds `unzip`
* 1.0.1: Adds `pico/nano`
* 1.0.0: Moved zcmds_win32 from zcmds
| zcmds-win32 | /zcmds_win32-1.2.1.tar.gz/zcmds_win32-1.2.1/README.md | README.md |
import os
import shutil
from tempfile import TemporaryDirectory
from typing import Optional
from download import download # type: ignore
from zcmds_win32._exec import os_exec
# Default location of the Unix userland tools shipped with Git for Windows.
GIT_BIN = r"C:\Program Files\Git\usr\bin"
# Archive with a fallback copy of the git-bash tools, downloaded when Git for
# Windows is not installed locally.
GIT_BIN_TOOL_URL = (
    "https://github.com/zackees/zcmds_win32/raw/main/assets/git-bash-bin.zip"
)
# Directory containing this module.
HERE = os.path.abspath(os.path.dirname(__file__))
# Directory the downloaded tools are unpacked into.
DOWNLOADED_GIT_BIN = os.path.join(HERE, "git-bash-bin")
def install() -> None:
    """Downloads the git-bash Unix tools and unpacks them next to this module."""
    with TemporaryDirectory() as tmpdir:
        download(GIT_BIN_TOOL_URL, tmpdir, replace=True, kind="zip")
        # Use the module-level constant instead of rebuilding the same path
        # inline, so the install and lookup locations cannot drift apart.
        dst = DOWNLOADED_GIT_BIN
        os.makedirs(dst, exist_ok=True)
        src_dir = os.path.join(tmpdir, "git-bash-bin")
        files = os.listdir(src_dir)
        # Move DLLs first — presumably so executables never land in the
        # destination before their dependencies do (TODO confirm intent).
        files.sort(key=lambda filename: not filename.endswith(".dll"))
        for filename in files:
            src = os.path.join(src_dir, filename)
            try:
                shutil.move(src, os.path.join(dst, filename))
            except shutil.Error:
                # Deliberate best effort: skip files that cannot be moved
                # (e.g. a file of the same name already exists in dst).
                pass
def get_or_fetch_unix_tool_path(name: str) -> Optional[str]:
    """
    Return the path of the Unix tool 'name', downloading the git-bash
    bundle on first use when Git for Windows is not installed.
    """
    path = shutil.which(name)
    # A hit inside a "scripts" directory is skipped — presumably these are
    # pip-installed wrappers rather than the real Unix tool (TODO confirm).
    if path and os.path.basename(os.path.dirname(path)).lower() != "scripts":
        return path
    # add .exe to the name if it's not there
    if not name.lower().endswith(".exe"):
        name += ".exe"
    git_tool = os.path.join(GIT_BIN, name)
    if os.path.exists(git_tool):
        return git_tool
    if not os.path.exists(DOWNLOADED_GIT_BIN):
        install()
    # Consistency fix: reuse DOWNLOADED_GIT_BIN instead of re-joining
    # HERE/"git-bash-bin" inline (same path, one definition).
    return os.path.join(DOWNLOADED_GIT_BIN, name)
def unix_tool_exec(
    cmdname: str, inherit_params: bool = True, cwd: Optional[str] = None
) -> int:
    """Runs the named Unix tool and returns its exit status."""
    tool = get_or_fetch_unix_tool_path(cmdname)
    if tool:
        return os_exec(tool, inherit_params, cwd)
    raise FileNotFoundError(f"Could not find {cmdname} in PATH or in {GIT_BIN}")
import os
import shutil
import sys
from dataclasses import dataclass
from typing import Optional
from zcmds_win32._exec import os_exec
@dataclass
class Program:
    """An external editor executable and the arguments it is launched with."""

    # Absolute path of the executable.
    path: str
    # Extra command-line arguments inserted before the target file name.
    args: list[str]
def get_sublime() -> Optional[Program]:
    """Attempts to find the Sublime Text executable."""
    # "-n" opens the file in a new Sublime window.
    args = ["-n"]
    found = shutil.which("subl") or shutil.which("sublime_text")
    if found:
        return Program(found, args)
    # Probe the standard install locations of the various major versions.
    for suffix in ("", "2", "3", "4"):
        candidate = f"C:\\Program Files\\Sublime Text{suffix}\\sublime_text.exe"
        if os.path.exists(candidate):
            return Program(candidate, args)
    return None
def get_textpad() -> Optional[Program]:
    """Attempts to find the TextPad executable."""
    located = shutil.which("textpad")
    if located:
        return Program(located, [])
    # Probe the standard install locations of the various major versions.
    for version in ("", "2", "3", "4", "5", "6", "7", "8"):
        candidate = f"C:\\Program Files\\TextPad {version}\\TextPad.exe"
        if os.path.exists(candidate):
            return Program(candidate, [])
    return None
# Preferred editor: Sublime Text when available, TextPad as fallback, else None.
TEXT_EDITOR = get_sublime() or get_textpad()

# File extensions treated as text/source files and opened in TEXT_EDITOR
# instead of being handed to Windows Explorer.
SOURCE_EXTENSIONS = [
    ".c",
    ".cpp",
    ".cxx",
    ".cc",
    ".c++",
    ".h",
    ".hpp",
    ".hxx",
    ".hh",
    ".h++",
    ".py",
    ".kt",
    ".java",
    ".js",
    ".ts",
    ".html",
    ".css",
    ".scss",
    ".sass",
    ".less",
    ".json",
    ".xml",
    ".yml",
    ".yaml",
    ".md",
    ".txt",
    ".bat",
    ".cmd",
    ".sh",
    ".ps1",
]
def handle_file(file: str) -> tuple[bool, int]:
    """Open a source/text file in the configured editor.

    Returns (handled, exit_code); 'handled' is False when the file is not a
    known text type or when no editor was found.
    """
    extension = os.path.splitext(file)[1]
    if extension not in SOURCE_EXTENSIONS or TEXT_EDITOR is None:
        return (False, 0)
    editor_args = " ".join(TEXT_EDITOR.args)
    # The empty quotes supply the window-title argument that "start" expects.
    command = f'start "" "{TEXT_EDITOR.path}" {editor_args} "{file}"'
    return (True, os.system(command))
def main() -> int:
    """Open the given paths: text files in an editor, anything else via Explorer."""
    cmd = "explorer"
    if len(sys.argv) == 1:
        # No arguments: open the current directory in Explorer.
        cmd += " ."
        return os.system(cmd)
    for i, _ in enumerate(sys.argv):
        if i < 1:
            # Skip the program name itself.
            continue
        # Normalize to Windows-style separators *in place*; os_exec presumably
        # re-reads sys.argv, so this mutation matters — confirm before refactoring.
        arg = sys.argv[i].replace("/", "\\")
        sys.argv[i] = arg
        if os.path.isfile(arg):
            handled, ret = handle_file(arg)
            if handled:
                return ret
    # Fall back to Explorer with the (normalized) original arguments.
    return os_exec(cmd)
def unit_test() -> None:
    """Unit test for this module."""
    # Run from the repository root so the relative README.md path resolves.
    here = os.path.dirname(__file__)
    project_root = os.path.join(here, "..", "..")
    os.chdir(project_root)
    sys.argv.append("README.md")
    main()


if __name__ == "__main__":
    unit_test()
# zcmds
Cross platform(ish) productivity commands written in python. Tools for doing video manipulation, searching through files and other things. On Windows ls, rm and other common unix file commands are installed.
[](https://github.com/zackees/zcmds/actions/workflows/push_macos.yml)
[](https://github.com/zackees/zcmds/actions/workflows/push_win.yml)
[](https://github.com/zackees/zcmds/actions/workflows/push_ubuntu.yml)
[](https://github.com/zackees/zcmds/actions/workflows/lint.yml)
# Install
```bash
> pip install zcmds
> zcmds
> diskaudit
```
# Commands
* archive
* Easily create archives
* askai
* Asks a question to OpenAI from the terminal command. Requires an openai token which will be requested and saved on first use.
* audnorm
* Normalizes the audio.
* comports
* Shows all the ports that are in use at the current computer (useful for Arduino debugging).
* diskaudit
* walks the directory from the current directory and shows which folders / files take up the most disk.
* git-bash (win32)
* launches git-bash terminal (windows only).
* gitsummary
* Generates a summary of the git repository commits
* findfile
* finds a file with the given glob.
* img2webp
* Conversion tool for converting images into webp format.
* img2vid
* Converts a series of images to a video.
* obs_organize
* organizes the files in your default obs directory.
* printenv
* prints the current environment variables, including path.
* pdf2png
* Converts a pdf to a series of images
* pdf2txt
* Converts a pdf to a text file.
* search_and_replace
* Search all the files from the current directory and apply search and replace changes.
* search_in_files
* Search all files from current working directory for matches.
* sharedir
* takes the current folder and shares it via a reverse proxy (think ngrok).
* stereo2mono
* Reduces a stereo audio / video to a single mono track.
* sudo (win32 only)
* Runs a command as in sudo, using the gsudo tool.
* vidcat
* Concatenates two videos together, upscaling a lower resolution video.
* vidmute
* Strips out the audio in a video file.
* vidinfo
* Uses ffprobe to find the information from a video file.
* vid2gif
* A video is converted into an animated gif.
* vid2jpg
* A video is converted to a series of jpegs.
* vid2mp3
* A video is converted to an mp3.
* vid2mp4
* A video is converted to mp4. Useful for obs which saves everything as mkv. Extremely fast with mkv -> mp4 converstion.
* vidclip
* Clips a video using timestamps.
* viddur
* Get's the. Use vidinfo instead.
* vidshrink
* Shrinks a video. Useful for social media posts.
* vidspeed
* Changes the speed of a video.
* vidvol
* Changes the volume of a video.
* ytclip
* Download and clip a video from a url from youtube, rumble, bitchute, twitter... The timestamps are prompted by this program.
* whichall
* Finds all the executables in the path.
* unzip
* unzip the provided file
* fixinternet
* Attempts to fix the internet connection by flushing the dns and resetting the network adapter.
* fixvmmem (win32 only)
* Fixes the vmmem consuming 100% cpu on windows 10 after hibernate.
# Install (dev):
* `git clone https://github.com/zackees/zcmds`
* `cd zcmds`
* `python -m pip install -e .`
* Test by typing in `zcmds`
# Additional install
For the pdf2image use:
* win32: `choco install poppler`
* ... ?
# Note:
Running tox will install hooks into the .tox directory. Keep this in my if you are developing.
TODO: Add a cleanup function to undo this.
# Release Notes
* 1.4.24: Adds `archive`
* 1.4.23: Bump zcmds-win32
* 1.4.21: `askai` handles pasting text that has double lines in it.
* 1.4.20: `askai` is now at gpt-4
* 1.4.19: Adds `losslesscut` for win32.
* 1.4.18: Fix win32 `zcmds_win32`
* 1.4.17: `vid2mp4` now adds `--nvenc` and `--height` `--crf`
* 1.4.16: Fixes `img2webp`.
* 1.4.15: Adds `img2webp` utility.
* 1.4.13: Add `--no-fast-start` to vidwebmaster.
* 1.4.12: Fixes a bug in find files when an exception is thrown during file inspection.
* 1.4.11: `findfiles` now has --start --end --larger-than --smaller-then
* 1.4.10: `zcmds` now uses `get_cmds.py` to get all of the commands from the exe list.
* 1.4.8: `audnorm` now encodes in mp3 format (improves compatibility). vid2mp3 now allows `--normalize`
* 1.4.7: Fixes broken build.
* 1.4.6: Adds `say` command to speak out the text you give the program
* 1.4.5: Adds saved settings for gitsummary
* 1.4.4: Adds `pdf2txt` command
* 1.4.3: Adds `gitsummary` command
* 1.4.2: Bump up zcmds_win32 to 1.0.17
* 1.4.1: Adds 'whichall' command
* 1.4.0: Askai now supports question-answer-question-... interactive mode
* 1.3.17: Adds syntax highlighting to open askai tool
* 1.3.16: Improves openai by using gpt 3.5
* 1.3.15: Improve vidinfo for more data and be a lot faster with single pass probing.
* 1.3.14: Improve vidinfo to handle non existant streams and bad files.
* 1.3.13: Added `img2vid` command.
* 1.3.12: Added `fixinternet` command.
* 1.3.11: Fix badges.
* 1.3.10: Suppress spurious warnings with chardet in openai
* 1.3.9: Changes sound driver, should eliminate the runtime dependency on win32.
* 1.3.8: Adds askai tool
* 1.3.7: findfile -> findfiles
* 1.3.6: zcmds[win32] is now at 1.0.2 (includes `unzip`)
* 1.3.5: zcmds[win32] is now at 1.0.1 (includes `nano` and `pico`)
* 1.3.4: Adds `printenv` utility
* 1.3.3: Adds `findfile` utility.
* 1.3.2: Adds `comports` to display all comports that are active on the computer.
* 1.3.1: Nit improvement in search_and_replace to improve ui
* 1.3.0: vidwebmaster now does variable rate encoding. --crf and --heights has been replaced by --encodings
* 1.2.1: Adds improvements to vidhero for audio fade and makes vidclip improves usability
* 1.2.0: stripaudio -> vidmute
* 1.1.30: Improves vidinfo with less spam on the console and allows passing height list
* 1.1.29: More improvements to vidinfo
* 1.1.28: vidinfo now has more encodingg information
* 1.1.27: Fix issues with spaces in vidinfo
* 1.1.26: Adds vidinfo
* 1.1.26: Vidclip now supports start_time end_time being omitted.
* 1.1.25: Even better performance of diskaudit. 50% reduction in execution time.
* 1.1.24: Fixes diskaudit from double counting
* 1.1.23: Fixes test_net_connection
* 1.1.22: vid2mp4 - if file exists, try another name.
* 1.1.21: Adds --fps option to vidshrink utility
* 1.1.19: Using pyprojec.toml build system now.
* 1.1.17: vidwebmaster fixes heights argument for other code path
* 1.1.16: vidwebmaster fixes heights argument
* 1.1.15: vidwebmaster fixed
* 1.1.14: QT5 -> QT6
* 1.1.13: vidwebmaster fixes () bash-bug in linux
* 1.1.12: vidwebmaster now has a gui if no file is supplied
* 1.1.11: Adds vidlist
* 1.1.10: Adds vidhero
* 1.1.9: adds vidwebmaster
* 1.1.8: adds vidmatrix to test out different settings.
* 1.1.7: vidshrink and vidclip now both feature width argument
* 1.1.6: Adds touch to win32
* 1.1.5: Adds unzip to win32
* 1.1.4: Fix home cmd.
* 1.1.3: Fix up cmds so it returns int
* 1.1.2: Fix git-bash on win32
* 1.1.1: Release
# TODO:
* Add silence remover:
* https://github.com/bambax/Remsi
* Add lossless cut to vidclip
* https://github.com/mifi/lossless-cut
| zcmds | /zcmds-1.4.25.tar.gz/zcmds-1.4.25/README.md | README.md |
================================
zcms : 基于文件系统的超轻CMS
================================
zcms是一个极简的基于文件系统CMS(类Jekyll),都是你熟悉的:
- 无需数据库, 每个页面是一个文本文件(rst/md)
- 扩展reStructuredText指令(.rst),轻松实现博客、导航、新闻等动态内容
示例站点:
- http://viewer.everydo.com
- http://developer.everydo.com
- http://everydo.com
- http://edodocs.com
运行自带的demo站点(8000端口访问):
docker run -d -p 8000:80 panjunyong/zcms
运行自己位于/home/panjy/sites的站点::
docker run -d -v /home/panjy/sites:/var/sites -p 8000:80 panjunyong/zcms
调试站点皮肤(即时刷新,但是运行速度较慢):
docker run -d -v /home/panjy/sites:/var/sites -p 8000:80 panjunyong/zcms debug
如有反馈,请微博联系: http://weibo.com/panjunyong
无阻力建站
============================
站点放在sites文件夹内容,每个站点包括内容(contents)和皮肤(themes)
设置栏目顺序和标题
-----------------------
每个文件夹下,可以放置一个 `_config.yaml` 的文件,在这里设置文件夹的属性:
title: 教程 # 标题
order: [index.rst, tour, blog, about.rst] # 显示顺序
exclude: [img] # 隐藏图片文件夹的显示
对于rst/md的页面文件, 可直接在文件头部指定这些信息:
---
title: 教程 # 标题
creator: 潘俊勇 # 创建人
created: 2010-12-12 9:12 # 创建时间,新闻根据这个时间排序
---
页面文件的属性,必须以三个短横开始和结束
设置左右列以及头部区域
--------------------------
对整个文件夹下的页面模版,可以定制左侧、右侧和头部的显示信息,分别加入: `_left.rst` , `_right.rst` , `_upper.rst`
如果具体某个页面,需要定制,也可以单独设置,通过命名来区分:
1. index.rst 页面的头部信息 `_upper_index.rst`
2. about.rst 页面的左侧信息 `_left_about.rst`
动态内容
-------------
可在reST中使用如下指令即可:
1. 最近新闻
.. news::
:size: 5
:path: blog
2. 博客页面
.. blogs::
:size: 20
3. 导航树
.. navtree::
:root_depth: 2
外观模版的设置
---------------------
在站点根文件夹下面的_config.yaml里面,定义了整个站点的皮肤
theme_base: http://localhost:6543/themes/bootstrap # 存放模版的基准位置,这里可能存放了多个模版
theme: default.html # 默认的模版
外观模版是通过一个网址来指定的,上面的完整外观模版地址是:
http://localhost:6543/themes/bootstrap/default.html
如果不想使用默认的外观模版,可文件夹或页面属性中,设置个性化的外观模版:
theme: home.html # 首页模版,可能没有左右列
这里会使用外观模版:
http://localhost:6543/themes/bootstrap/home.html
制作外观模版
-----------------
可看看themes文件夹里面的文件,其实就是一个python的String Template.
一个最基础的外观模版可以是:
<html>
<head>
<title>$title - $site_title</title>
<meta name="Description" content="$site_description"/>
</head>
<body>
<ul>$nav</ul>
<div>$upper</div>
<table>
<tr>
<td>$left</td>
<td>$content</td>
<td>$right</td>
</tr>
</table>
</body>
</html>
这个文件里面可以包括如下变量:
- `site_title` : 站点的标题
- `site_description` : 当前内容的描述信息
- `nav` : 站点的导航栏目
- `title` : 当前内容的标题
- `description` : 当前内容的描述信息
- `content` : 当前内容正文
- `left` : 左侧列显示的内容
- `right` : 右侧列显示的内容
- `upper` : 上方区域显示的内容
- `theme_base` : 外观模版的所在的位置
虚拟主机设置
-----------------
在站点根文件夹下面的_config.yaml里面,定义了整个站点的虚拟主机设置:
domain_name: domain.com, www.domain.com # 域名
这表示,可以通过上述 `domain_name` 直接访问站点,url路径上可省略 `site_name`
更新缓存
===================
默认系统会自动对theme进行缓存,最近更新等内容是每天刷新一次。
可调用如下地址,手动进行即时刷新:
1. 更新皮肤: `http://server.com/clear_theme_cache`
2. 更新内容: `http://server.com/clear_content_cache`
开发调试代码
===================
使用本地代码(/home/panjy/git/zcms):
docker run -t -i -v /home/panjy/git/zcms:/opt/zcms/ -p 8000:80 panjunyong/zcms shell
bin/buildout
bin/pserve development.ini
Jekyll参考
===================
- http://www.ruanyifeng.com/blog/2012/08/blogging_with_jekyll.html
- http://yanping.me/cn/blog/2012/03/18/github-pages-step-by-step/
- http://www.soimort.org/posts/101/
TODO
================
1. 优化默认的bootstrap风格皮肤
2. 简化虚拟主机的配置:
- 合并nginx和zcms这2个docker
- 各个站点部署方面的配置转到站点的 `_config.py` 中
- 自动生成nginx的配置文件
3. production模式下,应该大量缓存加速,减少io
4. 提供webdav api
5. 提供RSS输出
| zcms | /zcms-0.6.4.tar.gz/zcms-0.6.4/README.md | README.md |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.