from logging import getLogger
from typing import cast
from bs4 import BeautifulSoup
import bs4
from ac_core.constant import _SITE_URL
from ac_core.interfaces.HttpUtil import HttpUtilInterface
from ac_core.utils import HTML_PARSER
logger = getLogger(__name__)
def is_logged_in(http_util: HttpUtilInterface) -> bool:
"""This method will use ``http_util`` for login check by visit atcoder site and parse html
:param HttpUtilInterface http_util: a http instance, for example ``requests.session()``
:returns: if it is successful logged in.
:examples:
.. code-block::
from ac_core.auth import fetch_login, is_logged_in
import requests
h = requests.session()
#h = Helper(requests.session())
print(is_logged_in(h))
print(fetch_login(h, 'username', 'password'))
print(is_logged_in(h))
"""
html = http_util.get(f"{_SITE_URL}/home").text
soup = BeautifulSoup(html, HTML_PARSER)
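# Heuristic (an assumption based on AtCoder's current page layout): when logged in, the navbar
# dropdown menu contains a link to your own /users/<name> page, which is absent for anonymous visitors.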
dropdown_menus = soup.find_all('ul', class_="dropdown-menu")
found_user_link = False
for menu in dropdown_menus:
a_s = menu.find_all('a')
for a in a_s:
if a['href'].startswith('/users/'):
found_user_link = True
return found_user_link
def fetch_login(http_util: HttpUtilInterface, username: str, password: str) -> bool:
"""This method will use ``http_util`` for login request and :py:func:`is_logged_in()` for login check
:param http_util: a http instance, for example ``requests.session()``
:param username: AtCoder username
:param password: AtCoder password
:returns: whether the post succeeded and the session is logged in
:examples:
.. code-block::
from ac_core.auth import fetch_login, is_logged_in
import requests
h = requests.session()
#h = Helper(requests.session())
print(is_logged_in(h))
print(fetch_login(h, 'username', 'password'))
print(is_logged_in(h))
"""
try:
res = http_util.get(_SITE_URL + '/login')
soup = BeautifulSoup(res.text, HTML_PARSER)
csrf_token = cast(bs4.Tag, soup.find(attrs={'name': 'csrf_token'})).get('value')
post_data = {
'csrf_token': csrf_token,
'username': username,
'password': password,
}
ret = http_util.post(url='https://atcoder.jp/login', data=post_data)
if ret.status_code == 403:
# TODO fix???
# 403 REVEL_CSRF: tokens mismatch. Seems to be caused by stale cookies? Refreshing the token does not seem to help either?
logger.error('Atcoder 403(may need clear cookies and relogin):')
logger.error(ret.text)
except Exception as e:
logger.exception(e)
return False
return is_logged_in(http_util)
class InvalidSessionError(Exception):
"""
:meta private:
"""
# not used; hidden from the docs for now
DEFAULT_MESSAGE = "Your login session is invalid. please relogin."
def __init__(self, message: str = DEFAULT_MESSAGE) -> None:
super().__init__(message)
# TODO logout support
# source: yxr-atcoder-core (0.0.3.3) / ac_core/auth.py
from dataclasses import dataclass
from enum import Enum
import json
import os
import re
from bs4 import BeautifulSoup
from ac_core.constant import _SITE_URL
from ac_core.interfaces.HttpUtil import HttpUtilInterface
@dataclass
class SubmissionResult:
class Status(Enum):
INIT: str = 'Init'
PENDING: str = 'Waiting for Judging'
RUNNING: str = 'Judging'
RE: str = 'Runtime Error'
AC: str = 'Accepted'
WA: str = 'Wrong Answer'
CE: str = 'Compilation Error'
TLE: str = 'Time Limit Exceeded'
id: str = ''
url: str = '' # json url for refetch
score: int = 500
status: Status = Status.INIT
time_cost_ms: int = 0
mem_cost_kb: int = 0
msg_txt: str = ''
def watch_result(url: str) -> str: # sock url, single submissions
return ''
# sample fragment of the raw JSON payload: title=\"Compilation Error\"\u003eCE\u003c/span\u003e\u003c/td\u003e","Score":"0"
def parse_result(resp: str) -> SubmissionResult:
"""parse submit result get from json result
:param resp: the json result get from ``https://atcoder.jp/contests/{contest_id}/submissions/me/status/json?sids[]={submision id}``
:examples:
.. code-block::
import requests
from ac_core.result import parse_result
r = requests.get('https://atcoder.jp/contests/abc101/submissions/me/status/json?sids[]=5371077')
if r.status_code == 200:
print(parse_result(r.text)) # pass the raw json text
"""
res = json.loads(resp)["Result"]
sub_id = list(res.keys())[0]
soup = BeautifulSoup(res[sub_id]["Html"], "lxml")
tds = soup.find_all('td')
status = SubmissionResult.Status(str(tds[0].find('span').attrs.get('title')))
try:
score = int(res[sub_id]["Score"])
except:
score = 0
try:
time_cost_ms = int(tds[1].text.split(" ")[0])
except:
time_cost_ms = 0
try:
mem_cost_kb = int(tds[2].text.split(" ")[0])
except:
mem_cost_kb = 0
msg_txt = ''
if status == SubmissionResult.Status.RUNNING:
msg_txt = soup.text.strip()
return SubmissionResult(
id=sub_id,
score=score,
status=status,
time_cost_ms=time_cost_ms,
mem_cost_kb=mem_cost_kb,
msg_txt=msg_txt,
)
def fetch_result_by_url(http_util: HttpUtilInterface, json_url: str) -> SubmissionResult:
"""parse submit result by *http_util* with submission *json_url*.
:param http_util: e.g. ``requests.session()``
:param json_url: e.g. ``https://atcoder.jp/contests/abc101/submissions/me/status/json?sids[]=5371077``
:examples:
.. code-block::
import requests
from ac_core.result import fetch_result_by_url
print(fetch_result_by_url(requests.session(),'https://atcoder.jp/contests/abc101/submissions/me/status/json?sids[]=5371077'))
The structured data returned by :py:func:`fetch_result` contains the submission json url:
.. code-block::
import requests
from ac_core.auth import fetch_login, is_logged_in
from ac_core.result import fetch_result, fetch_result_by_url
h = requests.session()
fetch_login(h, 'username', 'password')
assert(is_logged_in(h))
result = fetch_result(h,'https://atcoder.jp/contests/abc275/tasks/abc275_f')
print(fetch_result_by_url(h,result.url))
"""
response = http_util.get(url=json_url)
ret = parse_result(resp=response.text)
ret.url = json_url
return ret
def _problem_url_to_sub_url(problem_url: str) -> str:
# problem_url https://atcoder.jp/contests/abc275/tasks/abc275_f
r = re.match('^(.*)/tasks/(.*)$', problem_url)
assert r is not None
prefix = r.group(1)
problem_suffix = r.group(2)
# https://atcoder.jp/contests/abc275/submissions/me?f.Task=abc275_f
return os.path.join(prefix, f'submissions/me?f.Task={problem_suffix}')
def _parse_json_url(html: str):
soup = BeautifulSoup(html, 'lxml')
# <a href='/contests/abc101/submissions/5371227'>Detail</a>
r = re.search('<td class="text-center">.*?"/contests/(.*?)/submissions/([0-9]*?)\">Detail</a>', str(soup),
re.DOTALL | re.MULTILINE)
assert r is not None # no submission
return os.path.join(_SITE_URL, f"contests/{r.group(1)}/submissions/me/status/json?sids[]={r.group(2)}")
def fetch_result(http_util: HttpUtilInterface, problem_url: str) -> SubmissionResult:
"""parse submit result by *http_util* with *problem_url*.
You need logged in before using this method. This function will find your last submission for the problem.
:param http_util: e.g. ``requests.session()``
:param problem_url: e.g. ``https://atcoder.jp/contests/abc275/tasks/abc275_f``
:examples:
.. code-block::
import requests
from ac_core.auth import fetch_login, is_logged_in
from ac_core.result import fetch_result
h = requests.session()
fetch_login(h, 'username', 'password')
assert(is_logged_in(h))
print(fetch_result(h,'https://atcoder.jp/contests/abc275/tasks/abc275_f'))
"""
# https://atcoder.jp/contests/abc275/submissions/me?f.Task=abc275_f
submission_url = _problem_url_to_sub_url(problem_url)
# <a href='/contests/abc101/submissions/5371227'>Detail</a>
# https://atcoder.jp/contests/abc101/submissions/me/status/json?sids[]=5371077
resp = http_util.get(submission_url)
json_url = _parse_json_url(resp.text)
return fetch_result_by_url(http_util, json_url)
# source: yxr-atcoder-core (0.0.3.3) / ac_core/result.py
# yxr-codeforces-core
Requires Python 3.8+ ([typing.Protocol](https://docs.python.org/3/library/typing.html#typing.Protocol) is new in version 3.8).
| feature | code | inline example doc | unit test | e2e test |
| -------------------------------- | ---- | ------------------ | --------- | -------- |
| account login | ✅ | ✅ | ✅ | ✅ |
| submit code | ✅ | ✅ | ✅ | ❌ |
| submission websocket | ✅ | ✅ | ✅ | ❌ |
| my submission page | ✅ | ✅ | ❌ | ❌ |
| contest register | ✅ | ✅ | ❌ | ✅ |
| contest list | ✅ | ✅ | ✅ | ❌ |
| contest meta | ✅ | ✅ | ✅ | ❌ |
| contest standing(common/friends) | ✅ | ✅ | ✅ | ❌ |
| problems | ✅ | ✅ | ✅ | ❌ |
| specific problem | ✅ | ✅ | ✅ | ❌ |
| language list | ✅ | ✅ | ✅ | ❌ |
| pid/url parser | ✅ | ✅ | ❌ | ❌ |
| domain 2 ip dig tool | ❌ | ❌ | ❌ | ❌ |
## Docs
- [User](https://cromarmot.github.io/yxr-codeforces-core/user/index.html)
- [Developer](https://cromarmot.github.io/yxr-codeforces-core/dev/index.html)
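## Quick start

A minimal usage sketch, stitched together from the docstring examples in this package (replace `<handle>` / `<password>` with your own credentials; logging in is only needed for account-bound features such as submitting, and contest `1779` is just an example):

```python
import asyncio

from codeforces_core.account import async_login
from codeforces_core.httphelper import HttpHelper
from codeforces_core.problems import async_problems


async def main():
    # empty paths keep cookies/tokens in memory instead of on disk
    http = HttpHelper(token_path='', cookie_jar_path='')
    await http.open_session()
    login = await async_login(http=http, handle='<handle>', password='<password>')
    assert login.success
    # fetch and print the problem list of contest 1779
    for p in await async_problems(http=http, contest_id='1779'):
        print(p.level, p.title)
    await http.close_session()


asyncio.run(main())
```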
<!-- source: yxr-codeforces-core (0.0.2.2) / README.md -->
import logging
from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Tuple, cast
from lxml import html
from lxml.etree import _Element
from os import path
import asyncio
import aiohttp
import pyaes
import json
import re
from .constants import CF_HOST
from .interfaces.AioHttpHelper import AioHttpHelperInterface
from .kwargs import extract_common_kwargs
default_headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip',
# 'User-Agent': config.conf['user_agent'], TODO
'User-Agent':
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'
}
class RCPCRedirectionError(Exception):
def __init__(self):
super().__init__("RCPC redirection detected")
def add_header(newhdr, headers: Dict[str, str]) -> Dict[str, str]:
headers.update(newhdr)
return headers
async def on_request_end(session, trace_request_ctx, params):
elapsed = asyncio.get_event_loop().time() - trace_request_ctx.start
print("[*] Request end : {}".format(elapsed))
class HttpHelper(AioHttpHelperInterface):
session: Optional[aiohttp.ClientSession] = None
cookie_jar_path = ''
cookie_jar: Optional[aiohttp.CookieJar] = None
token_path = ''
tokens: Dict[str, str] = {}
headers: Dict[str, str] = {} # TODO
logger: logging.Logger
def __init__(self,
cookie_jar_path: str = '',
token_path: str = '',
headers=default_headers,
host=CF_HOST,
**kw) -> None:
# if path is empty string then won't save to any file, just store in memory
self.cookie_jar_path = cookie_jar_path
# if path is empty string then won't save to any file, just store in memory
self.token_path = token_path
self.headers = headers
# TODO support cf mirror site?
self.host = host
self.logger = extract_common_kwargs(**kw).logger
@staticmethod
def load_tokens(token_path: str) -> Dict[str, Any]:
if token_path and path.isfile(token_path):
with open(token_path, 'r') as f:
return json.load(f)
return {}
@staticmethod
def load_cookie_jar(cookie_jar_path: str) -> aiohttp.CookieJar:
jar = aiohttp.CookieJar()
if cookie_jar_path:
if path.isfile(cookie_jar_path):
jar.load(file_path=cookie_jar_path)
else:
jar.save(file_path=cookie_jar_path)
return jar
async def open_session(self) -> aiohttp.ClientSession:
self.cookie_jar = HttpHelper.load_cookie_jar(self.cookie_jar_path)
self.tokens = HttpHelper.load_tokens(self.token_path)
self.session = await aiohttp.ClientSession(cookie_jar=self.cookie_jar).__aenter__()
return self.session
async def close_session(self) -> None:
await self.session.__aexit__(None, None, None)
self.tokens = {}
self.cookie_jar = None
self.session = None
def update_tokens(self, csrf: str, ftaa: str, bfaa: str, uc: str, usmc: str) -> None:
self.tokens = {'csrf': csrf[:32], 'ftaa': ftaa, 'bfaa': bfaa, 'uc': uc, 'usmc': usmc}
if self.token_path:
with open(self.token_path, 'w') as f:
json.dump(self.tokens, f)
async def async_get(self, url, headers=None, csrf=False):
if self.session is None:
raise Exception('Please open_session() before async_get()')
if headers == None: headers = default_headers
if csrf and 'csrf' in self.tokens:
headers = add_header({'X-Csrf-Token': self.tokens['csrf']}, headers=headers)
# TODO remove the feature
if url.startswith('/'): url = self.host + url
try:
async with self.session.get(url, headers=headers) as response:
assert response.status == 200
text = await response.text()
self.check_rcpc(text)
if self.cookie_jar_path:
self.cookie_jar.save(file_path=self.cookie_jar_path) # TODO move auto save to file out
return text
except RCPCRedirectionError:
async with self.session.get(url, headers=headers) as response:
assert response.status == 200
if self.cookie_jar_path:
self.cookie_jar.save(file_path=self.cookie_jar_path)
return await response.text()
except Exception as e:
self.logger.error(e)
async def async_post(self, url, data, headers=default_headers, csrf=False, **kwargs: Any):
if self.session is None:
raise Exception('Please open_session() before async_post()')
if headers == None: headers = default_headers
if csrf and 'csrf' in self.tokens:
headers = add_header({'X-Csrf-Token': self.tokens['csrf']}, headers=headers)
# TODO remove the feature
if url.startswith('/'): url = self.host + url
try:
async with self.session.post(url, headers=headers, data=data, **kwargs) as response:
assert response.status == 200
self.check_rcpc(await response.text())
if self.cookie_jar_path:
self.cookie_jar.save(file_path=self.cookie_jar_path)
return await response.text()
except RCPCRedirectionError:
async with self.session.post(url, headers=headers, data=data) as response:
assert response.status == 200
if self.cookie_jar_path:
self.cookie_jar.save(file_path=self.cookie_jar_path)
return await response.text()
except Exception as e:
self.logger.error(e)
def get_tokens(self):
return self.tokens
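# Descriptive note: check_rcpc() handles Codeforces' browser-check page. When that page is served
# instead of the real content it loads /aes.min.js plus an inline script holding three hex values
# (a = key, b = iv, c = ciphertext); the code below redoes that script's work by AES-CBC-decrypting
# c with (a, b), storing the result as the RCPC cookie, and raising RCPCRedirectionError so the
# caller retries the original request with the new cookie.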
def check_rcpc(self, html_data: str):
doc = html.fromstring(html_data)
aesmin = cast(List[_Element], doc.xpath(".//script[@type='text/javascript' and @src='/aes.min.js']"))
if len(aesmin) > 0:
print("[+] RCPC redirection detected")
js = cast(List[_Element], doc.xpath(".//script[not(@type)]"))
assert len(js) > 0
keys = re.findall(r'[abc]=toNumbers\([^\)]*', js[0].text)
for k in keys:
if k[0] == 'a':
key = bytes.fromhex(k.split('"')[1])
elif k[0] == 'b':
iv = bytes.fromhex(k.split('"')[1])
elif k[0] == 'c':
ciphertext = bytes.fromhex(k.split('"')[1])
assert len(key) == 16 and len(iv) == 16 and len(ciphertext) == 16, 'AES decryption error'
c = pyaes.AESModeOfOperationCBC(key, iv=iv)
plaintext = c.decrypt(ciphertext)
rcpc = plaintext.hex()
self.cookie_jar.update_cookies({'RCPC': rcpc})
self.cookie_jar.save(file_path=self.cookie_jar_path)
raise RCPCRedirectionError()
def create_form(self, form_data) -> aiohttp.FormData:
form = aiohttp.FormData()
for k, v in form_data.items():
form.add_field(k, v)
return form
# callback return (end watch?, transform result)
async def websockets(self, url: str, callback: Callable[[Any], Tuple[bool, Any]]) -> AsyncIterator[Any]:
try:
async with self.session.ws_connect(url) as ws:
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
js = json.loads(msg.data)
js['text'] = json.loads(js['text'])
endwatch, obj = callback(js)
yield obj
if endwatch:
return
else:
self.logger.error('unexpected ws message type: %s', msg.type)
break
return
except Exception as e:
self.logger.error(e)
return
# source: yxr-codeforces-core (0.0.2.2) / codeforces_core/httphelper.py
from dataclasses import dataclass, field
from enum import Enum
from typing import List
from bs4 import BeautifulSoup
import bs4
from .interfaces.AioHttpHelper import AioHttpHelperInterface
@dataclass
class TestCase:
in_data: str
out_data: str
@dataclass
class ProblemInfo:
# testcases: List[TestCase]
title: str
level: str
time_limit_seconds: str
memory_limit_mb: str
desc: str
in_tc: List[str]
out_tc: List[str]
note: str
@dataclass
class ParseProblemResult(object):
class Status(str, Enum):
AC = 'AC'
FAILED = 'FAILED'
NOTVIS = 'NOTVIS'
status: Status = Status.NOTVIS
title: str = ''
test_cases: List[TestCase] = field(default_factory=lambda: [])
id: str = ''
oj: str = ''
description: str = ''
time_limit: str = ''
mem_limit: str = ''
url: str = ''
html: str = ''
file_path: str = ''
async def async_problem(http: AioHttpHelperInterface, contest_id: str, level: str, **kw) -> ParseProblemResult:
"""
This method will use ``http`` to request ``/contest/<contest_id>/problem/<level>``, and parse the response into a structured result
:param http: AioHttpHelperInterface
:param contest_id: contest id in url
:param level: problem index in url, e.g. ``F``
:returns: parsed structured result
Examples:
.. code-block::
import asyncio
from codeforces_core.httphelper import HttpHelper
from codeforces_core.problem import async_problem
async def demo():
# http = HttpHelper(token_path='/tmp/cache_token', cookie_jar_path='/tmp/cache_cookie_jar')
http = HttpHelper(token_path='', cookie_jar_path='')
await http.open_session()
# you can login before request
result = await async_problem(http=http, contest_id='1779', level='F')
print(result)
await http.close_session()
asyncio.run(demo())
"""
resp = await http.async_get(f'/contest/{contest_id}/problem/{level}')
problem = ParseProblemResult(html=resp)
soup = BeautifulSoup(resp, 'lxml')
# TODO implememt soup_find function to assert type for mypy
match_groups = soup.find('div', attrs={'class': 'title'})
assert isinstance(match_groups, bs4.Tag)
problem.title = str(match_groups.string)[2:].strip(" \r\n")
match_groups = soup.find(name='div', attrs={'class': 'time-limit'})
assert isinstance(match_groups, bs4.Tag)
problem.time_limit = str(match_groups.contents[-1]).strip()
match_groups = soup.find(name='div', attrs={'class': 'memory-limit'})
assert isinstance(match_groups, bs4.Tag)
problem.mem_limit = str(match_groups.contents[-1]).strip()
match_groups = soup.find(name='div', attrs={'class': 'problem-statement'})
problem.status = ParseProblemResult.Status.NOTVIS # TODO for show progress
match_groups = soup.find(name='div', attrs={'class': 'sample-test'})
assert isinstance(match_groups, bs4.Tag)
problem.test_cases.clear()
if match_groups:
test_case_inputs = match_groups.find_all(name='div', attrs={'class': 'input'})
test_case_outputs = match_groups.find_all(name='div', attrs={'class': 'output'})
assert (len(test_case_inputs) == len(test_case_outputs)) # may not? in April fool contest
for i in range(len(test_case_inputs)):
t_in = test_case_inputs[i].find(name='pre').get_text("\n").strip(" \r\n")
t_out = test_case_outputs[i].find(name='pre').get_text("\n").strip(" \r\n")
problem.test_cases.append(TestCase(t_in, t_out))
return problem
# source: yxr-codeforces-core (0.0.2.2) / codeforces_core/problem.py
from time import time
from typing import Any, Callable, Tuple, AsyncIterator
from .account import extract_channel
from .interfaces.AioHttpHelper import AioHttpHelperInterface
from .kwargs import extract_common_kwargs
from .submit import SubmissionWSResult
# return (end watch?, transform result)
def display_ws(result: Any) -> Tuple[bool, Any]:
print(result)
return False, result
# # {'id': 1, 'channel': '34f1ec4b729022e4b48f8d24b65c857805a90469', 'text': {'t': 's', 'd': [5973356517882654806, 200631363, 1777, 1746206, 'TESTS', None, 'OK', 86, 86, 3198, 7884800, 148217099, '21220', '04.04.2023 5:57:08', '04.04.2023 5:57:08', 2147483647, 73, 0]}}
# # the site-wide ws; it cannot observe the per-problem judging progress (pass percentage)
# def create_ws_task(http: AioHttpHelperInterface, ws_handler: Callable[[Any], Tuple[bool, Any]]) -> asyncio.Task:
# """
# This method will use ``http`` to create common websocket, and ``ws_handler`` to handle each ws message
#
# this websocket cannot receive a submission running percentage, use :py:func:`create_contest_ws_task()` instead
#
# :param http: AioHttpHelperInterface
# :param ws_handler: function to handler messages
#
# :returns: the task which run ws
#
# Examples:
#
# .. code-block::
#
# import asyncio
# from codeforces_core.httphelper import HttpHelper
# from codeforces_core.account import async_login
# from codeforces_core.websocket import create_ws_task, display_ws
# from codeforces_core.submit import async_submit
#
# async def demo():
# # http = HttpHelper(token_path='/tmp/cache_token', cookie_jar_path='/tmp/cache_cookie_jar')
# http = HttpHelper(token_path='', cookie_jar_path='')
# await http.open_session()
# result = await async_login(http=http, handle='<handle>', password='<password>')
# assert(result.success)
# task = create_ws_task(http, ws_handler=display_ws)
# # submit code in webpage
# try:
# result = await asyncio.wait_for(task, timeout=60)
# print("ws is done, result:", task.result())
# except asyncio.TimeoutError:
# pass
# await http.close_session()
#
# asyncio.run(demo())
# """
# epoch = int(time() * 1000) # s -> ms
# token = http.get_tokens()
# ws_url = f"wss://pubsub.codeforces.com/ws/{token['uc']}/{token['usmc']}?_={epoch}&tag=&time=&eventid="
# print(ws_url)
# return asyncio.create_task(http.websockets(ws_url, ws_handler))
# https://codeforces.com/contest/<contest_id>/my additionally contains two meta tags:
# <meta name="cc" content="xxx"/>
# <meta name="pc" content="yyy"/>
# TODO the design is not quite right: the handler both processes the data and hands the result to the caller; the handler should only decide whether to stop watching, and the transform should not live inside the handler
# these two channels can observe the pass-percentage progress while a submission is being judged
async def create_contest_ws_task_yield(http: AioHttpHelperInterface, contest_id: str,
ws_handler: Callable[[Any], Tuple[bool, Any]],
**kw) -> AsyncIterator[SubmissionWSResult]:
"""
This method will use ``http`` to create contest specific websocket, and ``ws_handler`` to handle each ws message
:param http: AioHttpHelperInterface
:param contest_id: contest id in the url
:param ws_handler: function handling each message; it returns (end watch?, transformed result)
:returns: an async iterator yielding the transformed results produced by ``ws_handler``
Examples:
See docstring of :py:func:`codeforces_core.submit.async_submit()`
"""
logger = extract_common_kwargs(**kw).logger
epoch = int(time() * 1000) # s -> ms
html_data = await http.async_get(f"/contest/{contest_id}/my")
cc, pc = extract_channel(html_data, logger)[2:4]
assert cc and pc
ws_url = f"wss://pubsub.codeforces.com/ws/s_{pc}/s_{cc}?_={epoch}&tag=&time=&eventid="
logger.debug(f"pc = {pc}") # 似乎和场次有关, 可能包含别人的?
logger.debug(f"cc = {cc}") # 似乎只会包含自己的
logger.debug(f"ws_url = {ws_url}")
async for data in http.websockets(ws_url, ws_handler):
yield data
# source: yxr-codeforces-core (0.0.2.2) / codeforces_core/websocket.py
from collections import defaultdict
from dataclasses import dataclass, field
from os import path
from typing import Any, List, Tuple
from lxml import html
# from .ui import BLUE, GREEN, RED, redraw
from .util import typedxpath
from .account import is_user_logged_in
from .interfaces.AioHttpHelper import AioHttpHelperInterface
from .kwargs import extract_common_kwargs
from .url import problem_url_parse
async def async_submit(http: AioHttpHelperInterface, contest_id: str, level: str, file_path: str, lang_id: str,
**kw) -> Tuple[str, str]:
"""
This method will use ``http`` to post submit
:param http: AioHttpHelperInterface
:param contest_id: contest id in the url
:param level: problem index, e.g. ``F``
:param file_path: path of the source code file to submit
:param lang_id: submission language id (see ``codeforces_core.language.async_language``)
:returns: (submission_id, html_text of contest/<contest id>/my )
Examples:
.. code-block::
import asyncio
from codeforces_core.httphelper import HttpHelper
from codeforces_core.account import async_login
from codeforces_core.websocket import create_contest_ws_task
from codeforces_core.submit import async_submit, display_contest_ws
async def demo():
# http = HttpHelper(token_path='/tmp/cache_token', cookie_jar_path='/tmp/cache_cookie_jar')
http = HttpHelper(token_path='', cookie_jar_path='')
await http.open_session()
result = await async_login(http=http, handle='<handle>', password='<password>')
assert(result.success)
print('before submit')
submit_id, resp = await async_submit(http, contest_id='1777', level='F', file_path='F.cpp', lang_id='73')
print('submit id:',submit_id)
# connect websocket before submit sometimes cannot receive message
contest_task = create_contest_ws_task(http, contest_id='1777', ws_handler=display_contest_ws)
print("contest ws created");
try:
result = await asyncio.wait_for(contest_task, timeout=30)
print("ws is done, result:", result)
except asyncio.TimeoutError:
pass
await http.close_session()
asyncio.run(demo())
"""
logger = extract_common_kwargs(**kw).logger
if not contest_id or not level:
logger.error("[!] Invalid contestID or level")
return '', ''
if not path.isfile(file_path):
logger.error("[!] File not found : {}".format(file_path))
return '', ''
token = http.get_tokens()
submit_form = {
'csrf_token': token['csrf'],
'ftaa': token['ftaa'],
'bfaa': token['bfaa'],
'action': 'submitSolutionFormSubmitted',
'submittedProblemIndex': level,
'programTypeId': lang_id,
}
url = '/contest/{}/problem/{}?csrf_token={}'.format(contest_id, level.upper(), token['csrf'])
form = http.create_form(submit_form)
form.add_field('sourceFile', open(file_path, 'rb'), filename=file_path)
resp = await http.async_post(url, form) # normally a 302 redirect -> https://codeforces.com/contest/<contest id>/my
if not is_user_logged_in(resp):
logger.error("Login required")
return '', resp
doc = html.fromstring(resp)
for e in typedxpath(doc, './/span[@class="error for__sourceFile"]'):
if e.text == 'You have submitted exactly the same code before':
logger.error("[!] " + e.text)
return '', resp
status = parse_submit_status(resp)[0]
assert status.url.split('/')[-1] == level.upper()
return status.id, resp
# TODO move oiterminal code to here use dataclass
@dataclass
class SubmissionPageResult:
id: str = ''
url: str = ''
verdict: str = ''
time_ms: str = ''
mem_bytes: str = ''
# status_url = f'/contest/{contest_id}/my'
# resp = await http.async_get(status_url)
# status = parse_submit_status(resp)
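# Descriptive note: parse_submit_status() walks the rows of the "my submissions" status table; the
# column positions are assumptions taken from the current page layout (td[0] = submission id,
# td[3] = problem link, td[5] = verdict, td[6] = time, td[7] = memory).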
def parse_submit_status(html_page: str) -> List[SubmissionPageResult]:
ret: List[SubmissionPageResult] = []
doc = html.fromstring(html_page)
tr = typedxpath(doc, './/table[@class="status-frame-datatable"]/tr[@data-submission-id]')
for t in tr:
td = t.xpath('.//td')
submission_id = ''.join(td[0].itertext()).strip()
url = td[3].xpath('.//a[@href]')[0].get('href')
verdict = ''.join(td[5].itertext()).strip()
prog_time = td[6].text.strip().replace('\xa0', ' ').split()[0]
prog_mem = td[7].text.strip().replace('\xa0', ' ').split()[0]
ret.append(SubmissionPageResult(id=submission_id, url=url, verdict=verdict, time_ms=prog_time, mem_bytes=prog_mem))
return ret
async def async_fetch_submission_page(http: AioHttpHelperInterface, problem_url: str,
**kw) -> List[SubmissionPageResult]:
contest_id, problem_key = problem_url_parse(problem_url)
# normally a 302 redirect -> https://codeforces.com/contest/<contest id>/my
html_page = await http.async_get(f'/contest/{contest_id}/my')
result = parse_submit_status(html_page)
return list(filter(lambda o: o.url.endswith(problem_key), result))
@dataclass
class SubmissionWSResult:
source: Any = field(default_factory=lambda: defaultdict(dict))
submit_id: int = 0
contest_id: int = 0
title: str = ''
msg: str = ''
passed: int = 0
testcases: int = 0
ms: int = 0
mem: int = 0
date1: str = ''
date2: str = ''
lang_id: int = 0
# TODO the two different ws channels (the site-wide one and the contest-specific one) seem to return differently shaped payloads
def transform_submission(data: Any) -> SubmissionWSResult:
d = data['text']['d']
return SubmissionWSResult(
source=data,
# [5973095143352889425, ???? data-a
submit_id=d[1], # 200625609,
contest_id=d[2], # 1777,
# 1746206, ??
title=d[4], # 'TESTS',
# None,
msg=d[6], # 'TESTING', 'OK'
passed=d[7], # 0, ??
testcases=d[8], # 81, ?? grows while the submission is being judged, with d[7] staying 0 until the verdict is 'OK'
ms=d[9], # 0,
mem=d[10], # 0, Bytes
# 148217099,
# '215020',
date1=d[13], # '04.04.2023 3:21:48',
date2=d[14], # '04.04.2023 3:21:48',
# 2147483647,
lang_id=d[16], # 73,
# 0]
)
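# A worked example (values taken from the sample ws message quoted near the top of websocket.py):
#   data = {'id': 1, 'channel': '...', 'text': {'t': 's', 'd': [5973356517882654806, 200631363, 1777,
#           1746206, 'TESTS', None, 'OK', 86, 86, 3198, 7884800, 148217099, '21220',
#           '04.04.2023 5:57:08', '04.04.2023 5:57:08', 2147483647, 73, 0]}}
#   transform_submission(data) -> SubmissionWSResult(submit_id=200631363, contest_id=1777, title='TESTS',
#           msg='OK', passed=86, testcases=86, ms=3198, mem=7884800, lang_id=73, ...)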
# return (end watch?, transform result)
def display_contest_ws(result: Any) -> Tuple[bool, Any]:
parsed_data = transform_submission(result)
print(parsed_data)
if parsed_data.msg != 'TESTING':
return True, parsed_data
return False, parsed_data
# source: yxr-codeforces-core (0.0.2.2) / codeforces_core/submit.py
from dataclasses import dataclass
import logging
from typing import Any, List, cast
from lxml import html
from .interfaces.AioHttpHelper import AioHttpHelperInterface
from .kwargs import extract_common_kwargs
from .util import pop_element
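# Descriptive note: extract_testcases() supports two sample-test layouts (the per-line layout is an
# assumption based on this parsing code): a plain block where the whole sample is raw text inside the
# tag, and a layout where every line is wrapped in a classed <div>; in the latter case consecutive
# <div>s sharing the same class are stitched back together into one chunk of lines.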
def extract_testcases(tags):
ret = []
for i in tags:
pop_element(i.xpath('.//div[@class="title"]')[0])
divs = i.xpath('.//div[@class]')
if len(divs) == 0:
ret.append("\n".join([t.strip() for t in i.itertext()]))
else:
l = ''
prev = divs[0].get('class')
lines = []
for d in divs:
if d.get('class') == prev:
l += d.text + '\n'
else:
lines.append(l)
prev = d.get('class')
l = d.text + '\n'
if l: lines.append(l.strip() + '\n')
ret.append("\n".join(lines))
return ret
@dataclass
class TestCase:
in_data: str
out_data: str
@dataclass
class ProblemInfo:
# testcases: List[TestCase]
title: str
level: str
time_limit_seconds: str
memory_limit_mb: str
desc: str
in_tc: List[str]
out_tc: List[str]
note: str
async def async_problems(http: AioHttpHelperInterface, contest_id: str, **kw) -> List[ProblemInfo]:
"""
This method will use ``http`` to request ``/contest/<contest_id>/problems``, and parse to struct result
:param http: AioHttpHelperInterface
:param contest_id: contest id in url
:returns: parsed structured result
Examples:
.. code-block::
import asyncio
from codeforces_core.httphelper import HttpHelper
from codeforces_core.problems import async_problems
async def demo():
# http = HttpHelper(token_path='/tmp/cache_token', cookie_jar_path='/tmp/cache_cookie_jar')
http = HttpHelper(token_path='', cookie_jar_path='')
await http.open_session()
# you can login before request
result = await async_problems(http=http, contest_id='1779')
print(len(result))
print(result[0])
await http.close_session()
asyncio.run(demo())
"""
logger = extract_common_kwargs(**kw).logger
url = "/contest/{}/problems".format(contest_id)
resp = await http.async_get(url)
doc = html.fromstring(resp)
probs = cast(List[Any], doc.xpath('.//div[@class="problemindexholder"]'))
ret: List[ProblemInfo] = []
for p in probs:
try:
# if alert: alert = alert[0].text
level = p.get('problemindex')
typo = p.xpath('.//div[@class="ttypography"]')[0]
title = pop_element(typo.xpath('.//div[@class="title"]')[0])
time_limit = typo.xpath('.//div[@class="time-limit"]')[0]
time_limit = [t for t in time_limit.itertext()][1].split(' ')[0]
memory_limit = typo.xpath('.//div[@class="memory-limit"]')[0]
memory_limit = [t for t in memory_limit.itertext()][1].split(' ')[0]
desc = typo.xpath('.//div[not(@class)]')
if desc:
desc = '\n'.join([t for t in desc[0].itertext()])
else:
desc = ""
for j in typo.xpath('.//div[@class="section-title"]'):
pop_element(j)
in_spec = typo.xpath('.//div[@class="input-specification"]')
if in_spec:
in_spec = '\n'.join([t for t in in_spec[0].itertext()])
else:
in_spec = ""
out_spec = typo.xpath('.//div[@class="output-specification"]')
if out_spec:
out_spec = '\n'.join([t for t in out_spec[0].itertext()])
else:
out_spec = ""
in_tc = extract_testcases(typo.xpath('.//div[@class="input"]'))
out_tc = extract_testcases(typo.xpath('.//div[@class="output"]'))
note = typo.xpath('.//div[@class="note"]')
if note:
note = '\n'.join([t for t in note[0].itertext()])
ret.append(
ProblemInfo(title=title,
level=level,
time_limit_seconds=time_limit,
memory_limit_mb=memory_limit,
desc=desc,
in_tc=in_tc,
out_tc=out_tc,
note=note))
except Exception as e:
logger.exception(e)
return ret
# source: yxr-codeforces-core (0.0.2.2) / codeforces_core/problems.py
from dataclasses import dataclass
from typing import Optional, Tuple
from random import choice
from lxml import html
from lxml.html import HtmlElement
import logging
from .kwargs import extract_common_kwargs
from .util import typedxpath
from .interfaces.AioHttpHelper import AioHttpHelperInterface
default_login_url = "/enter?back=%2F"
@dataclass
class LoginResult:
html: str = ''
csrf: str = ''
ftaa: str = ''
bfaa: str = ''
uc: str = '' # user channel ?
usmc: str = ''
# cc: str = ''  # contest channel? TODO remove; this is contest-related and should not live together with login
# pc: str = ''  # remove for the same reason
success: bool = False
def is_user_logged_in(html_data: str) -> bool:
doc = html.fromstring(html_data)
links = typedxpath(doc, './/div[@class="lang-chooser"]/div[not(@style)]/a[@href]')
for m in links:
if m.text.strip() in ["Register", "Enter"]:
return False
return True
async def async_fetch_logged_in(http: AioHttpHelperInterface, login_url=default_login_url, **kw) -> Tuple[bool, str]:
"""
Check the login status and, if logged in, refresh the tokens cached by ``http``.
:returns: (is_logged_in, html_data); html_data is empty when not logged in
"""
logger = extract_common_kwargs(**kw).logger
html_data = await http.async_get(login_url)
uc, usmc, cc, pc, csrf_token, ftaa, bfaa = extract_channel(html_data, logger=logger)
if is_user_logged_in(html_data=html_data):
http.update_tokens(csrf=csrf_token, ftaa=ftaa, bfaa=bfaa, uc=uc, usmc=usmc)
return True, html_data
return False, ''
# Does not raise; exceptions are handled inside
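# Descriptive note: uc/usmc/cc/pc are pubsub channel ids read from <meta> tags on the page (user-level
# and contest-level channels used to build the websocket URLs), csrf_token is taken from the
# span.csrf-token element, and ftaa/bfaa are randomly generated client-fingerprint-style tokens that
# get posted back in forms (bfaa mimics a Fingerprint2 x64hash128 value).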
def extract_channel(html_data: str,
logger: Optional[logging.Logger] = None) -> Tuple[str, str, str, str, str, str, str]:
doc = html.fromstring(html_data)
def xpath_content(el: HtmlElement, s: str) -> str:
try:
l = typedxpath(el, s)
return l[0].get('content') if len(l) > 0 else ''
except Exception as e:
if logger: logger.exception(e)
return ''
uc = xpath_content(doc, './/meta[@name="uc"]')
usmc = xpath_content(doc, './/meta[@name="usmc"]')
cc = xpath_content(doc, './/meta[@name="cc"]')
pc = xpath_content(doc, './/meta[@name="pc"]')
try:
csrf_token = typedxpath(doc, './/span[@class="csrf-token"]')[0].get('data-csrf')
assert len(csrf_token) == 32, "Invalid CSRF token"
except Exception as e:
if logger: logger.exception(e)
csrf_token = ''
ftaa = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789') for x in range(18)])
# bfaa : Fingerprint2.x64hash128
bfaa = ''.join([choice('0123456789abcdef') for x in range(32)])
return uc, usmc, cc, pc, csrf_token, ftaa, bfaa
# TODO if account A is already logged in, logging in as account B will not work; this should be controlled by the caller, which must ensure the session is logged out before calling
async def async_login(http: AioHttpHelperInterface,
handle: str,
password: str,
login_url=default_login_url,
**kw) -> LoginResult:
"""
This method will use ``http`` for login request, and :py:func:`is_user_logged_in()` for login check
:param handle: Codeforces handle
:param password: Codeforces password
:returns: whether the login post succeeded and the session is logged in
Examples:
.. code-block::
import asyncio
from codeforces_core.account import async_login, is_user_logged_in
from codeforces_core.httphelper import HttpHelper
async def demo():
# http = HttpHelper(token_path='/tmp/cache_token', cookie_jar_path='/tmp/cache_cookie_jar')
http = HttpHelper(token_path='', cookie_jar_path='')
await http.open_session()
result = await async_login(http=http, handle='<handle>', password='<password>')
assert(result.success)
html_data = await http.async_get('https://codeforces.com')
assert(is_user_logged_in(html_data))
await http.close_session()
asyncio.run(demo())
"""
logger = extract_common_kwargs(**kw).logger
html_data = await http.async_get(login_url)
csrf_token, ftaa, bfaa = extract_channel(html_data, logger=logger)[4:7]
login_data = {
'csrf_token': csrf_token,
'action': 'enter',
'ftaa': ftaa,
'bfaa': bfaa,
'handleOrEmail': handle,
'password': password,
'remember': 'on',
}
html_data = await http.async_post(login_url, login_data)
# uc, usmc, cc, pc, csrf_token, ftaa, bfaa = extract_channel(html_data)
uc, usmc, cc, pc = extract_channel(html_data, logger=logger)[0:4]
success = False
# if check_login(result.html):
if is_user_logged_in(html_data=html_data):
http.update_tokens(csrf=csrf_token, ftaa=ftaa, bfaa=bfaa, uc=uc, usmc=usmc)
success = True
else:
success = False
return LoginResult(
html=html_data,
csrf=csrf_token,
ftaa=ftaa,
bfaa=bfaa,
uc=uc,
usmc=usmc,
# cc=cc,
# pc=pc,
success=success)
# source: yxr-codeforces-core (0.0.2.2) / codeforces_core/account.py
import asyncio
import logging
import os
from typing import Any, Tuple, AsyncIterator
from requests.exceptions import ReadTimeout, ConnectTimeout
from oi_cli2.cli.constant import APP_NAME, CIPHER_KEY, GREEN, DEFAULT
from oi_cli2.model.BaseOj import BaseOj
from oi_cli2.model.ParseProblemResult import ParsedProblemResult
from oi_cli2.model.LangKV import LangKV
from oi_cli2.model.Account import Account
from oi_cli2.model.ProblemMeta import ContestMeta, ProblemMeta, E_STATUS
from oi_cli2.model.Result import SubmissionResult
from oi_cli2.model.TestCase import TestCase
# from oi_cli2.utils.async2sync import iter_over_async
from oi_cli2.utils.enc import AESCipher
from codeforces_core.account import async_login, async_fetch_logged_in
from codeforces_core.contest_list import async_contest_list
from codeforces_core.contest_register import async_register, RegisterResultMsg
from codeforces_core.contest_standing import async_friends_standing
from codeforces_core.contest_meta import async_contest_meta, ProblemMeta as InnerProblemMeta
from codeforces_core.interfaces.AioHttpHelper import AioHttpHelperInterface
from codeforces_core.language import async_language
from codeforces_core.problem import async_problem
from codeforces_core.submit import async_submit, transform_submission, async_fetch_submission_page, SubmissionWSResult, SubmissionPageResult
from codeforces_core.url import pid2url, pid2split, problem_url_parse
from codeforces_core.websocket import create_contest_ws_task_yield
class Codeforces(BaseOj):
def __init__(self, http_util: AioHttpHelperInterface, logger: logging.Logger, account: Account) -> None:
super().__init__()
assert (account is not None)
self._base_url = 'https://codeforces.com/'
self.logger: logging.Logger = logger
self.account: Account = account
self.http = http_util
self.api_sub_logger:logging.Logger = logging.getLogger(f'{APP_NAME}.yxr-cf-core')
async def init(self) -> None:
await self.http.open_session()
async def deinit(self) -> None:
await self.http.close_session()
def pid2url(self, problem_id: str):
return self._base_url[:-1] + pid2url(problem_id)
def pid2file_path(self, problem_id: str):
contest_id, problem_key = pid2split(problem_id)
return os.path.join(contest_id, problem_key)
def problem_by_id(self, problem_id: str) -> ParsedProblemResult:
return self.async_2_sync_session_wrap(lambda: self.async_problem_by_id(problem_id))
async def async_problem_by_id(self, problem_id: str) -> ParsedProblemResult:
contest_id, problem_key = pid2split(problem_id)
self.logger.debug(f'{problem_id} => {contest_id}, {problem_key}')
result = await async_problem(http=self.http, contest_id=contest_id, level=problem_key)
return ParsedProblemResult(
status=ParsedProblemResult.Status.NOTVIS, # TODO for show progress
title=result.title,
test_cases=list(map(lambda x: TestCase(in_data=x.in_data, out_data=x.out_data), result.test_cases)),
id=problem_id,
oj=Codeforces.__name__,
description=result.description,
time_limit=result.time_limit,
mem_limit=result.mem_limit,
url=self.pid2url(problem_id),
html=result.html,
file_path=self.pid2file_path(problem_id))
def problem(self, problem: ProblemMeta) -> ParsedProblemResult:
return self.problem_by_id(problem.contest_id + problem.id)
async def async_problem(self, problem: ProblemMeta) -> ParsedProblemResult:
return await self.async_problem_by_id(problem.contest_id + problem.id)
def login_website(self, force=False) -> bool: # return successful
return self.async_2_sync_session_wrap(lambda: self.async_login_website(force=force))
# force: if True, log in again regardless of any previous login
# TODO the logic is still a bit off; the intended behaviour is:
# not forced:
# - not logged in => log in
# - logged in => do nothing
# forced:
# - not logged in => log in
# - logged in => drop cookies etc. and force a fresh login
async def async_login_website(self, force=False) -> bool: # return successful
if not force:
# try using cookies
self.logger.info(f"{GREEN}Checking Log in {DEFAULT}")
try:
if await self.async_is_login():
self.logger.info(f"{GREEN}{self.account.account} is Logged in {Codeforces.__name__}{DEFAULT}")
return True
except (ReadTimeout, ConnectTimeout) as e:
self.logger.error(f'Http Timeout[{type(e).__name__}]: {e.request.url}')
except Exception as e:
self.logger.exception(e)
try:
self.logger.debug(f"{GREEN}{self.account.account} Logining {Codeforces.__name__}{DEFAULT}")
return (await async_login(http=self.http,
handle=self.account.account,
password=AESCipher(CIPHER_KEY).decrypt(self.account.password))).success
except (ReadTimeout, ConnectTimeout) as e:
self.logger.error(f'Http Timeout[{type(e).__name__}]: {e.request.url}')
except Exception as e:
self.logger.exception(e)
return False
def _is_login(self) -> bool:
return self.async_2_sync_session_wrap(lambda: self.async_is_login())
async def async_is_login(self) -> bool:
ok, html_data = await async_fetch_logged_in(self.http)
return ok
def reg_contest(self, contest_id: str) -> bool:
return self.async_2_sync_session_wrap(lambda: self.async_reg_contest(contest_id))
async def async_reg_contest(self, contest_id: str) -> bool:
result = await async_register(http=self.http, contest_id=contest_id)
return result.msg == RegisterResultMsg.AlreadyRegistered or result.msg == RegisterResultMsg.HaveBeenRegistered
def submit_code(self, problem_url: str, language_id: str, code_path: str) -> bool:
# https://codeforces.com/contest/1740/problem/G
contest_id, problem_key = problem_url_parse(problem_url)
sid = contest_id + problem_key
return self.async_2_sync_session_wrap(lambda: self.async_submit_code(sid, language_id, code_path))
# TODO move sid out as just Syntactic sugar
async def async_submit_code(self, sid: str, language_id: str, code_path: str) -> bool:
if not await self.async_login_website():
raise Exception('Login Failed')
contest_id, problem_key = pid2split(sid)
self.logger.debug(f'{contest_id},{problem_key}')
submit_id, resp = await async_submit(http=self.http,
contest_id=contest_id,
level=problem_key,
file_path=code_path,
lang_id=language_id,
logger=self.api_sub_logger)
self.logger.debug(f'submit_id = {submit_id}')
return bool(submit_id)
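# Descriptive note: async_get_result_yield() first reads the submission page once and yields the
# latest result; if that result is still pending/running it opens the contest websocket and keeps
# yielding updates until a terminal verdict, then re-reads the page to yield the final confirmed state.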
async def async_get_result_yield(self, problem_url: str, time_gap: float = 2) -> AsyncIterator[SubmissionResult]:
contest_id, problem_key = problem_url_parse(problem_url)
# TODO move more parsing into codeforces-core? CF reports failures in a stop-at-first-error style: a status plus a test count
def page_result_transform(res: SubmissionPageResult) -> SubmissionResult:
self.logger.debug('page res:'+str(res))
cur_status = SubmissionResult.Status.PENDING
if res.verdict.startswith('Running'):
cur_status = SubmissionResult.Status.RUNNING
elif res.verdict.startswith('In queue'):
cur_status = SubmissionResult.Status.RUNNING
elif res.verdict.startswith('Accepted'):
cur_status = SubmissionResult.Status.AC
elif res.verdict.startswith('Pretests passed'):
cur_status = SubmissionResult.Status.AC
elif res.verdict.startswith('Wrong answer'):
cur_status = SubmissionResult.Status.WA
elif res.verdict.startswith('Time limit exceeded'):
cur_status = SubmissionResult.Status.TLE
elif res.verdict.startswith('Runtime error'): # Runtime error on pretest 2
cur_status = SubmissionResult.Status.RE
else:
self.logger.error('NOT HANDLE PAGE:'+str(res.verdict))
if res.url.startswith('/'):
res.url = 'https://codeforces.com' + res.url
return SubmissionResult(id=res.id,
cur_status=cur_status,
time_note=res.time_ms + ' ms',
mem_note=str(int(res.mem_bytes)/1000) + ' kb',
url=res.url,
msg_txt=res.verdict)
# TODO move more parse inside codeforces-core ?
def ws_result_transform(res: SubmissionWSResult) -> SubmissionResult:
cur_status = SubmissionResult.Status.PENDING
if res.msg == 'TESTING':
cur_status = SubmissionResult.Status.RUNNING
elif res.msg == 'OK':
cur_status = SubmissionResult.Status.AC
elif res.msg == 'WRONG_ANSWER':
cur_status = SubmissionResult.Status.WA
else:
self.logger.error('NOT HANDLE WS:' + str(res.msg))
msg_txt = str(res.testcases)
if cur_status in [SubmissionResult.Status.AC, SubmissionResult.Status.WA]:
msg_txt = f'{res.passed}/{res.testcases}'
return SubmissionResult(
id=str(res.submit_id),
cur_status=cur_status,
time_note=str(res.ms) + ' ms',
mem_note=str(int(res.mem)/1000) + ' kb',
url=f'https://codeforces.com/contest/{res.contest_id}/submission/{res.submit_id}',
msg_txt=msg_txt,
)
# TODO visit page without ws first
results = await async_fetch_submission_page(http=self.http, problem_url=problem_url,logger=self.api_sub_logger)
fix_submit_id = ''
if len(results) > 0:
result = page_result_transform(results[0])
fix_submit_id = result.id
self.logger.debug(f"fix submit_id = {fix_submit_id}");
yield result
if result.cur_status not in [SubmissionResult.Status.PENDING, SubmissionResult.Status.RUNNING]:
return
self.logger.debug('after page result, enter ws result')
# return (end watch?, transform result)
def custom_handler(result: Any) -> Tuple[bool, SubmissionWSResult]:
parsed_data = transform_submission(result)
if fix_submit_id and fix_submit_id != str(parsed_data.submit_id): # submit id does not match, don't end watching the ws
return False, parsed_data
if parsed_data.msg != 'TESTING':
return True, parsed_data
return False, parsed_data
# TODO add timeout for ws
# TODO might this include other users' submissions? pc/cc?
async for wsresult in create_contest_ws_task_yield(http=self.http, contest_id=contest_id, ws_handler=custom_handler,logger=self.api_sub_logger):
self.logger.debug('ws res:'+str(wsresult))
if fix_submit_id and str(wsresult.submit_id) != fix_submit_id:
self.logger.debug('[skip]fixed id not match! continue')
continue
data = ws_result_transform(wsresult)
yield data
if data.cur_status not in [SubmissionResult.Status.PENDING, SubmissionResult.Status.RUNNING]:
return
results = await async_fetch_submission_page(http=self.http, problem_url=problem_url)
assert len(results) > 0
yield page_result_transform(results[0])
def get_language(self) -> LangKV:
return self.async_2_sync_session_wrap(lambda: self.async_get_language())
async def async_get_language(self) -> LangKV:
await self.async_login_website()
res = await async_language(self.http)
ret: LangKV = {}
for item in res:
ret[item.value] = item.text
return ret
@staticmethod
def support_contest() -> bool:
return True
def print_contest_list(self) -> bool:
return self.async_2_sync_session_wrap(lambda: self.async_print_contest_list())
async def async_print_contest_list(self) -> bool:
await self.async_login_website()
result = await async_contest_list(http=self.http)
from .contestList import printData
printData(result)
return True
def get_contest_meta(self, contest_id: str) -> ContestMeta:
return self.async_2_sync_session_wrap(lambda: self.async_get_contest_meta(contest_id=contest_id))
async def async_get_contest_meta(self, contest_id: str) -> ContestMeta:
await self.async_login_website()
result = await async_contest_meta(http=self.http, contest_id=contest_id)
def transform(problem: InnerProblemMeta) -> ProblemMeta:
return ProblemMeta(
id=problem.id,
url=problem.url,
name=problem.name,
passed=problem.passed, # number of passed submission in contest
score=0,
status=E_STATUS(problem.status), # ???? TODO
time_limit_msec=problem.time_limit_msec, # ms
memory_limit_kb=problem.memory_limit_kb, # mb
contest_id=problem.contest_id,
)
return ContestMeta(id=contest_id, url=result.url, problems=list(map(lambda o: transform(o), result.problems)))
def async_2_sync_session_wrap(self, fn):
async def task():
await self.http.open_session()
result = await fn()
await self.http.close_session()
return result
return asyncio.run(task())
def print_friends_standing(self, cid: str) -> None:
return self.async_2_sync_session_wrap(lambda: self.async_print_friends_standing(cid))
async def async_print_friends_standing(self, cid: str) -> None:
result = await async_friends_standing(http=self.http, contest_id=cid)
from .standing import printData
printData(result, title=f"Friends standing {result.url}", handle=self.account.account)
# source: yxr-oi-cli (0.2.2.4) / oi_cli2/custom/Codeforces/Codeforces.py
from typing import List
from oi_cli2.model.Analyze import Analyze
from oi_cli2.utils.analyze import AnalyzeManager
# TODO dynamically register the different platforms
def analyze_list(tm: AnalyzeManager):
analyze_list: List[Analyze] = tm.get_list()
for i in range(len(analyze_list)):
if i == 0 or analyze_list[i].platform != analyze_list[i - 1].platform:
print(analyze_list[i].platform)
mark = ' '
if analyze_list[i].default:
mark = '*'
print(f'\t {mark} {analyze_list[i].template_alias} {analyze_list[i].submit_lang}')
if len(analyze_list) == 0:
print("Analyze list is empty.")
def analyze_new(tm: AnalyzeManager):
platforms = ['Codeforces', 'AtCoder']
for i in range(len(platforms)):
print(f"{i + 1}) {platforms[i]}")
try:
index = int(input("> "))
except Exception:
print("input error")
return
if 0 < index <= len(platforms):
platform = platforms[index - 1]
else:
print("input error")
return
submit_lang = input('submit_lang:')
template_alias = input('template_alias:')
class_path = input('class_path:')
tm.add_analyze(platform, submit_lang, template_alias, class_path)
def analyze_modify(tm: AnalyzeManager):
analyze_list = tm.get_list()
for i in range(len(analyze_list)):
if i == 0 or analyze_list[i].platform != analyze_list[i - 1].platform:
print(analyze_list[i].platform)
mark = ' '
item = analyze_list[i]
if item.default:
mark = '*'
print(f'\t {mark} {i}) {item.template_alias}')
print(f'\t\t submit_lang: {item.submit_lang}')
print(f'\t\t class_path: {item.class_path}')
try:
acc_index = int(input("> "))
except Exception:
print("input error")
return
if acc_index < 0 or acc_index >= len(analyze_list):
print("input error")
return
print("1) Change Analyze template")
print("2) Change Analyze submit language")
print("3) Change Analyze class path")
print("4) Set as Default")
print("5) Delete")
try:
index = int(input("> "))
except Exception:
print("input error")
return
if index == 1:
tm.modify_template_alias(acc_index, input("Enter template_alias:"))
elif index == 2:
tm.modify_submit_lang(acc_index, input("Enter submit lang:"))
elif index == 3:
tm.modify_class_path(acc_index, input("Enter class path:"))
elif index == 4:
tm.set_default(acc_index)
elif index == 5:
tm.delete_analyze(acc_index)
def analyze(db):
print("1) Analyze List")
print("2) New Analyze")
print("3) Modify Analyze")
try:
index = int(input("> "))
except Exception:
print("input error")
return
tm = AnalyzeManager(db)
if index == 1:
analyze_list(tm)
elif index == 2:
analyze_new(tm)
elif index == 3:
analyze_modify(tm)
else:
print("input error") | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/cli/analyze.py | analyze.py |
import logging
import os
import sys
import json
from typing import List, Type
from oi_cli2.cli.constant import CIPHER_KEY, OT_FOLDER, USER_CONFIG_FILE
from oi_cli2.model.BaseOj import BaseOj
from oi_cli2.model.ParseProblemResult import ParsedProblemResult
from oi_cli2.model.ProblemMeta import ProblemMeta
from oi_cli2.model.TestCase import TestCase
from oi_cli2.model.FolderState import FolderState
from oi_cli2.utils.FileUtil import FileUtil
from oi_cli2.utils.configFolder import ConfigFolder
# file_util can be anything; everything is a file
from oi_cli2.utils.template import TemplateManager
from oi_cli2.utils.force_symlink import force_symlink
# def createDir(oj: BaseOj, problem_id: str, problem: ProblemMeta, file_util: Type[FileUtil], logger,
# template_manager: TemplateManager, config_folder: ConfigFolder):
# template = template_manager.get_platform_default(type(oj).__name__)
# if template is None:
# print(type(oj).__name__ + ' has no default template, run ./oiTerminal.py config first')
# logger.warn(f'{type(oj).__name__} parse problem when no template set')
# return None
#
# result = oj.problem(problem_id)
# test_cases: List[TestCase] = result.test_cases
# directory = config_folder.get_file_path(os.path.join('dist', type(oj).__name__, result.file_path))
#
# for i in range(len(test_cases)):
# file_util.write(config_folder.get_file_path(os.path.join(directory, f'in.{i}')), test_cases[i].in_data)
# file_util.write(config_folder.get_file_path(os.path.join(directory, f'out.{i}')), test_cases[i].out_data)
#
# # if code file exist not cover code
# if not os.path.exists(config_folder.get_file_path(os.path.join(directory, os.path.basename(template.path)))):
# file_util.copy(config_folder.get_file_path(template.path),
# config_folder.get_file_path(os.path.join(directory, os.path.basename(template.path))))
# # TODO generate state.json (and allow custom fields)
# TEST_PY = 'test.py'
# SUBMIT_PY = 'submit.py'
# STATE_FILE = 'state.json'
# # symlink test.py submit.py
# RELATIVE_CLI_FOLDER = '../../../../'
# force_symlink(os.path.join(RELATIVE_CLI_FOLDER, TEST_PY),
# config_folder.get_file_path(os.path.join(directory, TEST_PY)))
# force_symlink(os.path.join(RELATIVE_CLI_FOLDER, SUBMIT_PY),
# config_folder.get_file_path(os.path.join(directory, SUBMIT_PY)))
#
# # TODO provide more info, like single test and
# # generate state.json
# folder_state = FolderState(oj=type(oj).__name__,
# sid=problem_id,
# template_alias=template.alias,
# up_lang=template.uplang) # TODO get data from analyzer
# with open(config_folder.get_file_path(os.path.join(directory, STATE_FILE)), "w") as statejson:
# json.dump(folder_state.__dict__, statejson)
# statejson.close()
#
# return directory
# def main(argv: List[str], logger: logging, folder=OT_FOLDER):
# config_folder = ConfigFolder(folder)
# user_config_path = config_folder.get_config_file_path(USER_CONFIG_FILE)
#
# http_util = HttpUtil(logger=logger)
# dbIns = JsonFileDB(file_path=user_config_path, logger=logger)
# template_manager = TemplateManager(db=dbIns)
# account_manager = AccountManager(db=dbIns, cipher=AESCipher(CIPHER_KEY))
#
# if argv[0] == Platforms.codeforces:
# try:
# from oi_cli2.custom.Codeforces.Codeforces import Codeforces
# oj: BaseOj = Codeforces(http_util=http_util,
# logger=logger,
# account=account_manager.get_default_account(Codeforces.__name__),
# html_tag=HtmlTag(http_util))
# except Exception as e:
# logger.exception(e)
# raise e
# else:
# raise Exception('Unknown Platform')
#
# directory = createDir(
# oj=oj,
# problem_id=argv[1],
# ProblemMeta=None, # TODO support
# file_util=FileUtil,
# logger=logger,
# template_manager=template_manager,
# config_folder=config_folder)
#
# if directory is None:
# return None
# # TODO switch directory
# print(directory)
#
# start_terminal(config_folder.get_file_path(os.path.join(directory)))
# if __name__ == '__main__':
# main(sys.argv, folder=OT_FOLDER)
# source: yxr-oi-cli (0.2.2.4) / oi_cli2/cli/problem.py
import asyncio
import json
import logging
import os
import traceback
from typing import Tuple, cast
import click
from rich.console import Console
from rich.text import Text
from rich.table import Table
from rich.live import Live
from oi_cli2.cli.adaptor.ojman import OJManager
from oi_cli2.cli.constant import FETCH_RESULT_INTERVAL, STATE_FILE
from oi_cli2.core.DI import DI_ACCMAN, DI_LOGGER, DI_TEMPMAN
from oi_cli2.model.Account import Account
from oi_cli2.model.BaseOj import BaseOj
from oi_cli2.model.FolderState import FolderState
from oi_cli2.model.Result import SubmissionResult, status_string
from oi_cli2.utils.Provider2 import Provider2
from oi_cli2.utils.account import AccountManager
from oi_cli2.utils.template import TemplateManager
console = Console(color_system='256', style=None)
def generate_submission_table(res: SubmissionResult) -> Table:
"""Make a new submission table."""
table = Table().grid()
table.add_column(min_width=12)
table.add_column()
table.add_row("Result ID", f"{res.id}")
# "[red]ERROR" if value < 50 else "[green]SUCCESS"
table.add_row("Status", Text.from_ansi(f"{status_string(res)}"))
table.add_row("Time", f"{res.time_note}")
table.add_row("Memory", f"{res.mem_note}")
if res.msg_txt:
table.add_row("MSG", f"{res.msg_txt}")
if res.url:
table.add_row("Url", f"{res.url}")
return table
def watch_result(oj: BaseOj, problem_url: str) -> SubmissionResult:
return asyncio.run(async_watch_result(oj, problem_url))
async def async_watch_result(oj: BaseOj, problem_url: str) -> SubmissionResult:
await oj.init()
try:
result = SubmissionResult()
with Live(auto_refresh=False) as live:
async for result in oj.async_get_result_yield(problem_url, time_gap=FETCH_RESULT_INTERVAL):
live.update(generate_submission_table(result), refresh=True)
except Exception as e:
logger: logging.Logger = Provider2().get(DI_LOGGER)
logger.exception(e)
await oj.deinit()
return result
def submit_parser() -> Tuple[str, str, str, Account, str, str]:
logger: logging.Logger = Provider2().get(DI_LOGGER)
am: AccountManager = Provider2().get(DI_ACCMAN)
tm: TemplateManager = Provider2().get(DI_TEMPMAN)
# get lang config
if not os.path.isfile(STATE_FILE):
raise Exception(f'STATE_FILE [{STATE_FILE}] NOT EXIST!')
state_oj = FolderState()
with open(STATE_FILE) as f:
state_oj.__dict__ = json.load(f)
oj = state_oj.oj
up_lang = cast(str, state_oj.up_lang)
template = tm.get_template_by_name(state_oj.oj, state_oj.template_alias)
if template is None:
raise Exception(f'Template not found by [{state_oj.oj},{state_oj.template_alias}]')
source_file_name = os.path.basename(template.path)
code_file = os.path.join('.', source_file_name)
if not os.path.isfile(code_file):
raise Exception(f"code_file [{code_file}] NOT EXIST!")
account = am.get_default_account(oj)
return oj, state_oj.id, up_lang, account, code_file, state_oj.problem_url
@click.command(name="submit")
def submit_command() -> None:
try:
logger: logging.Logger = Provider2().get(DI_LOGGER)
platform, sid, up_lang, account, code_path, problem_url = submit_parser()
table = Table().grid()
table.add_column(min_width=12)
table.add_column()
table.add_row("OJ", f"{platform}")
table.add_row("Account", f"{account.account}")
table.add_row("Problem ID", f"{sid}")
table.add_row("up_lang", f"{up_lang}")
console.print(table)
try:
oj: BaseOj = OJManager.createOj(platform=platform, account=account, provider=Provider2())
except Exception as e:
logger.exception(e)
raise e
if not oj.submit_code(problem_url=problem_url, language_id=up_lang, code_path=code_path):
raise Exception(f'submit failed, account={account.account}')
console.print("[green]Submitted")
watch_result(oj, problem_url)
except KeyboardInterrupt:
logger.info("Interrupt by user")
except Exception:
logger.error(traceback.format_exc())
@click.command(name="result")
def result_command() -> None:
logger: logging.Logger = Provider2().get(DI_LOGGER)
platform, sid, up_lang, account, code_path, problem_url = submit_parser()
table = Table().grid()
table.add_column(min_width=12)
table.add_column()
table.add_row("OJ", f"{platform}")
table.add_row("Account", f"{account.account}")
table.add_row("Problem ID", f"{sid}")
table.add_row("up_lang", f"{up_lang}")
table.add_row("Problem Url", f"{problem_url}")
console.print(table)
try:
oj: BaseOj = OJManager.createOj(platform=platform, account=account, provider=Provider2())
except Exception as e:
logger.exception(e)
raise e
logger.debug(problem_url)
watch_result(oj, problem_url) | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/cli/submit.py | submit.py |
import logging
from typing import List
import click
from oi_cli2.core.DI import DI_DB, DI_LOGGER, DI_TEMPMAN
from oi_cli2.model.Template import Template
from oi_cli2.utils.template import TemplateManager
@click.group()
@click.pass_context
def template(ctx):
"""Manage templates"""
db = ctx.obj[DI_DB]
ctx.obj[DI_TEMPMAN] = TemplateManager(db)
@template.command(name='list')
@click.option('-d', '--detail', is_flag=True, help="display config detail")
@click.pass_context
def list_command(ctx, detail: bool):
"""List all templates"""
tm: TemplateManager = ctx.obj[DI_TEMPMAN]
temp_list: List[Template] = tm.get_list()
for i in range(len(temp_list)):
if i == 0 or temp_list[i].platform != temp_list[i - 1].platform:
print(temp_list[i].platform)
mark = ' '
if temp_list[i].default:
mark = '*'
print(f' {mark} {temp_list[i].alias}')
if detail:
print(f' \tCompile command : {temp_list[i].compilation}')
print(f' \tExecute command : {temp_list[i].execute}')
print(f' \tTemplate file Path : {temp_list[i].path}')
print(f' \tUpload language id : {temp_list[i].uplang}')
if len(temp_list) == 0:
print("Template list is empty.")
@template.command()
@click.pass_context
@click.argument('platform')
@click.argument('name')
@click.argument('path')
@click.argument('compile')
@click.argument('execute')
@click.argument('langid')
def new(ctx, platform, name, path, compile, execute, langid) -> None:
"""Create new template
PLATFORM Platform Name, (AtCoder,Codeforces)
NAME Custom template name
PATH Your template file path
COMPILE Compile command
EXECUTE Execute command
LANGID Upload language id(`oi lang <platform>`)"""
tm: TemplateManager = ctx.obj[DI_TEMPMAN]
logger: logging.Logger = ctx.obj[DI_LOGGER]
logger.debug(f"{platform}, {name}, {path}, {compile}, {execute}, {langid}")
tm.add_template(platform=platform, alias=name, path=path, compilation=compile, execute=execute, uplang=langid)
@template.command()
@click.pass_context
@click.argument('platform')
@click.argument('name')
def delete(ctx, platform, name) -> None:
"""Delete a specific template"""
tm: TemplateManager = ctx.obj[DI_TEMPMAN]
tm.delete_template(platform, name)
@template.command()
@click.pass_context
@click.argument('platform')
@click.argument('name')
@click.option('-n', '--name', 'newname', help='Change template name')
@click.option('-p', '--path', help='Change template path')
@click.option('-c', '--compile', help='Change compile command')
@click.option('-e', '--execute', help='Change execute command')
@click.option('-l', '--langid', help='Change upload language id')
@click.option('-d', '--default', is_flag=True, help='Set as default template')
def modify(ctx, platform, name, newname, path, compile, execute, langid, default) -> None:
"""Update current template
PLATFORM Platform Name, (AtCoder,Codeforces)
NAME Custom template name
"""
tm: TemplateManager = ctx.obj[DI_TEMPMAN]
logger: logging.Logger = ctx.obj[DI_LOGGER]
logger.debug(f"{platform}, {name}, {path}, {compile}, {execute}, {langid},{default}")
tm.update_template(platform=platform,
alias=name,
newalias=newname,
path=path,
compilation=compile,
execute=execute,
uplang=langid,
default=default) | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/cli/template.py | template.py |
import click
import getpass
import logging
from rich.console import Console
from oi_cli2.cli.adaptor.ojman import OJManager
from oi_cli2.core.DI import DI_ACCMAN, DI_LOGGER, DI_PROVIDER
from oi_cli2.model.BaseOj import BaseOj
from oi_cli2.utils.account import AccountManager
console = Console(color_system='256', style=None)
@click.group()
@click.pass_context
def account(ctx):
"""Manage accounts"""
from oi_cli2.utils.Provider2 import Provider2
ctx.obj[DI_PROVIDER] = Provider2()
@account.command(name='list')
@click.pass_context
def list_command(ctx) -> None:
"""List all account"""
provider = ctx.obj[DI_PROVIDER]
am: AccountManager = provider.get(DI_ACCMAN)
acc_list = am.get_list()
for i in range(len(acc_list)):
if i == 0 or acc_list[i].platform != acc_list[i - 1].platform:
print(acc_list[i].platform)
mark = ' '
if acc_list[i].default:
mark = '*'
print(f' {mark} {acc_list[i].account}')
if len(acc_list) == 0:
print("Account List is empty.")
@account.command()
@click.argument("platform")
@click.argument("account")
@click.option("-d", "--default", "default_", is_flag=True, help='Set account as default account in the oj platform.')
@click.pass_context
def new(ctx, platform, account, default_) -> None:
"""Create new account
PLATFORM Platform Name, (AtCoder,Codeforces)
ACCOUNT Account name
"""
provider = ctx.obj[DI_PROVIDER]
logger: logging.Logger = provider.get(DI_LOGGER)
am: AccountManager = provider.get(DI_ACCMAN)
password = getpass.getpass("Password:")
if not am.new(platform=platform, account=account, password=password, default=default_):
logger.error('New Account Failed.')
else:
logger.info('Success')
# TODO support password in arg???
@account.command()
@click.argument("platform")
@click.argument("account")
@click.option("-p", "--password", "changepassword", is_flag=True, help='Change account password.')
@click.option("-d", "--default", "default_", is_flag=True, help='Set account as default account in the oj platform.')
@click.pass_context
def modify(ctx, platform, account, changepassword: bool, default_):
"""Modify a specific account default status or change password
PLATFORM Platform Name, (AtCoder,Codeforces)
ACCOUNT Account name
"""
provider = ctx.obj[DI_PROVIDER]
logger: logging.Logger = provider.get(DI_LOGGER)
am: AccountManager = provider.get(DI_ACCMAN)
if changepassword:
password = getpass.getpass("Password:")
else:
password = None
if not am.modify(platform=platform, account=account, password=password, default=default_):
logger.error('Modify Account Failed.')
else:
logger.info('Success Modify')
@account.command()
@click.argument("platform")
@click.argument("account")
@click.pass_context
def delete(ctx, platform, account) -> bool:
"""Delete a specific account
PLATFORM Platform Name, (AtCoder,Codeforces)
ACCOUNT Account name
"""
provider = ctx.obj[DI_PROVIDER]
logger: logging.Logger = provider.get(DI_LOGGER)
am: AccountManager = provider.get(DI_ACCMAN)
if not am.delete(platform=platform, account=account):
logger.error("Account not found")
return False
else:
logger.info("Success Delete")
return True
@account.command(name="test")
@click.argument("platform")
@click.argument("account")
@click.pass_context
def valid_account(ctx, platform: str, account: str) -> bool:
"""Test account login
PLATFORM Platform Name, (AtCoder,Codeforces)
ACCOUNT Account name
"""
provider = ctx.obj[DI_PROVIDER]
logger: logging.Logger = provider.get(DI_LOGGER)
logger.debug(f'platform:{platform}')
am: AccountManager = provider.get(DI_ACCMAN)
acc = am.get_account(platform=platform, account=account)
if acc is None:
console.print(f'[red bold]Account [{account}] not found')
return False
try:
oj: BaseOj = OJManager.createOj(platform=platform, account=acc, provider=provider)
except Exception as e:
logger.exception(e)
raise e
console.print(f"[green bold]{platform} Logging with {acc.account} ...")
ok = oj.login_website(force=True)
if ok:
console.print(f"[green bold]Successful login.")
else:
console.print(f"[red bold]Login failed.")
return ok | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/cli/account.py | account.py |
import asyncio
import logging
import time
from typing import List, AsyncIterator
from rich.console import Console
from rich.table import Table
from rich.style import Style
from oi_cli2.model.LangKV import LangKV
from ...cli.constant import CIPHER_KEY
from ...model.Account import Account
from ...model.BaseOj import BaseOj
from ...model.ParseProblemResult import ParsedProblemResult
from ...model.ProblemMeta import ContestMeta, ProblemMeta
from ...model.Result import SubmissionResult
from ...model.TestCase import TestCase
from ...utils.HtmlTag import HtmlTag
from ...utils.HttpUtil import HttpUtil
from ...utils.HttpUtilCookiesHelper import HttpUtilCookiesHelper
from ...utils.Provider2 import Provider2
from ...utils.enc import AESCipher
from ...abstract.HtmlTagAbstract import HtmlTagAbstract
from ...core.DI import DI_ACCMAN, DI_HTTP, DI_LOGGER, DI_PROVIDER
from ac_core.auth import fetch_login, is_logged_in
from ac_core.contest import fetch_tasks_meta, ParserProblemResult, fetch_standing
from ac_core.problem import parse_task
from ac_core.submit import fetch_submit
from ac_core.interfaces.HttpUtil import HttpRespInterface
from ac_core.result import fetch_result, SubmissionResult as CORE_SUB_RES
from ac_core.language import fetch_language
console = Console(color_system='256', style=None)
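# Format an elapsed time in seconds as "s", "m:ss" or "h:mm:ss";
# e.g. s2str(59) == "59", s2str(125) == "2:05", s2str(3725) == "1:02:05".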
def s2str(sec: int) -> str:
if sec < 60:
return str(sec)
if sec < 60 * 60:
return f"{sec//60}:{(sec%60):02d}"
return f"{sec // 60 // 60}:{((sec // 60) % 60):02d}:{(sec % 60):02d}"
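# Map an ac_core submission result onto oi_cli2's SubmissionResult; statuses without
# a mapping are logged and fall back to SubmissionResult.Status.UNKNOWN.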
def transform_Result(res: CORE_SUB_RES) -> SubmissionResult:
mapdict = {
CORE_SUB_RES.Status.AC: SubmissionResult.Status.AC,
CORE_SUB_RES.Status.PENDING: SubmissionResult.Status.PENDING,
CORE_SUB_RES.Status.RUNNING: SubmissionResult.Status.RUNNING,
CORE_SUB_RES.Status.INIT: SubmissionResult.Status.PENDING,
CORE_SUB_RES.Status.RE: SubmissionResult.Status.RE,
CORE_SUB_RES.Status.TLE: SubmissionResult.Status.TLE,
CORE_SUB_RES.Status.WA: SubmissionResult.Status.WA,
CORE_SUB_RES.Status.CE: SubmissionResult.Status.CE,
}
if res.status in list(mapdict.keys()):
status = mapdict[res.status]
else:
logger: logging.Logger = Provider2().get(DI_LOGGER)
logger.error(f'Unknown status {res.status}')
status = SubmissionResult.Status.UNKNOWN
return SubmissionResult(
id=res.id,
cur_status=status,
quick_key=res.url, # for refetch result
url=res.url, # TODO change to webpage url
state_note=str(res.score),
      time_note=str(res.time_cost_ms) + ' ms',  # res.time_cost_ms is already in milliseconds
mem_note=str(res.mem_cost_kb) + ' kb',
msg_txt=res.msg_txt,
)
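# AtCoder adaptor built on top of ac_core. Cookies for the account are loaded when the
# adaptor is constructed and saved after every login attempt, so later runs can usually
# reuse the session instead of logging in again.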
class AtCoder(BaseOj):
def __init__(self, http_util: HttpUtil, logger: logging.Logger, account: Account, html_tag: HtmlTagAbstract) -> None:
super().__init__()
assert (account is not None)
self._base_url = 'https://atcoder.jp/'
self.logger: logging.Logger = logger
self.html_tag = html_tag
self.account: Account = account
self.http_util = http_util
HttpUtilCookiesHelper.load_cookie(provider=Provider2(), platform=AtCoder.__name__, account=account.account)
def login_website(self, force: bool = False) -> bool:
if force or not is_logged_in(self.http_util): # need login
if force:
self.http_util._request.cookies.clear()
ok = fetch_login(self.http_util, self.account.account, AESCipher(CIPHER_KEY).decrypt(self.account.password))
# if ok:
# always save cookie
HttpUtilCookiesHelper.save_cookie(provider=Provider2(), platform=AtCoder.__name__, account=self.account.account)
return ok
return True
async def async_get_contest_meta(self, cid: str) -> ContestMeta:
return self.get_contest_meta(cid)
def get_contest_meta(self, cid: str) -> ContestMeta:
self.login_website()
res = fetch_tasks_meta(self.http_util, cid)
def transform(pm: ParserProblemResult) -> ProblemMeta:
return ProblemMeta(id=pm.id,
url=pm.url,
name=pm.name,
contest_id=cid,
memory_limit_kb=pm.memory_limit_kb,
time_limit_msec=pm.time_limit_msec)
return ContestMeta(id=cid, url=res.url, problems=[transform(pm) for pm in res.problems])
async def async_problem(self, problem: ProblemMeta) -> ParsedProblemResult:
return self.problem(problem)
  # Note: on AtCoder the problem letter may differ from the task slug,
  # e.g. contest arc058 problem C is https://atcoder.jp/contests/arc058/tasks/arc058_a
def problem(self, pm: ProblemMeta) -> ParsedProblemResult:
html = self.http_util.get(pm.url).text
res = parse_task(html=html)
return ParsedProblemResult(
        # status=Status.NOTVIS,  # TODO parse the submission status
id=res.id,
title=pm.name,
test_cases=[TestCase(in_data=o.input, out_data=o.output) for o in res.tests],
oj=AtCoder.__name__,
# description=res.id,
time_limit=str(pm.time_limit_msec),
mem_limit=str(pm.memory_limit_kb),
url=res.url,
)
def submit_code(self, problem_url: str, language_id: str, code_path: str) -> HttpRespInterface:
if not self.login_website():
raise Exception('Login Failed')
return fetch_submit(self.http_util,
problem_url=problem_url,
lang_id=language_id,
source_code=open(code_path, 'r').read())
async def async_get_result_yield(self, problem_url: str, time_gap: float = 1) -> AsyncIterator[SubmissionResult]:
while True:
res = transform_Result(fetch_result(self.http_util, problem_url))
yield res
if res.cur_status not in [SubmissionResult.Status.PENDING, SubmissionResult.Status.RUNNING]:
break
      await asyncio.sleep(time_gap)  # non-blocking sleep so the event loop is not stalled between polls
# TODO fav control ?
def print_friends_standing(self, cid: str) -> None:
if not self.login_website():
raise Exception('Login Failed')
standing = fetch_standing(self.http_util, contest_id=cid)
table = Table(title=f"Binary standing {cid}")
table.add_column("rank", style="cyan")
table.add_column("handle")
for task in standing.TaskInfo:
table.add_column(task.Assignment)
for i in range(len(standing.StandingsData)):
row: List[str] = []
d = standing.StandingsData[i]
is_self = d.UserName == self.account.account
      if is_self or (i & (i + 1)) == 0:  # i is 0-indexed: keep ranks 1, 2, 4, 8, ... plus the user's own row
row.append(str(d.Rank))
row.append(d.UserScreenName)
for task in standing.TaskInfo:
if task.TaskScreenName in d.TaskResults:
# score = d.TaskResults[task.TaskScreenName].Score // 100
penalty = d.TaskResults[task.TaskScreenName].Penalty
elapsed_s = d.TaskResults[task.TaskScreenName].Elapsed // 1000 // 1000 // 1000
row.append(f"+{penalty}\n{s2str(elapsed_s)}")
else:
row.append("")
table.add_row(*row, style=Style(bgcolor="dark_green" if is_self else None))
if is_self:
break
console.print(table)
def get_language(self) -> LangKV:
results = fetch_language(self.http_util)
ret: LangKV = {}
for item in results:
ret[item.value] = item.text
return ret
def AtcoderGen(account: Account, provider: Provider2) -> BaseOj:
http_util = provider.get(DI_HTTP)
logger = provider.get(DI_LOGGER)
oj: BaseOj = AtCoder(http_util=http_util, logger=logger, account=account, html_tag=HtmlTag(http_util))
return oj | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/cli/adaptor/AtCoderAdaptor.py | AtCoderAdaptor.py |
from typing import List
# Analyze is a tight dependency here, so dependency injection is not used (?)
from oi_cli2.model.Analyze import Analyze
# Dependency injection
from oi_cli2.utils.db import JsonFileDB
# Static configuration
from oi_cli2.utils.consts.ids import Ids
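# AnalyzeManager persists per-platform analyze configs (submit language, template alias,
# class path) as a JSON list via JsonFileDB under the Ids.analyze key, keeping at most
# one default entry per platform.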
class AnalyzeManager:
def __init__(self, db: JsonFileDB):
self.db = db
self.keys = ['platform', 'alias', 'path', 'compilation', 'execute', 'clean', 'default']
def _get_analyze_list(self) -> List[Analyze]:
analyze_list: List[dict] = self.db.load(Ids.analyze) or []
return list(map(lambda d: Analyze().dict_init(d), analyze_list))
def _set_analyze_list(self, analyze_list: List[Analyze]):
analyze_list.sort(key=lambda temp0: (temp0.platform, -temp0.default, temp0.template_alias))
self.db.save(Ids.analyze, list(map(lambda d: d.__dict__, analyze_list)))
def get_list(self) -> List[Analyze]:
return self._get_analyze_list()
def set_default(self, index: int):
analyze_list: List[Analyze] = self._get_analyze_list()
assert 0 <= index < len(analyze_list)
for i in range(len(analyze_list)):
if i == index:
analyze_list[i].default = True
elif analyze_list[i].platform == analyze_list[index].platform:
analyze_list[i].default = False
self._set_analyze_list(analyze_list)
def delete_analyze(self, index):
analyze_list: List[Analyze] = self._get_analyze_list()
assert 0 <= index < len(analyze_list)
if analyze_list[index].default:
for i in range(len(analyze_list)):
if i == index:
continue
if analyze_list[i].platform == analyze_list[index].platform:
analyze_list[i].default = True
break
del analyze_list[index]
self._set_analyze_list(analyze_list)
  # mark the new entry as default if its platform has no default yet
def add_analyze(self, platform, submit_lang, template_alias, class_path):
analyze_list: List[Analyze] = self._get_analyze_list()
is_default = True
for item in analyze_list:
if item.platform == platform and item.default:
is_default = False
break
analyze_list.append(Analyze().initial(platform, submit_lang, template_alias, class_path, default=is_default))
self._set_analyze_list(analyze_list)
def modify_submit_lang(self, index: int, value: str):
analyze_list: List[Analyze] = self._get_analyze_list()
assert 0 <= index < len(analyze_list)
analyze_list[index].submit_lang = value
self._set_analyze_list(analyze_list)
def modify_template_alias(self, index: int, value: str):
analyze_list: List[Analyze] = self._get_analyze_list()
assert 0 <= index < len(analyze_list)
analyze_list[index].template_alias = value
self._set_analyze_list(analyze_list)
  def modify_class_path(self, index: int, value: str):  # path of the instantiated class file
analyze_list: List[Analyze] = self._get_analyze_list()
assert 0 <= index < len(analyze_list)
analyze_list[index].class_path = value
self._set_analyze_list(analyze_list) | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/utils/analyze.py | analyze.py |
from typing import Any, Dict
from oi_cli2.core.DI import DI_ACCMAN, DI_CFG, DI_DB, DI_DB_COOKIES, DI_HTTP, DI_LOGGER, DI_PROVIDER, DI_TEMPMAN
from oi_cli2.cli.constant import CIPHER_KEY, COOKIES_FILE, OT_FOLDER, OT_LOG, USER_CONFIG_FILE
from oi_cli2.utils.HttpUtil import HttpUtil
from oi_cli2.utils.Logger import getLogger
from oi_cli2.utils.account import AccountManager
from oi_cli2.utils.configFolder import ConfigFolder
from oi_cli2.utils.db import JsonFileDB
from oi_cli2.utils.enc import AESCipher
from oi_cli2.utils.template import TemplateManager
from .Singleton import Singleton
@Singleton
class Provider2:
_objs: Dict[str, Any] = {}
_fns: Dict[str, Any] = {}
  loop = 0  # simple guard against circular dependencies
def __init__(self) -> None:
self.reg(DI_CFG, gen_cfg)
self.reg(DI_LOGGER, gen_logger)
self.reg(DI_HTTP, gen_http_util)
self.reg(DI_DB, gen_json_db)
self.reg(DI_DB_COOKIES, gen_json_db_cookies)
self.reg(DI_ACCMAN, gen_account_manager)
self.reg(DI_TEMPMAN, gen_template_manager)
def reg(self, key: str, func) -> bool:
assert key not in self._fns
self._fns[key] = func
return True
def get(self, key: str) -> Any:
self.loop += 1
assert key in self._fns
assert (self.loop < 100)
if key not in self._objs:
self._objs[key] = self._fns[key](self)
self.loop -= 1
return self._objs[key]
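# Minimal usage sketch: dependencies are created lazily on first get() and then cached,
# so repeated lookups return the same instance, e.g.
#   provider = Provider2()
#   logger = provider.get(DI_LOGGER)
#   http = provider.get(DI_HTTP)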
def gen_cfg(p: Provider2):
return ConfigFolder(OT_FOLDER)
def gen_logger(o: Provider2):
try:
config_folder: ConfigFolder = o.get(DI_CFG)
logger = getLogger(config_folder.get_file_path(OT_LOG))
except Exception as e:
print(str(e))
exit(1)
return logger
def gen_template_manager(o: Provider2):
return TemplateManager(db=o.get(DI_DB))
def gen_account_manager(o: Provider2):
return AccountManager(db=o.get(DI_DB), cipher=AESCipher(CIPHER_KEY), logger=o.get(DI_LOGGER))
def gen_json_db(o: Provider2):
config_folder: ConfigFolder = o.get(DI_CFG)
return JsonFileDB(config_folder.get_config_file_path(USER_CONFIG_FILE), logger=o.get(DI_LOGGER))
def gen_json_db_cookies(o: Provider2):
config_folder: ConfigFolder = o.get(DI_CFG)
return JsonFileDB(config_folder.get_config_file_path(COOKIES_FILE), logger=o.get(DI_LOGGER))
def gen_http_util(o: Provider2):
return HttpUtil(logger=o.get(DI_LOGGER)) | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/utils/Provider2.py | Provider2.py |
from typing import List, Optional
import logging
from oi_cli2.model.Template import Template
from oi_cli2.utils.db import JsonFileDB
from oi_cli2.utils.consts.ids import Ids
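# Templates are stored as a JSON list via JsonFileDB under the Ids.template key; the list
# is kept sorted by platform (default template first, then alias) and each platform has at
# most one default template.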
class TemplateManager:
def __init__(self, db: JsonFileDB, platform: str = ''):
self.db = db
self.platform = platform
self.keys = ['platform', 'alias', 'path', 'compilation', 'execute', 'clean', 'default']
def _get_template_list(self) -> List[Template]:
temp_list: List[dict] = self.db.load(Ids.template) or []
return list(map(lambda d: Template().dict_init(d), temp_list))
def _set_template_list(self, temp_list: List[Template]):
temp_list.sort(key=lambda temp0: (temp0.platform, -temp0.default, temp0.alias))
self.db.save(Ids.template, list(map(lambda d: d.__dict__, temp_list)))
def get_list(self) -> List[Template]:
return self._get_template_list()
def alias_exist(self, temps: List[Template], platform: str, alias: str):
return self.find_alias(temps=temps, platform=platform, alias=alias) != -1
def find_alias(self, temps: List[Template], platform: str, alias: str) -> int:
for i in range(len(temps)):
if temps[i].platform == platform and temps[i].alias == alias:
return i
return -1
def get_platform_default(self, platform: str) -> Optional[Template]:
temps: List[Template] = self._get_template_list()
for i in range(len(temps)):
if temps[i].platform == platform and temps[i].default:
return temps[i]
return None
def get_default(self) -> Optional[Template]:
if not self.platform:
      logging.error('Please set platform first or use get_platform_default()')
return None
temps: List[Template] = self._get_template_list()
for i in range(len(temps)):
if temps[i].platform == self.platform and temps[i].default:
return temps[i]
return None
def get_template_by_name(self, platform: str, name: str) -> Optional[Template]:
temps: List[Template] = self._get_template_list()
for i in range(len(temps)):
if temps[i].platform == platform and temps[i].alias == name:
return temps[i]
return None
def get_template_by_alias(self, alias: str) -> Optional[Template]:
    # deprecated: use get_template_by_name(platform, name) instead
    assert False
if not self.platform:
logging.error('Please set platform first or using get_platform_default()')
return None
temps: List[Template] = self._get_template_list()
for i in range(len(temps)):
if temps[i].platform == self.platform and temps[i].alias == alias:
return temps[i]
return None
def set_temps_default(self, temps: List[Template], index: int):
assert 0 <= index < len(temps)
for i in range(len(temps)):
if i == index:
temps[i].default = True
elif temps[i].platform == temps[index].platform:
temps[i].default = False
def set_default(self, index: int):
temps: List[Template] = self._get_template_list()
assert 0 <= index < len(temps)
for i in range(len(temps)):
if i == index:
temps[i].default = True
elif temps[i].platform == temps[index].platform:
temps[i].default = False
self._set_template_list(temps)
def delete_template(self, platform: str, name: str) -> bool:
temps: List[Template] = self._get_template_list()
idx = -1
for i in range(len(temps)):
if temps[i].platform == platform and temps[i].alias == name:
idx = i
break
if idx < 0:
return False
if temps[idx].default:
for i in range(len(temps)):
if i == idx:
continue
if temps[i].platform == temps[idx].platform:
temps[i].default = True
break
del temps[idx]
self._set_template_list(temps)
return True
  # mark the new template as default if its platform has no default yet
def add_template(self, platform, alias, path, compilation, execute, uplang) -> None:
temps: List[Template] = self._get_template_list()
if self.find_alias(temps, platform, alias) != -1:
raise Exception('Duplicate alias')
is_default = True
for item in temps:
if item.platform == platform and item.default:
is_default = False
break
temps.append(Template().initial(platform=platform,
alias=alias,
path=path,
compilation=compilation,
execute=execute,
uplang=uplang,
default=is_default))
self._set_template_list(temps)
def modify_alias(self, index: int, value: str):
temps: List[Template] = self._get_template_list()
assert 0 <= index < len(temps)
if self.alias_exist(temps, temps[index].platform, value):
raise Exception('Duplicate alias')
temps[index].alias = value
self._set_template_list(temps)
def modify_path(self, index: int, value: str):
temps: List[Template] = self._get_template_list()
assert 0 <= index < len(temps)
temps[index].path = value
self._set_template_list(temps)
def modify_compilation(self, index: int, value: str):
temps: List[Template] = self._get_template_list()
assert 0 <= index < len(temps)
temps[index].compilation = value
self._set_template_list(temps)
def modify_execute(self, index: int, value: str):
temps: List[Template] = self._get_template_list()
assert 0 <= index < len(temps)
temps[index].execute = value
self._set_template_list(temps)
def modify_clean(self, index: int, value: str):
temps: List[Template] = self._get_template_list()
assert 0 <= index < len(temps)
temps[index].clean = value
self._set_template_list(temps)
# update
def update_template(self, platform, alias: str, newalias: str, path: str, compilation: str, execute: str, uplang: str,
default: bool):
temps: List[Template] = self._get_template_list()
idx = self.find_alias(temps, platform, alias)
if idx == -1:
raise Exception('Template Not Exist')
if default:
self.set_temps_default(temps, idx)
if newalias:
temps[idx].alias = newalias
if path:
temps[idx].path = path
if compilation:
temps[idx].compilation = compilation
if execute:
temps[idx].execute = execute
if uplang:
temps[idx].uplang = uplang
self._set_template_list(temps) | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/utils/template.py | template.py |
import logging
from typing import List, Optional
from oi_cli2.model.Account import Account
# Dependency injection
from oi_cli2.utils.db import JsonFileDB
from oi_cli2.utils.enc import AESCipher
# Static configuration
from oi_cli2.utils.consts.ids import Ids
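# Accounts are stored as a JSON list via JsonFileDB under the Ids.account key; passwords
# are saved AES-encrypted by the injected cipher and each platform keeps at most one
# default account.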
class AccountManager:
def __init__(self, db: JsonFileDB, cipher: AESCipher, logger=logging):
self.db = db
self.cipher = cipher
self.logger = logger
def _get_account_list(self) -> List[Account]:
acc_list: List[dict] = self.db.load(Ids.account) or []
return list(map(lambda d: Account().dict_init(d), acc_list))
def _set_account_list(self, acc_list: List[Account]):
acc_list.sort(key=lambda acc0: (acc0.platform, -acc0.default, acc0.account, acc0.password))
self.db.save(Ids.account, list(map(lambda d: d.__dict__, acc_list)))
def get_list(self) -> List[Account]:
return self._get_account_list()
def set_default(self, index: int):
accs: List[Account] = self._get_account_list()
assert 0 <= index < len(accs)
for i in range(len(accs)):
if i == index:
accs[i].default = True
elif accs[i].platform == accs[index].platform:
accs[i].default = False
self._set_account_list(accs)
def get_default_account(self, platform: str) -> Account:
accs: List[Account] = self._get_account_list()
for i in range(len(accs)):
if accs[i].platform == platform and accs[i].default:
return accs[i]
raise Exception(f'Account Not Found int Platform [{platform}]')
def get_account(self, platform: str, account: str) -> Optional[Account]:
accs: List[Account] = self._get_account_list()
for i in range(len(accs)):
if accs[i].platform == platform and accs[i].account == account:
return accs[i]
return None
def modify(self, platform: str, account: str, password=None, default=None) -> bool:
modified = False
accs: List[Account] = self._get_account_list()
for item in accs:
if item.platform == platform:
if item.account == account:
if password is not None:
item.password = self.cipher.encrypt(password)
if default:
item.default = True
modified = True
elif default:
item.default = False
self._set_account_list(accs)
return modified
# Delete
def delete(self, platform: str, account: str) -> bool:
accs: List[Account] = self._get_account_list()
for i in range(len(accs)):
acc = accs[i]
if acc.account != account or acc.platform != platform:
continue
if acc.default:
# set new default
for j in range(len(accs)):
if j == i:
continue
          if accs[i].platform == accs[j].platform:
            accs[j].default = True  # promote another account on the same platform, not the one being deleted
break
del accs[i]
self._set_account_list(accs)
return True
return False
def new(self, platform, account, password, default=False) -> bool:
accs: List[Account] = self._get_account_list()
self.logger.debug("platform = %s, account = %s, default = %s", platform, account, default)
has_default = False
for item in accs:
if item.platform == platform and item.account == account:
return False
for item in accs:
if item.platform == platform and item.default:
has_default = True
break
# first account in platform
if not has_default:
default = True
accs.append(Account().initial(platform=platform,
account=account,
password=self.cipher.encrypt(password),
default=default))
if default:
for item in accs:
if item.platform == platform and item.account != account:
item.default = False
self._set_account_list(accs)
return True | yxr-oi-cli | /yxr_oi_cli-0.2.2.4.tar.gz/yxr_oi_cli-0.2.2.4/oi_cli2/utils/account.py | account.py |
import numpy as np
import math
import sys
import click
__version__=1.1
# blocks is a list; the face info includes the block id, which starts from 0, not 1
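# Decorator that post-processes one2one results: each matched face is normalised with
# reverse_line1 and duplicate matches that cover the same index box of the same block
# are dropped.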
def similar_idenfication(func):
def wrap_func(*d,**k):
a = func(*d,**k)
a = [reverse_line1(i) for i in a]
a.sort(key = lambda x:x[0])
length = len(a)
for i in range(length-1):
bid1,(h1,w1,d1,_,_),_,_ = a[i]
is_similar = False
for bid2,(h2,w2,d2,_,_),_,_ in a[i+1:]:
if bid2 == bid1 and min(h1) == min(h2) and max(h1) == max(h2) and \
min(w1) == min(w2) and max(w1) == max(w2) and \
min(d1) == min(d2) and max(d1) == max(d2):
is_similar = True
if is_similar:
a[i] = None
break
a = [i for i in a if i is not None]
return a
return wrap_func
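# Rescale all block coordinates so that the largest overall extent becomes 1000; this puts
# the absolute tolerances (res) used by the matching routines on a consistent scale.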
def standard_mesh(blocks):
def f_max(t):
t = np.array(t)
return t.max()-t.min()
tx = [(i['X'].max(),i['X'].min()) for i in blocks]
ty = [(i['Y'].max(),i['Y'].min()) for i in blocks]
tz = [(i['Z'].max(),i['Z'].min()) for i in blocks]
dx = f_max(tx)
dy = f_max(ty)
dz = f_max(tz)
rate = 1000 / max(dx,dy,dz)
return [{key:i[key]*rate for key in 'XYZ'} for i in blocks]
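# Decorator that converts the 0-based index ranges returned by the wrapped function into
# 1-based (Fortran-style) ranges; the trailing pair of varying-axis indices is shifted by
# one as well.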
def to_fortran_format(func):
def deal_r(r):
a = [(i[0]+1,i[1]+1) for i in r[:-2]]
a.extend((r[-2]+1,r[-1]+1))
return a
def wrap_func(*d,**k):
a = func(*d,**k)
if a is not None:
a = [deal_r(i) for i in a]
return a
return wrap_func
def is_equal_face(face1,face2,res = 1e-6):
x1,y1,z1 = face1
x2,y2,z2 = face2
if x1.shape != x2.shape:
return False
if (np.abs(x1-x2).max() < res).all() and (np.abs(y1-y2).max() < res).all() and (np.abs(z1-z2).max() < res).all():
return True
else:
return False
def is_parallel_face(face1,face2,res = 1e-3):
x1,y1,z1 = face1
x2,y2,z2 = face2
dx = np.abs(x1-x2)
dy = np.abs(y1-y2)
dz = np.abs(z1-z2)
if (dx.max() - dx.min() < 2*res).all() and (dy.max() - dy.min()< 2*res).all() and (dz.max()-dz.min() < 2*res).all():
return True
else:
return False
def is_rotated_face(face1,face2,res = 1e-3):
x1,y1,_ = face1
x2,y2,_ = face2
dx = (x1*x2+y1*y2)/(np.sqrt(x1*x1+y1*y1)*np.sqrt(x2*x2+y2*y2))
if (dx.max() - dx.min() < 0.02*res).all():
return True
else:
return False
def is_equal_point(p1,p2,res = 1e-3):
x1, y1, z1 = p1
x2, y2, z2 = p2
m = ((x2-x1)**2 + (y1-y2)**2 + (z1-z2)**2)**0.5
if m < res:
return True
else:
return False
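# Detect an O-grid topology: if two opposite boundary faces of a block coincide (the block
# wraps around onto itself), return the pair of coincident faces as blocking data,
# otherwise return None.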
@to_fortran_format
def Ogrid_check(block,res=1e-3):
x,y,z = block['X'],block['Y'],block['Z']
idim,jdim,kdim = x.shape
if abs(x[0,0,0] - x[-1,0,0]) < res:
if np.abs(x[0]-x[-1]).max() < res and np.abs(y[0]-y[-1]).max() < res and np.abs(z[0]-z[-1]).max() < res:
return ((0,0),(0,jdim-1),(0,kdim-1),1,2),((idim-1,idim-1),(0,jdim-1),(0,kdim-1),1,2)
if abs(y[0,0,0] - y[0,-1,0]) < res:
if np.abs(x[:,0]-x[:,-1]).max() < res and np.abs(y[:,0]-y[:,-1]).max() < res and np.abs(z[:,0]-z[:,-1]).max() < res:
return ((0,idim-1),(0,0),(0,kdim-1),0,2),((0,idim-1),(jdim-1,jdim-1),(0,kdim-1),0,2)
if abs(z[0,0,0] - z[0,0,-1]) < res:
if np.abs(x[:,:,0]-x[:,:,-1]).max() < res and np.abs(y[:,:,0]-y[:,:,-1]).max() < res and np.abs(z[:,:,0]-z[:,:,-1]).max() < res:
return ((0,idim-1),(0,jdim-1),(0,0),0,1),((0,idim-1),(0,jdim-1),(kdim-1,kdim-1),0,1)
return None
def point_in_face(point,face,return_position = False,res = 1e-3):
    # Check whether the point lies on the face: return True (or its (i, j) position when
    # return_position is set), otherwise False
x,y,z = face
px,py,pz = point
tx = np.abs(x - px)
ty = np.abs(y - py)
tz = np.abs(z - pz)
tx_min = tx.min()
ty_min = ty.min()
tz_min = tz.min()
if tx_min < res and ty_min < res and tz_min < res:
        # a point of face2 lies on face1
if return_position:
t = tx+ty+tz
m = np.where(t == t.min())
return m
return True
else:
return False
# def cut_face(face,px1,py1,px3,py3,xstep,ystep):
# if py3+ystep ==-1 and px3+xstep == -1:
# face_temp = [i[px1::xstep,py1::ystep] for i in face]
# elif px3 + xstep == -1:
# face_temp = [i[px1::xstep,py1:py3+ystep:ystep] for i in face]
# elif py3+ ystep == -1:
# face_temp = [i[px1:px3+xstep:xstep,py1::ystep] for i in face]
# else:
# face_temp = [i[px1:px3+xstep:xstep,py1:py3+ystep:ystep] for i in face]
# return face_temp
def get_double_face_x(face1, face2, point1, point3, xstep, ystep,message=None):
x1,y1,z1 = face1
x2,y2,z2 = face2
# print('shape',x2.shape)
px1,py1 = point1
px3,py3 = point3
(idim1,jdim1),(idim2,jdim2) = x1.shape,x2.shape
result = []
xstep_t = -xstep
if py3+ystep == -1:
face_temp1 = [x1[:px1+xstep_t:xstep_t,py1::ystep],
y1[:px1+xstep_t:xstep_t,py1::ystep],
z1[:px1+xstep_t:xstep_t,py1::ystep]]
else:
face_temp1 = [x1[:px1+xstep_t:xstep_t,py1:py3+ystep:ystep],
y1[:px1+xstep_t:xstep_t,py1:py3+ystep:ystep],
z1[:px1+xstep_t:xstep_t,py1:py3+ystep:ystep]]
width1 = face_temp1[0].shape[0]
face_temp2 = [x2[width1-1::-1],y2[width1-1::-1],z2[width1-1::-1]]
if is_equal_face(face_temp1,face_temp2):
if xstep<0:
result.append(((px1,0),(py1,py3),0,1))
else:
result.append(((px1,idim1-1),(py1,py3),0,1))
result.append(((0,width1-1),(0,jdim2-1),0,1))
else:
        print('warning: first half of the split double-face did not match', message)
#find the other
# xstep_t = xstep
if py3 + ystep == -1:
face_temp1 = [x1[:px3+xstep:xstep,py1::ystep],
y1[:px3+xstep:xstep,py1::ystep],
z1[:px3+xstep:xstep,py1::ystep]]
else:
face_temp1 = [x1[:px3+xstep:xstep,py1:py3+ystep:ystep],
y1[:px3+xstep:xstep,py1:py3+ystep:ystep],
z1[:px3+xstep:xstep,py1:py3+ystep:ystep]]
width2 = face_temp1[0].shape[0]
face_temp2 = [x2[idim2-width2:],
y2[idim2-width2:],
z2[idim2-width2:]]
# print(face_temp1[0].shape,face_temp2[0].shape)
if is_equal_face(face_temp1,face_temp2):
if xstep>0:
result.append(((0,px3),(py1,py3),0,1))
else:
result.append(((idim1-1,px3),(py1,py3),0,1))
result.append(((idim2-width2,idim2-1),(0,jdim2-1),0,1))
else:
        print('warning: second half of the split double-face did not match', message)
    print('double-face match result:', result)
if len(result) == 0:
not_connected(45,message)
result = None
return result
def get_matched_face(face1,face2,point1,point3,xstep,ystep,message):
px1,py1 = point1
px3,py3 = point3
if (px1 - px3) * xstep>0:
#across x axis
return get_double_face_x(face1,face2,point1,point3,xstep,ystep,message)
elif (py1 - py3) * ystep>0:
#across y axis
face1_T = [i.T for i in face1]
face2_T = [i.T for i in face2]
point1_T = py1,px1
point3_T = py3,px3
result = get_double_face_x(face1_T,face2_T,point1_T, point3_T, ystep, xstep,message)
if result:
result = [(i2,i1,i3,i4) for i1,i2,i3,i4 in result]
return result
else:
# x_index = range(px1, px3+xstep, xstep)
# y_index = range(py1, py3+ystep, ystep)
# print(x_index,y_index,x1.shape)
if py3+ystep ==-1 and px3+xstep == -1:
face1_temp = [i[px1::xstep,py1::ystep] for i in face1]
elif px3 + xstep == -1:
face1_temp = [i[px1::xstep,py1:py3+ystep:ystep] for i in face1]
elif py3+ ystep == -1:
face1_temp = [i[px1:px3+xstep:xstep,py1::ystep] for i in face1]
else:
face1_temp = [i[px1:px3+xstep:xstep,py1:py3+ystep:ystep] for i in face1]
if is_equal_face(face1_temp,face2):
idim2,jdim2 = face2[0].shape
result = ((px1,px3),(py1,py3),0,1),((0,idim2-1),(0,jdim2-1),0,1)
return result
return None
def not_connected(code=0,message='no message', warningOutput=False):
    # no matching grid was found, which is a bit unexpected here
# if warningOutput:
# print("warning!please insure your grid is correct! code:",code,message)
return None
def SmallInBig(face1,face2,face1_info,face2_info,res = 1e-3,message = None, warningOutput = True):
    # at least one corner point of face2 lies on face1
oblock1,oblock2 = message[-2:]
x1,y1,z1 = face1
x2,y2,z2 = face2
idim1,jdim1 = x1.shape
idim2,jdim2 = x2.shape
if idim1*jdim1 < idim2*jdim2:
exchange = True
face1,face2 = face2,face1
face1_info,face2_info = face2_info,face1_info
x1,y1,z1 = face1
x2,y2,z2 = face2
idim1,jdim1 = x1.shape
idim2,jdim2 = x2.shape
oblock1,oblock2 = oblock2, oblock1
else:
exchange = False
p3 = x2[-1,-1],y2[-1,-1],z2[-1,-1]
pif3 = point_in_face(p3,face1,return_position=True)
if pif3:
p1 = x2[0,0],y2[0,0],z2[0,0]
pif = point_in_face(p1,face1,return_position=True)
# print('pif3',x2.shape)
if pif:
# print('pif1')
px1,py1 = pif
px3,py3 = pif3
px1,py1 = px1[0],py1[0]
px3,py3 = px3[0],py3[0]
p2 = x2[0,-1],y2[0,-1],z2[0,-1]
pif2 = point_in_face(p2,face1,return_position=True)
if pif2:
px2, py2 = pif2
px2, py2 = px2[0],py2[0]
                # all three corner points of face2 lie on face1; also check that a
                # non-corner point of face2 lies on face1, to rule out the case where
                # the two faces wrap around and form a ring
p_in = x2[1,1],y2[1,1],z2[1,1]
pif_in = point_in_face(p_in,face1,return_position=False)
if not pif_in:
return not_connected('circle',message, warningOutput)
else:
return not_connected(2,message, warningOutput)
if px1 == px2 == px3 or py1 == py2 == py3:
                # the three corner points are collinear
return None
            # check whether face2 must be transposed before matching
if px2 == px1:
is_transpose = False
else:
is_transpose = True
if is_transpose:
x2, y2, z2 = x2.T, y2.T, z2.T
face2 = [x2, y2, z2]
idim2, jdim2 = x2.shape
            # determine the matching direction along each axis
if px2>px3:
xstep = -1
else:
xstep = 1
if py1>py2:
ystep = -1
else:
ystep = 1
pdy = x2[0,1], y2[0,1], z2[0,1]
pdx = x2[1,0], y2[1,0], z2[1,0]
if idim1 > px1+xstep and jdim1 > py1:
f1pdx = x1[px1+xstep,py1],y1[px1+xstep,py1],z1[px1+xstep,py1]
else:
if idim1 == px1+xstep and xstep == 1:
f1pdx = x1[0,py1],y1[0,py1],z1[0,py1]
else:
                    raise Exception('condition not found for x')
if idim1 > px1 and jdim1 > py1+ystep:
f1pdy = x1[px1,py1+ystep],y1[px1,py1+ystep],z1[px1,py1+ystep]
else:
if jdim1 == py1+ystep and ystep == 1:
f1pdy = x1[px1,0],y1[px1,0],z1[px1,0]
else:
                    raise Exception('condition not found for y')
if not is_equal_point(pdx,f1pdx):
xstep = -xstep
            # check the opposite direction; this can also fail when the grid point counts do
            # not match, in which case no valid matched face exists, so give up
if xstep+px1 >= x1.shape[0]:
return None
f1pdx2 = x1[px1+xstep,py1],y1[px1+xstep,py1],z1[px1+xstep,py1]
if not is_equal_point(pdx,f1pdx2):
if oblock1 is True:
return None
else:
return not_connected(12,message)
if not is_equal_point(pdy,f1pdy):
ystep = -ystep
            # check the opposite direction; this can also fail when the grid point counts do
            # not match, in which case no valid matched face exists, so give up
if py1+ystep >= x1.shape[1]:
return
f1pdy2 = x1[px1,py1+ystep],y1[px1,py1+ystep],z1[px1,py1+ystep]
if not is_equal_point(pdy,f1pdy2):
if oblock1 is True:
return None
else:
return not_connected(344 ,message,warningOutput)
point1 = px1,py1
point3 = px3,py3
result = get_matched_face(
face1, face2, point1, point3, xstep, ystep,message
)
if result:
if len(result) == 2:
r1, r2 = result
if is_transpose:
r2 = r2[1],r2[0],r2[3],r2[2]
if exchange:
result = r2,r1
else:
result = r1,r2
return result
elif len(result) == 4:
r1,r2,r3,r4 = result
if is_transpose:
r2 = r2[1],r2[0],r2[3],r2[2]
r4 = r4[1],r4[0],r4[3],r4[2]
if exchange:
result = r2,r1,r4,r3
else:
result = r1,r2,r3,r4
return result
else:
if oblock1 is True and oblock2 is True:
return None
else:
return not_connected(4,message, warningOutput)
    return not_connected('no match found', message, warningOutput)
def FindPointInLines(points, lines, res = 1e-3):
length = points.shape[0]
p = points[0]
for k,line in enumerate(lines):
t1 = np.abs(line - p ).sum(1).min()
if t1 < res:
return 0,k
for i in range(6,-1,-1):
step = 2**i
for j in range(0,length,step):
if j % (step*2) == 0:
continue
p = points[j]
for k,line in enumerate(lines):
t1 = np.abs(line - p ).sum(1).min()
if t1 < res:
return j,k
return None, None
def FindBoundPointInLine(pStart,pEnd,points,line,res = 1e-5):
# res =
distance = lambda line0,p0:1 if np.abs(line0 - p0).sum(1).min() < res else -1
stateS = distance(line,points[pStart])
# step = 1 if pEnd > pStart else -1
# for ii in range(pStart,pEnd+step,step):
# if distance(line,points[ii]) == 1:
# print(ii)
# break
while True:
pM = (pEnd+pStart) // 2
stateM = distance(line,points[pM])
if stateM*stateS<0:
pEnd = pM
# stateE = stateM
else:
pStart = pM
stateS = stateM
if pEnd - pStart <= 1:
if stateS == 1:
# print('start',pStart,pEnd)
return pStart
else:
# print('end',pStart,pEnd)
return pEnd
def FindPartInFace(face1,face2,res = 1e-3):
x1,y1,z1 = face1
x2,y2,z2 = face2
points0 = np.vstack([x2[:,0], y2[:,0], z2[:,0]]).T
points1 = np.vstack([x2[:,-1], y2[:,-1], z2[:,-1]]).T
line1 = np.vstack([x1[:,0],y1[:,0],z1[:,0]]).T
line2 = np.vstack([x1[:,-1],y1[:,-1],z1[:,-1]]).T
lines = [line1, line2]
pN,lineN = FindPointInLines(points0,lines)
if pN is None:
return None,None
p1 = FindBoundPointInLine(0,pN,points0,lines[lineN])
# print(p1,pN,"7777777777")
p2 = FindBoundPointInLine(pN,points0.shape[0],points0,lines[lineN])
if np.abs(lines[1-lineN] - points1[p1]).sum(1).min()>res:
return None,None
if np.abs(lines[1-lineN] - points1[p2]).sum(1).min()>res:
return None,None
if p1 == p2:
return None,None
else:
return p1,p2
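# Try to match face1 against face2 and return the matched index ranges of both faces
# (converted to Fortran 1-based indexing by the decorator), or None if the faces are not
# connected. Handles identical faces, a small face contained in a bigger one, and partial
# overlaps that split into two matched pieces (in which case four tuples are returned).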
@to_fortran_format
def face2face(face1,face2,face1_info,face2_info,res = 1e-3,message = None,**kargs):
def TransPartConnectResult(r1,r2,exchange,face1_T,face2_T,p1):
r2 = (r2[0][0]+p1,r2[0][1]+p1),r2[1],r2[2],r2[3]
if exchange:
r1,r2 = r2,r1
face1_T,face2_T = face2_T,face1_T
if face1_T:
r1 = r1[1], r1[0], r1[3], r1[2]
if face2_T:
r2 = r2[1], r2[0], r2[3], r2[2]
return r1,r2
oblock1,oblock2 = message[-2:]
(x1min,x1max),(y1min,y1max),(z1min,z1max) = face1_info
(x2min,x2max),(y2min,y2max),(z2min,z2max) = face2_info
if x2min - x1max > res or x1min - x2max > res:
return None
if y2min - y1max > res or y1min - y2max > res:
return None
if z2min - z1max > res or z1min - z2max > res:
return None
x1,y1,z1 = face1
x2,y2,z2 = face2
idim1,jdim1 = x1.shape
idim2,jdim2 = x2.shape
if x1.shape == x2.shape:
if np.abs(x1 - x2).max() < res and np.abs(y1 - y2).max() < res and np.abs(z1 - z2).max() < res:
return ((0,idim1-1),(0,jdim1-1),0,1),((0,idim2-1),(0,jdim2-1),0,1)
if (idim1>=idim2 and jdim1>=jdim2) or (idim1<=idim2 and jdim1<=jdim2) or \
(idim1>=jdim2 and jdim1>=idim2) or (idim1<=jdim2 and jdim1<=idim2):
        # face1 should be the bigger face and face2 the one contained in it; if face1 is
        # smaller than face2, swap them.
# print('first ')
a = SmallInBig(face1,face2,face1_info,face2_info,message=message, warningOutput=False,res = res)
# a = None
if a:
return a
else:
            # one face is smaller than the other, but the small face's corner points are not
            # on the big face; here face1 is the big face and face2 the small one (note the
            # swap back later)
            # make sure face1 is the bigger face
if idim1*jdim1 < idim2*jdim2:
exchange = True
face1,face2 = face2,face1
face1_info,face2_info = face2_info,face1_info
x1,y1,z1 = face1
x2,y2,z2 = face2
idim1,jdim1 = x1.shape
idim2,jdim2 = x2.shape
message = list(message)
message[-2],message[-1] = oblock2, oblock1
else:
exchange = False
            # make sure face1's j dimension is not larger than its i dimension
if jdim1 > idim1:
face1_T = True
x1,y1,z1 = face1
face1 = x1.T, y1.T, z1.T
x1,y1,z1 = face1
idim1,jdim1 = jdim1, idim1
else:
face1_T = False
if jdim1 == jdim2 or jdim1 == idim2:
p1 = None
if idim2 == jdim2:
p1,p2 = FindPartInFace(face1,face2)
if p1 is not None:
equalCheck = True
else:
equalCheck = False
else:
equalCheck = False
if jdim1 == idim2 and equalCheck is False:
face2_T = True
x2,y2,z2 = face2
face2 = x2.T, y2.T, z2.T
x2,y2,z2 = face2
idim2,jdim2 = jdim2, idim2
else:
face2_T = False
if p1 is None:
p1,p2 = FindPartInFace(face1,face2)
if p1 is not None:
face2 = x2[p1:p2+1], y2[p1:p2+1], z2[p1:p2+1]
t = SmallInBig(face1,face2,face1_info,face2_info,message=message,res = res)
if t:
if len(t) == 2:
r1,r2 = t
return TransPartConnectResult(r1,r2,exchange,face1_T,face2_T,p1)
                        elif len(t) == 4:
r1,r2,r3,r4 = t
r1,r2 = TransPartConnectResult(r1,r2,exchange,face1_T,face2_T,p1)
r3,r4 = TransPartConnectResult(r3,r4,exchange,face1_T,face2_T,p1)
return r1,r2,r3,r4
else:
                    return not_connected('no containment relation between the faces', message)
return None
def generate_blocks_info(blocks):
def get_block_info(block):
face_info = dict()
x,y,z = block['X'],block['Y'],block['Z']
face_info['K0'] = (x[:,:,0].min(), x[:,:,0].max()), ( y[:,:,0].min(), y[:,:,0].max()), (z[:,:,0].min(), z[:,:,0].max())
face_info['KE'] = (x[:,:,-1].min(),x[:,:,-1].max()), (y[:,:,-1].min(),y[:,:,-1].max()), (z[:,:,-1].min(),z[:,:,-1].max())
face_info['J0'] = (x[:,0,:].min(), x[:,0,:].max()), ( y[:,0,:].min(), y[:,0,:].max()), (z[:,0,:].min(), z[:,0,:].max())
face_info['JE'] = (x[:,-1,:].min(),x[:,-1,:].max()), (y[:,-1,:].min(),y[:,-1,:].max()), (z[:,-1,:].min(),z[:,-1,:].max())
face_info['I0'] = (x[0,:,:].min(), x[0,:,:].max()), ( y[0,:,:].min(), y[0,:,:].max()), (z[0,:,:].min(), z[0,:,:].max())
face_info['IE'] = (x[-1,:,:].min(),x[-1,:,:].max()), (y[-1,:,:].min(),y[-1,:,:].max()), (z[-1,:,:].min(),z[-1,:,:].max())
return face_info
return [get_block_info(i) for i in blocks]
def generate_blocks_face(blocks):
def get_block_faces(block):
faces = dict()
x,y,z = block['X'],block['Y'],block['Z']
faces['K0'] = x[:,:,0], y[:,:,0], z[:,:,0]
faces['KE'] = x[:,:,-1], y[:,:,-1], z[:,:,-1]
faces['J0'] = x[:,0,:], y[:,0,:], z[:,0,:]
faces['JE'] = x[:,-1,:], y[:,-1,:], z[:,-1,:]
faces['I0'] = x[0,:,:], y[0,:,:], z[0,:,:]
faces['IE'] = x[-1,:,:], y[-1,:,:], z[-1,:,:]
return faces
return [get_block_faces(block) for block in blocks]
def expand_to_3d(key,result,shape):
    # face2face returns a result for a 2D face without the third (fixed) axis, so expand it
    # into the same 3D form as the result of Ogrid_check
idim,jdim,kdim = shape
r1,r2,index1,index2 = result
if key[0] == 'I':
if key[1] == '0':
result0 = (1,1), r1, r2, index1+1,index2+1
else:
result0 = (idim,idim), r1, r2, index1+1,index2+1
elif key[0] == 'J':
if key[1] == '0':
result0 = r1, (1,1), r2, index1,index2+1
else:
result0 = r1, (jdim,jdim), r2, index1,index2+1
elif key[0]=='K':
if key[1] == '0':
result0 = r1,r2, (1,1),index1,index2
else:
result0 = r1,r2, (kdim,kdim),index1,index2
else:
        raise Exception('invalid face key: it must start with I, J or K')
return result0
def block_group(bid,groups):
    # find which group this block belongs to; blocks in different groups are never matched
    # against each other
if not groups:
return -1
for gid,group in enumerate(groups):
if bid in group:
return gid
return -1
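# Minimal usage sketch (assumes `blocks` has already been read elsewhere as a list of dicts
# with 'X', 'Y', 'Z' numpy arrays of shape (idim, jdim, kdim)):
#   result = one2one(blocks)
#   print(transfer_one2one_str(result))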
@similar_idenfication
def one2one(blocks,periodic_faces=None,periodic_faces_result = None,periodic_only = False,groups=[]):
    # periodic_faces is a list like [((block1, face1), (block2, face2), rotated), ...]
    # example: [((0, 'I0'), (1, 'IE'), False), ...]; block ids are C-style, starting from 0
blocks = standard_mesh(blocks)
def get_periodic_():
p_result = []
for (id1,key1),(id2,key2),rotated in periodic_faces:
face1 = blocks_faces[id1][key1]
face2 = blocks_faces[id2][key2]
rr = get_periodic_faces(face1,face2,rotated)
if rr is not None:
(r1,r2) = rr
r1 = expand_to_3d(key1,r1,blocks[id1]['X'].shape)
r2 = expand_to_3d(key2,r2,blocks[id2]['X'].shape)
p_result.append((id1+1,r1,id2+1,r2))
if not rotated:
result.append(p_result[-1])
return p_result
blocks_info = generate_blocks_info(blocks)
blocks_faces = generate_blocks_face(blocks)
result = []
keys = ['K0','KE','J0','JE','I0','IE']
try:
oblocks = dict()
for i,block in enumerate(blocks):
k = Ogrid_check(block)
if k is not None:
oblocks[i] = True
result.append((i+1,k[0],i+1,k[1]))
if periodic_faces:
p_result = get_periodic_()
if periodic_faces_result is not None:
periodic_faces_result.extend(p_result)
if periodic_only:
return periodic_faces_result
for i,block in enumerate(blocks):
# if i != 3:
# continue
for j in range(len(blocks)):
# if j != 6:
# continue
if i>=j:
continue
if block_group(i,groups) != block_group(j,groups):
continue
for key1 in keys:
face1 = blocks_faces[i][key1]
info1 = blocks_info[i][key1]
for key2 in keys:
# if key1 != 'KE' or key2 != 'IE':
# continue
face2 = blocks_faces[j][key2]
info2 = blocks_info[j][key2]
res = get_res(blocks,i+1)
t = face2face(face1,face2,info1,info2,message=(i+1,j+1,key1,key2,oblocks.get(i,False),oblocks.get(j,False)),res = res)
if t is not None:
if len(t) == 2:
r1,r2 = t
r1 = expand_to_3d(key1,r1,block['X'].shape)
r2 = expand_to_3d(key2, r2, blocks[j]['X'].shape)
result.append((i+1,r1,j+1,r2))
elif len(t) == 4:
r1,r2,r3,r4 = t
r1 = expand_to_3d(key1,r1,block['X'].shape)
r2 = expand_to_3d(key2, r2, blocks[j]['X'].shape)
r3 = expand_to_3d(key1,r3,block['X'].shape)
r4 = expand_to_3d(key2, r4, blocks[j]['X'].shape)
result.extend(((i+1,r1,j+1,r2),(i+1,r3,j+1,r4)))
except Exception as e:
print(e)
        print(i, j, key1, key2, 'one-to-one matching failed for this block/face pair')
raise Exception('stop')
    # drop degenerate (single grid line) matches, then verify the one-to-one results
result = [i for i in result if not_line(i)]
one2one_second(result,blocks_faces,blocks_info,blocks,oblocks)
# print("before ",len(result))
Varify_one2one(blocks,result)
# print("after ",len(result))
return result
def not_line(odata):
    # exclude the case where the matched one2one region degenerates into a single grid line
t = odata[1]
d1 = t[t[-1]-1]
d2 = t[t[-2]-1]
if d1[0] == d1[1] or d2[0] == d2[1]:
return False
else:
return True
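# Build a 0/1 occupancy mask for every boundary face of every block, marking the regions
# already covered by the one-to-one pairs in `result`; find_face() later scans these masks
# for uncovered patches.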
def get_faces_tag(result,blocks_faces_shape,faces_tag=None,iscenter = False):
if faces_tag is None:
faces_tag = [{k:np.zeros(v,dtype='int') for k,v in i.items()} for i in blocks_faces_shape]
IJK = "IJK"
if iscenter:
bias = 1
else:
bias = 0
for b1,t1,b2,t2 in result:
for bid,data in [(b1,t1),(b2,t2)]:
ijk = 6-data[-1]-data[-2]
if data[ijk-1][0] == 1:
t = '0'
else:
t = 'E'
faceid = IJK[ijk-1]+t
(ist,iet),(jst,jet),(kst,ket) = data[:3]
ftag = faces_tag[bid-1][faceid]
if iet<ist:
ist,iet = iet,ist
if jet<jst:
jst,jet = jet,jst
if ket<kst:
kst,ket = ket,kst
if ijk==1:
ftag[jst-1:jet - bias,kst-1:ket - bias] = 1
elif ijk==2:
ftag[ist-1:iet - bias,kst-1:ket - bias] = 1
else:
ftag[ist-1:iet - bias,jst-1:jet - bias] = 1
return faces_tag
def find_face(faces_tag):
tags = ['I0','IE','J0','JE','K0','KE']
result = []
for i,block_faces in enumerate(faces_tag):
for faceid in tags:
face = block_faces[faceid]
if face.any() and (not face.all()):
k = face.argmin()
h0,w0 = face.shape
w = k % w0
h = (k-w)// w0
for j in range(w+1,w0):
if face[h,j] != 0:
w2 = j + 1
break
else:
w2 = w0
for j in range(h+1,h0):
if face[j,w] != 0:
h2 = j + 1
break
else:
h2 = h0
if h>0:
h = h-1
if w>0:
w = w-1
if h2-h == 1 or w2 - w ==1:
continue
result.append((i,faceid,(h,h2,w,w2)))
for i,block_faces in enumerate(faces_tag):
for faceid in tags:
face = block_faces[faceid]
face[face>2] = 0
return result
def one2one_second(result0,blocks_faces,blocks_info,blocks,oblocks):
    # check the one2one result, then try to match the remaining uncovered faces (partial
    # faces) one more time
blocks_faces_shape = [{k:v[0].shape for k,v in i.items()} for i in blocks_faces]
faces_tag = get_faces_tag(result0,blocks_faces_shape)
while True:
result = []
faces = find_face(faces_tag)
bids = list(range(len(blocks_faces)))
tags = ['I0','IE','J0','JE','K0','KE']
for bid,faceid,(h,h2,w,w2) in faces:
x,y,z = blocks_faces[bid][faceid]
face1 = x[h:h2,w:w2],y[h:h2,w:w2],z[h:h2,w:w2]
info1 = blocks_info[bid][faceid]
bids.sort(key = lambda x:(x-bid)**2)
for bid2 in bids:
rr = None
for tag in tags:
if bid==bid2 and faceid==tag:
continue
face2 = blocks_faces[bid2][tag]
info2 = blocks_info[bid2][tag]
rr = face2face(face1,face2,info1,info2,message=(bid+1,bid2+1,faceid,tag,False,False))
if rr is not None:
if len(rr) == 2:
r1,r2 = rr
t1,t2 = r1[:2]
t1 = t1[0]+h,t1[1]+h
t2 = t2[0]+w,t2[1]+w
r1 = t1,t2,r1[2],r1[3]
r1 = expand_to_3d(faceid,r1,blocks[bid]['X'].shape)
r2 = expand_to_3d(tag, r2, blocks[bid2]['X'].shape)
result.append((bid+1,r1,bid2+1,r2))
else:
assert len(rr) == 4
r1,r2,r3,r4 = rr
t1,t2 = r1[:2]
t1 = t1[0]+h,t1[1]+h
t2 = t2[0]+w,t2[1]+w
r1 = t1,t2,r1[2],r1[3]
r1 = expand_to_3d(faceid,r1,blocks[bid]['X'].shape)
r2 = expand_to_3d(tag, r2, blocks[bid2]['X'].shape)
t1,t2 = r3[:2]
t1 = t1[0]+h,t1[1]+h
t2 = t2[0]+w,t2[1]+w
r3 = t1,t2,r3[2],r3[3]
r3 = expand_to_3d(faceid,r3,blocks[bid]['X'].shape)
r4 = expand_to_3d(tag, r4, blocks[bid2]['X'].shape)
result.extend(((bid+1,r1,bid2+1,r2),(bid+1,r3,bid2+1,r4)))
break
if rr is not None:
break
blocks_faces_shape = [{k:v[0].shape for k,v in i.items()} for i in blocks_faces]
faces_tag = get_faces_tag(result,blocks_faces_shape,faces_tag)
if len(result) == 0:
break
else:
temp =[]
for b1,t1,b2,t2 in result:
if b1>b2:
t = b2,t2,b1,t1
else:
t = b1,t1,b2,t2
temp.append(t)
result0.extend(temp)
def get_res(blocks,bid,coeff=0.1,result = {}):
    # return coeff times the minimum grid spacing of block bid; the mutable default dict is
    # deliberately used as a cache across calls
t = result.get(bid,None)
if t is None:
result[bid] = get_min_distance(blocks[bid])
return result[bid]*coeff
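# Re-extract both faces of every matched pair and check that they are point-wise equal,
# translation-parallel or rotation-periodic; pairs failing all three checks are removed
# from `result` and reported.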
def Varify_one2one(blocks,result):
def get_face(block,r,bid):
((i1,i2),(j1,j2),(k1,k2),ind1,ind2) = r
x,y,z = block['X'],block['Y'],block['Z']
istep = 1 if i2>i1 else -1
jstep = 1 if j2>j1 else -1
kstep = 1 if k2>k1 else -1
i1,j1,k1 = i1-1,j1-1,k1-1
inds = ind1 + ind2
if 6 - inds == 1:
h,w = abs(j2-j1-1)+1,abs(k2-k1-1)+1
face = x[i1,j1::jstep,k1::kstep][:h,:w],y[i1,j1::jstep,k1::kstep][:h,:w],z[i1,j1::jstep,k1::kstep][:h,:w]
elif 6 - inds == 2:
h,w = abs(i2-i1-1)+1,abs(k2-k1-1)+1
face = x[i1::istep,j1,k1::kstep][:h,:w],y[i1::istep,j1,k1::kstep][:h,:w],z[i1::istep,j1,k1::kstep][:h,:w]
elif 6 - inds == 3:
h,w = abs(i2-i1-1)+1,abs(j2-j1-1)+1
face = x[i1::istep,j1::jstep,k1][:h,:w],y[i1::istep,j1::jstep,k1][:h,:w],z[i1::istep,j1::jstep,k1][:h,:w]
else:
raise Exception('One2One check error')
if ind1 > ind2:
face = face[0].T,face[1].T,face[2].T
return face
error_one2one = []
for nnt,(bid1,r1,bid2,r2) in enumerate(result):
f1 = get_face(blocks[bid1-1],r1,bid1)
f2 = get_face(blocks[bid2-1],r2,bid2)
res = min(get_res(blocks,bid1-1),get_res(blocks,bid2-1))
t = is_equal_face(f1,f2,res = res / 100000)
if t is False:
t = is_parallel_face(f1,f2)
if t is False:
t = is_rotated_face(f1,f2)
if t is False:
# raise Exception('One2One check error 12')
error_one2one.append(nnt)
    error_one2one.reverse()
    if error_one2one:
        print("please check these one-to-one block data entries:")
    for i in error_one2one:
        print(result[i])
        result.pop(i)
def is_all_one2one(nface,one2one_block,shape):
    # check whether face nface (1~6) of this block is already fully covered by one-to-one
    # blocking data
s1,s2 = 0,0
for iface,onedata in one2one_block:
if iface != nface:
continue
if iface <= 2:
s1 += max(onedata[1]) - min(onedata[1])
s2 += max(onedata[2]) - min(onedata[2])
elif iface <= 4:
s1 += max(onedata[0]) - min(onedata[0])
s2 += max(onedata[2]) - min(onedata[2])
else:
s1 += max(onedata[0]) - min(onedata[0])
s2 += max(onedata[1]) - min(onedata[1])
if nface <= 2:
s1 = shape[1] - s1
s2 = shape[2] - s2
elif nface <= 4:
s1 = shape[0] - s1
s2 = shape[2] - s2
else:
s1 = shape[0] - s1
s2 = shape[1] - s2
if max(s1,s2) > 1:
return False
else:
return True
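# Return (block_index, face_number) for every face referenced by the one-to-one result,
# where face numbers 1..6 correspond to I0, IE, J0, JE, K0, KE; with entire=True the raw
# blocking tuple is returned alongside the face number.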
def get_used_surface(result,entire=False):
def get_nface(t):
axis = 3 - t[3] - t[4] + 2
dim = min(t[axis][0],2)
if entire:
return axis*2+dim,t
else:
return axis*2+dim
rr = [(i[j*2]-1,get_nface(i[j*2+1])) for i in result for j in range(2)]
return rr
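# Format the matched pairs as a "1-1 blocking data" text section (NBLI plus one line per
# pair for each side). The column layout follows the str_format string below; treating it
# as CFL3D-style solver input is an assumption based on the header text.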
def transfer_one2one_str(result):
result.sort(key=lambda x:x[0])
def expand_var(vv):
GRID,((ISTA,IEND),(JSTA,JEND),(KSTA,KEND),ISVA1,ISVA2) = vv
return GRID,ISTA,JSTA,KSTA,IEND,JEND,KEND,ISVA1,ISVA2
bstr = [' 1-1 BLOCKING DATA:']
bstr.append('{:>10s}'.format('NBLI'))
bstr.append('{:>10d}'.format(len(result)))
title = ' NUMBER GRID : ISTA JSTA KSTA IEND JEND KEND ISVA1 ISVA2'
bstr.append(title)
str_format = '{:>9d}{:>7d}{:>11d}'+'{:>7d}'*5+'{:>8d}'*2
bstr += [str_format.format(i+1,*expand_var(v[:2])) for i,v in enumerate(result)]
bstr.append(title)
bstr += [str_format.format(i+1,*expand_var(v[2:])) for i,v in enumerate(result)]
return '\n'.join(bstr)
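# For translation-periodic faces, shift face2 by the average offset of the four corner
# points and then match it against face1 with SmallInBig; with rotated=True the check is
# delegated to get_rotated_periodic_faces instead.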
def get_periodic_faces(face1,face2,rotated = False,res = 1e-3):
    # the ranges returned by this function use Fortran-style indexing, starting from 1
if not rotated:
x1,y1,z1 = face1
x2,y2,z2 = face2
px1,py1,pz1 = 0,0,0
px2,py2,pz2 = 0,0,0
for i,j in [(0,0),(-1,-1),(0,-1),(-1,0)]:
px1+=x1[i,j]
py1+=y1[i,j]
pz1+=z1[i,j]
px2+=x2[i,j]
py2+=y2[i,j]
pz2+=z2[i,j]
x2 =x2 - (px2-px1)/4
y2 =y2 - (py2-py1)/4
z2 =z2 - (pz2-pz1)/4
face2 = x2,y2,z2
a = SmallInBig(face1,face2,None,None,message=(False,False),res = res)
if a:
r1,r2 = a
r1 = ((r1[0][0]+1,r1[0][1]+1),(r1[1][0]+1,r1[1][1]+1),r1[2]+1,r1[3]+1)
r2 = ((r2[0][0]+1,r2[0][1]+1),(r2[1][0]+1,r2[1][1]+1),r2[2]+1,r2[3]+1)
rr = r1,r2
return rr
else:
return None
else:
return get_rotated_periodic_faces(face1,face2)
def get_rotated_periodic_faces(face1,face2,res = 1e-3,is_check=False):
x1,y1,z1 = face1
x2,y2,z2 = face2
idim1,jdim1 = x1.shape
idim2,jdim2 = x2.shape
if idim1 == idim2 and jdim1 == jdim2 :
iTranspose,jTranspose = 1,1
da = (x2*x1 + y2*y1)/(((x2**2+y2**2)*(x1**2+y1**2))**(1/2))
dz = np.abs(z1-z2 )
if da.max() - da.min() < res and dz.max()<res:
return ((1,idim1),(1,jdim1),1,2), ((1,idim2)[::iTranspose],(1,jdim2)[::jTranspose],1,2)
iTranspose = -1
x2, y2, z2 = x2[::-1],y2[::-1],z2[::-1]
da = (x2*x1 + y2*y1)/(((x2**2+y2**2)*(x1**2+y1**2))**(1/2))
dz = np.abs(z1-z2 )
if da.max() - da.min() < res and dz.max()<res:
return ((1,idim1),(1,jdim1),1,2), ((1,idim2)[::iTranspose],(1,jdim2)[::jTranspose],1,2)
iTranspose,jTranspose = 1,-1
x2, y2, z2 = x2[::-1,::-1],y2[::-1,::-1],z2[::-1,::-1]
da = (x2*x1 + y2*y1)/(((x2**2+y2**2)*(x1**2+y1**2))**(1/2))
dz = np.abs(z1-z2 )
if da.max() - da.min() < res and dz.max()<res:
return ((1,idim1),(1,jdim1),1,2), ((1,idim2)[::iTranspose],(1,jdim2)[::jTranspose],1,2)
iTranspose,jTranspose = -1,-1
x2, y2, z2 = x2[::-1],y2[::-1],z2[::-1]
da = (x2*x1 + y2*y1)/(((x2**2+y2**2)*(x1**2+y1**2))**(1/2))
dz = np.abs(z1-z2 )
if da.max() - da.min() < res and dz.max()<res:
return ((1,idim1),(1,jdim1),1,2), ((1,idim2)[::iTranspose],(1,jdim2)[::jTranspose],1,2)
if not is_check:
            print('Warning: these two faces may not be rotated periodic', da.max() - da.min())
return None
else:
return None
def GetFace(block,faceId):
x,y,z = block['X'], block['Y'], block['Z']
if faceId[1] == '0':
dim = 0
else:
dim = -1
if faceId[0].upper() == 'I':
return x[dim],y[dim],z[dim]
elif faceId[0].upper() == 'J':
return x[:,dim],y[:,dim],z[:,dim]
elif faceId[0].upper() == 'K':
return x[:,:,dim],y[:,:,dim],z[:,:,dim]
else:
raise Exception('faceId error',faceId)
def GetFaceAngle(point0,face,pos=None):
    # the rotation axis is assumed to be the z axis
def get_p_list(face,pos):
x,y = face[:2]
p_list = [(0,0),(0,-1),(-1,-1),(-1,0)]
distance = [((i,j),math.sqrt(x[i,j]**2+y[i,j]**2)) for i,j in p_list]
distance.sort(key = lambda k:k[1])
# print(distance,'distahce')
distance = [i[0] for i in distance]
if pos.lower() == 'top':
return distance[-2:]
else:
return distance[:2]
px,py = point0[:2]
x,y = face[:2]
norm0 = math.sqrt(px*px+py*py)
amax,amin = -600,600
if pos is None:
p_list = [(0,0),(0,-1),(-1,-1),(-1,0)]
else:
p_list = get_p_list(face,pos)
for i, j in p_list:
px1,py1 = x[i,j], y[i,j]
fz = px*px1 + py*py1
fm = norm0*math.sqrt(px1*px1+py1*py1)
t = fz/fm
t = 1 if t>1 else t
t = -1 if t<-1 else t
angle = math.acos(t)
if px*py1 - py*px1<0:
angle = -angle
amin = min(amin,angle)
amax = max(amax,angle)
return amax - amin ,amin,amax
def GetGroupFacesAngle(faces,blocks):
face_list = [GetFace(blocks[bid],fid) for bid,fid in faces]
point0 = face_list[0][0][0,0],face_list[0][1][0,0],0
angles = [GetFaceAngle(point0,face,'top')[0] for face in face_list]
return sum(angles)
# def GetLinesAngle(point0,lines):
# #默认旋转轴是z轴
# px,py = point0[:2]
# norm0 = math.sqrt(px**2 + py**2)
# amax,amin = -600,600
# for pxpy in lines:
# for px1,py1 in pxpy:
# fz = px*px1 + py*py1
# fm = norm0*math.sqrt(px1*px1+py1*py1)
# t = fz/fm
# t = 1 if t>1 else t
# t = -1 if t<-1 else t
# angle = math.acos(t)
# if px*py1 - py*px1<0:
# angle = -angle
# amin = min(amin,angle)
# amax = max(amax,angle)
# return amax - amin ,amin,amax
def FindBottomTopLine(surf):
in_face_x, in_face_y = surf
point00 = in_face_x[0,0],in_face_y[0,0]
point01 = in_face_x[0,-1],in_face_y[0,-1]
point11 = in_face_x[-1,-1],in_face_y[-1,-1]
point10 = in_face_x[-1,0],in_face_y[-1,0]
point_list = [point00,point01,point11,point10]
distance = []
for i,(px,py) in enumerate(point_list):
if i == 3:
j = 0
else:
j = i+1
p2x,p2y = point_list[j]
pxm,pym = (px+p2x)/2 , (py+p2y)/2
distance.append((i,j,math.sqrt(pxm**2+pym**2)))
distance.sort(key = lambda x:x[-1])
in_top_line = distance[-1]
in_bottom_line = distance[0]
return in_top_line,in_bottom_line,point_list
# def GetFacesAngle(point0,faces,position = 'Top'):
# bottom_top_lines = [FindBottomTopLine(f[:2]) for f in faces]
# bottom_lines = [(points[ib],points[jb]) for (it,jt,_),(ib,jb,_),points in bottom_top_lines]
# if position == 'Top':
# top_lines = [(points[it],points[jt]) for (it,jt,_),(ib,jb,_),points in bottom_top_lines]
# return GetLinesAngle(point0,top_lines)
# else:
# bottom_lines = [(points[ib],points[jb]) for (it,jt,_),(ib,jb,_),points in bottom_top_lines]
# return GetLinesAngle(point0, bottom_lines)
# def PatchedInterfaceRotatedSingle(blocks,faces1,faces2,periodic1 = 0,periodic2 = 1,position = 'Top'):
# #faces1:[(2,'I0'),(3,'J0')]
# #默认face2是旋转面
# fs1 = [GetFace(blocks[i], faceId) for i,faceId in faces1]
# fs2 = [GetFace(blocks[i], faceId) for i,faceId in faces2]
# fx,fy,_ = fs1[0]
# point0 = fx[0,0], fy[0,0]
# GetFacesAngle(point0,fs1,position)
# angleSpan1, angle1Min, angle1Max = GetFacesAngle(point0,fs1,position)
# angleSpan2, angle2Min, angle2Max = GetFacesAngle(point0,fs2,position)
# # angle2Min = min([i[1] for i in angles2])
# # angle2Max = max([i[2] for i in angles2])
# # angleSpan2 = angle2Max - angle2Min
# # angleSpan1 = 2*np.pi/round(2*np.pi/angleSpan1)
# # angleSpan2 = 2*np.pi/round(2*np.pi/angleSpan2)
# #旋转过后的角度
# if periodic1>0:
# angle1MaxRotated = angle1Max + angleSpan1*periodic1
# angle1MinRotated = angle1Min
# elif periodic1<0:
# angle1MaxRotated = angle1Max
# angle1MinRotated = angle1Min + angleSpan1*periodic1
# else:
# angle1MaxRotated = angle1Max
# angle1MinRotated = angle1Min
# if periodic2>0:
# angle2MaxRotated = angle2Max + angleSpan2*periodic2
# angle2MinRotated = angle2Min
# elif periodic2<0:
# angle2MaxRotated = angle2Max
# angle2MinRotated = angle2Min + angleSpan2*periodic2
# else:
# angle2MaxRotated = angle2Max
# angle2MinRotated = angle2Min
# # if not patchedPeriodic:
# f1tof2Positive = math.ceil((angle2MaxRotated - angle1MinRotated)/angleSpan1)
# f1tof2Negative = -(math.ceil((angle1MaxRotated - angle2MinRotated )/angleSpan1) - 1)
# f2tof1Positive = math.ceil((angle1MaxRotated - angle2MinRotated)/angleSpan2)
# f2tof1Negative = -(math.ceil((angle2MaxRotated - angle1MinRotated )/angleSpan2) - 1)
# return (faces1, angleSpan1, f1tof2Negative, f1tof2Positive), (faces2, angleSpan2, f2tof1Negative, f2tof1Positive)
# # else:
# # assert abs(angleSpan1 - angleSpan2)<1e-3
# # dt = (angle2Max - angle1Max)/angleSpan1
# # return (faces1, angleSpan1, dt, dt), (faces2, angleSpan2, -dt, -dt)
def PatchedInterfaceRotatedPeriodic(blocks,faces1,faces2):
    # rotated periodic interfaces: these faces cannot be assigned one-to-one blocking and must be handled by patched-grid interpolation
fs1 = [GetFace(blocks[i], faceId) for i,faceId in faces1]
fs2 = [GetFace(blocks[i], faceId) for i,faceId in faces2]
fx,fy,_ = fs1[0]
point0 = fx[0,0], fy[0,0]
angels1 = [GetFaceAngle(point0,i) for i in fs1]
angels2 = [GetFaceAngle(point0,i) for i in fs2]
ma1_list = [i[2] for i in angels1]
ma2_list = [i[2] for i in angels2]
ma1,ma2 = max(ma1_list),max(ma2_list)
span = ma2 - ma1
return (faces1,span,1,1), (faces2, span, -1, -1)
# def PatchedInterfaceRotated(blocks,faces1,faces2,periodic1 = 0,periodic2 = 1,patchedPeriodic = False):
# if patchedPeriodic:
# return PatchedInterfaceRotatedPeriodic(blocks, faces1, faces2)
# (f1,a1,n1,p1),(f2,a2,n2,p2) = PatchedInterfaceRotatedSingle(blocks,faces1,faces2,periodic1 ,periodic2 ,position = 'Top')
# (_,a3,n11,p11),(_,a4,n22,p22) = PatchedInterfaceRotatedSingle(blocks,faces1,faces2,periodic1 ,periodic2 ,position = 'Bottom')
# t = (f1,a1,min(n1,n11),max(p1,p11)),(f2,a2,min(n2,n22),max(p2,p22))
# # print(a1,a2,n1,p1,n2,p2)
# # print(a1,a3,n11,p11,n22,p22)
# return t
def MergeBlocks(blocks,b1,b2,one2oneData=None,patchedData=None,boundaryData=None):
#b1,b2 start from 0
if b2 < b1:
b1, b2 = b2, b1
block1 = blocks[b1]
block2 = blocks[b2]
shape1, shape2 = block1['X'].shape, block2['X'].shape
def detectEqualFace(dim,pos):
if dim == 0:
face1 = block1['X'][pos],block1['Y'][pos],block1['Z'][pos]
face20 = block2['X'][0],block2['Y'][0],block2['Z'][0]
face21 = block2['X'][-1],block2['Y'][-1],block2['Z'][-1]
elif dim == 1:
face1 = block1['X'][:,pos],block1['Y'][:,pos],block1['Z'][:,pos]
face20 = block2['X'][:,0],block2['Y'][:,0],block2['Z'][:,0]
face21 = block2['X'][:,-1],block2['Y'][:,-1],block2['Z'][:,-1]
elif dim == 2:
face1 = block1['X'][:,:,pos],block1['Y'][:,:,pos],block1['Z'][:,:,pos]
face20 = block2['X'][:,:,0],block2['Y'][:,:,0],block2['Z'][:,:,0]
face21 = block2['X'][:,:,-1],block2['Y'][:,:,-1],block2['Z'][:,:,-1]
else:
raise Exception('dim Error')
if is_equal_face(face1,face20):
return 0
elif is_equal_face(face1,face21):
return -1
else:
return None
dim1 = None
for i in range(3):
for j in [0,-1]:
t = detectEqualFace(i,j)
if t is not None:
dim1,pos1,dim2,pos2 = i,j,i,t
if dim1 is None:
raise Exception('No detect equal face')
assert pos1 == -1 and pos2 == 0
blocks.pop(b2)
h,w,d = shape2
kk = [0,0,0]
kk[dim2] = 1
h1,w1,d1 = kk
block1['X'] = np.concatenate((block1['X'],block2['X'][h1:h,w1:w,d1:d]),axis = dim2)
block1['Y'] = np.concatenate((block1['Y'],block2['Y'][h1:h,w1:w,d1:d]),axis = dim2)
block1['Z'] = np.concatenate((block1['Z'],block2['Z'][h1:h,w1:w,d1:d]),axis = dim2)
if one2oneData:
def deal_one2one(rr,b1,b2):
bb1,rr1,bb2,rr2 = rr
bb1 -= 1
bb2 -= 1
result = []
length = shape1[dim1]-1
delete_face = False
for b,r in [(bb1,rr1),(bb2,rr2)]:
if b > b2:
b = b - 1
elif b == b2:
b = b1
ise,jse,kse,v1,v2 = r
if dim2 == 0:
if ise[0] == 1 and ise[1] == 1:
delete_face = True
ise = ise[0]+length, ise[1]+length
elif dim2 == 1:
if jse[0] == 1 and jse[1] == 1:
delete_face = True
jse = jse[0]+length, jse[1]+length
elif dim2 == 2:
if kse[0] == 1 and kse[1] == 1:
delete_face = True
kse = kse[0]+length, kse[1]+length
r = ise,jse,kse,v1,v2
result.extend((b+1,r))
if delete_face:
return None
else:
return result
result_one2one = [deal_one2one(i,b1,b2) for i in one2oneData]
result_one2one = [i for i in result_one2one if i]
Varify_one2one(blocks,result_one2one)
else:
result_one2one = None
if patchedData:
t = b1,b2,shape1,shape2,dim1
mfun = mergedPatchedData
patchedData2 = [(mfun(i[0],t),[mfun(ff,t) for ff in i[1]]) for i in patchedData]
else:
patchedData2 = None
if boundaryData:
boundaryData2 = dict()
for key,value in boundaryData.items():
blockid = int(key[:-2])-1
if blockid == b2:
assert value['bctype'] != 2005
continue
elif blockid == b1:
assert value['bctype'] != 2005
continue
elif blockid > b2:
blockid -= 1
key = '{}{}'.format(blockid+1,key[-2:])
boundaryData2[key] = value
else:
boundaryData2 = None
return blocks,result_one2one, patchedData2, boundaryData2
def mergedPatchedData(element_patchedData,mergedData):
(blockId,faceId),xie_eta = element_patchedData[:2]
if mergedData:
if faceId[0].lower() == 'i':
dimf = 0
elif faceId[0].lower() == 'j':
dimf = 1
else:
dimf = 2
else:
return element_patchedData
targetId,mergeId,(h1,w1,d1),(h2,w2,d2),dim1 = mergedData
if blockId > mergeId:
blockId = blockId - 1
elif blockId == mergeId:
blockId = targetId
h,w,d = h1+h2-1, w1+w2 -1, d1+d2-1
        if dim1 == 0:
            h,w,d = h1+h2-1,w1,d1
            w1,d1 = 1,1
        elif dim1 == 1:
            h,w,d = h1,w1+w2-1,d1
            h1,d1 = 1,1
        else:
            h,w,d = h1,w1,d1+d2-1
            h1,w1 = 1,1
        if dim1 != dimf:
            if dimf == 0:
                #xie = j eta = k
                xie_eta = w1,w,d1,d
            elif dimf == 1:
                #xie = k eta = i
                xie_eta = d1,d,h1,h
            elif dimf == 2:
                #xie = j eta = i
                xie_eta = w1,w,h1,h
    elif blockId == targetId:
        blockId = targetId
        if dim1 != dimf:
            if dimf == 0:
                #xie = j eta = k
                xie_eta = 1,w1,1,d1
            elif dimf == 1:
                #xie = k eta = i
                xie_eta = 1,d1,1,h1
            elif dimf == 2:
                #xie = j eta = i
                xie_eta = 1,w1,1,h1
if len(element_patchedData) == 2:
return (blockId, faceId), xie_eta
elif len(element_patchedData) ==3:
return (blockId, faceId), xie_eta,element_patchedData[2]
else:
raise Exception('Error 2342354')
def AdjustNegativeGrid(blocks):
return [{k:v[::-1,:,:] for k,v in b.items()} if CheckBlockVolume(b)<0 else b for b in blocks]
def patchedFace2Faces(blocks,face1,faces,faces_type,rotate_angle='positive',steady=False):
if faces_type.lower() == 'periodic':
_,f2 = PatchedInterfaceRotatedPeriodic(blocks,[face1],faces)
rotate_angle = f2[1]*f2[2]
to_face_result = face1,(0,0,0,0)
from_face_result = [(faces[0],(0,0,0,0),rotate_angle)]
return to_face_result,from_face_result
face1_xyz = GetFace(blocks[face1[0]],face1[1])
point0 = face1_xyz[0][-1,-1],face1_xyz[1][-1,-1],face1_xyz[2][-1,-1]
faces_xyz = [GetFace(blocks[bid],key) for bid,key in faces]
# k = GetFacesAngle(point0,faces_xyz,'Top')
# print(k)
face1_theta_top = GetFaceAngle(point0,face1_xyz,'Top')
face1_theta_bottom = GetFaceAngle(point0,face1_xyz,'bottom')
faces_theta_top = [GetFaceAngle(point0,i,'Top') for i in faces_xyz]
faces_theta_bottom = [GetFaceAngle(point0,i,'bottom') for i in faces_xyz]
theta_min = [i[-2] for i in faces_theta_top]
theta_max = [i[-1] for i in faces_theta_top]
theta_delta = [i[0] for i in faces_theta_top]
    Theta = sum(theta_delta)  # total angular span of all the faces
if abs(max(theta_max) - min(theta_min) - Theta) > 0.00001:
print(max(theta_max) , min(theta_min),Theta)
print(face1,faces)
raise Exception('patched error')
faces_theta_top_extend = [(face_number,j,delta,amin+j*Theta,amax+j*Theta) for face_number,(delta,amin,amax) in enumerate(faces_theta_top) for j in range(-3,4)]
faces_theta_bottom_extend = [(face_number,j,delta,amin+j*Theta,amax+j*Theta) for face_number,(delta,amin,amax) in enumerate(faces_theta_bottom) for j in range(-3,4)]
if rotate_angle.lower() == 'positive':
rotate_angle = Theta
elif rotate_angle.lower() == 'negative':
rotate_angle = -Theta
else:
rotate_angle = float(rotate_angle)
if steady:
rotate_angle = 0
if faces_type.lower() == 'rotor':
rotate_angle *=-1
pd,pmi,pma = face1_theta_top
face1_theta_top_extend = [pd+abs(rotate_angle),min(pmi,pmi+rotate_angle),max(pma,pma+rotate_angle)]
face1_theta_bottom_extend = [
face1_theta_bottom[0]+abs(rotate_angle),
min(face1_theta_bottom[1],face1_theta_bottom[1]+rotate_angle),
max(face1_theta_bottom[2],face1_theta_bottom[2]+rotate_angle)
]
def isPatched(element_faces,element_face1):
_,_,_,mi,ma = element_faces
_,fmi,fma = element_face1
if fmi <= mi <= fma or fmi <= ma <= fma:
return True
elif ma>=fma and mi <= fmi:
return True
else:
return False
f1 = [i for i in faces_theta_top_extend if isPatched(i,face1_theta_top_extend)]
f2 = [i for i in faces_theta_bottom_extend if isPatched(i,face1_theta_bottom_extend)]
from_faces = list({i[:2] for i in f1+f2})
from_faces.sort(key = lambda x:faces_theta_top[x[0]][1]+x[1]*Theta)
to_face_result = face1,(0,0,0,0)
from_faces_result = [(faces[i],(0,0,0,0),j*Theta) for i,j in from_faces]
# for i in from_faces_result:
# print(i)
# exit()
return to_face_result,from_faces_result
def translatePatcheDataToString(blocks,pdata,ITOSS=0,ITMAX=0):
if ITMAX is None:
ITMAX = 0
def getITMAX_ITOSS(faces,itmax=0):
        if itmax != 0:
return itmax,0
blockId, faceId = faces[0]
i,j,k = blocks[blockId]['X'].shape
key = faceId[0].lower()
if key == 'i':
t = max(j,k)
elif key == 'j':
t = max(i,k)
else:
t = max(i,j)
if t>itmax:
itmax = t
return t,0
title = ' DYNAMIC PATCH INPUT DATA\n NINTER\n{:>10d}\n INT IFIT LIMIT ITMAX MCXIE MCETA C-0 IORPH ITOSS\n'
title_block = [getITMAX_ITOSS(to_face,ITMAX) for to_face,_ in pdata]
NINTER = len(pdata)
result = title.format(NINTER)
fstr1 = '{:>6d}'*2+'{:>9d}'*7+'\n'
for i,(itmax,itoss) in enumerate(title_block):
result += fstr1.format(i+1,1,1,itmax,0,0,0,0,itoss)
fstr2 = ' INT TO XIE1 XIE2 ETA1 ETA2 NFB\n'
fstr2 += '{:>6d}'*2+'{:>9d}'*5+'\n'
translate_table = str.maketrans('ijkIJK0eE','123123122')
fromStr1 = ' FROM XIE1 XIE2 ETA1 ETA2 FACTJ FACTK\n'
fromStr1 += '{:>12d}'+'{:>9d}'*4+'{:>9.4f}{:9.4f}\n'
dxStr = ' DX DY DZ DTHETX DTHETY DTHETZ\n'
dxStr += '{:>12.4f}'+'{:>9.4f}'*5+'\n'
fromStr = fromStr1+dxStr
strings_result = [result]
for i,(((toid,tokey),(xie1,xie2,eta1,eta2)),from_faces) in enumerate(pdata):
TO = '{}{}'.format(toid+1,tokey.translate(translate_table))
if xie1 != 0:
print((xie1,xie2,eta1,eta2),toid,tokey,'warning')
strings_result.append(fstr2.format(i+1,int(TO),xie1,xie2,eta1,eta2,len(from_faces)))
for (fromId,faceId),(xie1,xie2,eta1,eta2),delta_theta in from_faces:
FROM = '{}{}'.format(fromId+1,faceId.translate(translate_table))
            delta_theta = math.degrees(delta_theta)
t = fromStr.format(int(FROM),xie1,xie2,eta1,eta2,0,0,0,0,0,0,0,delta_theta)
strings_result.append(t)
return ''.join(strings_result)
def reverse_block(blocks,bid):
    # reverse the i and k axes of the block
t = blocks[bid]
t = {k:v[::-1,:,::-1].copy() for k,v in t.items()}
blocks[bid]=t
def read_plot3d_unfmt(filename):
float_type = {4:'float32',8:'float64'}
int_type = {4:'int32',8:"int64"}
if isinstance(filename,str) or isinstance(filename,Path):
fp = open(filename,'rb')
else:
fp = filename
filename = fp.name
multiblock = np.frombuffer(fp.read(4), dtype = 'int32')[0]
if multiblock==4:
n_blocks = np.frombuffer(fp.read(8), dtype = 'int32')[0]
else:
n_blocks = 1
fp.seek(0,0)
k = np.frombuffer(fp.read(4), dtype= 'int32' )[0]
blocks = np.frombuffer(fp.read(k), dtype = 'int32').reshape(n_blocks,-1)
fp.read(4)
dimension = (k // 4) // n_blocks
result = []
precision=None
for shape in blocks:
k = np.frombuffer(fp.read(4), dtype= 'int32' )[0]
if dimension==3:
imax,jmax,kmax = shape
size = imax*jmax*kmax
else:
imax,jmax = shape
size = imax*jmax
if precision is None:
precision = k //(size) //dimension
if precision ==4 or precision == 8:
IBLANK = False
np_dim = dimension
else:
np_dim = dimension + 1
precision = k //(size) //np_dim
IBLANK = True
        bl_data = np.frombuffer(fp.read(k), dtype=float_type[precision])
fp.read(4)
if dimension == 3:
bl_data.shape = (np_dim,kmax,jmax,imax)
bl_data = bl_data.transpose((0,3,2,1))
t = dict(zip('XYZ',bl_data))
else:
bl_data.shape = (np_dim,jmax,imax)
bl_data = bl_data.swapaxes(2,1)
t = dict(zip('XY',bl_data))
if IBLANK:
shape0 = bl_data[-1].shape
t['IBLANK'] = np.frombuffer(bl_data[-1].copy().data,dtype='int32')
t['IBLANK'].shape = shape0
result.append(t)
return result
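# Illustrative usage sketch (added for clarity, not part of the original script); the file
# name 'grid.xyz' is only an assumed placeholder for a multi-block unformatted plot3d grid.
def _example_read_plot3d(filename='grid.xyz'):
    # read the grid and report each block's dimensions
    blocks = read_plot3d_unfmt(filename)
    for n, block in enumerate(blocks):
        print('block', n + 1, 'shape', block['X'].shape)
    return blocks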
def get_min_distance(block):
    # return the smallest grid edge length in the block
x,y,z = block['X'],block['Y'],block['Z']
imin = np.sqrt((x[:-1,:,:] - x[1:,:,:])**2 + (y[:-1,:,:] - y[1:,:,:])**2 + (z[:-1,:,:] - z[1:,:,:])**2).min()
jmin = np.sqrt((x[:,:-1,:] - x[:,1:,:])**2 + (y[:,:-1,:] - y[:,1:,:])**2 + (z[:,:-1,:] - z[:,1:,:])**2).min()
kmin = np.sqrt((x[:,:,:-1] - x[:,:,1:])**2 + (y[:,:,:-1] - y[:,:,1:])**2 + (z[:,:,:-1] - z[:,:,1:])**2).min()
return min((imin,jmin,kmin))
def CheckBlockVolume(block):
x,y,z = block['X'], block['Y'], block['Z']
t = [(0,0,0),(1,0,0),(1,1,0),(0,1,0),
(0,0,1),(1,0,1),(1,1,1),(0,1,1)]
p = [(x[i],y[i],z[i]) for i in t]
volume = hexahedronArea(*p)
return volume
def tetrahedronArea(p1,p2,p3,p4):
px,py,pz = p4
v1x,v1y,v1z = p1[0] - px, p1[1] - py, p1[2] - pz
v2x,v2y,v2z = p2[0] - px, p2[1] - py, p2[2] - pz
v3x,v3y,v3z = p3[0] - px, p3[1] - py, p3[2] - pz
tx = v1y*v2z - v2y*v1z
ty = v1z*v2x - v2z*v1x
tz = v1x*v2y - v1y*v2x
volume = tx*v3x + ty*v3y + tz*v3z
return volume/6
def hexahedronArea(p1,p2,p3,p4,p5,p6,p7,p8):
v = tetrahedronArea(p2,p4,p5,p1)
v += tetrahedronArea(p7,p2,p5,p6)
v += tetrahedronArea(p4,p7,p5,p8)
v += tetrahedronArea(p4,p2,p7,p3)
v += tetrahedronArea(p7,p5,p2,p4)
return v
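# Quick sanity check (an added sketch, not original code): hexahedronArea decomposes the cell
# into five tetrahedra, so a unit cube listed in the same corner order used by
# CheckBlockVolume must give a volume of exactly 1.0.
def _check_unit_cube_volume():
    corners = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
               (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)]
    volume = hexahedronArea(*corners)
    assert abs(volume - 1.0) < 1e-12
    return volume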
def reverse_line1(ldata):
bid1,l1,bid2,l2 = ldata
exchange = False
if l1[-2] == 1 or l1[-2] == 2:
if l1[-1] - l1[-2] != 1:
exchange =True
if l1[-2] == 3:
if l1[-1] != 1:
exchange = True
if exchange:
l1 = list(l1)
l2 = list(l2)
l1[-2],l1[-1] = l1[-1],l1[-2]
l2[-2],l2[-1] = l2[-1],l2[-2]
ldata = bid1,l1,bid2,l2
return ldata
@click.command()
@click.argument('plot3d_grid')
def main(plot3d_grid):
t = read_plot3d_unfmt(plot3d_grid)
k = one2one(t)
q = transfer_one2one_str(k)
print(q)
open('1-1_blocking.txt','w').write(q)
    print('Data written to: 1-1_blocking.txt')
if __name__=='__main__':
main() | yxs-one2one | /yxs_one2one-1.1-py3-none-any.whl/yxs_one2one.py | yxs_one2one.py |
import ctypes
import numpy as np
import sys
from pathlib import Path
import os
__version__='1.0.1'
def get_dll():
p = Path.home() / '.yxspkg'/'pytecio'
if not p.is_dir():
os.makedirs(p)
if sys.platform.startswith('win'):
dll_path = p / 'tecio.dll'
url = 'https://raw.githubusercontent.com/blacksong/pytecio/master/2017r3_tecio.dll'
elif sys.platform.startswith('linux'):
dll_path = p / 'tecio.so'
url = 'https://raw.githubusercontent.com/blacksong/pytecio/master/2017r2_tecio.so'
if not dll_path.is_file():
from urllib import request
print('Downloading dll from github:',url)
request.urlretrieve(url,dll_path)
return ctypes.cdll.LoadLibrary(str(dll_path))
GLOBAL_DLL = get_dll()
class zone_data(dict):
def __init__(self,parent,zone_n):
super().__init__()
self.parent = parent
self.zone_n = zone_n
self.update({i:None for i in parent.nameVars})
def __getitem__(self,key):
if isinstance(key, int):
key = self.parent.nameVars[key]
t = super().__getitem__(key)
if t is None:
var_n = self.parent.nameVars_dict[key] + 1
t = self.parent._read_zone_var(self.zone_n, var_n)
self[key] = t
return t
else:
return t
def __setitem__(self,key,value):
if isinstance(key, int):
key = self.parent.nameVars[key]
if key not in self.parent.nameVars:
self.parent._add_variable(self.zone_n,key,value)
super().__setitem__(key,value)
def __getattr__(self,attr):
if attr == 'Elements':
self.Elements = self.parent._retrieve_zone_node_map(self.zone_n)
return self.Elements
else:
raise Exception('no attribute {}'.format(attr))
#zone_n:the number of zones, start from 1 to end, var_n is the same
# by testing, double-precision data corresponds to FieldDataType_Double == 2
FieldDataType_Double = 2
FieldDataType_Float = 1
FieldDataType_Int32 = 3 # -100:not defined
FieldDataType_Int16 = -100
FieldDataType_Byte = -100
Structed_Grid = 0
class SzpltData(dict):
def __init__(self,filename,isread=False):
super().__init__()
if not isinstance(filename,str):
self.GenerateDataFromOtherFormat(filename)
return
self.dll = GLOBAL_DLL
self.filename = filename
self.added_new_zone = False
self.filehandle = self._get_filehandle()
self.title = self._tecDataSetGetTitle()
self.numVars = self._tecDataSetGetNumVars()
self.nameVars = self._tecVarGetName()
self.fileType = self._tecFileGetType()
self.numZones = self._tecDataSetGetNumZones()
self.nameZones = self._tecZoneGetTitle()
self.nameZones_dict = {k:i for i,k in enumerate(self.nameZones)}
self.nameVars_dict = {k:i for i,k in enumerate(self.nameVars)}
def cal_zone(i,zone_name):
d = dict()
d['varTypes'] = [self._tecZoneVarGetType(i+1,j+1) for j in range(self.numVars)]
d['passiveVarList'] = [self._tecZoneVarIsPassive(i+1,j+1) for j in range(self.numVars)]
d['shareVarFromZone'] = [self._tecZoneVarGetSharedZone(i+1,j+1) for j in range(self.numVars)]
# valueLocation: value 1 represent the data is saved on nodes, value 0 means on elements center
d['valueLocation'] = [self._tecZoneVarGetValueLocation(i+1,j+1) for j in range(self.numVars)]
d['IJK'] = self._tecZoneGetIJK(i+1)
d['zoneType'] = self._tecZoneGetType(i+1)
d['solutionTime'] = self._tecZoneGetSolutionTime(i+1)
d['strandID'] = self._tecZoneGetStrandID(i+1)
d['shareConnectivityFromZone'] = self._tecZoneConnectivityGetSharedZone(i+1)
d['faceNeighborMode'] = self._tecZoneFaceNbrGetMode(i+1)
d['numFaceConnections'] = self._tecZoneFaceNbrGetNumConnections(i+1)
if d['numFaceConnections'] > 0:
d['faceConnections'] = self._tecZoneFaceNbrGetConnections(i+1)
d['parentZone'] = self._tecZoneGetParentZone(i+1)
d['name'] = zone_name
d['aux'] = self._retrieve_aux_data(i+1)
return d
self.zone_info = [cal_zone(i,zone_name) for i,zone_name in enumerate(self.nameZones)]
self.update({name:zone_data(self,i+1) for i,name in enumerate(self.nameZones)})
# self._retrieve_zone_node_map(1)
# self._retrieve_aux_data(1)
if isread:
[zone[var_name] for zone in self.values() for var_name in self.nameVars]
def __getitem__(self,key):
if isinstance(key, int):
key = self.nameZones[key]
return super().__getitem__(key)
def __setitem__(self,key,value):
self.added_new_zone = True
return super().__setitem__(key,value)
def _read_zone_var(self,zone_n,var_n):
info = self.zone_info[zone_n - 1]
numValues = self._tecZoneVarGetNumValues(zone_n, var_n)
        if info['passiveVarList'][var_n - 1] == 0:
            fieldDataType = info['varTypes'][var_n-1]
            if fieldDataType == FieldDataType_Float:
                d = self._get_data_all_type(zone_n, var_n, numValues, ctypes.c_float, self.dll.tecZoneVarGetFloatValues)
            elif fieldDataType == FieldDataType_Double:
                d = self._get_data_all_type(zone_n, var_n, numValues, ctypes.c_double, self.dll.tecZoneVarGetDoubleValues)
            elif fieldDataType == FieldDataType_Int32:
                d = self._get_data_all_type(zone_n, var_n, numValues, ctypes.c_int, self.dll.tecZoneVarGetInt32Values)
            elif fieldDataType == FieldDataType_Int16:
                d = self._get_data_all_type(zone_n, var_n, numValues, ctypes.c_int, self.dll.tecZoneVarGetInt16Values)
            elif fieldDataType == FieldDataType_Byte:
                d = self._get_data_all_type(zone_n, var_n, numValues, ctypes.c_int, self.dll.tecZoneVarGetByteValues)
            else:
                raise Exception('FieldDataType Error:not defined data type')
            d = np.array(d)
            if info['zoneType'] == Structed_Grid:
                # structured grid
Imax,Jmax,Kmax = info['IJK']
if d.size != Imax*Jmax*Kmax:
Imax =max(Imax - 1,1)
Jmax =max(Jmax - 1,1)
Kmax =max(Kmax - 1,1)
d = d.reshape((Kmax,Jmax,Imax)).transpose((2,1,0))
return d
else:
return np.array([])
def _get_data_all_type(self, zone_n, var_n, numValues, c_type, fun):
t = (c_type*numValues)()
fun(self.filehandle, zone_n, var_n, 1, numValues, t)
return t
def _get_filehandle(self):
'''get the filehandle'''
p = ctypes.c_int(13)
p1 = ctypes.pointer(p)
filehandle = ctypes.pointer(p1)
name = ctypes.c_char_p(self.filename.encode())
self.dll.tecFileReaderOpen(name,filehandle)
return filehandle[0]
def _tecDataSetGetTitle(self):
'''get the title of data set'''
s = ctypes.c_char_p()
ll = ctypes.pointer(s)
self.dll.tecDataSetGetTitle(self.filehandle,ll)
t = ll[0].decode()
return t
def _tecDataSetGetNumVars(self):
t = ctypes.c_int(0)
p = ctypes.pointer(t)
self.dll.tecDataSetGetNumVars(self.filehandle,p)
return p[0]
def _tecVarGetName(self):
def get_name(i):
s = ctypes.c_char_p()
ll = ctypes.pointer(s)
self.dll.tecVarGetName(self.filehandle,i,ll)
return ll[0].decode()
name_list = [get_name(i) for i in range(1,self.numVars+1)]
return name_list
def _tecFileGetType(self):
        '''get the file type (how the data is stored); used later when writing files'''
s = ctypes.c_int(-100)
ll = ctypes.pointer(s)
self.dll.tecFileGetType(self.filehandle,ll)
t = ll[0]
return t
def _tecDataSetGetNumZones(self):
        '''get the total number of zones contained in the dataset'''
t = ctypes.c_int(0)
p = ctypes.pointer(t)
self.dll.tecDataSetGetNumZones(self.filehandle,p)
return p[0]
def _tecZoneGetTitle(self):
        '''get the title of each zone'''
def get_name(i):
s = ctypes.c_char_p()
ll = ctypes.pointer(s)
self.dll.tecZoneGetTitle(self.filehandle,i,ll)
return ll[0].decode()
name_list = [get_name(i) for i in range(1,self.numZones+1)]
return name_list
def _tecZoneVarGetType(self,zone_n,var_n):
        '''get the storage type of the variable data: double (64-bit) or single (32-bit) precision'''
p = self._return_2_int(zone_n,var_n,self.dll.tecZoneVarGetType)
#if p is FieldDataType_Double, it is double format
return p
def _tecZoneVarGetSharedZone(self,zone_n,var_n):
''' '''
return self._return_2_int(zone_n,var_n,self.dll.tecZoneVarGetSharedZone)
def _tecZoneVarGetValueLocation(self,zone_n,var_n):
''' '''
return self._return_2_int(zone_n,var_n,self.dll.tecZoneVarGetValueLocation)
def _tecZoneVarIsPassive(self,zone_n,var_n):
''' '''
return self._return_2_int(zone_n, var_n, self.dll.tecZoneVarIsPassive)
def _return_1_int(self,n,fun):
        '''call fun(filehandle, int, &int) and return the result'''
p = ctypes.pointer(ctypes.c_int(0))
fun(self.filehandle,n,p)
return p[0]
def _add_variable(self,zone_n,var_name,value):
''' add a new variable to all zones'''
info = self.zone_info[zone_n -1]
self.nameVars.append(var_name)
self.nameVars_dict[var_name] = len(self.nameVars) - 1
info['varTypes'].append(info['varTypes'][-1])
info['shareVarFromZone'].append(0)
I,J,K = info['IJK']
        if info['zoneType'] == Structed_Grid:  # structured IJK grid
if value.size == I*J*K:
valueLocation = 1
else:
valueLocation = 0
else:
if value.size == I:
valueLocation = 1
else:
valueLocation = 0
info['valueLocation'].append(valueLocation)
info['passiveVarList'].append(0)
for zone_p, item in enumerate(self.zone_info):
if zone_n == zone_p+1:
continue
else:
item['varTypes'].append(item['varTypes'][-1])
item['shareVarFromZone'].append(0)
item['valueLocation'].append(valueLocation)
item['passiveVarList'].append(1)
for zone_data_ in self.values():
zone_data_[var_name] = None
def _return_2_int(self,zone_n,var_n,fun):
        '''call fun(filehandle, int, int, &int) and return the result'''
p = ctypes.pointer(ctypes.c_int(0))
fun(self.filehandle,zone_n,var_n,p)
return p[0]
def _return_n_array(self,fun,c_type, numValues,*d):
        '''take integer arguments, fill a c_type array of length numValues and convert it to an ndarray'''
t = (c_type*numValues)()
fun(self.filehandle, *d, t)
return np.array(t)
def _tecZoneGetType(self,zone_n):
        '''get the type of the zone'''
        t = self._return_1_int(zone_n,self.dll.tecZoneGetType)
        if t == 6 or t == 7:
raise Exception('Unsupported zone type')
return t
def _tecZoneGetIJK(self,zone_n):
        '''get the I, J, K dimensions of the zone'''
iMax = ctypes.pointer(ctypes.c_int(0))
jMax = ctypes.pointer(ctypes.c_int(0))
kMax = ctypes.pointer(ctypes.c_int(0))
self.dll.tecZoneGetIJK(self.filehandle,zone_n,iMax,jMax,kMax)
t = iMax[0], jMax[0], kMax[0]
return t
def _tecZoneConnectivityGetSharedZone(self,zone_n):
shareConnectivityFromZone = self._return_1_int(zone_n,self.dll.tecZoneConnectivityGetSharedZone)
return shareConnectivityFromZone
def _tecZoneFaceNbrGetMode(self,zone_n):
faceNeighborMode = self._return_1_int(zone_n,self.dll.tecZoneFaceNbrGetMode)
return faceNeighborMode
def _tecZoneFaceNbrGetNumConnections(self,zone_n):
numFaceConnections = self._return_1_int(zone_n,self.dll.tecZoneFaceNbrGetNumConnections)
return numFaceConnections
def _tecZoneFaceNbrGetConnections(self,zone_n):
numFaceValues = self._return_1_int(zone_n,self.dll.tecZoneFaceNbrGetNumValues)
are64Bit = self._return_1_int(zone_n,self.dll.tecZoneFaceNbrsAre64Bit)
if are64Bit:
faceConnections = self._return_n_array(self.dll.tecZoneFaceNbrGetConnections64,
ctypes.c_long,numFaceValues,zone_n)
else:
faceConnections = self._return_n_array(self.dll.tecZoneFaceNbrGetConnections,
ctypes.c_int,numFaceValues,zone_n)
return faceConnections
def _tecZoneGetSolutionTime(self,zone_n):
d = ctypes.c_double(0.0)
p = ctypes.pointer(d)
self.dll.tecZoneGetSolutionTime(self.filehandle,zone_n,p)
solutionTime = p[0]
return solutionTime
def _tecZoneGetStrandID(self,zone_n):
StrandID = self._return_1_int(zone_n,self.dll.tecZoneGetStrandID)
return StrandID
def _tecZoneGetParentZone(self,zone_n):
parentZone = self._return_1_int(zone_n,self.dll.tecZoneGetParentZone)
return parentZone
def _tecZoneVarGetNumValues(self,zone_n,var_n):
numValues = self._return_2_int(zone_n,var_n,self.dll.tecZoneVarGetNumValues)
return numValues
def _tecZoneFaceNbrGetNumValues(self,zone_n):
k = self._return_1_int(zone_n,self.dll.tecZoneFaceNbrGetNumValues)
return k
def _retrieve_zone_node_map(self,zone_n):
info = self.zone_info[zone_n-1]
        if info['zoneType'] != Structed_Grid and info['shareConnectivityFromZone'] == 0:
jMax = info['IJK'][1]
numValues = self._tecZoneNodeMapGetNumValues(zone_n,jMax)
is64Bit = self._tecZoneNodeMapIs64Bit(zone_n)
            if is64Bit != 0:
                # 64-bit node map
nodeMap = self._return_n_array(self.dll.tecZoneNodeMapGet64, ctypes.c_long, numValues, zone_n,1,jMax)
else:
nodeMap = self._return_n_array(self.dll.tecZoneNodeMapGet, ctypes.c_int, numValues, zone_n,1,jMax)
return nodeMap.reshape((jMax,-1))
def _retrieve_aux_data(self,zone_n):
numItems = self._tecZoneAuxDataGetNumItems(zone_n)
if numItems!=0:
aux_data = dict()
for whichItem in range(1,numItems+1):
name = ctypes.c_char_p()
value = ctypes.c_char_p()
name_p = ctypes.pointer(name)
value_p = ctypes.pointer(value)
self.dll.tecZoneAuxDataGetItem(self.filehandle,zone_n,whichItem,name_p,value_p)
name = name_p[0].decode()
value = value_p[0].decode()
aux_data[name]=value
return aux_data
else:
return None
def _tecZoneAuxDataGetNumItems(self,zone_n):
return self._return_1_int(zone_n,self.dll.tecZoneAuxDataGetNumItems)
def _retrieve_custom_label_sets(self,zone_n):
pass
def _tecCustomLabelsGetNumSets(self,zone_n):
return self._return_1_int(zone_n,self.dll.tecCustomLabelsGetNumSets)
def _tecZoneNodeMapGetNumValues(self,zone_n,jmax):
return self._return_2_int(zone_n,jmax,self.dll.tecZoneNodeMapGetNumValues)
def _tecZoneNodeMapIs64Bit(self, zone_n):
return self._return_1_int(zone_n,self.dll.tecZoneNodeMapIs64Bit)
def close(self):
self.dll.tecFileReaderClose(ctypes.pointer(self.filehandle))
def write(self,filename,verbose = True):
k = write_tecio(filename,self,verbose=verbose)
k.close()
def judge_valuelocation_passive(self,zone_name,var_name,name0):
I,J,K = self[zone_name][name0].shape
value = self[zone_name][var_name]
# print(zone_name,var_name,value is None)
if value is None:
return var_name, 1, 1, 'float32'
if self.Unstructed:
if value.size == I:
valueLocation = 1
else:
valueLocation = 0
else:
#Structed_grid
if value.size == I*J*K:
valueLocation = 1
else:
valueLocation = 0
return var_name, valueLocation, 0, str(value.dtype)
def sort_nameVars(self):
def fun_key(name):
if name.find('Coordinate') != -1:
return ord(name[-1])
if name.lower() in 'xyz':
return 256 + ord(name)
return sum([ord(i) for i in name]) + 500
self.nameVars.sort(key = fun_key)
def judge_unstructed(self,dataset):
self.Unstructed = False
for i in dataset.values():
for j in i.values():
shape = j.shape
if j.ndim>1:
if shape[1]*shape[2] > 1:
self.Unstructed = False
return
else:
self.Unstructed = True
return
def GenerateDataFromOtherFormat(self,dataset):
        # convert data from other formats into SzpltData
if isinstance(dataset,SzpltData):
            # rebinding ``self`` would have no effect; copy the zones and metadata of the existing instance instead
            self.update(dataset)
            self.__dict__.update(dataset.__dict__)
            return
elif isinstance(dataset,list) or isinstance(dataset,tuple):
dataset = {str(i+1):v for i,v in enumerate(dataset)}
aux_data = []
for v in dataset.values():
for j in v.keys():
if not isinstance(v[j],np.ndarray):
aux_data.append(j)
break
dataset = {i:{j:vd for j,vd in v.items() if j not in aux_data} for i,v in dataset.items()}
self.judge_unstructed(dataset)
self.update(dataset)
self.nameZones = list(self.keys())
name0 = list(self[self.nameZones[0]].keys())[0]
loc_pass = [self.judge_valuelocation_passive(zone,vname,name0) for zone in self.keys() for vname in self[zone].keys()]
loc_pass = set(loc_pass)
loc_pass_name = set([i[:3] for i in loc_pass])
self.nameVars = [i[0] for i in loc_pass_name]
assert len(set(self.nameVars)) == len(loc_pass_name)
nameVars_ = list(self[self.nameZones[0]].keys())
for i in self.nameVars:
if i not in nameVars_:
nameVars_.append(i)
self.nameVars = nameVars_
self.sort_nameVars()
empty = np.array([])
for zone_name_,zone in self.items():
I,J,K = zone[name0].shape
for var_name,location,passive,dtype in loc_pass:
if var_name not in zone:
                    if passive == 0:
if not self.Unstructed:
if location == 1:
t = np.zeros((I,J,K),dtype=dtype)
else:
t = np.zeros((I-1,J-1,K-1),dtype=dtype)
else:
if location == 1:
t = np.zeros((I,J,K),dtype=dtype)
else:
print(zone_name_,var_name)
raise Exception("Unstructed grid center value")
else:
t = empty
zone[var_name] = t
self.title = 'Pytecio data'
def cal_zone_info(name_zone,value_location):
d = dict()
zone_value = self[name_zone]
empty = np.array([])
shape = self[name_zone][self.nameVars[0]].shape
zoneType = Structed_Grid
if len(shape) == 1:
shape = shape[0],1,1
zoneType = 1
elif len(shape)==2:
shape = 1,shape[0],shape[1]
d['varTypes'] = [self.get_varTypes(name_zone,j) for j in self.nameVars]
d['passiveVarList'] = [0 if zone_value.get(i,empty).size>0 else 1 for i in self.nameVars]
d['shareVarFromZone'] = [0] * len(self.nameVars)
# valueLocation: value 1 represent the data is saved on nodes, value 0 means on elements center
d['valueLocation'] = value_location
d['IJK'] = shape
d['zoneType'] = zoneType
d['solutionTime'] = .0
d['strandID'] = 0
d['shareConnectivityFromZone'] = 0
d['faceNeighborMode'] = 0
d['numFaceConnections'] = 0
d['parentZone'] = 0
d['name'] = name_zone
return d
temp_zone = self[self.nameZones[0]]
value_location = [sum(temp_zone[key].shape) for key in self.nameVars]
max_location = max(value_location)
value_location = [0 if i<max_location else 1 for i in value_location]
self.zone_info = [cal_zone_info(i,value_location) for i in self.nameZones]
self.fileType = 0
self.added_new_zone = False
def get_varTypes(self,name_zone,name_var):
varTypes={'int32':3,'float64':2,'float32':1}
d = self[name_zone][name_var]
dtype = str(d.dtype)
if dtype == 'int64':
d = d.astype('int32')
self[name_zone][name_var] = d
dtype = 'int32'
return varTypes[dtype]
class write_tecio:
fileFormat = 0 #.szplt
def __init__(self,filename,dataset=None ,verbose = True):
        '''
        dataset can be any two-level mapping like d[key_zone][key_var]; if it is not already
        a SzpltData instance, only structured grid data is currently supported.
        '''
self.filename = filename
self.verbose = verbose
if hasattr(dataset,'added_new_zone') and dataset.added_new_zone:
dataset = {k:{k2:dataset[k][k2] for k2 in dataset[k].keys()} for k in dataset.keys()}
if not isinstance(dataset,SzpltData):
dataset = SzpltData(dataset)
self.dataset = dataset
self.dll = GLOBAL_DLL
self.filehandle = self._get_filehandle()
empty = np.array([])
for i,zone_name in enumerate(dataset.nameZones):
info = dataset.zone_info[i]
I,J,K = info['IJK']
zone_set = dataset[zone_name]
varTypes = self._list_to_int_array(info['varTypes'])
            # workaround for a bug: a zone sharing variables from itself fails at creation time, so variable sharing is forced off when writing
shareVarFromZone = self._list_to_int_array(info['shareVarFromZone'])
valueLocation = self._list_to_int_array(info['valueLocation'])
info['passiveVarList'] = [0 if zone_set.get(i,empty).size>0 else 1 for i in dataset.nameVars]
passiveVarList = self._list_to_int_array(info['passiveVarList'])
if info['zoneType'] == Structed_Grid:
outputZone = self._tecZoneCreateIJK(zone_name,I,J,K,varTypes, shareVarFromZone,
valueLocation, passiveVarList, info['shareConnectivityFromZone'], info['numFaceConnections'], info['faceNeighborMode'])
else:
outputZone = self._tecZoneCreateFE(zone_name, info['zoneType'], I, J, varTypes, shareVarFromZone,
valueLocation, passiveVarList, info['shareConnectivityFromZone'], info['numFaceConnections'], info['faceNeighborMode'])
self._tecZoneSetUnsteadyOptions(outputZone, info['solutionTime'], info['strandID'])
if info['parentZone'] != 0:
self._tecZoneSetParentZone(outputZone,info['parentZone'])
if info['numFaceConnections'] > 0:
faceConnections = info['faceConnections']
if isinstance(faceConnections,list) or isinstance(faceConnections,tuple):
faceConnections = np.array(faceConnections,dtype='int64')
print(faceConnections)
if faceConnections.itemsize == 8:
self._write_data_all_type(self.dll.tecZoneFaceNbrWriteConnections64,
faceConnections.ctypes,outputZone)
else:
self._write_data_all_type(self.dll.tecZoneFaceNbrWriteConnections32,
faceConnections.ctypes,outputZone)
if info.get('aux') is not None:
for key,value in info['aux'].items():
key_p = ctypes.c_char_p(key.encode())
value_p = ctypes.c_char_p(value.encode())
self.dll.tecZoneAddAuxData(self.filehandle,outputZone,key_p,value_p)
for j,var_name in enumerate(dataset.nameVars):
var_n = j+1
data=zone_set[var_name].copy(order='C')
                if info['zoneType'] == Structed_Grid:
if data.ndim == 2:
shape = data.shape
data.shape = 1,shape[0],shape[1]
if data.size > 0:
data = data.transpose((2,1,0)).copy()
ff = [min(i,j) for j in info['shareVarFromZone']]
                if info['passiveVarList'][var_n - 1] == 0 and ff[var_n -1] == 0:
                    fieldDataType = info['varTypes'][var_n-1]
                    if fieldDataType == FieldDataType_Float:
                        self._write_data_all_type(self.dll.tecZoneVarWriteFloatValues, data.ctypes, outputZone, var_n, 0, data.size)
                    elif fieldDataType == FieldDataType_Double:
                        self._write_data_all_type(self.dll.tecZoneVarWriteDoubleValues, data.ctypes, outputZone, var_n, 0, data.size)
                    elif fieldDataType == FieldDataType_Int32:
                        self._write_data_all_type(self.dll.tecZoneVarWriteInt32Values, data.ctypes, outputZone, var_n, 0, data.size)
                    elif fieldDataType == FieldDataType_Int16:
                        self._write_data_all_type(self.dll.tecZoneVarWriteInt16Values, data.ctypes, outputZone, var_n, 0, data.size)
                    elif fieldDataType == FieldDataType_Byte:
                        self._write_data_all_type(self.dll.tecZoneVarWriteByteValues, data.ctypes, outputZone, var_n, 0, data.size)
                    else:
                        raise Exception('FieldDataType Error: not defined data type {}'.format(fieldDataType))
self._write_zone_node_map(outputZone, info, zone_set)
def _write_zone_node_map(self,zone_n,info, zone_set):
# info = self.dataset.zone_info[self.dataset.nameZones[zone_n-1]]
        if info['zoneType'] != Structed_Grid and info['shareConnectivityFromZone'] == 0:
Elements = zone_set.Elements
numValues = Elements.size
            if Elements.itemsize == 8:
                # 64-bit node map
self._write_data_all_type(self.dll.tecZoneNodeMapWrite64, Elements.ctypes, zone_n,0,1,numValues)
else:
self._write_data_all_type(self.dll.tecZoneNodeMapWrite32, Elements.ctypes, zone_n,0,1,numValues)
def _list_to_int_array(self,l):
t = (ctypes.c_int*len(l))()
for i,j in enumerate( l):
t[i] = j
return t
def _get_filehandle(self):
p = ctypes.c_int(13)
p1 = ctypes.pointer(p)
filehandle = ctypes.pointer(p1)
name = ctypes.c_char_p(self.filename.encode())
fileType = self.dataset.fileType
name_str = ','.join([str(i) for i in self.dataset.nameVars])
# name_str
var_list_str = ctypes.c_char_p(name_str.encode())
title_str = ctypes.c_char_p(self.dataset.title.encode())
if self.filename.endswith('.szplt'):
fileFormat = 1
else:
raise Exception('file format error')
self.dll.tecFileWriterOpen(name,title_str,var_list_str,fileFormat,fileType,2,None,filehandle)
        # as in the official example: optionally enable diagnostic (debug) output while writing the szplt file
if self.verbose:
outputDebugInfo = 1
self.dll.tecFileSetDiagnosticsLevel(filehandle[0],outputDebugInfo)
return filehandle[0]
def _tecZoneCreateIJK(self,zoneTitle, iMax, jMax, kMax, varTypes, shareVarFromZone,
valueLocation, passiveVarList, shareConnectivityFromZone, numFaceConnections, faceNeighborMode):
p = ctypes.pointer(ctypes.c_int(0))
zone_title = ctypes.c_char_p(zoneTitle.encode())
self.dll.tecZoneCreateIJK(self.filehandle, zone_title, iMax, jMax, kMax, varTypes,shareVarFromZone,
valueLocation, passiveVarList, shareConnectivityFromZone, numFaceConnections, faceNeighborMode,p)
return p[0]
def _tecZoneCreateFE(self,zoneTitle, zoneType, iMax, jMax, varTypes,shareVarFromZone,
valueLocation, passiveVarList, shareConnectivityFromZone, numFaceConnections, faceNeighborMode):
t = ctypes.c_int(0)
p = ctypes.pointer(t)
zone_title = ctypes.c_char_p(zoneTitle.encode())
self.dll.tecZoneCreateFE(self.filehandle, zone_title, zoneType, iMax, jMax, varTypes,shareVarFromZone,
valueLocation, passiveVarList, shareConnectivityFromZone, numFaceConnections, faceNeighborMode,p)
return p[0]
def _tecZoneSetUnsteadyOptions(self,zone_n, solutionTime=0, StrandID=0):
if solutionTime !=0 or StrandID != 0:
solutionTime = ctypes.c_double(solutionTime)
self.dll.tecZoneSetUnsteadyOptions(self.filehandle,zone_n, solutionTime, StrandID)
def _tecZoneSetParentZone(self,zone_n,zone_parent):
self.dll.tecZoneSetParentZone(self.filehandle,zone_n,zone_parent)
def _write_data_all_type(self,fun,data, *d):
fun(self.filehandle, *d, data)
def close(self):
self.dll.tecFileWriterClose(ctypes.pointer(self.filehandle))
def read(filename,isread=False):
return SzpltData(filename,isread)
def write(filename,dataset,verbose = True):
t = write_tecio(filename,dataset, verbose=verbose)
t.close()
def cal_zone(number,g,q):
g = g[number]
q = q[number]
k = {i:g[i] for i in 'XYZ'}
y = {'VAR{}'.format(key):val for key,val in q.items() if isinstance(key,int)}
k.update(y)
return k
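# Illustrative usage sketch (added for clarity, not part of the original API); 'data.szplt'
# and 'copy.szplt' are assumed placeholder file names.
def _example_pytecio_usage(src='data.szplt', dst='copy.szplt'):
    # read an szplt file, inspect the first zone, then write an identical copy
    data = read(src)
    print('zones:', data.nameZones)
    print('variables:', data.nameVars)
    first_zone = data[0]
    print('first variable shape:', first_zone[data.nameVars[0]].shape)
    write(dst, data)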
if __name__=='__main__':
pass | yxs-pytecio | /yxs_pytecio-1.0.1-py3-none-any.whl/yxs_pytecio.py | yxs_pytecio.py |
from __future__ import absolute_import
import os
import sys
__version__='1.1'
# If we are running from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == '':
# __file__ is pip-*.whl/pip/__main__.py
# first dirname call strips of '/__main__.py', second strips off '/pip'
# Resulting path is the name of the wheel itself
# Add that to sys.path so we can import pip
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
# try:
# from pip._internal import main as _main # isort:skip # noqa
# except:
# from pip import main as _main
def yxspkg_required_main(_main,args):
sys.argv = args
_main()
def main():
print('####################################')
argvs = sys.argv[:]
for i in argvs:
if '-i' == i:
break
else:
sys.argv.extend(('-i','https://pypi.tuna.tsinghua.edu.cn/simple'))
print('Commands ',' '.join(sys.argv),'\n')
# if sys.argv[1] == 'install' and sys.argv[2] == '*':
# from multiprocessing import Process
# modules = ['lxml','pandas','bs4','requests','PyQt5','imageio','rsa','scipy','matplotlib','opencv-python',
# 'tushare','lulu','yxspkg_encrypt','yxspkg_tecfile','yxspkg_wget','IPython',
# 'yxspkg_songzgif','tensorflow','keras','PyInstaller','twine','torch','torchvision',
# 'mpl_finance','quandl','xlrd','pandas_datareader','pytecio','webfile','cheroot']
# a = []
# for i in modules:
# argvs[2] = i
# s = Process(target = yxspkg_required_main,args =(argvs[:],) )
# s.start()
# s.join()
# d ={'opencv-python':'cv2'}
# for i in modules:
# try:
# m = d.get(i,i)
# exec('import '+m)
# a.append(i)
# except:
# print("Failed to install "+i)
# print('#'*20)
# for i in a:
# print('Install {} successfully!'.format(i))
# else:
# _main()
import pip
version = pip.__version__.split('.')[0]
if int(version) >= 20:
from pip._internal.cli import main
_main = main.main
else:
try:
from pip._internal import main as _main # isort:skip # noqa
except:
from pip import main as _main
_main()
if __name__ == '__main__':
main() | yxspkg-pip | /yxspkg_pip-1.1-py3-none-any.whl/yxspkg_pip.py | yxspkg_pip.py |
import os.path as _path
import os
import base64 as _base64
import math as _math
import array as _array
import tarfile as _tarfile
import sys
try:
import rsa as _rsa
except:
pass
import time
__version__='1.2.3'
__author__='Blacksong'
def rsa_md5(data):
n='52'*32
e = '1005'
def modpow(b, e, m):
result = 1
while (e > 0):
if e & 1:
result = (result * b) % m
e = e >> 1
b = (b * b) % m
return result
def bytes_to_int(bytes_):
n = 0
for i in bytes_:
n = n << 8
n += i
return n
result = modpow(bytes_to_int(data), int(e, 16), int(n, 16))
return int(result).to_bytes(32,'big')
def _data_type():
x=_array.array('L')
if x.itemsize==8:return 'L'
else:return 'Q'
_array_type=_data_type()
def _bytes8(b,m=8):
n=m-len(b)%m
b+=(chr(n)*n).encode()
return b
def _compress_tarfile(dirname,outfile=None):
    '''compress a directory into a tar.gz file'''
    ss = time.perf_counter()
dirname=_path.normpath(dirname)
if outfile is None:
outfile = dirname + '.tar.gz'
tar=_tarfile.open(outfile,'w:gz',compresslevel=9)
dr=_path.dirname(dirname)+os.sep
for r,d,fs in os.walk(dirname):
for f in fs:
af=r+os.sep+f
print('compress ',f)
tar.add(af,af.replace(dr,''))
tar.close()
    print(time.perf_counter()-ss)
return outfile
def _extarct_tarfile(filename,target_path=None):
    '''extract a tar.gz file'''
if target_path is None:target_path=_path.dirname(_path.abspath(filename))
tar=_tarfile.open(filename,'r:gz')
for f in tar.getnames():
tar.extract(f,target_path)
print('extract ',target_path+os.sep+f)
tar.close()
def _getkey(passwd):
if passwd is None:passwd=b'SGZ'
if isinstance(passwd,str):passwd=passwd.encode()
key=rsa_md5(passwd)[:32]
s=[key[i]*key[8+i]*key[16+i]*key[24+i]+(i*37) for i in range(8)]
return s
def _enpt(x,key,order=None):
n1,n2,n3,a,b,c,d,m=key
if order!=None:n1,n2,n3=order
for i in range(len(x)):
n1,n2,n3=n2,n3,(a*n1+b*n2+c*n3+d)%m
x[i]=(x[i]+n3)%0xffffffffffffffff
return n1,n2,n3
def encrypt(parameter,output=None,passwd=None):
if passwd is None:
passwd = '11'*16
if _path.isdir(parameter):
istar = True
parameter=_compress_tarfile(parameter)
else:
istar=False
key0=_getkey(passwd)
if output==None:
output=parameter+'.yxs'
size=_path.getsize(parameter)
filename=_path.split(parameter)[1]
size_name=len(filename.encode())
size_bu=8-(size+size_name+3)%8
b=bytearray(3+size_bu)
b[0]=size_bu
b[1]=size_name//256
b[2]=size_name%256
b+=filename.encode()
data=open(parameter,'rb')
length=8*1024*1024
b+=data.read(length-size_bu-size_name-3)
order0=key0[:3]
fp=open(output,'wb')
size0=0
while True:
x=_array.array(_array_type)
x.frombytes(b)
order0=_enpt(x,key=key0,order=order0)
fp.write(x.tobytes())
b=data.read(length)
if not b:break
size0+=length
sys.stdout.write('\b\b\b\b\b\b\b\b\b\b{:.2f}'.format(size0/size))
fp.close()
data.close()
if istar:os.remove(parameter)
def _deph(x,key,order=None):
n1,n2,n3,a,b,c,d,m=key
if order!=None:n1,n2,n3=order
for i in range(len(x)):
n1,n2,n3=n2,n3,(a*n1+b*n2+c*n3+d)%m
x[i]=(x[i]-n3)%0xffffffffffffffff
return n1,n2,n3
def decipher(parameter,output=None,passwd=None):
if passwd is None:
passwd = '11'*16
key0=_getkey(passwd)
data=open(parameter,'rb')
size=_path.getsize(parameter)
length=8*1024*1024
b=data.read(8*1024)
x=_array.array(_array_type)
x.frombytes(b)
order0=key0[:3]
order0=_deph(x,key=key0,order=order0)
b=x.tobytes()
size_bu=b[0]
size_name=b[1]*256+b[2]
o_name=b[3+size_bu:3+size_bu+size_name].decode('utf8')
if output is None:
output=o_name
fp=open(output,'wb')
fp.write(b[3+size_bu+size_name:])
size0=8*1024-3-size_bu-size_name
while True:
b=data.read(length)
if not b:break
size0+=length
sys.stdout.write('\b\b\b\b\b\b\b\b\b\b{:.2f}'.format(size0/size))
x=_array.array(_array_type)
x.frombytes(b)
order0=_deph(x,key=key0,order=order0)
fp.write(x.tobytes())
fp.close()
if o_name[-7:]=='.tar.gz':
target_path=_path.dirname(_path.abspath(parameter))
_extarct_tarfile(output,target_path=target_path)
os.remove(output)
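# Illustrative sketch (added, not part of the original module): encrypt a file or directory
# into a .yxs container and decipher it back; 'notes.txt' is an assumed placeholder name.
def _example_file_roundtrip():
    encrypt('notes.txt', passwd='my password')       # writes notes.txt.yxs next to the source
    decipher('notes.txt.yxs', passwd='my password')  # restores notes.txt (overwriting it)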
def encode(b,passwd):
key0=_getkey(passwd)
x=_array.array(_array_type)
x.frombytes(_bytes8(b))
_enpt(x,key=key0)
return x.tobytes()
def decode(b,passwd):
key0=_getkey(passwd)
x=_array.array(_array_type)
x.frombytes(b)
_deph(x,key=key0)
b=x.tobytes()
return b[:-b[-1]]
def b64encode(b,passwd=None):
return _base64.b64encode(encode(b,passwd))
def b64decode(b,passwd=None):
return decode(_base64.b64decode(b),passwd)
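# Illustrative round-trip sketch (added, not part of the original module): encode/decode and
# the base64 helpers are symmetric for any bytes payload as long as the same password is used.
def _example_symmetric_roundtrip():
    secret = b'hello world'
    token = b64encode(secret, passwd='my password')
    assert b64decode(token, passwd='my password') == secret
    assert decode(encode(secret, 'my password'), 'my password') == secret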
def spencode(b,passwd=None,str_set=b''):
if not b:return b
if len(str_set)<2:
str_set=list(range(ord('A'),ord('A')+26))+list(range(ord('a'),ord('a')+26))+list(range(ord('0'),ord('0')+10))
if passwd is None:b=_bytes8(b)
else:b=encode(b,passwd)
str_set=bytearray(str_set)
nb,ns=len(b),len(str_set)
x=_array.array(_array_type)
w=_math.ceil(x.itemsize*_math.log(256)/_math.log(ns))
x.frombytes(b)
y=bytearray(len(x)*w)
t=0
for i in x:
for j in range(w-1,-1,-1):
y[t+j]=str_set[i%ns]
i=i//ns
t+=w
return y
def spdecode(b,passwd=None,str_set=b''):
if not b:return b
if len(str_set)<2:
str_set=list(range(ord('A'),ord('A')+26))+list(range(ord('a'),ord('a')+26))+list(range(ord('0'),ord('0')+10))
str_set=bytearray(str_set)
t_set=bytearray(256)
for i,j in enumerate(str_set):
t_set[j]=i
nb,ns=len(b),len(str_set)
x=_array.array(_array_type,[0])
w=_math.ceil(x.itemsize*_math.log(256)/_math.log(ns))
b=bytearray(b)
x*=nb//w
t=0
for i in range(nb//w):
s=0
for j in range(t,t+w):
s=s*ns+t_set[b[j]]
t+=w
x[i]=s
b=x.tobytes()
if passwd is None:b=b[:-b[-1]]
else:b=decode(b,passwd)
return b
def newkeys(n):  # generate an RSA key pair
return _rsa.newkeys(n)
def rsaencode(b,public):
length = (len(bin(public.n))-2)//8-11
crypt_list = [_rsa.encrypt(b[i-length:i],public) for i in range(length,len(b)+1,length)]
leaved = len(b) % length
if leaved>0:
crypt_list.append(_rsa.encrypt(b[-leaved:],public))
return b''.join(crypt_list)
def rsadecode(b,private):
length = (len(bin(private.n))-2)//8
assert len(b) % length == 0
decrypt_list = [_rsa.decrypt(b[i-length:i],private) for i in range(length,len(b)+1,length)]
return b''.join(decrypt_list)
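# Illustrative RSA sketch (added, not part of the original module); it requires the optional
# third-party ``rsa`` package imported above as _rsa.
def _example_rsa_roundtrip():
    public, private = newkeys(512)
    message = b'some bytes to protect'
    assert rsadecode(rsaencode(message, public), private) == message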
def parse_commands(args):
d = dict()
i = 0
while True:
if i>=len(args):
break
if args[i].startswith('--'):
d[args[i][2:]] = True
i+=1
elif args[i].startswith('-'):
d[args[i][1:]] = args[i+1]
i += 2
else:
d[args[i]] = True
i += 1
return d
def run_git(args):
def get_files():
files = os.popen('git ls-files').read()
return files.split()
# passwd = args['p']
if args.get('push') is not None:
files = get_files()
print(files)
def main(args = None):
if args is None:
args = sys.argv[1:]
d = parse_commands(args)
if d.get('e') is not None:
print('encrypt')
encrypt(d.get('e'),d.get('o'),d.get('p'))
elif d.get('d') is not None:
print('decipher')
decipher(d.get('d'),d.get('o'),d.get('p'))
elif d.get('git') is not None:
run_git(d)
if __name__=='__main__':
    main()
==============================
SongZ GIF
==============================
SongZ GIF is a GUI for making GIFs, based on PyQt5, moviepy, imageio, scipy.misc and numpy.
The full API documentation can be found here.
-----------------
Installation
-----------------
To get the latest version::

    $ python -m pip install yxspkg_songzgif --user

-----------------
Usage
-----------------
To start with the module::

    $ python -m yxspkg_songzgif.gif

-----------------
Support
-----------------
If you have any questions or comments please send an email to [email protected]
| yxspkg_songzgif | /yxspkg_songzgif-1.4.2.tar.gz/yxspkg_songzgif-1.4.2/README.rst | README.rst |
from PyQt5.QtCore import QTimer,Qt,QSize,QPoint,QEvent
from PyQt5.QtGui import QImage, QPixmap,QPainter,QFont,QColor,QPen,QCursor,QKeySequence,QIcon,QPalette
from PyQt5.QtWidgets import (QApplication, QLabel,QWidget,QMessageBox,QDesktopWidget,QMenu,QAction)
from numpy import stack
import imageio
import sys
from os import path
import os
import time
import ctypes
if sys.platform.startswith('win'):  # on Windows, set an AppUserModelID so the taskbar icon matches the window icon
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("myappid")
del ctypes
imread=imageio.imread
__version__='1.1.5'
__author__='Blacksong'
class datarc(dict):
def __init__(self,name):
super().__init__()
for i in open(name,'r'):
s=i.strip()
l=s.find('<')
r=s.find('>')
if l!=0 or r==-1:continue
key=s[l+1:r].strip()
value=s[r+1:].lstrip()
self[key]=value
if self['win_title'].strip() == 'True':
self['win_title'] = True
else:
self['win_title'] = False
def setDefault():
environ=os.environ
if sys.platform.startswith('win'):
home_path=path.join(environ['HOMEDRIVE'],environ['HOMEPATH'],'.songzgifrc')
else:
home_path=path.join(environ['HOME'],'.songzgifrc')
if not path.exists(home_path):
content='''<autoplay_interval>5
<background_color>rgba(255,255,255,255)
<closebutton_background_color>rgba(0,0,0,0)
<title_background_color>rgba(0,0,0,0)
<border_background_color>rgba(0,0,0,0)
<win_title>True'''
fp=open(home_path,'w')
fp.write(content)
fp.close()
return datarc(home_path)
def ndarry2qimage(npimg):  # convert an ndarray image to a QImage
if npimg.dtype!='uint8':
npimg=npimg.astype('uint8')
shape=npimg.shape
if len(shape)==3 and shape[2]==4:
return QImage(npimg.tobytes(),shape[1],shape[0],shape[1]*shape[2],QImage.Format_RGBA8888)
if len(shape)==2:
npimg=stack((npimg,npimg,npimg),2)
shape=npimg.shape
s=QImage(npimg.tobytes(),shape[1],shape[0],shape[1]*shape[2],QImage.Format_RGB888)
return s
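# Illustrative sketch (added, not part of the original module): ndarry2qimage accepts RGB,
# RGBA or single-channel uint8 arrays; grayscale input is stacked into RGB internally.
def _example_ndarray_to_qimage():
    import numpy as np
    gray = np.zeros((32, 48), dtype='uint8')     # single channel
    rgba = np.zeros((32, 48, 4), dtype='uint8')  # RGBA
    return ndarry2qimage(gray), ndarry2qimage(rgba)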
class YTitleLabel(QLabel):
def __init__(self,*d):
super().__init__(*d)
self.parent=d[0]
self.name_label=QLabel(self)
self.name_label.setStyleSheet('QWidget{background-color:rgba(0,0,0,0)}' )
self.name_label.hide()
self.name_label.move(10,3)
def mousePressEvent(self,e):
if self.parent.resizeWindow:
self.parent.mousePressEvent(e)
return
        self.xt,self.yt=self.parent.x(),self.parent.y()  # original window position
self.x0,self.y0=self.xt+e.x(),self.yt+e.y()
def mouseMoveEvent(self,e):
if self.parent.resizeWindow:
self.parent.mouseMoveEvent(e)
return
x,y=self.parent.x()+e.x(),self.parent.y()+e.y()
dx,dy=x-self.x0,y-self.y0
self.parent.move(self.xt+dx,self.yt+dy)
def mouseDoubleClickEvent(self,e):
if self.parent.isMaximized():
self.parent.showNormal()
else:
self.parent.showMaximized()
def enterEvent(self,e):
if not self.parent.source_name:return
name=path.basename(self.parent.source_name)
self.name_label.setText(name)
self.name_label.show()
def leaveEvent(self,e):
self.name_label.hide()
class YDesignButton(QLabel):
position_dict={'center':Qt.AlignCenter,'left':Qt.AlignLeft,'hcenter':Qt.AlignHCenter,'vcenter':Qt.AlignVCenter,'justify':Qt.AlignJustify}
def __init__(self,parent):
super().__init__(parent)
self.parent=parent
self.clicked_connect_func=lambda :None
self.setScaledContents(True)
self.normal_qimg=None
self.focus_qimg=None
def setNormalQimg(self,lines,color_background,color_text,img_size,width_pen=4):
self.normal_qimg=self.getDrawLine(lines,color_background,color_text,img_size,width_pen)
self.setPixmap(self.normal_qimg)
if self.focus_qimg is None:
self.focus_qimg = self.normal_qimg
def setFocusQimg(self,lines,color_background,color_text,img_size,width_pen=4):
self.focus_qimg=self.getDrawLine(lines,color_background,color_text,img_size,width_pen)
def getDrawLine(self,lines,color_background,color_text,img_size,width_pen=4):
qp=QPainter()
img=QImage(img_size[0],img_size[1],QImage.Format_RGBA8888)
img.fill(QColor(*color_background))
qp.begin(img)
qp.setPen(QPen(QColor(*color_text),width_pen,Qt.SolidLine))
for i,j,m,n in lines:
qp.drawLine(QPoint(i,j),QPoint(m,n))
qp.end()
qimg=QPixmap.fromImage(img)
return qimg
def mousePressEvent(self,e):
self.clicked_connect_func()
def enterEvent(self,e):
self.setPixmap(self.focus_qimg)
def leaveEvent(self,e):
self.setPixmap(self.normal_qimg)
class NextPage(YDesignButton): #翻页按钮
def __init__(self,*d):
super().__init__(*d)
self.clicked_connect_func=lambda a:None
self.setStyleSheet('QWidget{background-color:rgba(0,0,0,0)}' )
l=[(10,50,70,50),(50,10,90,50),(90,50,50,90)]
self.setNormalQimg(l,(0,0,0,0),(255,255,255,0),(100,100),10)
self.setFocusQimg(l,(0,0,0,0),(255,255,255,0),(100,100),15)
def clicked_connect(self,func):
self.clicked_connect_func=func
def mousePressEvent(self,e):
self.clicked_connect_func(e)
class PrePage(YDesignButton): #翻页按钮
def __init__(self,*d):
super().__init__(*d)
self.setStyleSheet('QWidget{background-color:rgba(0,0,0,0)}' )
self.clicked_connect_func=lambda a:None
l=[(30,50,90,50),(50,10,10,50),(50,90,10,50)]
self.setNormalQimg(l,(0,0,0,0),(255,255,255,0),(100,100),10)
self.setFocusQimg(l,(0,0,0,0),(255,255,255,0),(100,100),15)
def clicked_connect(self,func):
self.clicked_connect_func=func
def mousePressEvent(self,e):
self.clicked_connect_func(e)
class BorderLine(QLabel):
def __init__(self,*d):
super().__init__(*d)
self.setStyleSheet('QWidget{background-color:%s}' % default_value.get('border_background_color','rgba(0,0,0,100)'))
class YViewerLabel(QLabel):
def __init__(self,*d):
super().__init__(*d)
self.parent=d[0]
self.shape=(20,20)
self.ndimg_s=None
self.lefttop = None
self.is_focus = False
def showimage(self,ndimg):
self.ndimg_s=ndimg
x,y,w,h=self.geometry_img
ndimg=ndimg[y:y+h,x:x+w]
qimg=ndarry2qimage(ndimg)
self.setPixmap(QPixmap.fromImage(qimg))
def mousePressEvent(self,e):
if e.button()==Qt.RightButton and self.parent.autoplay:
self.start_time=time.time()
self.single_right=True
if self.parent.resizeWindow:
self.parent.mousePressEvent(e)
return
self.lefttop=self.get_lefttop()
self.dl=0,0
self.xt,self.yt=self.x(),self.y() # current position of the image label
self.x0,self.y0=self.xt+e.x(),self.yt+e.y()
self.pw,self.ph=self.parent.width(),self.parent.height()
def setImage(self,geometry_img=None,shape=None):
self.geometry_img=geometry_img
self.shape=shape
def mouseMoveEvent(self,e):
if self.parent.resizeWindow:
self.parent.mouseMoveEvent(e)
return
x,y=self.x()+e.x(),self.y()+e.y()
if x<0 or y<0 or x>self.pw or y>self.ph:return
dx,dy=x-self.x0,y-self.y0
self.move(self.xt+dx,self.yt+dy)
self.dl=dx,dy
def mouseDoubleClickEvent(self,e):
self.single_right=False
if e.button()==Qt.RightButton:
self.parent.setAutoplay()
return
if self.parent.isMaximized():
self.parent.showNormal()
else:
self.parent.showMaximized()
def mouseReleaseEvent(self,e):
if e.button()==Qt.RightButton and self.parent.autoplay and self.single_right:
self.end_time=time.time()
if self.end_time-self.start_time>0.8:
self.parent.change_autoplay(self.end_time-self.start_time)
if self.lefttop is not None:
self.update_geometry(self.lefttop,self.dl)
def get_lefttop(self): #
factor=self.parent.factor
end_geometry=self.geometry()
gx,gy=end_geometry.x(),end_geometry.y()
x_img,y_img=self.geometry_img[:2]
ox=gx-x_img*factor
oy=gy-y_img*factor
return ox,oy
def update_geometry(self,origon=None,dl=None):
if self.ndimg_s is None:return
factor=self.parent.factor
end_geometry=self.geometry()
gx,gy,gw,gh=end_geometry.x(),end_geometry.y(),end_geometry.width(),end_geometry.height()
w,h=self.parent.width(),self.parent.height()
x_img,y_img,w_img,h_img=self.geometry_img
x_new,y_new,w_new,h_new=gx,gy,gw,gh
if gx<-w:
dx=(-w-gx)/factor
x_img+=dx
x_new=-w
if gy<-h:
dy=(-h-gy)/factor
y_img+=dy
y_new=-h
if x_img>0 and gx>-w:
dx=min((gx+w)/factor,x_img)
x_img-=dx
x_new-=dx*factor
if y_img>0 and gy>-h:
dx=min((gy+h)/factor,y_img)
y_img-=dx
y_new-=dx*factor
w_new=w+w-x_new
h_new=h+h-y_new
w_img=min(w_new/factor,self.shape[1]-x_img)
h_img=min(h_new/factor,self.shape[0]-y_img)
self.geometry_img=int(x_img),int(y_img),int(w_img),int(h_img)
x_img,y_img,w_img,h_img=self.geometry_img
w_new=w_img*factor
h_new=h_img*factor
x_new,y_new,w_new,h_new=int(x_new),int(y_new),int(w_new),int(h_new)
ox=x_new-factor*x_img
oy=y_new-factor*y_img
if origon:
ox1,oy1=origon
dx,dy=dl
ox1,oy1=ox1+dx,oy1+dy
dx,dy=ox-ox1,oy-oy1
x_new-=dx
y_new-=dy
self.setGeometry(x_new,y_new,w_new,h_new)
self.showimage(self.ndimg_s)
# print(x_new,y_new,w_new,h_new,' ' , gx,gy,gw,gh)
def enterEvent(self,event):
self.is_focus = True
def leaveEvent(self,event):
self.is_focus = False
class GifPreview(QWidget): # preview window for gif and still images
gif_types=('.gif',)
image_types=('.jpg','.jpeg','.ico','.bmp','.png','.tiff','.icns')
def __init__(self,s='SongZ Viewer',name=None):
super().__init__()
global default_value
default_value=setDefault()
if default_value['win_title']:
default_value['border_background_color']='rgba(0,0,0,0)'
self.offset = 10000 # if the system window title is shown, move our own title bar and close button out of sight
self.displayWindowTitle = True
else:
self.displayWindowTitle = False
self.offset = 0
self.setWindowFlags(Qt.FramelessWindowHint)#FramelessWindowHint
print(default_value)
self.isMaximized_value=False
self.timer=None
self.autoplay=False
self.resizeWindow=False
self.border=0 # border width
self.label=YViewerLabel(self)
self.label.setScaledContents(True)
self.background_color=(255,255,255,255)
# self.setStyleSheet('QWidget{background-color:%s}' % default_value['background_color'])
background_color = default_value['background_color']
nl = background_color.find('(')
numbers = background_color[nl+1:-1].split(',')
numbers = [int(i) for i in numbers]
palette = QPalette()
palette.setColor(self.backgroundRole(), QColor(*numbers))
self.setPalette(palette)
self.setMinimumSize(200,100)
self.minimumSize_window=200,100
self.title_height=26
self.bottom_height=0
self.first_window=True
self.CloseButton=YDesignButton(self)
self.CloseButton.setNormalQimg([(30,30,70,70),(30,70,70,30)],(0,0,0,0),(0,0,0,0),(100,100),4)
self.CloseButton.setFocusQimg([(30,30,70,70),(30,70,70,30)],(255,0,0,255),(255,255,255),(100,100),4)
self.CloseButton.setStyleSheet('QWidget{background-color:%s}' % default_value['closebutton_background_color'])
self.Geometry_Desktop=QDesktopWidget().availableGeometry()
self.max_image_height=self.Geometry_Desktop.height()
self.nextbutton_size=(50,self.max_image_height)
self.RGBLabel = QLabel(self)
self.RGBLabel.setStyleSheet("background:transparent")
self.CloseButton.resize(self.title_height,self.title_height)
self.CloseButton.clicked_connect_func=(self.close)
#title bar
self.TitleLabel=YTitleLabel(self)
self.TitleLabel.move(0,self.offset)
self.TitleLabel.setStyleSheet('QWidget{background-color:%s}' % default_value['title_background_color'] )
#page-turn buttons
self.nextbutton=NextPage(self)
self.nextbutton.resize(*self.nextbutton_size)
self.nextbutton.clicked_connect(self.next_image)
self.prebutton=PrePage(self)
self.prebutton.resize(*self.nextbutton_size)
self.prebutton.clicked_connect(self.previous_image)
self.factor=1
self.factor_max=1000
self.factor_min=0.04
self.leftborder=BorderLine(self)
self.rightborder=BorderLine(self)
self.topborder=BorderLine(self)
self.bottomborder=BorderLine(self)
self.timer = None
self.source_name=name
if name:
self.open_file(name)
self.dir_images=self.get_images()
self.dir_images_n=self.dir_images.index(path.abspath(name))
else:
self.dir_images=None
self.resize(400,400)
self.show()
self.setMinimumSize(400,400)
self.create_right_key_menu()
if sys.platform.startswith('win'):
try:
icon_path = path.join(os.environ['HOMEDRIVE'] , os.environ['HOMEPATH'] , '.yxspkg','songzviewer','songzviewer.png')
self.system_icon = QIcon(icon_path)
self.setWindowIcon(self.system_icon)
except:
pass
def create_right_key_menu(self):
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.show_right_menu)
self.rightMenu = QMenu(self)
self.editAct = QAction("Edit with songzgif", self, triggered=self.edit_with_songzgif)
self.rightMenu.addAction(self.editAct)
self.rightMenu.addSeparator()
def edit_with_songzgif(self):
from yxspkg.songzgif import gif
self.gifMaker = gif.GifMaker(self.image_name)
def show_right_menu(self, pos): # show the context (right-click) menu
pos = QCursor.pos()
pos.setX(pos.x()+2)
pos.setY(pos.y()+2)
self.rightMenu.exec_(pos)
def isMaximized(self):
if sys.platform.startswith('darwin'):
return self.isMaximized_value
else:
return super().isMaximized()
def showNormal(self):
if sys.platform.startswith('darwin'):
self.isMaximized_value=False
self.setGeometry(self.Geometry_Normal)
else:
super().showNormal()
self.setPosition()
def showMaximized(self):
if sys.platform.startswith('darwin'):
self.isMaximized_value=True
self.Geometry_Normal=self.geometry()
self.setGeometry(self.Geometry_Desktop)
else:
super().showMaximized()
# w,h=self.width(),self.height()
# t=self.get_base_factor(w,h,self.label.shape)
# print(t,w,h)
# self.scaleImage(t/self.factor)
self.setPosition()
def get_images(self):
dname=path.dirname(path.abspath(self.source_name))
t=[path.join(dname,i) for i in os.listdir(dname) if path.splitext(i)[-1].lower() in self.gif_types or path.splitext(i)[-1].lower() in self.image_types]
return t
def get_base_factor(self,w,h,shape):
if shape[1]/shape[0]>w/h:
t=w/shape[1]
else:
t=h/shape[0]
return t
def next_image(self,e):
if not self.dir_images:return
if e is True or e.button()==Qt.LeftButton:
self.dir_images_n+=1
if self.dir_images_n>=len(self.dir_images):
self.dir_images_n=0
self.open_file(self.dir_images[self.dir_images_n])
def previous_image(self,e):
if not self.dir_images:return
if e is True or e.button()==Qt.LeftButton:
self.dir_images_n-=1
if self.dir_images_n<0:
self.dir_images_n = len(self.dir_images)-1
self.open_file(self.dir_images[self.dir_images_n])
def open_file(self,name):
self.image_name = name
if name is not None:
self.setWindowTitle(name.split(os.sep)[-1])
try:
if path.splitext(name)[-1].lower() in self.gif_types:
size = self.gif(name)
else:
size = self.image(name)
self.setWindowTitle(name.split(os.sep)[-1]+' [{}x{}]'.format(size[1],size[0]))
return
except Exception as e:
self.setWindowTitle(name.split(os.sep)[-1])
print(e,self.source_name)
self.label.setText('cannot open file:{0}\nError:{1}'.format(self.source_name,e))
if self.first_window:
self.resize(400,400)
self.move_center()
self.show()
self.first_window=False
def move_center(self):
w,h=self.width(),self.height()
w0,h0=self.Geometry_Desktop.width(),self.Geometry_Desktop.height()
x0,y0=self.Geometry_Desktop.x(),self.Geometry_Desktop.y()
self.move((w0-w)/2+x0,(h0-h)/2+y0)
def gif(self,name):
if isinstance(name,str):
try:
x=imageio.get_reader(name)
meta=x.get_meta_data()
fps=1000/meta.get('duration',None)
jpgs=list(x)
size=jpgs[0].shape
size=size[1],size[0]
except Exception as e:
print('imageio',e)
x=imageio.get_reader(name,'ffmpeg')
meta=x.get_meta_data()
fps=meta['fps']
size=meta['size']
jpgs=list(x)
else:
jpgs,fps,size=name
self.preview((jpgs,fps,(size[1],size[0])))
return size
def image(self,name):
s=imread(name)
shape=s.shape
self.preview(([s],0.0001,shape[:2]))
return shape[1],shape[0]
def update_image(self):
if self.nn>=len(self.jpgs):
self.nn=0
self.present_image=self.jpgs[self.nn]
self.label.showimage(self.present_image)
self.nn+=1
def scaleImage(self,factor):
tt=self.factor*factor
if tt>self.factor_max or tt<self.factor_min:return
self.factor_max=100000
lefttop=self.label.get_lefttop()
w,h=self.label.geometry_img[-2:]
w0,h0=self.width()/2,self.height()/2
dx=(w0-lefttop[0])*(1-factor)
dy=(h0-lefttop[1])*(1-factor)
self.factor*=factor
self.label.resize(w*self.factor,h*self.factor)
self.label.update_geometry(lefttop,(dx,dy))
if self.factor*self.label.shape[0]<self.max_image_height:
self.setPosition()
x,y,w,h=self.label.geometry_img
if w<30 or h<30:
self.factor_max=max(self.factor,5)
def setPosition(self):
title_height=self.title_height
bottom_height=self.bottom_height
w,h=self.width(),self.height()
self.CloseButton.move(w-title_height,self.offset)
self.TitleLabel.resize(w-self.title_height,self.title_height)
# h-=title_height+bottom_height
w_label,h_label=self.label.width(),self.label.height()
self.label.move((w-w_label)/2,(h-h_label)/2)
self.nextbutton.move(w-self.nextbutton_size[0]-5,title_height)
self.prebutton.move(5,title_height)
self.leftborder.resize(1,h)
self.topborder.resize(w,1)
self.rightborder.setGeometry(w-1,0,1,h)
self.bottomborder.setGeometry(0,h-1,w,1)
self.label.update_geometry()
self.RGBLabel.setGeometry(0,h-20,300,20)
def change_autoplay(self,t):
self.setAutoplay()
self.setAutoplay(t)
def setAutoplay(self,t=None):
if t is None:
t=float(default_value['autoplay_interval'])
if self.autoplay is True:
self.autoplay=False
self.timer_auto.stop()
else:
self.autoplay = True
self.timer_auto=QTimer(self)
self.timer_auto.timeout.connect(lambda :self.next_image(True))
self.timer_auto.start(int(t*1000))
def preview(self,parent):
self.nn=0
self.jpgs, self.fps ,shape = parent
self.label.setImage((0,0,shape[1],shape[0]),shape)
if self.first_window:
m=max(shape)
t=1/max(1,m/self.max_image_height)
self.resize(shape[1]*t+self.border*2,shape[0]*t+self.border*2+self.bottom_height)
self.first_window=False
self.move_center()
else:
w,h=self.width(),self.height()
t=self.get_base_factor(w,h,shape)
if self.timer:self.timer.stop()
self.label.resize(shape[1]*t,shape[0]*t)
self.factor=t
self.setPosition()
self.update_image()
if self.fps != 0:
self.timer=QTimer(self)
self.timer.timeout.connect(self.update_image)
t=int(1/self.fps*1000)
self.timer.start(t)
self.show()
def isresizeMouse(self,x,y):
x0,y0=self.x(),self.y()
w0,h0=self.width(),self.height()
width=4
distance=8
if x<x0+width and y0+h0-distance>y>y0+distance:#left
self.setCursor(QCursor(Qt.SizeHorCursor))
self.resizeWindow='Left'
elif x>x0+w0-width and y0+h0-distance>y>y0+distance:#right
self.setCursor(QCursor(Qt.SizeHorCursor))
self.resizeWindow='Right'
elif y<y0+width and x0+w0-distance>x>x0+distance:#top
self.setCursor(QCursor(Qt.SizeVerCursor))
self.resizeWindow='Top'
elif y>y0+h0-width and x0+w0-distance>x>x0+distance:#bottom
self.setCursor(QCursor(Qt.SizeVerCursor))
self.resizeWindow='Bottom'
elif x<x0+distance and y<y0+distance:#LeftTop
self.setCursor(QCursor(Qt.SizeFDiagCursor))
self.resizeWindow='LeftTop'
elif x>x0-distance+w0 and y>y0-distance+h0:#RightBottom
self.setCursor(QCursor(Qt.SizeFDiagCursor))
self.resizeWindow='RightBottom'
elif x<x0+distance and y>y0-distance+h0:#LeftBottom
self.setCursor(QCursor(Qt.SizeBDiagCursor))
self.resizeWindow='LeftBottom'
else:
self.resizeWindow = False
self.setCursor(QCursor(Qt.ArrowCursor))
def runResizeWindow(self):
pos=QCursor.pos()
x,y=pos.x(),pos.y()
if self.resizeWindow == 'Left':
wt=self.w0+self.x0-x
if self.minimumSize_window[0]>wt:return
self.setGeometry(x,self.y0,wt,self.h0)
elif self.resizeWindow == 'Right':
wt=x-self.x0
self.resize(wt,self.h0)
elif self.resizeWindow == 'Bottom':
ht=y-self.y0
self.resize(self.w0,ht)
elif self.resizeWindow == 'Top':
ht=self.y0-y+self.h0
if self.minimumSize_window[1]>ht:return
self.setGeometry(self.x0,y,self.w0,ht)
elif self.resizeWindow == 'RightBottom':
wt,ht=x-self.x0,y-self.y0
self.resize(wt,ht)
elif self.resizeWindow == 'LeftTop':
wt,ht=self.x0-x+self.w0,self.y0-y+self.h0
if self.minimumSize_window[0]>wt:
wt=self.minimumSize_window[0]
x=self.x()
if self.minimumSize_window[1]>ht:
ht=self.minimumSize_window[1]
y=self.y()
self.setGeometry(x,y,wt,ht)
elif self.resizeWindow == 'LeftBottom':
wt,ht=self.x0-x+self.w0,y-self.y0
if self.minimumSize_window[0]>wt:
wt=self.minimumSize_window[0]
x=self.x()
if self.minimumSize_window[1]>ht:
ht=self.minimumSize_window[1]
self.setGeometry(x,self.y0,wt,ht)
def mousePressEvent(self,e):
if self.resizeWindow:
self.x0,self.y0=self.x(),self.y()
self.w0,self.h0=self.width(),self.height()
def mouseMoveEvent(self,e):
if self.resizeWindow:
self.runResizeWindow()
def resizeEvent(self,e):
self.setPosition()
def keyPressEvent(self,e):
if e.matches(QKeySequence.MoveToPreviousLine):
self.scaleImage(1/0.7)
elif e.matches(QKeySequence.MoveToNextLine):
self.scaleImage(0.7)
elif e.matches(QKeySequence.MoveToPreviousChar):
self.previous_image(True)
elif e.matches(QKeySequence.MoveToNextChar):
self.next_image(True)
def wheelEvent(self,e):
if self.first_window is True:
return
if e.angleDelta().y()>0:
factor=1/0.8
else:
factor=0.8
self.scaleImage(factor)
# def get_xy_of_image(self,pos = None):
# if pos is None:
# pos = QCursor.pos()
# x,y = pos.x()-self.x()-self.label.x(),pos.y()-self.label.y()-self.geometry().y()
# label_x,label_y = self.label.width(),self.label.height()
# x0,y0,w,h=self.label.geometry_img
# dx, dy = int(x/label_x*w), int(y/label_y*h)
# x0+=dx
# y0+=dy
# return y0,x0
def RGB2HSV(self,R,G,B):
Cmax = max(R,G,B)
Cmin = min(R,G,B)
delta = Cmax - Cmin
if delta == 0:
H = 0
elif Cmax == R:
H = 60*((G-B)/delta)
if H<0:
H += 360
elif Cmax == G:
H = 60*((B-R)/delta + 2)
else:
H = 60*((R-G)/delta + 4)
if Cmax == 0:
S = 0
else:
S = delta / Cmax
V = Cmax/255
return H,S,V
def displayRGB(self,pos):
if not self.label.is_focus:
return
x,y = pos.x()-self.x()-self.label.x(),pos.y()-self.label.y()-self.geometry().y()
label_x,label_y = self.label.width(),self.label.height()
x0,y0,w,h=self.label.geometry_img
dx, dy = int(x/label_x*w), int(y/label_y*h)
x0+=dx
y0+=dy
try:
RGB = self.present_image[y0,x0]
except:
RGB = 0,0,0
# print(y0,x0,RGB,x,y)
HSV = self.RGB2HSV(int(RGB[0]),int(RGB[1]),int(RGB[2]))
if len(RGB)==3:
s = "{},{} ,RGB:{},{},{}, HSV:{:3.0f}, {:.3f}, {:.3f}".format(y0,x0,*RGB,*HSV)
else:
s = "{},{} ,RGBA:{},{},{},{}, HSV:{:3.0f}, {:.3f}, {:.3f}".format(y0,x0,*RGB,*HSV)
self.RGBLabel.setText(s)
def eventFilter(self,source,event):
t=event.type()
if t == QEvent.MouseMove:
if event.buttons() == Qt.NoButton:
pos=QCursor.pos()
# print(dir(event))
self.displayRGB(pos)
# self.hide_button(pos.x()-self.x())
if not self.isMaximized() and not self.displayWindowTitle:
self.isresizeMouse(pos.x(),pos.y())
return super().eventFilter(source,event)
def main(name=None):
if len(sys.argv)==2:
name=sys.argv[1]
app = QApplication(sys.argv)
Viewer = GifPreview(name=name)
app.installEventFilter(Viewer)
sys.exit(app.exec_())
if __name__ == '__main__':
main() | yxspkg_songzviewer | /yxspkg_songzviewer-1.1.5.tar.gz/yxspkg_songzviewer-1.1.5/yxspkg_songzviewer.py | yxspkg_songzviewer.py |
import numpy as np
from os.path import getsize
import pandas as pd
import re
__version__='1.8'
__author__='Blacksong'
class _zone_data(dict):
def __init__(self,*d,**dd):
super().__init__(*d,**dd)
self.Elements = None
self.tec_cellcentered=False
def _setPlot3d_type(self):# called when the file is in plot3d format; adds the x, y, z and ptype attributes
self.x=None
self.y=None
self.z=None
self.ptype=None
return self
def rename(self,old,new):
if old == new:return
self[new]=self[old]
self.pop(old)
def DataFrame(self,center=False,columns=None):
'''Convert the zone data to a pandas DataFrame.
center: whether to convert every variable to cell-centered values
'''
if columns is None:
columns = self.keys()
dic = {i:self[i] if not center else self.center_value(i) for i in columns}
return pd.DataFrame(dic)
def fit_mesh(self,zone_data,var,X='x',Y='y',Z=None):
'''
Reorder the values of variable var taken from zone_data so that they follow this zone's Elements ordering.
X,Y,Z: names of the variables that store the mesh coordinates
'''
if Z:
mesh = [X,Y,Z]
else:
mesh = [X,Y]
assert self.Elements.shape == zone_data.Elements.shape
m1 = {i:self.center_value(i) for i in mesh}
m2 = {i:zone_data.center_value(i) for i in mesh}
m2['__var'] = var
m1 = pd.DataFrame(m1)
m2 = pd.DataFrame(m2)
m2.index = m1.index
m1.sort_values(mesh, inplace=True)
m2.sort_values(mesh, inplace=True)
m1['__var'] = m2['__var'].values
m1.sort_index(inplace=True)
return m1['__var'].values
def set_data(self,names,values,attribute):
self.names=names
self.data=values
self.attribute=attribute
for i,v in zip(names,values):
self[i]=v
def __getitem__(self,k):
if isinstance(k,str):
return super().__getitem__(k)
else:
if self.attribute == 'fluent_prof':
return self.data[:,k]
def is_centered(self,name): # check whether a tecplot variable is cell-centered
nodes = int(self.attribute['Nodes'])
if len(self[name])==nodes:
return False
else:
return True
def center_value(self,name):# value of a tecplot variable at the cell centers, i.e. the average over each cell's nodes
elements = self.Elements - 1
n = elements.shape[1]
elements_flat = elements.flatten()
data = self[name][elements_flat].reshape((-1,n))
return data.mean(1)
def __add__(self,other):# overload addition
z = class_read()
names = list(self.keys())
values = [self[i] + other[i] for i in names]
z.update(zip(names,values))
return z
def __sub__(self,other):# overload subtraction
z = class_read()
names = list(self.keys())
values = [self[i] - other[i] for i in names]
z.update(zip(names,values))
return z
def __mul__(self,other):# overload multiplication
z = class_read()
names = list(self.keys())
if isinstance(other,_zone_data):
values = [self[i] * other[i] for i in names]
else:
values = [self[i] * other for i in names]
z.update(zip(names,values))
return z
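# Illustrative sketch (not in the original source): typical use of a _zone_data object once it
# has been filled by class_read below. The file name and zone choice are assumptions.
def _demo_zone_data_dataframe(tec_file='example.dat'):
    data = class_read(tec_file)            # parse a tecplot dat file
    zone = data[list(data.keys())[0]]      # pick the first zone
    df = zone.DataFrame(center=True)       # every variable interpolated to cell centers
    return df.describe()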
class class_read(dict):
def __init__(self,filename=None,filetype=None,**kargs):
if filename is None:return
self.fp=open(filename,'r')
self.default_filetypes={'prof':'fluent_prof','dat':'tecplot_dat','out':'fluent_residual_out',
'out2':'fluent_monitor_out','csv':'csv','txt':'txt','fmt':'plot3d','plot3d':'plot3d'}
self.data=None
if filetype is None:
key=filename.split('.')[-1].lower()
if key=='out':key=self.__recognize_out(self.fp)
self.filetype=self.default_filetypes[key]
else:
self.filetype=filetype
self.filesize=getsize(filename)
if self.filetype=='tecplot_dat':
self._read_dat()
elif self.filetype=='fluent_prof':
self._read_prof()
elif self.filetype=='fluent_residual_out':
self._read_out()
elif self.filetype=='fluent_monitor_out':
self.__read_out2()
elif self.filetype=='csv':
self.__read_csv(filename)
elif self.filetype == 'plot3d':
self.__read_plot3d(filename)
self.fp.close()
def __read_plot3d(self,filename):
self.data=list()
d=np.array(self.fp.read().split(),dtype='float64')
n=int(d[0])
shape=d[1:1+n*3].astype('int')
start=1+n*3
for i in range(n):
zt,yt,xt=shape[i*3:i*3+3]
block=_zone_data()._setPlot3d_type()
block.x=d[start:start+xt*yt*zt]
start+=xt*yt*zt
block.y=d[start:start+xt*yt*zt]
start+=xt*yt*zt
block.z=d[start:start+xt*yt*zt]
start+=xt*yt*zt
block.ptype=d[start:start+xt*yt*zt].astype('int')
start+=xt*yt*zt
block.x.shape=(xt,yt,zt)
block.y.shape=(xt,yt,zt)
block.z.shape=(xt,yt,zt)
block.ptype.shape=(xt,yt,zt)
block.x=block.x.swapaxes(2,0)
block.y=block.y.swapaxes(2,0)
block.z=block.z.swapaxes(2,0)
block.ptype=block.ptype.swapaxes(2,0)
self[i]=block
self.data.append(block)
def __read_csv(self,filename):
title=self.fp.readline()
tmp=np.loadtxt(self.fp,dtype='float64',delimiter=',')
title=title.strip().split(',')
for i,j in enumerate(title):
self[j]=tmp[:,i]
self.data=tmp
def __recognize_out(self,fp):
fp.readline()
t=fp.readline()
t=t.split()
key='out'
if t:
if t[0]=='"Iteration"':
key='out2'
fp.seek(0,0)
return key
def __read_out2(self):
self.fp.readline()
t=self.fp.readline()
t=t.lstrip()[11:].strip()[1:-1]
d=self.fp.read().encode().strip()
d=d.split(b'\n')
d=[tuple(i.split()) for i in d]
x=np.array(d,dtype=np.dtype({'names':["Iteration",t],'formats':['int32','float64']}))
self["Iteration"]=x['Iteration']
self[t]=x[t]
self.data=x
def _read_out(self):#fluent residual file
items=[]
items_n=0
data=[]
iter_pre='0'
time_index=False
for i in self.fp:
if i[:7]==' iter ':
if items_n!=0:continue
j=i.strip().split()
items.extend(j)
if items[-1]=='time/iter':
items.pop()
items.extend(('time','iter_step'))
time_index=True
items_n=len(items)
if items_n==0:continue
else:
j=i.split()
if len(j)==items_n:
if j[0].isdigit():
if j[0]==iter_pre:continue
iter_pre=j[0]
if time_index:j.pop(-2)
data.append(tuple(j))
if time_index:items.pop(-2)
a=np.array(data,dtype=np.dtype({'names':items,'formats':['i']+['f']*(len(items)-2)+['i']}))
for i,k in enumerate(items):
self[k]=a[k]
self.data=a
def _read_prof(self):
fp=self.fp
d=fp.read()
d=d.replace('\r','')
d=d.split('((')
d.pop(0)
data=[]
def read(x):
x=x.split('(')
title=x[0].split()[0]
x.pop(0)
data=[]
name=[]
ii=0
for i in x:
c=i.split('\n')
ii+=1
name.append(c[0])
data.append(c[1:-2])
data[-1].pop()
values=np.array(data,dtype='float32')
if len(values)!=len(name):return False
t=_zone_data()
t.set_data(name,values,self.filetype)
return title,t
for i in d:
k,v=read(i)
self[k]=v
def _parse_variables(self,string_list):# parse the variable names of a tecplot file
return re.findall('"([^"]*)"',''.join(string_list))
def _parse_zone_type(self,string_list):# parse the zone header of a tecplot file
s=' '.join(string_list)
attri = dict(re.findall('(\w+)=([^ ,=]+)',s))
attri.update( dict(re.findall('(\w+)="([\w ]+)"',s)))
k = re.findall('VARLOCATION=\(([^=]+)=CELLCENTERED\)',s)# check whether any variables are cell-centered
auxdata = re.findall(' AUXDATA [^ ]*',s)
if auxdata:
attri['AUXDATA'] = '\n'.join(auxdata)
a=[]
if k:
for i in k[0][1:-1].split(','):
if i.find('-')!=-1:
start,end = i.split('-')
a.extend(range(int(start),int(end)+1))
else:
a.append(int(i))
a.sort()
attri['CELLCENTERED'] = a
return attri
def _read_dat(self):# parse the tecplot dat format
fp=self.fp
title = fp.readline()
assert title.lstrip().startswith('TITLE') # the file must start with TITLE
string = fp.readline().strip()
assert string.startswith('VARIABLES') # the second line must start with VARIABLES
string_list=[string,]# collect the strings that contain all the variable names
for i in fp:
i=i.strip()
if not i.startswith('"'):
string = i
break
else:
string_list.append(i)
self._variables=self._parse_variables(string_list) # parse the strings to get the variable names
print('variables',self._variables)
while True:
if not string:
string = fp.readline()
if not string:
break
string_list=[string,]# collect the fields containing zone name, elements, nodes, zonetype, datapacking
for i in fp:
i=i.strip()
if i.startswith("DT=("):
string = i
break
else:
string_list.append(i)
self._tecplot_attribute=self._parse_zone_type(string_list) # zone name, elements, nodes, zonetype, datapacking returned as a dict
print('zone info',self._tecplot_attribute)
string = string[len('DT=('):-1].strip().split()
self._DT=string # store the data type of each variable
assert len(self._variables) == len(string)
if self._tecplot_attribute['DATAPACKING']=='BLOCK':
self._parse_block()
if self._tecplot_attribute['DATAPACKING'] == 'POINT':
self._parse_point()
string = None
def _read_numbers(self,fp,nums):# read a given number of values from the file
data = fp.readline().split()
n = len(data)
strings = [fp.readline() for _ in range(int(nums/n)-1)]
data.extend(''.join(strings).split())
nn = nums - len(data)
assert nn>=0
if nn>0:
for i in fp:
data.extend(i.split())
if len(data) == nums:
break
return data
def _parse_Elements(self,zonedata):# parse the tecplot Elements (connectivity) block
elements = int(self._tecplot_attribute['Elements'])
data_elements = self.fp.readline().split()
num_points = len(data_elements)
data = self._read_numbers(self.fp,num_points*(elements-1))
data_elements += data
zonedata.Elements = np.array(data_elements,dtype=int).reshape((-1,num_points)) # np.int is removed in recent numpy, use the builtin int
def _parse_block(self,isElements=True,isBlock=True):# parse data stored with tecplot BLOCK packing
cellcentered = self._tecplot_attribute['CELLCENTERED']
if cellcentered:
variables,nodes,elements = self._variables,int(self._tecplot_attribute['Nodes']),int(self._tecplot_attribute['Elements'])
value_list = []
for i in range(len(variables)):
if i+1 in cellcentered:
nums = elements
else:
nums = nodes
data = self._read_numbers(self.fp,nums)
value_list.append( np.array(data,dtype = 'float64'))
zonedata = _zone_data()
zonedata.set_data(variables,value_list,self._tecplot_attribute)
self[self._tecplot_attribute['T']] = zonedata
if isElements:
self._parse_Elements(zonedata)
else:
self._parse_point(isElements,isBlock)
def _parse_point(self,isElements=True,isBlock=False):
variables,nodes,elements = self._variables,int(self._tecplot_attribute['Nodes']),int(self._tecplot_attribute['Elements'])
nn=nodes*len(variables)
data = self._read_numbers(self.fp,nn)
if isBlock:
data = np.array(data,dtype = 'float').reshape((len(variables),-1))
else:
data = np.array(data,dtype = 'float').reshape((-1,len(variables))).T
zonedata = _zone_data() # fill the zonedata
zonedata.set_data(self._variables,data,self._tecplot_attribute)
self[self._tecplot_attribute['T']] = zonedata
if isElements:
# attach the Elements attribute
self._parse_Elements(zonedata)
def __getitem__(self,k):
if isinstance(k,str):
return super().__getitem__(k)
else:return self.data[k]
def enable_short_name(self):# use short names, i.e. rename zones and variables to the first word of their original names
for i in list(self.keys()):
for j in list(self[i].keys()):
self[i].rename(j,j.split()[0])
self.rename(i,i.split()[0])
def rename(self,old,new):
if old == new:return
self[new]=self[old]
self.pop(old)
def write(self,filename):
write(self,filename)
def __add__(self,other):#重载加法运算
z = class_read()
names = list(self.keys())
values = [self[i] + other[i] for i in names]
z.update(zip(names,values))
return z
def __sub__(self,other):#重载减法运算
z = class_read()
names = list(self.keys())
values = [self[i] - other[i] for i in names]
z.update(zip(names,values))
return z
def __mul__(self,other):#重载乘法运算
z = class_read()
names = list(self.keys())
if isinstance(other,class_read):
values = [self[i] * other[i] for i in names]
else:
values = [self[i] * other for i in names]
z.update(zip(names,values))
return z
class data_ndarray(np.ndarray):
def write(self,filename):
write(self,filename)
def setfiletype(self,filetype):
self.filetype=filetype
def read(filename,filetype=None,**kargs):
ext=filename.split('.')[-1].lower()
if ext=='txt':
data = [i.split() for i in open(filename) if i.lstrip() and i.lstrip()[0]!='#']
data=np.array(data,dtype='float64')
data=data_ndarray(data.shape,dtype=data.dtype,buffer=data.data)
data.setfiletype('txt')
else:
data=class_read(filename)
return data
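# Illustrative sketch (not in the original source): reading a fluent residual/monitor file and
# a tecplot dat file through the generic read() entry point. The file names are assumptions.
def _demo_read_files():
    residuals = read('run.out')    # fluent residual history, behaves like a dict of numpy arrays
    tec = read('flow.dat')         # tecplot dat file, one _zone_data per zone
    return residuals.keys(), tec.keys()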
class write:
def __init__(self,data,filename,filetype=None):
default_filetypes={'prof':'fluent_prof','dat':'tecplot_dat','out':'fluent_residual_out',
'out2':'fluent_monitor_out','csv':'csv','txt':'txt','fmt':'plot3d','plot3d':'plot3d'}
ext=filename.split('.')[-1].lower()
if filetype is None:
filetype=default_filetypes.get(ext,None)
if filetype is None:
filetype=data.filetype
if filetype=='fluent_prof':
self.__write_prof(data,filename)
elif filetype=='tecplot_dat':
self.__write_dat(data,filename)
elif filetype=='csv':
self.__write_csv(data,filename)
elif filetype=='fluent_monitor_out':
self.__write_out2(data,filename)
elif filetype=='fluent_residual_out':
self.__write_out(data,filename)
elif filetype=='txt':
np.savetxt(filename,data)
elif filetype=='plot3d':
self.__write_plot3d(data,filename)
else:
raise EOFError('file type error!')
def __write_plot3d(self,data,filename):
fp=open(filename,'w')
def writelines(ffp,write_data,line_max):
ffp.write('\n')
n_line=int(write_data.size/line_max)
write_data.reshape((-1,n_line))
s=write_data.astype('U')[:n_line*line_max]
s.resize((n_line,line_max))
s_lines=[' '.join(i) for i in s]
ffp.write('\n'.join(s_lines))
n = write_data.size-n_line*line_max
if n:
ffp.write('\n'+ ' '.join(write_data.astype('U')[-n:])) # convert the numeric tail to strings before joining
shape=list()
for i,v in enumerate(data.data):
shape.extend(v.x.shape)
fp.write(str(i+1)+'\n')
fp.write(' '.join([str(i) for i in shape]))
for i,v in enumerate(data.data):
x=v.x.swapaxes(0,2)
x.resize(x.size)
y=v.y.swapaxes(0,2)
y.resize(y.size)
z=v.z.swapaxes(0,2)
z.resize(z.size)
p=v.ptype.swapaxes(0,2)
p.resize(p.size)
writelines(fp,x,5)
writelines(fp,y,5)
writelines(fp,z,5)
writelines(fp,p,5)
def __write_out(self,data,filename):
fp=open(filename,'w')
self.__write_delimiter(data,fp,' ',title_format='',specified_format=' %d',specified_titles=['iter'],other_format='%.8e')
fp.close()
def __write_out2(self,data,filename):
fp=open(filename,'w')
value=[i for i in data.keys() if i!='Iteration'][0]
fp.write('"Convergence history of %s"\n' % value)
self.__write_delimiter(data,fp,' ',title_format='"',specified_format='%d',specified_titles=['Iteration'])
fp.close()
def __write_csv(self,data,filename):
fp=open(filename,'w')
self.__write_delimiter(data,fp,',')
fp.close()
def __write_delimiter(self,data,fp,delimiter,title_format='',specified_format='',specified_titles=[],other_format='%.15e'):
other_titles=[i for i in data.keys() if i not in specified_titles]
title=specified_titles+other_titles
title_w=[title_format+i+title_format for i in title]
fp.write(delimiter.join(title_w)+'\n')
s=np.vstack([data[i] for i in title]).T
data_format=specified_format+delimiter+delimiter.join([other_format]*len(other_titles))+'\n'
for i in s:
fp.write(data_format % tuple(i))
def __write_prof(self,data,filename):
fp=open(filename,'wb')
for i in data.keys():
keys=list(data[i].keys())
keys.sort()
keys.sort(key=lambda x:len(x))
n=len(data[i][keys[0]])
fs='(('+i+' point '+str(n)+')\n'
fp.write(fs.encode())
for k in keys:
fs='('+k+'\n'
fp.write(fs.encode())
[fp.write((str(j)+'\n').encode()) for j in data[i][k]]
fp.write(')\n'.encode())
fp.write(')\n'.encode())
def __write_dat(self,data,filename):# write a tecplot dat file; zones are written with POINT or BLOCK packing according to their DATAPACKING attribute
fp = open(filename,'w')
fp.write('TITLE = "Python Write"\n')
zones = list(data.keys()) # names of all zones
variables = list(data[zones[0]].keys())# variable names
fp.write('VARIABLES = ')
fp.writelines(['"{}"\n'.format(i) for i in variables])
for i in zones:
zonedata = data[i]
z = zonedata.attribute
nodes, elements = int(z['Nodes']), int(z['Elements'])
fp.write('ZONE T="{}"\n'.format(i))
fp.write(' STRANDID={}, SOLUTIONTIME={}\n'.format(z.get('STRANDID',1),z.get('SOLUTIONTIME',0)))
fp.write(' Nodes={0}, Elements={1}, ZONETYPE={2}\n'.format(nodes, elements, z['ZONETYPE']))
if z['DATAPACKING'] == 'POINT':
fp.write('DATAPACKING=POINT\n')
if z.get('AUXDATA') is not None:
fp.write(z.get('AUXDATA')+'\n')
fp.write('DT=('+'SINGLE '*len(variables)+')\n')
fs = ' {}'*len(variables)+'\n'
for value in zip(*([zonedata[j] for j in variables])):
fp.write(fs.format(*value))
fs = ' {}'*len(zonedata.Elements[0])+'\n'
else:
fp.write(' DATAPACKING=BLOCK\n')
cellcentered = [str(i+1) for i,v in enumerate(variables) if zonedata.is_centered(v)]
if cellcentered:
s =','.join(cellcentered)
fs = ' VARLOCATION=([{}]=CELLCENTERED)\n'.format(s)
fp.write(fs)
if z.get('AUXDATA') is not None:
fp.write(z.get('AUXDATA')+'\n')
fp.write('DT=('+'SINGLE '*len(variables)+')\n')
ofs = ' {}'*5+'\n'
for var in variables:
value = zonedata[var]
for i in range(5,len(value)+1,5):
fp.write(ofs.format(*value[i-5:i]))
leave = len(value) % 5
if leave != 0:
fs = ' {}'*leave+'\n'
fp.write(fs.format(*value[-leave:]))
if zonedata.Elements is not None:
fs = ' {}'*len(zonedata.Elements[0])+'\n'
for i in zonedata.Elements:
fp.write(fs.format(*i))
if __name__=='__main__':
# from matplotlib import pyplot as plt
import time
from IPython import embed
# s=time.time()
a=read('lumley.dat')
# print(time.time()-s)
# embed()
a.write('lumley_2.dat')
# b = read('lumley_2.dat')
# print(a.keys(),b.keys())
# for zone in a.keys():
# for var in a[zone].keys():
# print(zone,var)
# print(np.allclose(a[zone][var], b[zone][var]))
# print(a.elements)
# embed()
# a.write('test_tec.dat')
# for i in zip(range(3),range(4)):
# print(i)
# a=read('lumley2.dat')
# a.enable_short_name()
# b=read('test_tec.dat')
# b.enable_short_name()
# for i in a.keys():
# for j in a[i].keys():
# print(i,j)
# for i in a.keys():
# for j in a[i].keys():
# print(i,j)
# t=np.allclose(a[i][j] , b[i][j])
# print(t)
# s='ZONE T="face_m Step 1 Incr 0" STRANDID=1, SOLUTIONTIME=0 Nodes=52796, Elements=104098, ZONETYPE=FELineSeg DATAPACKING=POINT'
# m=re.findall('(\w+)=(\w+)',s)
# attri = dict()
# print(m)
#
# print(attri,attri2) | yxspkg_tecfile | /yxspkg_tecfile-1.9.tar.gz/yxspkg_tecfile-1.9/yxspkg_tecfile.py | yxspkg_tecfile.py |
import requests
import re,os
from multiprocessing.pool import ThreadPool
from os import path
from hashlib import md5
from bs4 import BeautifulSoup
import sys
import shelve
import time
__version__='0.1.2'
__author__="Blacksong"
class Wget:
def __init__(self,url,**kargs):
url0=url
url=url.split('?')[0]
self.n=0
dbname=md5(url.encode()).hexdigest()[:7]+'.pydb'
record_db=shelve.open(dbname)
self.filetype=kargs.get('filetype','.jpg')
self.session=self.setbrowser()
self.srcpro=None
self.rule=kargs.get('rule',list())
self.re_rule=kargs.get('re_rule',list())
self.max_download=8
self.num_download = 0
self.asyncThread=ThreadPool(self.max_download)
self.htm=url.split('/')[2]
dirname = kargs.get('dirname',None)
if dirname is None:
dirname = self.htm
if not path.isdir(dirname):
os.makedirs(dirname)
self.dirname = dirname
self.auto=kargs.get('auto',True)
print(self.htm)
self.rule_list=[re.sub('[^A-Za-z]','', url)]
[self.rule_list.append(re.sub('[^A-Za-z]','', i)) for i in self.rule]
self.rule_list=list(set(self.rule_list))
self.rule_dir=[path.dirname(url)]
[self.rule_dir.append(path.dirname(i)) for i in self.rule]
self.rule_dir=list(set(self.rule_dir))
self.re_rule=[re.compile(i) for i in self.re_rule]
url=url0
print(self.re_rule,'\n',self.rule_dir,'\n',self.rule_list)
self.autofind(url)
try:
halt=record_db.get('halt',False)
if halt == True:
self.href=record_db.get('href',[(url,{})])
self.pagedb = record_db.get('pagedb',set())
self.srcdb = record_db.get('srcdb',set())
record_db['halt']=False
else:
self.href=[(url,{})]
self.pagedb=set()
self.srcdb=set()
self.main()
record_db.close()
if path.isfile(dbname): os.remove(dbname)
except:
print('the program is halted!')
record_db['halt']=True
record_db['srcdb']=self.srcdb
record_db['pagedb']=self.pagedb
record_db['href']=[i for i in self.href if i!=None]
self.asyncThread.close()
self.asyncThread.join()
def my_hash(self,x):
return int(md5(x.encode()).hexdigest()[:8],16)
def setbrowser(self):
headers='''User-Agent: Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0
DNT: 1
Connection: keep-alive
Upgrade-Insecure-Requests: 1'''
headers=headers.split('\n')
d=dict()
for i in headers:
n=i.find(':')
d[i[:n]]=i[n+1:]
headers=d
s=requests.session()
s.auth = ('user', 'pass')
# s.headers=headers
return s
def autofind(self,url):
print('Autofind the label of the target picture')
volume=0
x,y=self.Analyze(url)
r=set()
for i in y:
s=str(i[1])
if s in r:continue
r.add(s)
try:
t=len(self.session.get(i[0],timeout=30).content)
except:
t=0
if volume<t:
volume=t
tt=i[1]
self.srcpro=tuple(tt.items())
def main(self):
'''Main loop: drives the crawl, finding new pages and the resource links inside each page.'''
def run(u):
print('Analyse the html ',u)
hr,src=self.Analyze(u)
for i in src:
if self.UsefulSrc0(*i):
self.num_download+=1
self.asyncThread.apply_async(self.Download,(i[0],u))
while self.num_download>self.max_download+4:
time.sleep(1)
print('Downloading !!')
# self.Download(i[0],u)
self.pagedb.add(self.my_hash(u))
for i in hr:
if self.UsefulHtml0(*i):
self.href.append(i)
while True:
ii=0
n=len(self.href)
while ii<n:
if self.UsefulHtml0(*self.href[ii]):
run(self.href[ii][0])
self.href[ii]=None
ii+=1
self.href=[i for i in self.href if i!=None]
if len(self.href)==0:break
def DivSplit(self,s):
'''Split an html page into div blocks by locating the div tags; returns a list recording each block's position and attributes.'''
a=[]
[a.append((-1,i.span())) for i in re.finditer('< *div[^><]*>', s)]
b=[]
for i,j in a:
if i==1:
b.append((i,j[0]))
else:
t=s[j[0]:j[1]]
n=re.findall('id *= *"[^"]*"|class *= *"[^"]*"', t)
d=dict([i.replace('"','').split('=') for i in n])
b.append((i,j[0],d))
b.sort(key=lambda x:x[1])
return b
def DivSplit2(self,s):
'''Split an html page into div blocks by locating the div tags; returns a list recording each block's position and attributes.'''
a=[(-1,0,{'id':'mystart'})]
for i in re.finditer('(id|class) *=["\' ]*[^"\']*', s):
j=re.sub('["\' ]', '',i.group())
n=j.find('=')
d={j[:n]:j[n+1:]}
a.append((-1,i.span()[1],d))
a.sort(key=lambda x:x[1])
return a
def Download(self,url,purl,nn=[0]):
'''Download the resource at url.'''
tf=re.sub('\W','', purl)
filename=self.dirname+'/'+tf[-min(len(tf),10):]+md5(url.encode()).hexdigest()[:5]+self.filetype
if not path.isfile(filename):
nn[0]+=1
if nn[0]%50==0:
print('Downloading ',url)
x=self.session.get(url,timeout=30)
t=open(filename,'wb').write(x.content)
else:
print('file already exist')
self.srcdb.add(self.my_hash(url))
self.num_download -= 1
return filename
def UsefulHtml0(self,url,pro):
'''Decide whether a page url is worth following.'''
if self.my_hash(url) in self.pagedb:return False
if self.re_rule:
for i in self.re_rule_list:
if i.search(url):return True
if not self.rule:
if not re.search(self.htm, url):return False
t= re.sub('[^A-Za-z]','', url)
d=path.dirname(url)
if t in self.rule_list:return True
if d in self.rule_dir:return True
if self.auto:
return False
return self.UsefulHtml(url,pro)
def UsefulHtml(self,url,pro):
'''Decide whether a page url is useful; override this for different sites. pro holds the attributes of the div containing the link.'''
return True
def UsefulSrc0(self,url,pro):
if self.my_hash(url) in self.srcdb:return False
if self.auto:
for k,v in self.srcpro:
if v.isdigit():continue
if pro.get(k)!=v:return False
return self.UsefulSrc(url,pro)
def UsefulSrc(self,url,pro):
return True
def correct_url(self,s,website,webdir):
if s[0]=='/':return website+s
elif s=='#':return ''
elif s.find('http')!=-1:return s
else: return webdir+s
def Analyze(self,url):
'''Return the href and src links found in the page as a 2-tuple (href_list, src_list).'''
s=self.session.get(url,timeout=30).text
divs=self.DivSplit(s)
href=[]
src=[]
split_url=url.split('/')
website='/'.join(split_url[:3])
webdir='/'.join(split_url[:-1])+'/'
for i in re.finditer(' *(href|src) *=["\' ]*[^ )("\';\+>}]+', s):
div=self.FindDiv(divs, i.span()[0])
j=i.group()
j=re.sub('["\' \\\\]', '', j) # some sites embed the url in javascript, escaped with a backslash
if j[0]=='h':
j=j.replace('href=', '')
j=self.correct_url(j,website,webdir)
if len(j)==0:continue
href.append((j,div))
if j[0]=='s':
j=j.replace('src=', '')
if j.find(self.filetype)==-1:continue
div=self.FindDiv(divs, i.span()[0])
j=self.correct_url(j,website,webdir)
if len(j)==0:continue
src.append((j,div))
return href,src
def FindDiv(self,divs,pos):
a,b=0,len(divs)
if b==0:
return {'id':'nodivs'}
if pos>divs[-1][1]:return divs[-1][2]
while b-a>1:
t=int((a+b)/2)
p0=divs[t][1]
if pos>p0:a=t
else:b=t
return divs[a][2]
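# Illustrative sketch (not in the original source): starting a crawl with Wget. The URL and
# keyword arguments below are assumptions, and Wget performs network I/O inside __init__.
def _demo_wget():
    Wget('http://example.com/gallery/page1.html',
         filetype='.jpg',            # resource extension to collect
         dirname='gallery_images',   # local folder for downloads
         rule=[],                    # extra page urls whose pattern should also be followed
         auto=True)                  # auto-detect the div that holds the target pictures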
class Wget_novel:# download a web novel
def __init__(self,url,novel_name='wz.txt'):
content = requests.get(url)
html = content.content.decode('gbk').encode('utf8').decode('utf8')
bs=BeautifulSoup(html)
self.url=url
self.fp=open(novel_name,'w')
self.__get_author(bs)
# self.__start(url)
def __start(self,url):
error=0
while True:
content = requests.get(url)
html = content.content.decode('gbk').encode('utf8').decode('utf8')
bs=BeautifulSoup(html)
title_info=self.__get_chapter(bs)
print(title_info)
content_info=self.__get_content(bs)
next_info=self.__get_next(bs)
if title_info is not None:
self.fp.write('\n'+title_info[0]+' '+title_info[1]+'\n')
self.fp.write(content_info)
if next_info is False:break
url=next_info
def __get_author(self,bs):
m=0
for i in bs.center.find_all('span'):
m+=1
print(i.text)
if m==3:break
def __get_content(self,bs):
x=bs.find(attrs={'id':'content'})
sx=str(x)
a=x.find_all('a')
for i in a:
sx=sx.replace(str(i),'')
sx=re.sub('<[^>]*>','\n',sx)
sx=re.sub('[\n]+','\n',sx)
return sx
def __get_next(self,bs,html=None):
c=bs.find(attrs={'class':'page'})
t=c.find_all('a')
for i in t:
if i.text.find('下')!=-1:
href=i['href']
if len(href)==0:return False
if href[0]!='/':
url=self.url.split('/')
url[-1]=href
return '/'.join(url)
def __get_chapter(self,bs,html=None):
title=bs.find(attrs={'class':'title'}).h1.string
title_re='(第? ?([一二三四五六七八九十零百千万\d]*)章)'
t=re.findall(title_re,title)
if len(t)==0:return None
n=self.__ChineseNumber_to_number(t[0][1])
m=title.find(t[0][0][-1])
name=title[m+1:].strip()
if name[0]==':' or name[0]==':':name=name[1:].lstrip()
return t[0][0],name,n
def __ChineseNumber_to_number(self,s):
t=str.maketrans('一二三四五六七八九','123456789')
t2=str.maketrans(dict(zip(['零','十','百','千','万','亿'],['','0 ','00 ','000 ',' 10000 ',' 100000000 '])))
t.update(t2)
s=s.translate(t)
l=s.rstrip().split()
n,m=0,0
for i in l:
j=int(i)
if j<10000:
if j==0:j=10
n+=j
else:
n*=j
m+=n
n=0
m+=n
return m
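# Illustrative sketch (not in the original source) of the translation-table idea used by
# __ChineseNumber_to_number above: digit characters map to '1'..'9' and the unit characters map
# to padded zero strings, so '三百二十五' translates to '300 20 5', which the method sums to 325.
def _demo_chinese_number():
    table = str.maketrans('一二三四五六七八九', '123456789')
    table.update(str.maketrans(dict(zip(['零', '十', '百', '千', '万', '亿'],
                                        ['', '0 ', '00 ', '000 ', ' 10000 ', ' 100000000 ']))))
    return '三百二十五'.translate(table)   # -> '300 20 5'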
def main(*d,**karg):
url = sys.argv[1]
Wget(url)
if __name__=='__main__':
pass
main() | yxspkg_wget | /yxspkg_wget-0.1.2.tar.gz/yxspkg_wget-0.1.2/yxspkg_wget.py | yxspkg_wget.py |
import re
import string
zh_punctuations = \
',!?;:' \
'()[]〔〕【】‘’“”〝〞〖〗{}︻︼﹄﹃「」﹁﹂『』“”‘’' \
'.。,、;:?!ˉˇ¨`~~‖∶"'`|·… — ~ - 〃' \
'、。‧《》〈〉﹏…——~{}~⦅⦆"#$%&'*+./'
zh_punctuations = set(c for c in zh_punctuations if ord(c) > 256)
other_punctuations = '① ② ③ ④ ⑤ ⑥ ⑦ ⑧ ⑨ ⑩ ㈠ ㈡ ㈢ ㈣ ㈤ ㈥ ㈦ ㈧ ㈨ ㈩ №' \
'⑴ ⑵ ⑶ ⑷ ⑸ ⑹ ⑺ ⑻ ⑼ ⑽ ⑾ ⑿ ⒀ ⒁ ⒂ ⒃ ⒄ ⒅ ⒆ ⒇' \
'⒈ ⒉ ⒊ ⒋ ⒌ ⒍ ⒎ ⒏ ⒐ ⒑ ⒒ ⒓ ⒔ ⒕ ⒖ ⒗ ⒘ ⒙ ⒚ ⒛' \
'Ⅰ Ⅱ Ⅲ Ⅳ Ⅴ Ⅵ Ⅶ Ⅷ Ⅸ Ⅹ Ⅺ Ⅻ ⅰ ⅱ ⅲ ⅳ ⅴ ⅵ ⅶ ⅷ ⅸ ⅹ' \
'§№☆★○●◎◇◆□■△▲※→←↑↓〓#&@\^_' \
'⊙●○①⊕◎Θ⊙¤㊣▂ ▃ ▄ ▅ ▆ ▇ █ █ ■ ▓ 回 □ 〓≡ ╝╚╔ ╗╬ ═ ╓ ╩ ┠ ┨┯ ┷┏ ┓┗ ┛┳⊥『』┌♀◆◇◣◢◥▲▼△▽⊿' \
'๑•ิ.•ั๑ ๑๑ ♬✿.。.:* ☂☃ ☄ ★ ☆ ☇ ☈ ☉ ☒☢ ☺ ☻ ☼ ☽☾ ♠ ♡ ♢ ♣ ♤ ♥ ♦ ♧ ♨ ♩ εїз℡❣·۰•●○● ゃōゃ♥ ♡๑۩ﺴ ☜ ☞ ☎ ☏♡ ⊙◎ ☺ ☻✖╄ஐﻬ ► ◄ ▧ ▨ ♨ ◐ ◑ ↔ ↕ ▪ ▫ ☼ ♦ ? ▄ █▌ ?? ? ▬♦ ◊ ◦ ☼ ♠♣ ▣ ▤ ▥ ▦ ▩ ◘ ◙ ◈ ♫ ♬ ♪ ♩ ♭ ♪ の ☆ → あぃ £ # @ & * ¥✰ ☆ ★ ¤ ☼ ♡ ღ☻ ☺ ⊕ ☉ Θ o O ㊝ ⊙ ◎ ◑ ◐ ۰ • ● ▪ ▫ 。 ゚ ๑ ☜ ☞ ♨ ☎ ☏ ︻ ︼ ︽ ︾ 〈 〉 ︿ ﹀ ∩ ∪ ﹁ ﹂﹃﹄﹝ ﹞ < > ≦ ≧ ﹤ ﹥ 「 」 ︵ ︶︷ ︸︹︺〔 〕 【 】 《 》 ( ) { } ﹙ ﹚ 『』﹛﹜╳ + - ﹢ × ÷ = ≠ ≒ ∞ ˇ ± √ ⊥ ∠ ∟ ⊿ ㏒ ▶ ▷ ◀ ◁ ★ ☆ ☉ ☒☢ ☺ ☻ ☼ ♠ ♡ ♣ ♤ ♥ ♦ ♧ ♨ ♩ ? ? ㍿ ♝ ♞ ♯♩♪♫♬♭♮ ☎ ☏ ☪ ♈ ♨ ºº ₪ ¤ 큐 « » ™ ♂✿ ♥ の ↑ ↓ ← → ↖ ↗ ↙ ↘ ㊣ ◎ ○ ● ⊕ ⊙ ○ △ ▲ ☆ ★ ◇ ◆ ■ □ ▽ ▼ § ¥ 〒 ¢£ ※ ♀ ♂ © ® ⁂ ℡ ↂ? ▣ ▤ ▥ ▦ ▧ ♂ ♀ ♥ ♡ ☜ ☞ ☎ ☏ ⊙ ◎ ☺ ☻ ► ◄ ▧ ▨ ♨ ◐ ◑ ↔ ↕ ♥ ♡ ▪ ▫ ☼ ♦ ? ▄ █ ▌ ? ? ? ▬ ♦ ◊ ◘ ◙ ◦ ☼ ♠ ♣ ▣ ▤ ▥ ▦ ▩ ◘ ◙ ◈ ♫ ♬ ♪ ♩ ♭ ♪ ✄☪☣☢☠ ⅰⅱⅲⅳⅴⅵⅶ ⅷⅸⅹⅺⅻⅠⅡⅢⅣⅤⅥⅦ Ⅷ Ⅷ ⅨⅩⅪⅫ ㊊㊋㊌㊍㊎㊏㊐㊑㊒㊓㊔㊕㊖㊗㊘㊜㊝㊞㊟㊠㊡㊢㊣㊤㊥㊦㊧㊨㊩㊪㊫㊬㊭㊮㊯㊰ ✗✘✚✪✣✤✥✦✧✩✫✬✭✮✯✰【】┱ ┲ ✓ ✔ ✕ ✖ *.:。✿*゚‘゚・✿.。.: ≧0≦ o(╥﹏╥)o //(ㄒoㄒ)// {{{(>_<)}}} ™ぷ▂▃▅▆█ ∏卐 ※◤ ◥ ﹏﹋﹌ ∩∈∏ ╰☆╮≠→№← ︵︶︹︺【】〖〗@﹕﹗/ _ < > ,·。≈{}~ ~() _ -『』√ $ @ * & # ※卐 々∞Ψ ∪∩∈∏ の ℡ ぁ §∮〝〞ミ灬ξ№∑⌒ξζω*ㄨ ≮≯ +-×÷﹢﹣±/=∫∮∝ ∞ ∧∨ ∑ ∏ ∥∠ ≌ ∽ ≦ ≧ ≒﹤﹥じ☆ ■♀『』◆◣◥▲Ψ ※◤ ◥ →№←㊣∑⌒〖〗@ξζω□∮〓※∴ぷ▂▃▅▆█ ∏卐【】△√ ∩¤々♀♂∞①ㄨ≡↘↙▂▂ ▃ ▄ ▅ ▆ ▇ █┗┛╰☆╮ ≠ ▂ ▃ ▄ ▅┢┦aΡpy ♡^_^♡ ^_^.......♧♧ ☜♥☞.︻︼─一 ▄︻┻┳═一 ﹏◢ ◣ ◥ ◤ ▽ ▧ ▨ ▣ ▤ ▥ ▦ ▩ ◘ ◙ ▓ ? ? Café № @ ㊣ ™ ℡ 凸 の ๑۞๑ ๑۩ﺴ ﺴ۩๑ o(‧‧)o ❆ べò⊹⊱⋛⋋ ⋌⋚⊰⊹ ⓛⓞⓥⓔ べ ☀ ☼ ☜ ☞ ⊙® ◈ ♦ ◊ ◦ ◇ ◆ εїз ☆·.¸¸.·´¯`·.¸¸.¤ ~♡のⓛⓞⓥⓔ♡~☃⊹⊱⋛⋌⋚⊰⊹✗(*w*)\ ≡[。。]≡※◦º°×°º◦εїз´¯`·»。。♀♡╭☆╯ºØØºøº¤ø,¸¸,ºº¤øøºﷲﷲ°º¤ø,¸¸, げこごさざしじすぜそぞただちぢっつづてでとどなにぬねのはば ♪♫╭♥ ๑•ิ.•ัﻬஐ ✎ぱひびぴふぶぷへべぺほぼぽまみむめも ❃❂❁❀✿✾✽✼✻✺✹✸✷☀ o O #♡ ┽┊﹎.εїз︷✿‧:﹎。❤‧:❉:‧ .。.:*・❀●•♪.。‧:❉:‧ °º¤ø,¸¸,ø¤º°`°º¤ø*.:。✿*゚‘゚・✿.。.:*.:。✿*゚’゚・✿.。✎*εїз ↔ ↕ ▪ → ︷╅╊✿ (¯`•._.• •._.•´¯)(¯`•¸•´¯) ❤`•.¸¸.•´´¯`•• .¸¸.•´¯`•.•●•۰• ••.•´¯`•.•• ••.•´¯`•.••—¤÷(`[¤* *¤]´)÷¤——(•·÷[ ]÷·•)— 〓 ☆ ★┣┓┏┫×╰ノ◢ ◣ ◥ ◤ Ω ж ф юЮ ━╃ ╄━ ┛┗ ┓┏ ◇ ◆ ※ .. ☂..❤ ♥ 『』 〖〗▓ ► ◄ ? ? ▓ ╮╭ ╯╰ ァ ┱ ┲☃ ☎ ☏ ☺ ☻ ▧ ▨ ♨ ◘ ◙ ♠ ♧ ♣ ▣▤ ▥ ▦ ▩ ⊕ ׺°”˜`”°º× ׺°”˜`”°º×»-(¯`v´¯)-» ×÷·.·´¯`·)» «(·´¯`·.·÷×*∩_∩* ⓛⓞⓥⓔ ╬ ╠ ╣∷ ღ ☃ ❆ £ ∆ Š Õ Ő ő ∞ © ‡ † Ž ஜ ஒ ண இ ஆ ௰ ♪♪♫▫—(•·÷[ ]÷·•)— ·÷±‡±±‡±÷· Oº°‘¨ ¨‘°ºO •°o.O O.o°• ¨°o.O O.o°¨—¤÷(`[¤* *¤]´)÷¤—•·.·´¯`·.·• •·.·´¯`·.·•´`·.(`·.¸ ¸.·´).·´`·» »-(¯`v´¯)-»█┗┛↘↙╰☆╮ ≠ ☜♥☞ ︻︼─一 ▄︻┻┳═一 -─═┳︻ ∝╬══→ ::======>> ☆═━┈┈━═☆ ┣▇▇▇═─ ■◆◣◥▲◤ ◥〓∴ぷ▂▃▅▆█ 【】 ๑۞๑ ๑۩ﺴﺴ۩๑๑۩۞۩...¤¸¸.·´¯`·.¸·..>>--» [[]] «--<<..·.¸¸·´¯`·.¸¸¤... .•:*´¨`*:•.☆۩ ۞ ۩ ۩ ۞ ۩☆•:*´¨`*:•. ❤`•.¸¸.•´´¯`••.¸¸.•´´¯`•´❤ ⊹⊱⋛⋋ ⋌⋚⊰⊹ 彡 ❝❞° ﹌﹎ ╱╲ ☁ ₪ ¡ Þ ௫ μ べ ☪ ☠ ╬ ╠ ╣∷ ღ :﹗/ _ < > `,·。≈ {}~ ~() - √ $ * & # ※*≮≯ +-× ÷﹢±/=∫∮∝ ∧∨∥∠ ≌ ∽ ≦ ≧ ≒﹤﹥じ ①②③④⑤⑥⑦⑧⑨⑩ ⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳ ⒶⒷⒸⒹⓐⓑⓒⓓⓔⓕ ⓖⓗⓘⓙⓚⓛ ⓜⓝⓞⓟⓠⓡ ⓢⓣⓤⓥⓦⓧ ⓨⓩ 凸(⊙▂⊙✖ )(づ ̄ ³ ̄)づヾ(*⌒ヮ⌒*)ゞ ( c//-}{-*\\x) (-๏_๏-) (◐ o ◑ ) (⊙...⊙ )。◕‿◕。 ๏[-ิ_•ิ]๏(•ิ_•ิ)? \(•ิ_•ิ\) (/•ิ_•ิ)/ (︶︹︺)(*-`ω´- )人(ц`ω´ц*)(●ゝω)ノヽ(∀<●)(ㄒoㄒ)(>_<)⊙▂⊙ ⊙0⊙ ⊙︿⊙ ⊙ω⊙ ⊙﹏⊙ ⊙△⊙ ⊙▽⊙ o(‧‧)o (◡‿◡✿) (◕‿◕✿) (◕〝◕) (∩_∩)ミ●﹏☉ミ (≧0≦) o(╥﹏╥)o ㋀ ㋁㋂㋃㋄ ㋅ ㋆ ㋇ ㋈ ㋉ ㋊ ㋋ ㏠ ㏡ ㏢ ㏣ ㏤ ㏥ ㏦㏧㏨㏩ ㏪ ㏫ ㏬ ㏭ ㏮ ㏯ ㏰ ㏱ ㏲ ㏳ ㏴ ㏵ ㏶ ㏷㏸㏹㏺ ㏻ ㏼ ㏽ ㏾ ㍘ ㍙ ㍚ ㍛ ㍜ ㍝ ㍞ ㍟ ㍠ ㍡㍢㍣㍤ ㍥ ㍦ ㍧ ㍨ ㍩ ㍪ ㍫ ㍬ ㍭ ㍮ ㍯㍰㊛㊚'
other_punctuations = set(c for c in other_punctuations if ord(c) > 256 and c not in zh_punctuations)
ascii_punctuations = set(string.punctuation)
all_punctuations = zh_punctuations.union(other_punctuations).union(ascii_punctuations)
def non_alphanum_ascii_chars():
for c in range(180):
ch = chr(c)
if ('0' <= ch <= '9') or ('a' <= ch <= 'z') or ('A' <= ch <= 'Z'):
continue
yield ch
def end_with(text, predicate):
if not text:
return False
return predicate(text[-1])
def start_with(text, predicate):
if not text:
return False
return predicate(text[0])
def join_words(words):
sentence = ''
for word in words:
need_space = end_with(sentence, is_ascii_alphanum) and \
start_with(word, is_ascii_alphanum)
delim = ' ' if need_space else ''
sentence = sentence + delim + word
return sentence
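# Illustrative sketch (not in the original source): join_words only inserts a space between two
# adjacent ASCII alphanumeric tokens, so mixed Chinese/English token lists re-join cleanly.
def _demo_join_words():
    return join_words(['深度', '学习', 'deep', 'learning', '!'])   # -> '深度学习deep learning!'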
def is_ascii(ch):
return ord(ch) < 128
def is_ascii_text(text):
return all(is_ascii(c) for c in text)
def is_han_char(ch):
return '\u4E00' <= ch <= '\u9FFF'
def is_hans_text(text):
return not is_ascii_text(text)
def is_ascii_alpha(ch):
return 'a' <= ch <= 'z' or 'A' <= ch <= 'Z'
def is_digit(ch):
return '0' <= ch <= '9'
def is_ascii_num_text(text):
return all(is_digit(c) for c in text)
def is_ascii_alphanum(ch):
return is_ascii_alpha(ch) or is_digit(ch)
def read_lines_from(file):
with open(file, 'r') as f:
for line in f:
line = line.strip('\t\r\n ')
if line:
yield line
_regularize_punct_map = {
'【': '[',
'】': ']',
'『': '"',
'』': '"',
'“': '"',
'"': '"',
'、': '、',
'/': '、',
'\t': ' ',
'!': '!',
'"': '"',
'#': '#',
'$': '$',
'%': '%',
'&': '&',
''': '\'',
'(': '(',
')': ')',
'*': '*',
'+': '+',
',': ',',
'-': '-',
'.': '.',
'/': '/',
'0': '0',
'1': '1',
'2': '2',
'3': '3',
'4': '4',
'5': '5',
'6': '6',
'7': '7',
'8': '8',
'9': '9',
':': ':',
';': ';',
'<': '<',
'=': '=',
'>': '>',
'?': '?',
'@': '@',
'A': 'A',
'B': 'B',
'C': 'C',
'D': 'D',
'E': 'E',
'F': 'F',
'G': 'G',
'H': 'H',
'I': 'I',
'J': 'J',
'K': 'K',
'L': 'L',
'M': 'M',
'N': 'N',
'O': 'O',
'P': 'P',
'Q': 'Q',
'R': 'R',
'S': 'S',
'T': 'T',
'U': 'U',
'V': 'V',
'W': 'W',
'X': 'X',
'Y': 'Y',
'Z': 'Z',
'[': '[',
'\': '\\',
']': ']',
'^': '^',
'_': '_',
'`': '`',
'a': 'a',
'b': 'b',
'c': 'c',
'd': 'd',
'e': 'e',
'f': 'f',
'g': 'g',
'h': 'h',
'i': 'i',
'j': 'j',
'k': 'k',
'l': 'l',
'm': 'm',
'n': 'n',
'o': 'o',
'p': 'p',
'q': 'q',
'r': 'r',
's': 's',
't': 't',
'u': 'u',
'v': 'v',
'w': 'w',
'x': 'x',
'y': 'y',
'z': 'z',
'{': '{',
'|': '|',
'}': '}',
'~': '~',
'⦅': '(',
'⦆': ')',
'「': '\'',
'」': '\'',
'、': '、',
}
def regularize_punct(text):
return ''.join([_regularize_punct_map.get(c, c.upper()) for c in text]) | yxt-nlp-toolkit | /yxt_nlp_toolkit-0.2.0.tar.gz/yxt_nlp_toolkit-0.2.0/yxt_nlp_toolkit/utils/str_algo.py | str_algo.py |
from functools import lru_cache
from .str_algo import is_ascii_alpha, is_digit, ascii_punctuations
def token_stream(file_or_files, with_postag=False, skip_space=False, use_lib='jieba'):
if isinstance(file_or_files, str):
files = (file_or_files,)
else:
files = tuple(file_or_files)
for file in files:
with open(file, 'r') as f:
for line in f:
yield from tokenizer(line,
with_postag=with_postag,
skip_space=skip_space,
use_lib=use_lib)
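# Illustrative sketch (not in the original source): streaming tokens out of a list of text files
# with the naive whitespace/punctuation splitter. The file names are assumptions; use_lib='naive'
# avoids pulling in the jieba dependency for the demo.
def _demo_token_stream():
    for token in token_stream(['corpus_a.txt', 'corpus_b.txt'],
                              skip_space=True, use_lib='naive'):
        print(token)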
@lru_cache(maxsize=32)
def _load_spacy_lang(lang):
import spacy
return spacy.load(lang)
def _cut(text, use_lib, lang):
if use_lib == 'hanlp':
from pyhanlp import HanLP
for term in HanLP.segment(text):
yield term.word, term.nature
elif use_lib == 'jieba':
import jieba.posseg as posseg
for token in posseg.cut(text):
yield token.word, token.flag
elif use_lib == 'spacy':
nlp = _load_spacy_lang(lang)
for token in nlp(text):
yield token.text, token.pos_
elif use_lib == 'naive':
acc = []
for ch in text:
if ch == ' ':
if acc:
yield ''.join(acc), None
acc = []
elif ch in ascii_punctuations:
if acc:
yield ''.join(acc), None
yield ch, None
acc = []
else:
acc.append(ch)
if acc:
yield ''.join(acc), None
else:
raise ValueError('only support hanlp, jieba, spacy or naive, but found:{}'.format(use_lib))
def tokenizer(text,
with_postag=False,
to_upper=True,
skip_space=False,
cut_digits=False,
cut_ascii=False,
use_lib='jieba',
lang='en'):
for word, postag in _cut(text, use_lib, lang):
if skip_space and word == ' ':
continue
if to_upper:
word = word.upper()
if (cut_digits and all(is_digit(c) for c in word)) or (cut_ascii and all(is_ascii_alpha(c) for c in word)):
for c in word:
if with_postag:
yield c, postag
else:
yield c
else:
if with_postag:
yield word, postag
else:
yield word | yxt-nlp-toolkit | /yxt_nlp_toolkit-0.2.0.tar.gz/yxt_nlp_toolkit-0.2.0/yxt_nlp_toolkit/utils/tokenizer.py | tokenizer.py |
import numpy as np
import pickle
class Lang:
DEFAULT_NIL_TOKEN, NIL_INDEX = '<NIL>', 0
def __init__(self, words, to_upper=True,
reserved_tokens=(), nil_token=DEFAULT_NIL_TOKEN):
self._word2ix, self._ix2word = {}, {}
self.to_upper = to_upper
self._add_new_word(nil_token)
for token in reserved_tokens:
self._add_new_word(token)
for word in words:
self._add_new_word(word)
def word_iter(self):
return iter(self._word2ix.keys())
def index_iter(self):
return iter(self._ix2word.keys())
def __len__(self):
return len(self._word2ix)
def __contains__(self, item):
item = item.upper() if (self.to_upper and isinstance(item, str)) else item
if isinstance(item, int):
return item in self._ix2word
elif isinstance(item, str):
return item in self._word2ix
else:
return False
def __iter__(self):
return iter(self._word2ix.keys())
def items(self):
return self._word2ix.items()
def __repr__(self):
return str(self)
def __str__(self):
return 'Lang(vocab_size={vocab_size})'.format(vocab_size=self.vocab_size)
def __getitem__(self, item):
if isinstance(item, int):
return self.word(item)
elif isinstance(item, str):
return self.ix(item)
raise TypeError("only support int,str:but found:{}({})".format(
type(item), item))
def _add_new_word(self, word, index=None):
word = word.upper() if self.to_upper else word
if word in self._word2ix:
if index is not None:
assert self.ix(word) == index
return
index = len(self._word2ix) if index is None else index
assert index not in self._ix2word
self._word2ix[word], self._ix2word[index] = index, word
def build_embedding(self, wv, out_embedding=None):
from ..embedding.wordembedding import WordEmbedding
if not isinstance(wv, WordEmbedding):
raise TypeError('only support WordEmbedding,but found {}'.format(type(wv)))
if out_embedding is None:
out_embedding = np.random.randn(self.vocab_size, wv.embedding_dim)
for ix, word in self._ix2word.items():
try:
if ix < len(out_embedding):
out_embedding[ix] = wv[word]
except KeyError:
pass
return out_embedding
@property
def vocab_size(self):
return len(self._word2ix)
@property
def nil_token(self):
return self.word(0)
def ix(self, word):
assert isinstance(word, str)
word = word.upper() if self.to_upper else word
return self._word2ix.get(word, Lang.NIL_INDEX)
def to_indices(self, words):
return tuple(self.ix(w) for w in words)
def to_words(self, indices):
return tuple(self.word(i) for i in indices)
def one_hot_of(self, word_or_index):
vocab_len = self.vocab_size
if isinstance(word_or_index, str):
ix = self.ix(word_or_index)
elif isinstance(word_or_index, int):
ix = word_or_index
else:
raise TypeError("one hot only support str or int, but found:{}({})".format(
type(word_or_index), word_or_index))
assert 0 <= ix < vocab_len
vec = [0] * vocab_len
vec[ix] = 1
return vec
def word(self, index):
assert isinstance(index, int)
if index == Lang.NIL_INDEX:
return self._ix2word[Lang.NIL_INDEX]
if index in self._ix2word:
return self._ix2word[index]
raise ValueError('unknown index:{}'.format(index))
def vocabulary(self):
return tuple(self._word2ix.keys())
def dump(self, path, binary=False):
if binary:
with open(path, 'wb') as f:
pickle.dump(self, f)
else:
with open(path, 'w') as f:
for word, index in self._word2ix.items():
word = word.strip('\t ')
if word == '\n':
word = '<new_line>'
elif word == '\t':
word = '<tab>'
entry = '{} {}\n'.format(word, index)
f.write(entry)
@classmethod
def load(cls, path, binary=False):
if binary:
with open(path, 'rb') as f:
return pickle.load(f)
else:
# TODO: the name is lost here, fix it
lang = Lang(words=())
lang._ix2word, lang._word2ix = {}, {}
with open(path, 'r') as f:
for line in f:
word, index = line.strip().split(' ')
if word == '<new_line>':
word = '\n'
elif word == '<tab>':
word = '\t'
index = int(index)
lang._add_new_word(word, index)
return lang
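# Illustrative sketch (not in the original source): building a Lang from a small token list and
# mapping between words and indices. The tokens are made up for demonstration.
def _demo_lang():
    lang = Lang(words=['hello', 'world', 'hello'], reserved_tokens=('<PAD>',))
    ids = lang.to_indices(['hello', 'unknown'])     # unknown words fall back to index 0 (<NIL>)
    return ids, lang.to_words(ids)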
def build_lang_from_token_stream(token_stream, min_count=1, lang_name='zh'):
from collections import Counter
words_freq = Counter(token_stream)
words = tuple(w for w, freq in words_freq.items() if freq >= min_count)
return Lang(words=words)  # Lang takes no name argument; lang_name is kept in the signature for compatibility
def build_lang_from_corpus(corpus_or_corpus_seq, min_count=1, lang_name='zh'):
from yxt_nlp_toolkit.utils.tokenizer import token_stream
tokens = token_stream(corpus_or_corpus_seq)
return build_lang_from_token_stream(tokens, min_count=min_count, lang_name=lang_name) | yxt-nlp-toolkit | /yxt_nlp_toolkit-0.2.0.tar.gz/yxt_nlp_toolkit-0.2.0/yxt_nlp_toolkit/common/lang.py | lang.py |
import collections
from multiprocessing import Pool
class Vocab:
def __init__(self, words=(), from_freqs=None, min_count=1):
self._freqs = collections.Counter(words)
if from_freqs is not None:
for k, v in from_freqs.items():
self._freqs[k] += v
self.shrink_(min_count)
def increase_freq(self, word, freq=1):
self._freqs[word] += freq
return self
def shrink_(self, min_count):
assert min_count >= 0
freqs = dict((w, f) for w, f in self._freqs.items() if f >= min_count)
self._freqs = collections.Counter(freqs)
return self
def merge_(self, other):
for k, v in other.items():
self._freqs[k] += v
def shrink(self, min_count):
assert min_count >= 0
vocab = Vocab()
for w, f in self._freqs.items():
if f >= min_count:
vocab[w] = f
return vocab
def __contains__(self, item):
return item in self._freqs
def __delitem__(self, key):
del self._freqs[key]
def __setitem__(self, key, value):
self._freqs[key] = value
def __getitem__(self, item):
return self._freqs[item]
def __iter__(self):
return iter(self._freqs.keys())
def __len__(self):
return len(self._freqs)
def __repr__(self):
return '<Vocab(n_word={})>'.format(len(self))
@classmethod
def load(cls, file):
vocab = Vocab()
with open(file, 'r') as f:
for line in f:
try:
line = line.strip(' \n\t')
word, *count = line.split(' ')
if not word:
continue
if not count:
vocab.increase_freq(word)
else:
vocab.increase_freq(word, int(count[0]))
except ValueError as e:
print(e)
return vocab
def dump(self, file):
with open(file, 'w') as f:
for word, freq in self._freqs.items():
f.write('{} {}\n'.format(word, freq))
def vocab_len(self, min_count=1):
return len(self.shrink(min_count))
def words(self, min_count=1):
return tuple(self.shrink(min_count))
def items(self):
return self._freqs.items()
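# Illustrative sketch (not in the original source): counting word frequencies with Vocab and
# dropping rare words. The tokens are made up for demonstration.
def _demo_vocab():
    vocab = Vocab(words=['a', 'b', 'a', 'c', 'a', 'b'])
    frequent = vocab.shrink(min_count=2)    # keeps 'a' (3) and 'b' (2), drops 'c'
    return frequent.words()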
def _build_vocab_from_corpus(corpus_seq):
from yxt_nlp_toolkit.utils.tokenizer import token_stream
return Vocab(words=token_stream(corpus_seq))
def _batch_item(seq, batch_size):
batch = []
for e in seq:
batch.append(e)
if len(batch) >= batch_size:
yield batch
batch = []
if batch:
yield batch
def build_vocab_from_corpus(corpus_or_corpus_seq, min_count=1):
if isinstance(corpus_or_corpus_seq, str):
corpus_or_corpus_seq = [corpus_or_corpus_seq]
batch_seq = list(_batch_item(corpus_or_corpus_seq, 100))
vocab = Vocab()
with Pool(10) as p:
out = p.map(_build_vocab_from_corpus, batch_seq)
for e in out:
vocab.merge_(e)
return vocab.shrink_(min_count=min_count) | yxt-nlp-toolkit | /yxt_nlp_toolkit-0.2.0.tar.gz/yxt_nlp_toolkit-0.2.0/yxt_nlp_toolkit/common/vocab.py | vocab.py |
import arff
import bz2
import pickle
from scipy import sparse
import hashlib
import os
import requests
import shutil
from collections import defaultdict
def get_data_home(data_home=None, subdirectory=''):
"""Return the path of the scikit-multilearn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the :code:`data_home` is set to a folder named
:code:`'scikit_ml_learn_data'` in the user home folder.
Alternatively, it can be set by the :code:`'SCIKIT_ML_LEARN_DATA'`
environment variable or programmatically by giving an explicit
folder path. The :code:`'~'` symbol is expanded to the user home
folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_home : str (default is None)
the path to the directory in which scikit-multilearn data sets
should be stored, if None the path is generated as stated above
subdirectory : str, default ''
return path subdirectory under data_home if data_home passed or under default if not passed
Returns
--------
str
the path to the data home
"""
if data_home is None:
if len(subdirectory) > 0:
data_home = os.environ.get('SCIKIT_ML_LEARN_DATA', os.path.join('~', 'scikit_ml_learn_data', subdirectory))
else:
data_home = os.environ.get('SCIKIT_ML_LEARN_DATA', os.path.join('~', 'scikit_ml_learn_data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache.
Parameters
----------
data_home : str (default is None)
the path to the directory in which scikit-multilearn data sets
should be stored.
"""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def _get_download_base_url():
"""Returns base URL for data sets."""
return 'http://scikit.ml/datasets/'
def available_data_sets():
"""Lists available data sets and their variants
Returns
-------
dict[(set_name, variant_name)] -> [md5, file_name]
available datasets and their variants with the key pertaining
to the :code:`(set_name, variant_name)` and values include md5 and file name on server
"""
r = requests.get(_get_download_base_url() + 'data.list')
if r.status_code != 200:
r.raise_for_status()
else:
raw_data_list = r.text
variant_information = defaultdict(list)
for row in raw_data_list.split('\n'):
md5, file_name = row.split(';')
set_name, variant = file_name.split('.')[0].split('-')
if (set_name, variant) in variant_information:
raise Exception('Data file broken, files doubled, please file bug report.')
variant_information[(set_name, variant)] = [md5, file_name]
return variant_information
def download_dataset(set_name, variant, data_home=None):
"""Downloads a data set
Parameters
----------
set_name : str
name of set from :func:`available_data_sets`
variant : str
variant of the data set from :func:`available_data_sets`
data_home : default None, str
custom base folder for data, if None, default is used
Returns
-------
str
path to the downloaded data set file on disk
"""
data_sets = available_data_sets()
if (set_name, variant) not in data_sets:
raise ValueError('The set {} in variant {} does not exist on server.'.format(set_name, variant))
md5, name = data_sets[set_name, variant]
if data_home is None:
target_name = os.path.join(get_data_home(), name)
else:
target_name = os.path.join(data_home, name)
if os.path.exists(target_name):
if md5 == _get_md5(target_name):
print ("{}:{} - exists, not redownloading".format(set_name, variant))
return target_name
else:
print ("{}:{} - exists, but MD5 sum mismatch - redownloading".format(set_name, variant))
else:
print("{}:{} - does not exists downloading".format(set_name, variant))
# not found or broken md5
_download_single_file(name, target_name)
found_md5 = _get_md5(target_name)
if md5 != found_md5:
raise Exception(
"{}: MD5 mismatch {} vs {} - possible download error".format(name, md5, found_md5))
print("Downloaded {}-{}".format(set_name, variant))
return target_name
def load_dataset(set_name, variant, data_home=None):
"""Loads a selected variant of the given data set
Parameters
----------
set_name : str
name of set from :func:`available_data_sets`
variant : str
variant of the data set
data_home : default None, str
custom base folder for data, if None, default is used
Returns
--------
dict
the loaded multilabel data set variant in the scikit-multilearn
format, see data_sets
"""
path = download_dataset(set_name, variant, data_home)
if path is not None:
return load_dataset_dump(path)
return None
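# Usage sketch (downloads from scikit.ml; 'emotions'/'undivided' is one of the publicly
# listed set/variant pairs, actual availability depends on the server):
#
#   X, y, feature_names, label_names = load_dataset('emotions', 'undivided')
#   X.shape, y.shape   # e.g. ((593, 72), (593, 6)) for this data set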
def load_from_arff(filename, label_count, label_location="end",
input_feature_type='float', encode_nominal=True, load_sparse=False,
return_attribute_definitions=False):
"""Method for loading ARFF files as numpy array
Parameters
----------
filename : str
path to ARFF file
label_count : int
number of labels in the ARFF file
label_location : str {"start", "end"} (default is "end")
whether the ARFF file contains labels at the beginning of the
attributes list ("start", MEKA format)
or at the end ("end", MULAN format)
input_feature_type: numpy.type as string (default is "float")
the desired type of the contents of the returned 'X' array-likes,
should be a numpy type,
see http://docs.scipy.org/doc/numpy/user/basics.types.html
encode_nominal: bool (default is True)
whether to convert categorical data into numeric factors - required
for some scikit classifiers that can't handle non-numeric
input features.
load_sparse: boolean (default is False)
whether to read arff file as a sparse file format, liac-arff
breaks if sparse reading is enabled for non-sparse ARFFs.
return_attribute_definitions: boolean (default is False)
whether to return the definitions for each attribute in the
dataset
Returns
-------
X : :mod:`scipy.sparse.lil_matrix` of `input_feature_type`, shape=(n_samples, n_features)
input feature matrix
y : :mod:`scipy.sparse.lil_matrix` of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
names of attributes : List[str]
list of attribute names from ARFF file
"""
if not load_sparse:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.DENSE
)
matrix = sparse.csr_matrix(
arff_frame['data'], dtype=input_feature_type
)
else:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.COO
)
data = arff_frame['data'][0]
row = arff_frame['data'][1]
col = arff_frame['data'][2]
matrix = sparse.coo_matrix(
(data, (row, col)), shape=(max(row) + 1, max(col) + 1)
)
if label_location == "start":
X, y = matrix.tocsc()[:, label_count:].tolil(), matrix.tocsc()[:, :label_count].astype(int).tolil()
feature_names = arff_frame['attributes'][label_count:]
label_names = arff_frame['attributes'][:label_count]
elif label_location == "end":
X, y = matrix.tocsc()[:, :-label_count].tolil(), matrix.tocsc()[:, -label_count:].astype(int).tolil()
feature_names = arff_frame['attributes'][:-label_count]
label_names = arff_frame['attributes'][-label_count:]
else:
# unknown label_location
return None
if return_attribute_definitions:
return X, y, feature_names, label_names
else:
return X, y
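# Usage sketch (the file name is hypothetical; label_count must match the ARFF file):
#
#   X, y = load_from_arff('scene-train.arff', label_count=6,
#                         label_location='end', load_sparse=False)
#   X, y, feat_defs, label_defs = load_from_arff('scene-train.arff', label_count=6,
#                                                return_attribute_definitions=True)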
def save_to_arff(X, y, label_location="end", save_sparse=True, filename=None):
"""Method for dumping data to ARFF files
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
label_location: string {"start", "end"} (default is "end")
whether the ARFF file will contain labels at the beginning of the
attributes list ("start", MEKA format)
or at the end ("end", MULAN format)
save_sparse: boolean
Whether to save in ARFF's sparse dictionary-like format instead of listing all
zeroes within file, very useful in multi-label classification.
filename : str or None
Path to ARFF file, if None, the ARFF representation is returned as string
Returns
-------
str or None
the ARFF dump string, if filename is None
"""
X = X.todok()
y = y.todok()
x_prefix = 0
y_prefix = 0
x_attributes = [(u'X{}'.format(i), u'NUMERIC')
for i in range(X.shape[1])]
y_attributes = [(u'y{}'.format(i), [str(0), str(1)])
for i in range(y.shape[1])]
if label_location == "end":
y_prefix = X.shape[1]
relation_sign = -1
attributes = x_attributes + y_attributes
elif label_location == "start":
x_prefix = y.shape[1]
relation_sign = 1
attributes = y_attributes + x_attributes
else:
raise ValueError("Label location not in {start, end}")
if save_sparse:
data = [{} for r in range(X.shape[0])]
else:
data = [[0 for c in range(X.shape[1] + y.shape[1])]
for r in range(X.shape[0])]
for keys, value in list(X.items()):
data[keys[0]][x_prefix + keys[1]] = value
for keys, value in list(y.items()):
data[keys[0]][y_prefix + keys[1]] = value
dataset = {
u'description': u'traindata',
u'relation': u'traindata: -C {}'.format(y.shape[1] * relation_sign),
u'attributes': attributes,
u'data': data
}
arff_data = arff.dumps(dataset)
if filename is None:
return arff_data
with open(filename, 'w') as fp:
fp.write(arff_data)
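# Usage sketch: round-trip a data set through the MULAN-style ARFF layout
# (the path is hypothetical; X and y are assumed to be scipy.sparse matrices):
#
#   save_to_arff(X, y, label_location='end', save_sparse=True, filename='train.arff')
#   X2, y2 = load_from_arff('train.arff', label_count=y.shape[1],
#                           label_location='end', load_sparse=True)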
def save_dataset_dump(input_space, labels, feature_names, label_names, filename=None):
"""Saves a compressed data set dump
Parameters
----------
input_space: array-like of array-likes
Input space array-like of input feature vectors
labels: array-like of binary label vectors
Array-like of labels assigned to each input vector, as a binary
indicator vector (i.e. if 5th position has value 1
then the input vector has label no. 5)
feature_names: array-like,optional
names of features
label_names: array-like, optional
names of labels
filename : str, optional
Path to dump file, if without .bz2, the .bz2 extension will be
appended.
"""
data = {'X': input_space, 'y': labels, 'features': feature_names, 'labels': label_names}
if filename is not None:
if filename[-4:] != '.bz2':
filename += ".bz2"
with bz2.BZ2File(filename, "wb") as file_handle:
pickle.dump(data, file_handle)
else:
return data
def load_dataset_dump(filename):
"""Loads a compressed data set dump
Parameters
----------
filename : str
path to dump file, if without .bz2 ending, the .bz2 extension will be appended.
Returns
-------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
names of attributes: List[str]
list of attribute names for `X` columns
names of labels: List[str]
list of label names for `y` columns
"""
if not os.path.exists(filename):
raise IOError("File {} does not exist, use load_dataset to download file".format(filename))
if filename[-4:] != '.bz2':
filename += ".bz2"
with bz2.BZ2File(filename, "r") as file_handle:
data = pickle.load(file_handle)
return data['X'], data['y'], data['features'], data['labels']
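# Usage sketch: persist and reload a data set in the compressed dump format
# (the path is hypothetical):
#
#   save_dataset_dump(X, y, feature_names, label_names, filename='my-dataset.bz2')
#   X2, y2, feature_names2, label_names2 = load_dataset_dump('my-dataset.bz2')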
def _download_single_file(data_file_name, target_file_name, base_url=None):
base_url = base_url or _get_download_base_url()
r = requests.get(base_url + data_file_name, stream=True)
if r.status_code == 200:
with open(target_file_name, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
r.raise_for_status()
def _get_md5(file_name):
hash_md5 = hashlib.md5()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest() | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/dataset.py | dataset.py |
import numpy as np
from ..utils import get_matrix_in_format, matrix_creation_function_for_format
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, ClassifierMixin
class MLClassifierBase(BaseEstimator, ClassifierMixin):
"""Base class providing API and common functions for all multi-label
classifiers.
Implements base functionality for ML classifiers, especially the get/set params for
scikit-learn compatibility.
Attributes
----------
copyable_attrs : List[str]
list of attribute names that should be copied when class is cloned
"""
def __init__(self):
super(MLClassifierBase, self).__init__()
self.copyable_attrs = []
def _generate_data_subset(self, y, subset, axis):
"""Subset rows or columns from matrix
This function subsets the array of binary label vectors to
include only certain labels.
Parameters
----------
y : array-like of array-likes
An array-like of binary label vectors.
subset: array-like of integers
array of integers, indices that will be subsetted from
array-likes in y
axis: integer 0 for 'rows', 1 for 'labels',
control variable for whether to return rows or labels as
indexed by subset
Returns
-------
multi-label binary label vector : array-like of array-likes of {0,1}
array of binary label vectors including label data only for
labels from parameter labels
"""
return_data = None
if axis == 1:
return_data = y.tocsc()[:, subset]
elif axis == 0:
return_data = y.tocsr()[subset, :]
return return_data
def _ensure_input_format(self, X, sparse_format='csr', enforce_sparse=False):
"""Ensure the desired input format
This function ensures that input format follows the
density/sparsity requirements of base classifier.
Parameters
----------
X : array-like or sparse matrix
An input feature matrix of shape :code:`(n_samples, n_features)`
sparse_format: str
Requested format of returned scipy.sparse matrix, if sparse is returned
enforce_sparse : bool
Ignore require_dense and enforce sparsity, useful internally
Returns
-------
array-like or sparse matrix
Transformed X values of shape :code:`(n_samples, n_features)`
.. note:: If :code:`require_dense` was set to :code:`True` for
input features in the constructor, the returned value is an
array-like of array-likes. If :code:`require_dense` is
set to :code:`false`, a sparse matrix of format
:code:`sparse_format` is returned, if possible - without cloning.
"""
is_sparse = issparse(X)
if is_sparse:
if self.require_dense[0] and not enforce_sparse:
return X.toarray()
else:
if sparse_format is None:
return X
else:
return get_matrix_in_format(X, sparse_format)
else:
if self.require_dense[0] and not enforce_sparse:
# TODO: perhaps a check_array?
return X
else:
return matrix_creation_function_for_format(sparse_format)(X)
def _ensure_output_format(self, matrix, sparse_format='csr', enforce_sparse=False):
"""Ensure the desired output format
This function ensures that output format follows the
density/sparsity requirements of base classifier.
Parameters
----------
matrix : array-like matrix
An input feature matrix of shape :code:`(n_samples)` or
:code:`(n_samples, n_outputs)` or a sparse matrix of shape
:code:`(n_samples, n_outputs)`
sparse_format: str (default is csr)
Requested format of returned :code:`scipy.sparse` matrix,
if sparse is returned
enforce_sparse : bool (default is False)
Ignore :code:`require_dense` and enforce sparsity, useful
internally
Returns
-------
array-like or sparse matrix
Transformed matrix of shape :code:`(n_samples, n_outputs)`
.. note:: If :code:`require_dense` was set to :code:`True` for
labels in the constructor, the returned value is an
array-like of array-likes. If :code:`require_dense` is
set to :code:`false`, a sparse matrix of format
:code:`sparse_format` is returned, if possible - without cloning.
"""
is_sparse = issparse(matrix)
if is_sparse:
if self.require_dense[1] and not enforce_sparse:
if matrix.shape[1] != 1:
return matrix.toarray()
elif matrix.shape[1] == 1:
return np.ravel(matrix.toarray())
else:
if sparse_format is None:
return matrix
else:
return get_matrix_in_format(matrix, sparse_format)
else:
if self.require_dense[1] and not enforce_sparse:
# ensuring 1d
if len(matrix.shape) > 1:
# a regular dense np.matrix or np.array of np.arrays
return np.ravel(matrix)
else:
return matrix
else:
# ensuring 2d
if len(matrix.shape) == 1:
matrix = matrix.reshape((matrix.shape[0], 1))
return matrix_creation_function_for_format(sparse_format)(matrix)
def fit(self, X, y):
"""Abstract method to fit classifier with training data
It must return a fitted instance of :code:`self`.
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features, can be a dense or sparse matrix of size
:code:`(n_samples, n_features)`
y : numpy.ndaarray or scipy.sparse {0,1}
binary indicator matrix with label assignments.
Returns
-------
object
fitted instance of self
Raises
------
NotImplementedError
this is just an abstract method
"""
raise NotImplementedError("MLClassifierBase::fit()")
def predict(self, X):
"""Abstract method to predict labels
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
Raises
------
NotImplementedError
this is just an abstract method
"""
raise NotImplementedError("MLClassifierBase::predict()")
def get_params(self, deep=True):
"""Get parameters to sub-objects
Introspection of classifier for search models like
cross-validation and grid search.
Parameters
----------
deep : bool
if :code:`True` all params will be introspected also and
appended to the output dictionary.
Returns
-------
out : dict
dictionary of all parameters and their values. If
:code:`deep=True` the dictionary also holds the parameters
of the parameters.
"""
out = dict()
for attr in self.copyable_attrs:
out[attr] = getattr(self, attr)
if hasattr(getattr(self, attr), 'get_params') and deep:
deep_items = list(getattr(self, attr).get_params().items())
out.update((attr + '__' + k, val) for k, val in deep_items)
return out
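# Usage sketch (hypothetical call site): parameters of a wrapped base classifier are
# exposed and set with scikit-learn's double-underscore convention, e.g.
#
#   clf = BinaryRelevance(classifier=SVC())
#   clf.get_params(deep=True)            # includes 'classifier__kernel', 'classifier__C', ...
#   clf.set_params(classifier__kernel='linear')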
def set_params(self, **parameters):
"""Propagate parameters to sub-objects
Set parameters as returned by :code:`get_params`. Please
see this `link`_.
.. _link: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py#L243
"""
if not parameters:
return self
valid_params = self.get_params(deep=True)
parameters_current_level = [x for x in parameters if '__' not in x]
for parameter in parameters_current_level:
value = parameters[parameter]
if parameter in valid_params:
setattr(self, parameter, value)
else:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(parameter, self))
parameters_below_current_level = [x for x in parameters if '__' in x]
parameters_grouped_by_current_level = {obj: {} for obj in valid_params}
for parameter in parameters_below_current_level:
object_name, sub_param = parameter.split('__', 1)
if object_name not in parameters_grouped_by_current_level:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(object_name, self))
value = parameters[parameter]
parameters_grouped_by_current_level[object_name][sub_param] = value
valid_params = self.get_params(deep=True)
# parameters_grouped_by_current_level groups valid parameters for subojects
for object_name, sub_params in parameters_grouped_by_current_level.items():
if len(sub_params) > 0:
sub_object = valid_params[object_name]
sub_object.set_params(**sub_params)
return self | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/base/base.py | base.py |
import numpy as np
from .base import MLClassifierBase
from ..utils import matrix_creation_function_for_format
from scipy.sparse import issparse, csr_matrix
class ProblemTransformationBase(MLClassifierBase):
"""Base class providing common functions for multi-label classifiers
that follow the problem transformation approach.
Problem transformation is the approach in which the
original multi-label classification problem is transformed into one
or more single-label problems, which are then solved by single-class
or multi-class classifiers.
Scikit-multilearn provides a number of such methods:
- :class:`BinaryRelevance` - trains a separate single-label binary classifier per label and takes the union of their predictions
- :class:`ClassifierChain` - trains a binary classifier per label, feeding each classifier the predictions for the previous labels in the chain as extra features
- :class:`LabelPowerset` - treats every unique label combination as a single class and trains one multi-class classifier
Parameters
----------
classifier : scikit classifier type
The base classifier that will be used in a class, will be automagically put under self.classifier for future access.
require_dense : boolean (default is False)
Whether the base classifier requires input as dense arrays.
"""
def __init__(self, classifier=None, require_dense=None):
super(ProblemTransformationBase, self).__init__()
self.copyable_attrs = ["classifier", "require_dense"]
self.classifier = classifier
if require_dense is not None:
if isinstance(require_dense, bool):
self.require_dense = [require_dense, require_dense]
else:
assert len(require_dense) == 2 and isinstance(
require_dense[0], bool) and isinstance(require_dense[1], bool)
self.require_dense = require_dense
else:
if isinstance(self.classifier, MLClassifierBase):
self.require_dense = [False, False]
else:
self.require_dense = [True, True]
def _ensure_multi_label_from_single_class(self, matrix, matrix_format='csr'):
"""Transform single class outputs to a 2D sparse matrix
Parameters
----------
matrix : array-like
input matrix to be checked
matrix_format : str (default is csr)
the matrix format to validate with
Returns
-------
scipy.sparse
a 2-dimensional sparse matrix
"""
is_2d = None
dim_1 = None
dim_2 = None
# check if array like of array likes
if isinstance(matrix, (list, tuple, np.ndarray)):
if isinstance(matrix[0], (list, tuple, np.ndarray)):
is_2d = True
dim_1 = len(matrix)
dim_2 = len(matrix[0])
# 1d list or array
else:
is_2d = False
# shape is n_samples of 1 class assignment
dim_1 = len(matrix)
dim_2 = 1
# not an array but 2D, probably a matrix
elif matrix.ndim == 2:
is_2d = True
dim_1 = matrix.shape[0]
dim_2 = matrix.shape[1]
# what is it?
else:
raise ValueError("Matrix dimensions too large (>2) or other value error")
new_matrix = None
if is_2d:
if issparse(matrix):
new_matrix = matrix
else:
new_matrix = matrix_creation_function_for_format(matrix_format)(matrix, shape=(dim_1, dim_2))
else:
new_matrix = matrix_creation_function_for_format(matrix_format)(matrix).T
assert new_matrix.shape == (dim_1, dim_2)
return new_matrix | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/base/problem_transformation.py | problem_transformation.py |
from __future__ import absolute_import
import numpy as np
import random
from .base import LabelSpaceClustererBase
from .helpers import _euclidean_distance, _recalculateCenters, _countNumberOfAparitions
class BalancedKMeansClusterer(LabelSpaceClustererBase):
"""Cluster the label space regarding the algorithm of balancedkMeans, used by HOMER"""
def __init__(self, k = None, it = None):
"""Initializes the clusterer
Attributes
----------
k: int
Number of partitions to be made to the label-space
it: int
Number of iterations for the algorithm to find the best neighbours
"""
super(BalancedKMeansClusterer, self).__init__()
self.k = k
self.it = it
def fit_predict(self, X, y):
"""Performs clustering on y and returns list of label lists
Builds a label list taking care of the distance between labels
Parameters
----------
X : currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
array of arrays
numpy array of arrays of label indexes, where each sub-array
represents labels that are in a separate community
"""
number_of_labels = y.shape[1]
#Assign each label to one of the k clusters, keeping the clusters balanced in size
#We have to do the balanced k-means and then use it for HOMER with the label powerset
Centers =[]
y = y.todense()
for i in range(0, self.k):
auxVector = y[:, random.randint(0, number_of_labels-1)]
Centers.append(np.asarray(auxVector))
#Now we have the clusters created and we need to make each label its corresponding cluster
iterations_left = self.it
while iterations_left > 0:
balancedCluster = []
for j in range(0, number_of_labels):
auxVector = y[:,j]
v = np.asarray(auxVector)
#Now we calculate the distance and store it in an array
distances = []
for i in range(0, self.k):
#Store the distances
distances.append(_euclidean_distance(v, Centers[i]))
finished = False
while not finished:
minIndex = np.argmin(distances)
balancedCluster.append(minIndex)
#Now we have the cluster we want to add this label to
numberOfAparitions = _countNumberOfAparitions(balancedCluster, minIndex)
if float(numberOfAparitions) > (float(float(number_of_labels)/float(self.k))+1):
distances[minIndex] = float("inf")
balancedCluster.pop()
else:
finished = True
Centers = _recalculateCenters(np.asarray(y), balancedCluster, self.k)
iterations_left = iterations_left - 1
#Returns a list of list with the clusterers
labelCluster = []
for i in range(0, self.k):
cluster = []
for j in range(0, len(balancedCluster)):
if int(i) == int(balancedCluster[j]):
cluster.append(int(j))
labelCluster.append(cluster)
return np.asarray(labelCluster) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/balancedkmeans.py | balancedkmeans.py |
from __future__ import absolute_import
from __future__ import print_function
import graph_tool.all as gt
import numpy as np
from .base import LabelGraphClustererBase
from .helpers import _membership_to_list_of_communities, _overlapping_membership_to_list_of_communities
class StochasticBlockModel:
"""A Stochastic Blockmodel fit to Label Graph
This contains a stochastic block model instance constructed for a block model variant specified in parameters.
It can be fit to an instance of a graph and set of weights. More information on how to select parameters can be
found in `the extensive introduction into Stochastic Block Models
<https://graph-tool.skewed.de/static/doc/demos/inference/inference.html>`_ in graphtool documentation.
Parameters
----------
nested: boolean
whether to build a nested Stochastic Block Model or the regular variant,
will be automatically put under :code:`self.nested`.
use_degree_correlation: boolean
whether to correct for degree correlation in modeling, will be automatically
put under :code:`self.use_degree_correlation`.
allow_overlap: boolean
whether to allow overlapping clusters or not, will be automatically
put under :code:`self.allow_overlap`.
weight_model: string or None
decide whether to generate a weighted or unweighted graph,
will be automatically put under :code:`self.weight_model`.
Attributes
----------
model_: graph_tool.inference.BlockState or its subclass
an instance of the fitted model obtained from graph-tool
"""
def __init__(self, nested, use_degree_correlation, allow_overlap, weight_model):
self.nested = nested
self.use_degree_correlation = use_degree_correlation
self.allow_overlap = allow_overlap
self.weight_model = weight_model
self.model_ = None
def fit_predict(self, graph, weights):
"""Fits model to a given graph and weights list
Sets :code:`self.model_` to the state of graphtool's Stochastic Block Model after fitting.
Attributes
----------
graph: graphtool.Graph
the graph to fit the model to
weights: graphtool.EdgePropertyMap<double>
the property map: edge -> weight (double) to fit the model to, if weighted variant
is selected
Returns
-------
numpy.ndarray
partition of labels, each sublist contains label indices
related to label positions in :code:`y`
"""
if self.weight_model:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap,
state_args=dict(recs=[weights],
rec_types=[self.weight_model])
)
else:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap
)
return self._detect_communities()
def _detect_communities(self):
if self.nested:
lowest_level = self.model_.get_levels()[0]
else:
lowest_level = self.model_
number_of_communities = lowest_level.get_B()
if self.allow_overlap:
# the overlaps block returns
# membership vector, and also edges vectors, we need just the membership here at the moment
membership_vector = list(lowest_level.get_overlap_blocks()[0])
else:
membership_vector = list(lowest_level.get_blocks())
if self.allow_overlap:
return _overlapping_membership_to_list_of_communities(membership_vector, number_of_communities)
return _membership_to_list_of_communities(membership_vector, number_of_communities)
def _model_fit_function(self):
if self.nested:
return gt.minimize_nested_blockmodel_dl
else:
return gt.minimize_blockmodel_dl
class GraphToolLabelGraphClusterer(LabelGraphClustererBase):
"""Fits a Stochastic Block Model to the Label Graph and infers the communities
This clusterer clusters the label space using by fitting a stochastic block
model to the label network and inferring the community structure using graph-tool.
The obtained community structure is returned as the label clustering. More information on the inference itself
can be found in `the extensive introduction into Stochastic Block Models
<https://graph-tool.skewed.de/static/doc/demos/inference/inference.html>`_ in graphtool documentation.
Parameters
----------
graph_builder: a GraphBuilderBase inherited transformer
the graph builder to provide the adjacency matrix and weight map for the underlying graph
model: StochasticBlockModel
the desired stochastic block model variant to use
Attributes
----------
graph_ : graphtool.Graph
object representing a label co-occurence graph
weights_ : graphtool.EdgeProperty<double>
edge weights defined by graph builder stored in a graphtool compatible format
.. note ::
This functionality is still undergoing research.
.. note ::
This clusterer is GPL-licenced and will taint your code with GPL restrictions.
References
----------
If you use this class please cite:
.. code : latex
article{peixoto_graph-tool_2014,
title = {The graph-tool python library},
url = {http://figshare.com/articles/graph_tool/1164194},
doi = {10.6084/m9.figshare.1164194},
urldate = {2014-09-10},
journal = {figshare},
author = {Peixoto, Tiago P.},
year = {2014},
keywords = {all, complex networks, graph, network, other}}
Examples
--------
An example code for using this clusterer with a classifier looks like this:
.. code-block:: python
from sklearn.ensemble import RandomForestClassifier
from yyskmultilearn.problem_transform import LabelPowerset
from yyskmultilearn.cluster import GraphToolLabelGraphClusterer, LabelCooccurrenceGraphBuilder, StochasticBlockModel
from yyskmultilearn.ensemble import LabelSpacePartitioningClassifier
# construct base forest classifier
base_classifier = RandomForestClassifier(n_estimators=1000)
# construct a graph builder that will include
# label relations weighted by how many times they
# co-occurred in the data, without self-edges
graph_builder = LabelCooccurrenceGraphBuilder(
weighted = True,
include_self_edges = False
)
# select parameters for the model, we fit a flat,
# degree-corrected, partitioning model
# which will fit the normal distribution as the weights model
model = StochasticBlockModel(
nested=False,
use_degree_correlation=True,
allow_overlap=False,
weight_model='real-normal'
)
# setup problem transformation approach with sparse matrices for random forest
problem_transform_classifier = LabelPowerset(classifier=base_classifier,
require_dense=[False, False])
# setup the clusterer to use, we selected the fast greedy modularity-maximization approach
clusterer = GraphToolLabelGraphClusterer(graph_builder=graph_builder, model=model)
# setup the ensemble metaclassifier
classifier = LabelSpacePartitioningClassifier(problem_transform_classifier, clusterer)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
For more use cases see `the label relations exploration guide <../labelrelations.ipynb>`_.
"""
def __init__(self, graph_builder, model):
super(GraphToolLabelGraphClusterer, self).__init__(graph_builder)
self.model = model
self.graph_builder = graph_builder
def fit_predict(self, X, y):
"""Performs clustering on y and returns list of label lists
Builds a label graph using the provided graph builder's `transform` method
on `y` and then detects communities using the selected `method`.
Sets :code:`self.weights_` and :code:`self.graph_`.
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
arrray of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
self._build_graph_instance(y)
clusters = self.model.fit_predict(self.graph_, weights=self.weights_)
return np.array([community for community in clusters if len(community) > 0])
def _build_graph_instance(self, y):
edge_map = self.graph_builder.transform(y)
g = gt.Graph(directed=False)
g.add_vertex(y.shape[1])
self.weights_ = g.new_edge_property('double')
for edge, weight in edge_map.items():
e = g.add_edge(edge[0], edge[1])
self.weights_[e] = weight
self.graph_ = g | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/graphtool.py | graphtool.py |
from __future__ import absolute_import
import random
import numpy as np
from .base import LabelSpaceClustererBase
class RandomLabelSpaceClusterer(LabelSpaceClustererBase):
"""Randomly divides the label space into equally-sized clusters
This method divides the label space by drawing without replacement a desired number of
equally sized subsets of label space, in a partitioning or overlapping scheme.
Parameters
----------
cluster_size : int
desired size of a single cluster, will be automatically
put under :code:`self.cluster_size`.
cluster_count: int
number of clusters to divide into, will be automatically
put under :code:`self.cluster_count`.
allow_overlap : bool
whether to allow overlapping clusters or not, will be automatically
put under :code:`self.allow_overlap`.
Examples
--------
The following code performs random label space partitioning.
.. code :: python
from yyskmultilearn.cluster import RandomLabelSpaceClusterer
# assume X,y contain the data, example y contains 5 labels
cluster_count = 2
cluster_size = y.shape[1]//cluster_count # == 2
clr = RandomLabelSpaceClusterer(cluster_size, cluster_count, allow_overlap=False)
clr.fit_predict(X,y)
# Result:
# array([list([0, 4]), list([2, 3]), list([1])], dtype=object)
Note that the leftover labels that did not fit in `cluster_size` x `cluster_count` classifiers will be appended
to an additional last cluster of size at most `cluster_size` - 1.
You can also use this class to get a random division of the label space, even with multiple overlaps:
.. code :: python
from yyskmultilearn.cluster import RandomLabelSpaceClusterer
cluster_size = 3
cluster_count = 5
clr = RandomLabelSpaceClusterer(cluster_size, cluster_count, allow_overlap=True)
clr.fit_predict(X,y)
# Result
# array([[2, 1, 3],
# [3, 0, 4],
# [2, 3, 1],
# [2, 3, 4],
# [3, 4, 0],
# [3, 0, 2]])
Note that you will never get the same label subset twice.
"""
def __init__(self, cluster_size, cluster_count, allow_overlap):
super(RandomLabelSpaceClusterer, self).__init__()
self.cluster_size = cluster_size
self.cluster_count = cluster_count
self.allow_overlap = allow_overlap
def fit_predict(self, X, y):
"""Cluster the output space
Parameters
----------
X : currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
arrray of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
if (self.cluster_count+1) * self.cluster_size < y.shape[1]:
raise ValueError("Cannot include all of {} labels in {} clusters of {} labels".format(
y.shape[1],
self.cluster_count,
self.cluster_size
))
all_labels_assigned_to_division = False
# make sure the final label set division includes all labels
while not all_labels_assigned_to_division:
label_sets = []
free_labels = list(range(y.shape[1]))
while len(label_sets) <= self.cluster_count:
if not self.allow_overlap:
if len(free_labels) == 0:
break
# in this case, we are unable to draw new labels, add all that remain
if len(free_labels) < self.cluster_size:
label_sets.append(free_labels)
break
label_set = random.sample(free_labels, self.cluster_size)
if not self.allow_overlap:
free_labels = list(set(free_labels).difference(set(label_set)))
if label_set not in label_sets:
label_sets.append(label_set)
all_labels_assigned_to_division = all(
any(label in subset for subset in label_sets)
for label in range(y.shape[1])
)
return np.array(label_sets) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/random.py | random.py |
from __future__ import absolute_import
import numpy as np
from .base import LabelSpaceClustererBase
from .helpers import _membership_to_list_of_communities
class MatrixLabelSpaceClusterer(LabelSpaceClustererBase):
"""Cluster the label space using a scikit-compatible matrix-based clusterer
Parameters
----------
clusterer : sklearn.base.ClusterMixin
a clonable instance of a scikit-compatible clusterer, will be automatically
put under :code:`self.clusterer`.
pass_input_space : bool (default is False)
whether to take :code:`X` into consideration upon clustering,
use only if you know that the clusterer can handle two
parameters for clustering, will be automatically
put under :code:`self.pass_input_space`.
Example code for using this clusterer looks like this:
.. code-block:: python
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
from yyskmultilearn.problem_transform import LabelPowerset
from yyskmultilearn.cluster import MatrixLabelSpaceClusterer
from yyskmultilearn.ensemble import LabelSpacePartitioningClassifier
# construct base forest classifier
base_classifier = RandomForestClassifier(n_estimators=1030)
# setup problem transformation approach with sparse matrices for random forest
problem_transform_classifier = LabelPowerset(classifier=base_classifier,
require_dense=[False, False])
# setup the clusterer
clusterer = MatrixLabelSpaceClusterer(clusterer=KMeans(n_clusters=3))
# setup the ensemble metaclassifier
classifier = LabelSpacePartitioningClassifier(problem_transform_classifier, clusterer)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
"""
def __init__(self, clusterer=None, pass_input_space=False):
super(MatrixLabelSpaceClusterer, self).__init__()
self.clusterer = clusterer
self.pass_input_space = pass_input_space
def fit_predict(self, X, y):
"""Clusters the output space
The clusterer's :code:`fit_predict` method is executed
on X and y.T together (if :code:`self.pass_input_space` is true)
or on y.T alone, to detect clusters of labels.
The transposition of label space is used to align with
the format expected by scikit-learn classifiers, i.e. we cluster
labels with label assignment vectors as samples.
Returns
-------
arrray of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
if self.pass_input_space:
result = self.clusterer.fit_predict(X, y.transpose())
else:
result = self.clusterer.fit_predict(y.transpose())
return np.array(_membership_to_list_of_communities(result, 1 + max(result))) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/matrix.py | matrix.py |
from builtins import object
from ..utils import get_matrix_in_format
from sklearn.base import BaseEstimator
class GraphBuilderBase(object):
"""An abstract base class for a graph building class used in Label Space clustering.
Inherit it in your classifier according to the `developer guide <../developer.ipynb>`_.
"""
def __init__(self):
super(GraphBuilderBase, self).__init__()
def transform(self, y):
""" Abstract method for graph edge map builder for a label space clusterer
Implement it in your classifier according to the `developer guide <../developer.ipynb>`_.
Raises
------
NotImplementedError
this is an abstract method
"""
raise NotImplementedError("GraphBuilderBase::transform()")
class LabelSpaceClustererBase(BaseEstimator):
"""An abstract base class for Label Space clustering
Inherit it in your classifier according to the `developer guide <../developer.ipynb>`_.
"""
def __init__(self):
super(LabelSpaceClustererBase, self).__init__()
def fit_predict(self, X, y):
""" Abstract method for clustering label space
Implement it in your classifier according to the `developer guide <../developer.ipynb>`_.
Raises
------
NotImplementedError
this is an abstract method
"""
raise NotImplementedError("LabelSpaceClustererBase::fit_predict()")
class LabelGraphClustererBase(object):
"""An abstract base class for Label Graph clustering
Inherit it in your classifier according to the `developer guide <../developer.ipynb>`_.
"""
def __init__(self, graph_builder):
"""
Attributes
----------
graph_builder : a GraphBuilderBase derivative class
a graph building class for the clusterer
"""
super(LabelGraphClustererBase, self).__init__()
self.graph_builder = graph_builder
def fit_predict(self, X, y):
""" Abstract method for clustering label space
Implement it in your classifier according to the `developer guide <../developer.ipynb>`_.
Raises
------
NotImplementedError
this is an abstract method
"""
raise NotImplementedError("LabelGraphClustererBase::fit_predict()")
class LabelCooccurrenceGraphBuilder(GraphBuilderBase):
"""Base class providing API and common functions for all label
co-occurence based multi-label classifiers.
This graph builder constructs a Label Graph based on the output matrix where two label nodes are connected
when at least one sample is labeled with both of them. If the graph is weighted, the weight of an edge between two
label nodes is the number of samples labeled with these two labels. Self-edge weights contain the number of samples
with a given label.
Parameters
----------
weighted: bool
decide whether to generate a weighted or unweighted graph.
include_self_edges : bool
decide whether to include self-edge i.e. label 1 - label 1 in
co-occurrence graph
normalize_self_edges: bool
if including self edges, divide the (i, i) edge by 2.0, requires include_self_edges=True
References
----------
If you use this graph builder please cite the clustering paper:
.. code:: latex
@Article{datadriven,
author = {Szymański, Piotr and Kajdanowicz, Tomasz and Kersting, Kristian},
title = {How Is a Data-Driven Approach Better than Random Choice in
Label Space Division for Multi-Label Classification?},
journal = {Entropy},
volume = {18},
year = {2016},
number = {8},
article_number = {282},
url = {http://www.mdpi.com/1099-4300/18/8/282},
issn = {1099-4300},
doi = {10.3390/e18080282}
}
Examples
--------
A full example of building a modularity-based label space division based on the Label Co-occurrence Graph and
classifying with a separate classifier chain per subspace.
.. code :: python
from yyskmultilearn.cluster import LabelCooccurrenceGraphBuilder, NetworkXLabelGraphClusterer
from yyskmultilearn.ensemble import LabelSpacePartitioningClassifier
from yyskmultilearn.problem_transform import ClassifierChain
from sklearn.naive_bayes import GaussianNB
graph_builder = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False, normalize_self_edges=False)
clusterer = NetworkXLabelGraphClusterer(graph_builder, method='louvain')
classifier = LabelSpacePartitioningClassifier(
classifier = ClassifierChain(classifier=GaussianNB()),
clusterer = clusterer
)
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
For more use cases see `the label relations exploration guide <../labelrelations.ipynb>`_.
"""
def __init__(self, weighted=None, include_self_edges=None, normalize_self_edges=None):
super(LabelCooccurrenceGraphBuilder, self).__init__()
if weighted not in [True, False]:
raise ValueError("Weighted needs to be a boolean")
if include_self_edges not in [True, False]:
raise ValueError(
"Decision whether to include self edges needs to be a boolean")
if include_self_edges and (normalize_self_edges not in [True, False]):
raise ValueError("Decision whether to normalize self edges needs to be a boolean")
if normalize_self_edges and not include_self_edges:
raise ValueError("Include self edges must be set to true if normalization is true")
if normalize_self_edges and not weighted:
raise ValueError("Normalizing self-edge weights_ does not make sense in an unweighted graph")
self.is_weighted = weighted
self.include_self_edges = include_self_edges
self.normalize_self_edges = normalize_self_edges
def transform(self, y):
"""Generate adjacency matrix from label matrix
This function generates a weighted or unweighted co-occurence Label Graph adjacency matrix in dictionary of keys
format based on input binary label vectors
Parameters
----------
y : numpy.ndarray or scipy.sparse
dense or sparse binary matrix with shape
:code:`(n_samples, n_labels)`
Returns
-------
Dict[(int, int), float]
weight map with a tuple of label indexes as keys and the number of samples in which the two labels co-occurred as the value
"""
label_data = get_matrix_in_format(y, 'lil')
label_count = label_data.shape[1]
edge_map = {}
for row in label_data.rows:
if self.include_self_edges:
pairs = [(a, b) for b in row for a in row if a <= b]
else:
pairs = [(a, b) for b in row for a in row if a < b]
for p in pairs:
if p not in edge_map:
edge_map[p] = 1.0
else:
if self.is_weighted:
edge_map[p] += 1.0
if self.normalize_self_edges:
for i in range(label_count):
if (i, i) in edge_map:
edge_map[(i, i)] = edge_map[(i, i)] / 2.0
return edge_map | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/base.py | base.py |
import math
import numpy as np
def _membership_to_list_of_communities(membership_vector, size):
"""Convert membership vector to list of lists of vertices in each community
Parameters
----------
membership_vector : list of int
community membership i.e. vertex/label `i` is in community `membership_vector[i]`
size : int
the number of communities present in the membership vector
Returns
-------
list_of_members : list of lists of int
list of lists of vertex/label ids in each community per community
"""
list_of_members = [[] for _ in range(size)]
for vertex_id, community_id in enumerate(membership_vector):
list_of_members[community_id].append(vertex_id)
return list_of_members
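# Example (illustrative): a membership vector [0, 1, 0, 2] with size=3 maps
# vertex/label ids to their community, yielding [[0, 2], [1], [3]].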
def _overlapping_membership_to_list_of_communities(membership_vector, size):
"""Convert membership vector to list of lists of vertices/labels in each community
Parameters
----------
membership_vector : list of lists of int
community membership i.e. vertex/label `i` is in communities
from list `membership_vector[i]`
size : int
the number of communities present in the membership vector
Returns
-------
list_of_members : list of lists of int
list of lists of vertex/label ids in each community per community
"""
list_of_members = [[] for _ in range(size)]
for vertex_id, community_ids in enumerate(membership_vector):
for community_id in community_ids:
list_of_members[community_id].append(vertex_id)
return list_of_members
def _euclidean_distance(array1, array2):
"""Returns the euclidean distance of two arrays
Parameters
----------
array1 : array of numbers
array2 : array of numbers
Returns
-------
distance : float
float with the euclidean distance, False if not possible
"""
#Ensure that both arrays have the same length
if len(array1) != len(array2):
return False
else:
distance = 0.0
for i in range(0, len(array1)):
distance = distance + pow(array1[i] - array2[i], 2)
distance = math.sqrt(distance)
return distance
def _recalculateCenters(y, balancedCluster, k):
Centers = []
kAux = 0
while kAux < k:
vectorAux = np.zeros(len(y))
for i in range(0, len(balancedCluster)):
if int(kAux) == int(balancedCluster[i]):
#We have to fill the vector
for j in range(0, len(y)):
vectorAux[j] += y[j,i]
vectorAux /= k
Centers.append(vectorAux)
kAux += 1
return Centers
def _countNumberOfAparitions(array, number):
"""Number of aparitions of a number in an array
Parameters
----------
array : array of numbers
number : number to search for
Returns
-------
aparitions : int
Number of times the number appears in the given array
"""
aparitions = 0
for i in range(0, len(array)):
if array[i] == number:
aparitions += 1
return aparitions | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/helpers.py | helpers.py |
from __future__ import absolute_import
from .base import LabelSpaceClustererBase
class FixedLabelSpaceClusterer(LabelSpaceClustererBase):
"""Return a fixed label space partition
This clusterer takes a predefined fixed ``clustering`` of the label space and returns it in fit_predict as the label
space division. This is useful for employing expert knowledge about label space division or partitions in ensemble
classifiers such as: :class:`~yyskmultilearn.ensemble.LabelSpacePartitioningClassifier` or
:class:`~yyskmultilearn.ensemble.MajorityVotingClassifier`.
Parameters
----------
clusters : array of arrays of int
provided partition of the label space in the form of a numpy array of
numpy arrays of indexes for each partition, ex. ``[[0,1],[2,3]]``
An example use of the fixed clusterer with a label partitioning classifier to train randomforests for a set of
subproblems defined upon expert knowledge:
.. code :: python
from yyskmultilearn.ensemble import LabelSpacePartitioningClassifier
from yyskmultilearn.cluster import FixedLabelSpaceClusterer
from yyskmultilearn.problem_transform import LabelPowerset
from sklearn.ensemble import RandomForestClassifier
classifier = LabelSpacePartitioningClassifier(
classifier = LabelPowerset(
classifier=RandomForestClassifier(n_estimators=100),
require_dense = [False, True]
),
require_dense = [True, True],
clusterer = FixedLabelSpaceClusterer(clustering=[[1,2,3], [0,4]])
)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
"""
def __init__(self, clusters=None):
super(FixedLabelSpaceClusterer, self).__init__()
self.clusters = clusters
def fit_predict(self, X, y):
"""Returns the provided label space division
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
arrray of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
return self.clusters | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/cluster/fixed.py | fixed.py |
import copy
import numpy as np
from scipy.sparse import hstack, issparse, lil_matrix
from ..base.problem_transformation import ProblemTransformationBase
from ..base.base import MLClassifierBase
class BinaryRelevance(ProblemTransformationBase):
"""Performs classification per label
Transforms a multi-label classification problem with L labels
into L single-label separate binary classification problems
using the same base classifier provided in the constructor. The
prediction output is the union of all per label classifiers
Parameters
----------
classifier : :class:`~sklearn.base.BaseEstimator`
scikit-learn compatible base classifier
require_dense : [bool, bool], optional
whether the base classifier requires dense representations
for input features and classes/labels matrices in fit/predict.
If value not provided, sparse representations are used if base classifier is
an instance of :class:`~yyskmultilearn.base.MLClassifierBase` and dense otherwise.
Attributes
----------
model_count_ : int
number of trained models, in this classifier equal to `n_labels`
partition_ : List[List[int]], shape=(`model_count_`,)
list of lists of label indexes, used to index the output space matrix, set in :meth:`_generate_partition`
via :meth:`fit`
classifiers_ : List[:class:`~sklearn.base.BaseEstimator`] of shape `model_count`
list of classifiers trained per partition, set in :meth:`fit`
Notes
-----
.. note ::
This is one of the most basic approaches to multi-label classification; it ignores relationships between labels.
Examples
--------
An example use case for Binary Relevance classification
with an :class:`sklearn.svm.SVC` base classifier which supports sparse input:
.. code-block:: python
from yyskmultilearn.problem_transform import BinaryRelevance
from sklearn.svm import SVC
# initialize Binary Relevance multi-label classifier
# with an SVM classifier
# SVM in scikit only supports the X matrix in sparse representation
classifier = BinaryRelevance(
classifier = SVC(),
require_dense = [False, True]
)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
Another way to use this classifier is to select the best scenario from a set of single-label classifiers used
with Binary Relevance, this can be done using cross validation grid search. In the example below, the model
with highest accuracy results is selected from either a :class:`sklearn.naive_bayes.MultinomialNB` or
:class:`sklearn.svm.SVC` base classifier, alongside with best parameters for that base classifier.
.. code-block:: python
from yyskmultilearn.problem_transform import BinaryRelevance
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
parameters = [
{
'classifier': [MultinomialNB()],
'classifier__alpha': [0.7, 1.0],
},
{
'classifier': [SVC()],
'classifier__kernel': ['rbf', 'linear'],
},
]
clf = GridSearchCV(BinaryRelevance(), parameters, scoring='accuracy')
clf.fit(x, y)
print (clf.best_params_, clf.best_score_)
# result:
#
# {
# 'classifier': SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
# decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',
# max_iter=-1, probability=False, random_state=None, shrinking=True,
# tol=0.001, verbose=False), 'classifier__kernel': 'linear'
# } 0.17
"""
def __init__(self, classifier=None, require_dense=None):
super(BinaryRelevance, self).__init__(classifier, require_dense)
def _generate_partition(self, X, y):
"""Partitions the label space into singletons
Sets `self.partition_` (list of single item lists) and `self.model_count_` (equal to number of labels).
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
not used, only for API compatibility
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `int`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
self.partition_ = list(range(y.shape[1]))
self.model_count_ = y.shape[1]
def fit(self, X, y):
"""Fits classifier to training data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
Notes
-----
.. note :: Input matrices are converted to sparse format internally if a numpy representation is passed
"""
X = self._ensure_input_format(
X, sparse_format='csr', enforce_sparse=True)
y = self._ensure_output_format(
y, sparse_format='csc', enforce_sparse=True)
self.classifiers_ = []
self._generate_partition(X, y)
self._label_count = y.shape[1]
for i in range(self.model_count_):
classifier = copy.deepcopy(self.classifier)
y_subset = self._generate_data_subset(y, self.partition_[i], axis=1)
if issparse(y_subset) and y_subset.ndim > 1 and y_subset.shape[1] == 1:
y_subset = np.ravel(y_subset.toarray())
classifier.fit(self._ensure_input_format(
X), self._ensure_output_format(y_subset))
self.classifiers_.append(classifier)
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
predictions = [self._ensure_multi_label_from_single_class(
self.classifiers_[label].predict(self._ensure_input_format(X)))
for label in range(self.model_count_)]
return hstack(predictions)
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `float in [0.0, 1.0]`, shape=(n_samples, n_labels)
matrix with label assignment probabilities
"""
result = lil_matrix((X.shape[0], self._label_count), dtype='float')
for label_assignment, classifier in zip(self.partition_, self.classifiers_):
if isinstance(self.classifier, MLClassifierBase):
# the multilabel classifier should provide a (n_samples, n_labels) matrix
# we just need to reorder it column wise
result[:, label_assignment] = classifier.predict_proba(X)
else:
# a base classifier for binary relevance returns an (n_samples, 2) matrix
# whose column 1 holds the probability of the label being assigned
result[:, label_assignment] = self._ensure_multi_label_from_single_class(
classifier.predict_proba(
self._ensure_input_format(X))
)[:, 1] # probability that label is assigned
return result | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/problem_transform/br.py | br.py |
from ..base.problem_transformation import ProblemTransformationBase
import numpy as np
from scipy import sparse
class LabelPowerset(ProblemTransformationBase):
"""Transform multi-label problem to a multi-class problem
Label Powerset is a problem transformation approach to multi-label
classification that transforms a multi-label problem to a multi-class
problem with 1 multi-class classifier trained on all unique label
combinations found in the training data.
The method maps each combination to a unique combination id number, and performs multi-class classification
using the `classifier` as multi-class classifier and combination ids as classes.
Parameters
----------
classifier : :class:`~sklearn.base.BaseEstimator`
scikit-learn compatible base classifier
require_dense : [bool, bool], optional
whether the base classifier requires dense representations
for input features and classes/labels matrices in fit/predict.
If value not provided, sparse representations are used if base classifier is
an instance of :class:`yyskmultilearn.base.MLClassifierBase` and dense otherwise.
Attributes
----------
unique_combinations_ : Dict[str, int]
mapping from a label combination (as a string) to its combination id, filled by :meth:`transform` via :meth:`fit`
reverse_combinations_ : List[List[int]]
list of label index lists ordered by combination id, filled by :meth:`transform` via :meth:`fit`
Notes
-----
.. note ::
`n_classes` in this document denotes the number of unique label combinations present in the training `y`
passed to :meth:`fit`, in practice it is equal to :code:`len(self.unique_combinations)`
Examples
--------
An example use case for Label Powerset with an :class:`sklearn.ensemble.RandomForestClassifier` base classifier
which supports sparse input:
.. code-block:: python
from yyskmultilearn.problem_transform import LabelPowerset
from sklearn.ensemble import RandomForestClassifier
# initialize LabelPowerset multi-label classifier with a RandomForest
classifier = LabelPowerset(
classifier = RandomForestClassifier(n_estimators=100),
require_dense = [False, True]
)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
Another way to use this classifier is to select the best scenario from a set of multi-class classifiers used
with Label Powerset, this can be done using cross validation grid search. In the example below, the model
with highest accuracy results is selected from either a :class:`sklearn.ensemble.RandomForestClassifier` or
:class:`sklearn.naive_bayes.MultinomialNB` base classifier, alongside with best parameters for
that base classifier.
.. code-block:: python
from yyskmultilearn.problem_transform import LabelPowerset
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
parameters = [
{
'classifier': [MultinomialNB()],
'classifier__alpha': [0.7, 1.0],
},
{
'classifier': [RandomForestClassifier()],
'classifier__criterion': ['gini', 'entropy'],
'classifier__n_estimators': [10, 20, 50],
},
]
clf = GridSearchCV(LabelPowerset(), parameters, scoring='accuracy')
clf.fit(x, y)
print (clf.best_params_, clf.best_score_)
# result
# {
# 'classifier': RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
# max_depth=None, max_features='auto', max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=50, n_jobs=1,
# oob_score=False, random_state=None, verbose=0,
# warm_start=False), 'classifier__criterion': 'gini', 'classifier__n_estimators': 50
# } 0.16
"""
def __init__(self, classifier=None, require_dense=None):
super(LabelPowerset, self).__init__(
classifier=classifier, require_dense=require_dense)
self._clean()
def _clean(self):
"""Reset classifier internals before refitting"""
self.unique_combinations_ = {}
self.reverse_combinations_ = []
self._label_count = None
def fit(self, X, y):
"""Fits classifier to training data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
Notes
-----
.. note :: Input matrices are converted to sparse format internally if a numpy representation is passed
"""
X = self._ensure_input_format(
X, sparse_format='csr', enforce_sparse=True)
self.classifier.fit(self._ensure_input_format(X),
self.transform(y))
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
# this will be an np.array of integers representing classes
lp_prediction = self.classifier.predict(self._ensure_input_format(X))
return self.inverse_transform(lp_prediction)
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `float in [0.0, 1.0]`, shape=(n_samples, n_labels)
matrix with label assignment probabilities
"""
lp_prediction = self.classifier.predict_proba(
self._ensure_input_format(X))
result = sparse.lil_matrix(
(X.shape[0], self._label_count), dtype='float')
for row in range(len(lp_prediction)):
assignment = lp_prediction[row]
for combination_id in range(len(assignment)):
for label in self.reverse_combinations_[combination_id]:
result[row, label] += assignment[combination_id]
return result
def transform(self, y):
"""Transform multi-label output space to multi-class
Transforms a multi-label problem into a single-label multi-class
problem where each label combination is a separate class.
Parameters
-----------
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
numpy.ndarray of `{0, ... , n_classes-1}`, shape=(n_samples,)
a multi-class output space vector
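Examples
--------
A minimal sketch of the mapping on a small, hypothetical indicator matrix:
.. code-block:: python
    import numpy as np
    from yyskmultilearn.problem_transform import LabelPowerset
    y = np.array([[0, 1], [1, 1], [0, 1]])
    # rows 0 and 2 share the combination {1}, row 1 has the combination {0, 1}
    LabelPowerset().transform(y)
    # expected: array([0, 1, 0])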
"""
y = self._ensure_output_format(
y, sparse_format='lil', enforce_sparse=True)
self._clean()
self._label_count = y.shape[1]
last_id = 0
train_vector = []
for labels_applied in y.rows:
label_string = ",".join(map(str, labels_applied))
if label_string not in self.unique_combinations_:
self.unique_combinations_[label_string] = last_id
self.reverse_combinations_.append(labels_applied)
last_id += 1
train_vector.append(self.unique_combinations_[label_string])
return np.array(train_vector)
def inverse_transform(self, y):
"""Transforms multi-class assignment to multi-label
Transforms a vector of label combination class ids, as produced by
:meth:`transform`, back into a binary indicator matrix.
Parameters
-----------
y : numpy.ndarray of `{0, ... , n_classes-1}`, shape=(n_samples,)
vector of label combination class ids, as produced by :meth:`transform`
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
n_samples = len(y)
result = sparse.lil_matrix((n_samples, self._label_count), dtype='i8')
for row in range(n_samples):
assignment = y[row]
result[row, self.reverse_combinations_[assignment]] = 1
return result | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/problem_transform/lp.py | lp.py |
from ..base.problem_transformation import ProblemTransformationBase
from scipy.sparse import hstack
from sklearn.exceptions import NotFittedError
import copy
class ClassifierChain(ProblemTransformationBase):
"""Constructs a bayesian conditioned chain of per label classifiers
This class provides implementation of Jesse Read's problem
transformation method called Classifier Chains. For L labels it
trains L classifiers ordered in a chain according to the
`Bayesian chain rule`.
The first classifier is trained just on the input space, and then
each next classifier is trained on the input space and all previous
classifiers in the chain.
The default classifier chains follow the same ordering as provided
in the training set, i.e. label in column 0, then 1, etc.
Parameters
----------
classifier : :class:`~sklearn.base.BaseEstimator`
scikit-learn compatible base classifier
require_dense : [bool, bool], optional
whether the base classifier requires dense representations
for input features and classes/labels matrices in fit/predict.
If value not provided, sparse representations are used if base classifier is
an instance of :class:`~yyskmultilearn.base.MLClassifierBase` and dense otherwise.
order : List[int], permutation of ``range(n_labels)``, optional
the order in which the chain should go through labels, the default is ``range(n_labels)``
Attributes
----------
classifiers_ : List[:class:`~sklearn.base.BaseEstimator`] of shape `n_labels`
list of classifiers trained per label, in chain order, set in :meth:`fit`
References
----------
If used, please cite the scikit-multilearn library and the relevant paper:
.. code-block:: bibtex
@inproceedings{read2009classifier,
title={Classifier chains for multi-label classification},
author={Read, Jesse and Pfahringer, Bernhard and Holmes, Geoff and Frank, Eibe},
booktitle={Joint European Conference on Machine Learning and Knowledge Discovery in Databases},
pages={254--269},
year={2009},
organization={Springer}
}
Examples
--------
An example use case for Classifier Chains
with an :class:`sklearn.svm.SVC` base classifier which supports sparse input:
.. code-block:: python
from yyskmultilearn.problem_transform import ClassifierChain
from sklearn.svm import SVC
# initialize Classifier Chain multi-label classifier
# with an SVM classifier
# SVM in scikit only supports the X matrix in sparse representation
classifier = ClassifierChain(
classifier = SVC(),
require_dense = [False, True]
)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
Another way to use this classifier is to select the best scenario from a set of single-label classifiers used
with Classifier Chain, this can be done using cross validation grid search. In the example below, the model
with highest accuracy results is selected from either a :class:`sklearn.naive_bayes.MultinomialNB` or
:class:`sklearn.svm.SVC` base classifier, alongside with best parameters for that base classifier.
.. code-block:: python
from yyskmultilearn.problem_transform import ClassifierChain
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
parameters = [
{
'classifier': [MultinomialNB()],
'classifier__alpha': [0.7, 1.0],
},
{
'classifier': [SVC()],
'classifier__kernel': ['rbf', 'linear'],
},
]
clf = GridSearchCV(ClassifierChain(), parameters, scoring='accuracy')
clf.fit(x, y)
print (clf.best_params_, clf.best_score_)
# result
# {'classifier': MultinomialNB(alpha=0.7, class_prior=None, fit_prior=True), 'classifier__alpha': 0.7} 0.16
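The chain order can also be set explicitly through the ``order`` parameter. A minimal sketch
(reusing the SVC setup above, with a hypothetical reversed label ordering):
.. code-block:: python
    classifier = ClassifierChain(
        classifier=SVC(),
        require_dense=[False, True],
        order=list(range(y_train.shape[1]))[::-1]
    )
    classifier.fit(X_train, y_train)
    predictions = classifier.predict(X_test)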
"""
def __init__(self, classifier=None, require_dense=None, order=None):
super(ClassifierChain, self).__init__(classifier, require_dense)
self.order = order
self.copyable_attrs = ['classifier', 'require_dense', 'order']
def fit(self, X, y, order=None):
"""Fits classifier to training data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
Notes
-----
.. note :: Input matrices are converted to sparse format internally if a numpy representation is passed
"""
# fit L = y.shape[1] binary classifiers h_i,
# each trained on X extended with y[:, :i] as input and y[:, i] as output
X_extended = self._ensure_input_format(X, sparse_format='csc', enforce_sparse=True)
y = self._ensure_output_format(y, sparse_format='csc', enforce_sparse=True)
self._label_count = y.shape[1]
self.classifiers_ = [None for x in range(self._label_count)]
for label in self._order():
self.classifier = copy.deepcopy(self.classifier)
y_subset = self._generate_data_subset(y, label, axis=1)
self.classifiers_[label] = self.classifier.fit(self._ensure_input_format(
X_extended), self._ensure_output_format(y_subset))
X_extended = hstack([X_extended, y_subset])
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
X_extended = self._ensure_input_format(
X, sparse_format='csc', enforce_sparse=True)
for label in self._order():
prediction = self.classifiers_[label].predict(
self._ensure_input_format(X_extended))
prediction = self._ensure_multi_label_from_single_class(prediction)
X_extended = hstack([X_extended, prediction])
return X_extended[:, -self._label_count:]
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `float in [0.0, 1.0]`, shape=(n_samples, n_labels)
matrix with label assignment probabilities
"""
X_extended = self._ensure_input_format(
X, sparse_format='csc', enforce_sparse=True)
results = []
for label in self._order():
prediction = self.classifiers_[label].predict(
self._ensure_input_format(X_extended))
prediction = self._ensure_output_format(
prediction, sparse_format='csc', enforce_sparse=True)
prediction_proba = self.classifiers_[label].predict_proba(
self._ensure_input_format(X_extended))
prediction_proba = self._ensure_output_format(
prediction_proba, sparse_format='csc', enforce_sparse=True)[:, 1]
X_extended = hstack([X_extended, prediction]).tocsc()
results.append(prediction_proba)
return hstack(results)
def _order(self):
if self.order is not None:
return self.order
try:
return list(range(self._label_count))
except AttributeError:
raise NotFittedError("This Classifier Chain has not been fit yet") | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/problem_transform/cc.py | cc.py |
from builtins import object
from builtins import range
import numpy
import numpy.core.umath as umath
import scipy.sparse
from scipy.sparse import issparse
from ..base import MLClassifierBase
class Neuron(object):
"""An implementation of a neuron for MLARAM
Parameters
----------
vc : array
neuron's assigned vector
label : int
label number
"""
def __init__(self, vc, label):
# vector must be in complement form
self.vc = vc
self.label = label
def _get_label_combination_representation(label_assignment_binary_indicator_list):
return label_assignment_binary_indicator_list.nonzero()[0].tobytes()
def _get_label_vector(y, i):
if issparse(y):
return numpy.squeeze(numpy.asarray(y[i].todense()))
return y[i]
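# Complement coding (as used by ART/ARAM-style networks): each input row x is extended to
# [x, 1 - x], so prototype (hyperbox) overlap can be measured with the element-wise minimum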
def _concatenate_with_negation(row):
ones = numpy.ones(row.shape)
if issparse(row):
return scipy.sparse.hstack((row, ones - row))
else:
# concatenate along the second axis when the row is a 2D matrix, first axis otherwise
return numpy.concatenate((row, ones - row), int(len(row.shape) != 1))
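# min-max rescaling of the input space to [0, 1]; data already in that range is left unchanged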
def _normalize_input_space(X):
x_max = X.max()
x_min = X.min()
if x_max < 0 or x_max > 1 or x_min < 0 or x_min > 1:
return numpy.multiply(X - x_min, 1 / (x_max - x_min))
return X
class MLARAM(MLClassifierBase):
"""HARAM: A Hierarchical ARAM Neural Network for Large-Scale Text Classification
This method aims at increasing the classification speed by adding an
extra ART layer for clustering learned prototypes into large clusters.
In this case the activation of all prototypes can be replaced by the
activation of a small fraction of them, leading to a significant
reduction of the classification time.
Parameters
----------
vigilance : float (default is 0.9)
parameter for adaptive resonance theory networks, controls how
large a hyperbox can be: at 1 hyperboxes are small (no compression), at 0
a hyperbox may cover the whole range. Normally set between 0.8 and 0.999,
it is dataset dependent. It is responsible for the creation
of the prototypes, therefore training of the network.
threshold : float (default is 0.02)
controls how many prototypes participate by the prediction,
can be changed for the testing phase.
neurons : list
the neurons in the network
References
----------
Published work available `here`_.
.. _here: http://dx.doi.org/10.1109/ICDMW.2015.14
.. code :: bibtex
@INPROCEEDINGS{7395756,
author={F. Benites and E. Sapozhnikova},
booktitle={2015 IEEE International Conference on Data Mining Workshop (ICDMW)},
title={HARAM: A Hierarchical ARAM Neural Network for Large-Scale Text Classification},
year={2015},
volume={},
number={},
pages={847-854},
doi={10.1109/ICDMW.2015.14},
ISSN={2375-9259},
month={Nov},
}
Examples
--------
Here's an example code with a 5% threshold and vigilance of 0.95:
.. code :: python
from yyskmultilearn.adapt import MLARAM
classifier = MLARAM(threshold=0.05, vigilance=0.95)
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
"""
def __init__(self, vigilance=0.9, threshold=0.02, neurons=None):
super(MLARAM, self).__init__()
if neurons is not None:
self.neurons = neurons
else:
self.neurons = []
self.vigilance = vigilance
self.threshold = threshold
self.copyable_attrs += ["neurons", "vigilance", "threshold"]
def reset(self):
"""Resets the labels and neurons"""
self._labels = []
self.neurons = []
def fit(self, X, y):
"""Fit classifier with training data
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features, can be a dense or sparse matrix of size
:code:`(n_samples, n_features)`
y : numpy.ndarray or scipy.sparse {0,1}
binary indicator matrix with label assignments.
Returns
-------
yyskmultilearn.MLARAMfast.MLARAM
fitted instance of self
"""
self._labels = []
self._allneu = ""
self._online = 1
self._alpha = 0.0000000000001
is_sparse_x = issparse(X)
label_combination_to_class_map = {}
# FIXME: we should support dense matrices natively
if isinstance(X, numpy.matrix):
X = numpy.asarray(X)
if isinstance(y, numpy.matrix):
y = numpy.asarray(y)
is_more_dimensional = int(len(X[0].shape) != 1)
X = _normalize_input_space(X)
y_0 = _get_label_vector(y, 0)
if len(self.neurons) == 0:
neuron_vc = _concatenate_with_negation(X[0])
self.neurons.append(Neuron(neuron_vc, y_0))
start_index = 1
label_combination_to_class_map[_get_label_combination_representation(y_0)] = [0]
else:
start_index = 0
# denotes the class enumerator for label combinations
last_used_label_combination_class_id = 0
for row_no, input_vector in enumerate(X[start_index:], start_index):
label_assignment_vector = _get_label_vector(y, row_no)
fc = _concatenate_with_negation(input_vector)
activationn = [0] * len(self.neurons)
activationi = [0] * len(self.neurons)
label_combination = _get_label_combination_representation(label_assignment_vector)
if label_combination in label_combination_to_class_map:
fcs = fc.sum()
for class_number in label_combination_to_class_map[label_combination]:
if issparse(self.neurons[class_number].vc):
minnfs = self.neurons[class_number].vc.minimum(fc).sum()
else:
minnfs = umath.minimum(self.neurons[class_number].vc, fc).sum()
activationi[class_number] = minnfs / fcs
activationn[class_number] = minnfs / self.neurons[class_number].vc.sum()
if numpy.max(activationn) == 0:
last_used_label_combination_class_id += 1
self.neurons.append(Neuron(fc, label_assignment_vector))
label_combination_to_class_map.setdefault(label_combination, []).append(len(self.neurons) - 1)
continue
inds = numpy.argsort(activationn)
indc = numpy.where(numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]
if indc.shape[0] == 0:
self.neurons.append(Neuron(fc, label_assignment_vector))
label_combination_to_class_map.setdefault(label_combination, []).append(len(self.neurons) - 1)
continue
winner = inds[::-1][indc[0]]
if issparse(self.neurons[winner].vc):
self.neurons[winner].vc = self.neurons[winner].vc.minimum(fc)
else:
self.neurons[winner].vc = umath.minimum(
self.neurons[winner].vc, fc
)
# 1 if winner neuron won a given label 0 if not
labels_won_indicator = numpy.zeros(y_0.shape, dtype=y_0.dtype)
labels_won_indicator[label_assignment_vector.nonzero()] = 1
self.neurons[winner].label += labels_won_indicator
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
"""
result = []
# FIXME: we should support dense matrices natively
if isinstance(X, numpy.matrix):
X = numpy.asarray(X)
ranks = self.predict_proba(X)
for rank in ranks:
sorted_rank_arg = numpy.argsort(-rank)
diffs = -numpy.diff([rank[k] for k in sorted_rank_arg])
indcutt = numpy.where(diffs == diffs.max())[0]
if len(indcutt.shape) == 1:
indcut = indcutt[0] + 1
else:
indcut = indcutt[0, -1] + 1
label = numpy.zeros(rank.shape)
label[sorted_rank_arg[0:indcut]] = 1
result.append(label)
return numpy.array(numpy.matrix(result))
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
array of arrays of float
matrix with label assignment probabilities of shape
:code:`(n_samples, n_labels)`
"""
# FIXME: we should support dense matrices natively
if isinstance(X, numpy.matrix):
X = numpy.asarray(X)
if issparse(X):
if X.getnnz() == 0:
return
elif len(X) == 0:
return
is_matrix = int(len(X[0].shape) != 1)
X = _normalize_input_space(X)
all_ranks = []
neuron_vectors = [n1.vc for n1 in self.neurons]
if any(map(issparse, neuron_vectors)):
all_neurons = scipy.sparse.vstack(neuron_vectors)
# can't add a constant to a sparse matrix in scipy
all_neurons_sum = all_neurons.sum(1).A
else:
all_neurons = numpy.vstack(neuron_vectors)
all_neurons_sum = all_neurons.sum(1)
all_neurons_sum += self._alpha
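# fuzzy ART choice (activation) of every stored prototype w for input fc:
# |min(fc, w)|_1 / (alpha + |w|_1), computed against all prototypes at once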
for row_number, input_vector in enumerate(X):
fc = _concatenate_with_negation(input_vector)
if issparse(fc):
activity = (fc.minimum(all_neurons).sum(1) / all_neurons_sum).squeeze().tolist()
else:
activity = (umath.minimum(fc, all_neurons).sum(1) / all_neurons_sum).squeeze().tolist()
if is_matrix:
activity = activity[0]
# rank prototypes by activation, highest first
sorted_activity = numpy.argsort(activity)[::-1]
winner = sorted_activity[0]
activity_difference = activity[winner] - activity[sorted_activity[-1]]
largest_activity = 1
par_t = self.threshold
for i in range(1, len(self.neurons)):
activity_change = (activity[winner] - activity[sorted_activity[i]]) / activity[winner]
if activity_change > par_t * activity_difference:
break
largest_activity += 1
rbsum = sum([activity[k] for k in sorted_activity[0:largest_activity]])
rank = activity[winner] * self.neurons[winner].label
activated = []
activity_among_activated = []
activated.append(winner)
activity_among_activated.append(activity[winner])
for i in range(1, largest_activity):
rank += activity[sorted_activity[i]] * self.neurons[
sorted_activity[i]].label
activated.append(sorted_activity[i])
activity_among_activated.append(activity[sorted_activity[i]])
rank /= rbsum
all_ranks.append(rank)
return numpy.array(numpy.matrix(all_ranks)) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/adapt/mlaram.py | mlaram.py |
from builtins import range
from ..base import MLClassifierBase
from ..utils import get_matrix_in_format
from sklearn.neighbors import NearestNeighbors
import numpy as np
import scipy.sparse as sparse
class MLkNN(MLClassifierBase):
"""kNN classification method adapted for multi-label classification
MLkNN uses k-NearestNeighbors to find the nearest examples to a test instance and applies Bayesian inference
to select the assigned labels.
Parameters
----------
k : int
number of neighbours of each input instance to take into account
s: float (default is 1.0)
the smoothing parameter
ignore_first_neighbours : int (default is 0)
ability to ignore first N neighbours, useful for comparing
with other classification software.
Attributes
----------
knn_ : an instance of sklearn.NearestNeighbors
the nearest neighbors single-label classifier used underneath
.. note:: If you don't know what :code:`ignore_first_neighbours`
does, the default is safe. Please see this `issue`_.
.. _issue: https://github.com/scikit-multilearn/scikit-multilearn/issues/22
References
----------
If you use this classifier please cite the original paper introducing the method:
.. code :: bibtex
@article{zhang2007ml,
title={ML-KNN: A lazy learning approach to multi-label learning},
author={Zhang, Min-Ling and Zhou, Zhi-Hua},
journal={Pattern recognition},
volume={40},
number={7},
pages={2038--2048},
year={2007},
publisher={Elsevier}
}
Examples
--------
Here's a very simple example of using MLkNN with a fixed number of neighbors:
.. code :: python
from yyskmultilearn.adapt import MLkNN
classifier = MLkNN(k=3)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
You can also use :class:`~sklearn.model_selection.GridSearchCV` to find an optimal set of parameters:
.. code :: python
from yyskmultilearn.adapt import MLkNN
from sklearn.model_selection import GridSearchCV
parameters = {'k': range(1,3), 's': [0.5, 0.7, 1.0]}
score = 'f1_macro'
clf = GridSearchCV(MLkNN(), parameters, scoring=score)
clf.fit(X, y)
print (clf.best_params_, clf.best_score_)
# output
({'k': 1, 's': 0.5}, 0.78988303374297597)
"""
def __init__(self, k=10, s=1.0, ignore_first_neighbours=0):
"""Initializes the classifier
Parameters
----------
k : int
number of neighbours of each input instance to take into account
s: float (default is 1.0)
the smoothing parameter
ignore_first_neighbours : int (default is 0)
ability to ignore first N neighbours, useful for comparing
with other classification software.
Attributes
----------
knn_ : an instance of sklearn.NearestNeighbors
the nearest neighbors single-label classifier used underneath
.. note:: If you don't know what :code:`ignore_first_neighbours`
does, the default is safe. Please see this `issue`_.
.. _issue: https://github.com/scikit-multilearn/scikit-multilearn/issues/22
"""
super(MLkNN, self).__init__()
self.k = k # Number of neighbours
self.s = s # Smooth parameter
self.ignore_first_neighbours = ignore_first_neighbours
self.copyable_attrs = ['k', 's', 'ignore_first_neighbours']
def _compute_prior(self, y):
"""Helper function to compute for the prior probabilities
Parameters
----------
y : numpy.ndarray or scipy.sparse
the training labels
Returns
-------
numpy.ndarray
the prior probability given true
numpy.ndarray
the prior probability given false
"""
prior_prob_true = np.array((self.s + y.sum(axis=0)) / (self.s * 2 + self._num_instances))[0]
prior_prob_false = 1 - prior_prob_true
return (prior_prob_true, prior_prob_false)
def _compute_cond(self, X, y):
"""Helper function to compute for the posterior probabilities
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features, can be a dense or sparse matrix of size
:code:`(n_samples, n_features)`
y : numpy.ndarray or scipy.sparse {0,1}
binary indicator matrix with label assignments.
Returns
-------
numpy.ndarray
the posterior probability given true
numpy.ndarray
the posterior probability given false
"""
self.knn_ = NearestNeighbors(n_neighbors=self.k).fit(X)
c = sparse.lil_matrix((self._num_labels, self.k + 1), dtype='i8')
cn = sparse.lil_matrix((self._num_labels, self.k + 1), dtype='i8')
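# c[l, d]: training instances that have label l and whose k neighbours contain d instances with l
# cn[l, d]: training instances that lack label l and whose k neighbours contain d instances with l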
label_info = get_matrix_in_format(y, 'dok')
neighbors = [a[self.ignore_first_neighbours:] for a in
self.knn_.kneighbors(X, self.k + self.ignore_first_neighbours, return_distance=False)]
for instance in range(self._num_instances):
deltas = label_info[neighbors[instance], :].sum(axis=0)
for label in range(self._num_labels):
if label_info[instance, label] == 1:
c[label, deltas[0, label]] += 1
else:
cn[label, deltas[0, label]] += 1
c_sum = c.sum(axis=1)
cn_sum = cn.sum(axis=1)
cond_prob_true = sparse.lil_matrix((self._num_labels, self.k + 1), dtype='float')
cond_prob_false = sparse.lil_matrix((self._num_labels, self.k + 1), dtype='float')
for label in range(self._num_labels):
for neighbor in range(self.k + 1):
cond_prob_true[label, neighbor] = (self.s + c[label, neighbor]) / (
self.s * (self.k + 1) + c_sum[label, 0])
cond_prob_false[label, neighbor] = (self.s + cn[label, neighbor]) / (
self.s * (self.k + 1) + cn_sum[label, 0])
return cond_prob_true, cond_prob_false
def fit(self, X, y):
"""Fit classifier with training data
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features, can be a dense or sparse matrix of size
:code:`(n_samples, n_features)`
y : numpy.ndarray or scipy.sparse {0,1}
binary indicator matrix with label assignments.
Returns
-------
self
fitted instance of self
"""
self._label_cache = get_matrix_in_format(y, 'lil')
self._num_instances = self._label_cache.shape[0]
self._num_labels = self._label_cache.shape[1]
# Computing the prior probabilities
self._prior_prob_true, self._prior_prob_false = self._compute_prior(self._label_cache)
# Computing the posterior probabilities
self._cond_prob_true, self._cond_prob_false = self._compute_cond(X, self._label_cache)
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse matrix of int
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
"""
result = sparse.lil_matrix((X.shape[0], self._num_labels), dtype='i8')
neighbors = [a[self.ignore_first_neighbours:] for a in
self.knn_.kneighbors(X, self.k + self.ignore_first_neighbours, return_distance=False)]
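# maximum a posteriori decision per label: assign the label iff
# prior(present) * P(neighbour count | present) >= prior(absent) * P(neighbour count | absent)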
for instance in range(X.shape[0]):
deltas = self._label_cache[neighbors[instance],].sum(axis=0)
for label in range(self._num_labels):
p_true = self._prior_prob_true[label] * self._cond_prob_true[label, deltas[0, label]]
p_false = self._prior_prob_false[label] * self._cond_prob_false[label, deltas[0, label]]
result[instance, label] = int(p_true >= p_false)
return result
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse matrix of int
binary indicator matrix with label assignment probabilities
with shape :code:`(n_samples, n_labels)`
"""
result = sparse.lil_matrix((X.shape[0], self._num_labels), dtype='float')
neighbors = [a[self.ignore_first_neighbours:] for a in
self.knn_.kneighbors(X, self.k + self.ignore_first_neighbours, return_distance=False)]
for instance in range(X.shape[0]):
deltas = self._label_cache[neighbors[instance],].sum(axis=0)
for label in range(self._num_labels):
p_true = self._prior_prob_true[label] * self._cond_prob_true[label, deltas[0, label]]
p_false = self._prior_prob_false[label] * self._cond_prob_false[label, deltas[0, label]]
result[instance, label] = p_true / (p_true + p_false)
return result | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/adapt/mlknn.py | mlknn.py |
from builtins import range
from ..base import MLClassifierBase
from ..utils import get_matrix_in_format
from sklearn.neighbors import NearestNeighbors
import scipy.sparse as sparse
import numpy as np
class _BinaryRelevanceKNN(MLClassifierBase):
"""Binary Relevance adapted kNN Multi-Label Classifier base class."""
def __init__(self, k=10):
super(_BinaryRelevanceKNN, self).__init__()
self.k = k # Number of neighbours
self.copyable_attrs = ['k']
def fit(self, X, y):
"""Fit classifier with training data
Internally this method uses a sparse CSC representation for y
(:class:`scipy.sparse.csc_matrix`).
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features, can be a dense or sparse matrix of size
:code:`(n_samples, n_features)`
y : numpy.ndarray or scipy.sparse {0,1}
binary indicator matrix with label assignments.
Returns
-------
self
fitted instance of self
"""
self.train_labelspace = get_matrix_in_format(y, 'csc')
self._n_samples = self.train_labelspace.shape[0]
self._n_labels = self.train_labelspace.shape[1]
self.knn_ = NearestNeighbors(n_neighbors=self.k).fit(X)
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
"""
self.neighbors_ = self.knn_.kneighbors(X, self.k, return_distance=False)
self.confidences_ = np.vstack([self.train_labelspace[n, :].tocsc().sum(axis=0) / self.k for n in self.neighbors_])
return self._predict_variant(X)
class BRkNNaClassifier(_BinaryRelevanceKNN):
"""Binary Relevance multi-label classifier based on k-Nearest Neighbors method.
This version of the classifier assigns the labels that are assigned
to at least half of the neighbors.
Parameters
----------
k : int
number of neighbours
Attributes
----------
knn_ : an instance of sklearn.NearestNeighbors
the nearest neighbors single-label classifier used underneath
neighbors_ : array of arrays of int, shape = (n_samples, k)
k neighbors of each sample
confidences_ : matrix of float, shape = (n_samples, n_labels)
label assignment confidences
References
----------
If you use this method please cite the relevant paper:
.. code :: bibtex
@inproceedings{EleftheriosSpyromitros2008,
author = {Eleftherios Spyromitros, Grigorios Tsoumakas, Ioannis Vlahavas},
booktitle = {Proc. 5th Hellenic Conference on Artificial Intelligence (SETN 2008)},
title = {An Empirical Study of Lazy Multilabel Classification Algorithms},
year = {2008},
location = {Syros, Greece}
}
Examples
--------
Here's a very simple example of using BRkNNaClassifier with a fixed number of neighbors:
.. code :: python
from yyskmultilearn.adapt import BRkNNaClassifier
classifier = BRkNNaClassifier(k=3)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
You can also use :class:`~sklearn.model_selection.GridSearchCV` to find an optimal set of parameters:
.. code :: python
from yyskmultilearn.adapt import BRkNNaClassifier
from sklearn.model_selection import GridSearchCV
parameters = {'k': range(1,3)}
score = 'f1_macro'
clf = GridSearchCV(BRkNNaClassifier(), parameters, scoring=score)
clf.fit(X, y)
"""
def _predict_variant(self, X):
# TODO: find out if moving the sparsity to compute confidences_ boosts speed
return sparse.csr_matrix(np.rint(self.confidences_), dtype='i8')
class BRkNNbClassifier(_BinaryRelevanceKNN):
"""Binary Relevance multi-label classifier based on k-Nearest Neighbors method.
This version of the classifier assigns the most popular m labels of
the neighbors, where m is the average number of labels assigned to
the object's neighbors.
Parameters
----------
k : int
number of neighbours
Attributes
----------
knn_ : an instance of sklearn.NearestNeighbors
the nearest neighbors single-label classifier used underneath
neighbors_ : array of arrays of int, shape = (n_samples, k)
k neighbors of each sample
confidences_ : matrix of float, shape = (n_samples, n_labels)
label assignment confidences
References
----------
If you use this method please cite the relevant paper:
.. code :: bibtex
@inproceedings{EleftheriosSpyromitros2008,
author = {Eleftherios Spyromitros, Grigorios Tsoumakas, Ioannis Vlahavas},
booktitle = {Proc. 5th Hellenic Conference on Artificial Intelligence (SETN 2008)},
title = {An Empirical Study of Lazy Multilabel Classification Algorithms},
year = {2008},
location = {Syros, Greece}
}
Examples
--------
Here's a very simple example of using BRkNNbClassifier with a fixed number of neighbors:
.. code :: python
from yyskmultilearn.adapt import BRkNNbClassifier
classifier = BRkNNbClassifier(k=3)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
You can also use :class:`~sklearn.model_selection.GridSearchCV` to find an optimal set of parameters:
.. code :: python
from yyskmultilearn.adapt import BRkNNbClassifier
from sklearn.model_selection import GridSearchCV
parameters = {'k': range(1,3)}
score = 'f1_macro'
clf = GridSearchCV(BRkNNbClassifier(), parameters, scoring=score)
clf.fit(X, y)
"""
def _predict_variant(self, X):
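# avg_labels[i] is the rounded mean label count among the k neighbours of sample i;
# the avg_labels[i] most confident labels are then assigned to that sample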
avg_labels = [int(np.average(self.train_labelspace[n, :].sum(axis=1)).round()) for n in self.neighbors_]
prediction = sparse.lil_matrix((X.shape[0], self._n_labels), dtype='i8')
top_labels = np.argpartition(self.confidences_, kth=min(avg_labels + [len(self.confidences_[0])]),
axis=1).tolist()
for i in range(X.shape[0]):
for j in top_labels[i][-avg_labels[i]:]:
prediction[i, j] += 1
return prediction | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/adapt/brknn.py | brknn.py |
from yyskmultilearn.base import MLClassifierBase
import numpy as np
import scipy.sparse as sp
from scipy.linalg import norm
from scipy.sparse.linalg import inv as inv_sparse
from scipy.linalg import inv as inv_dense
class MLTSVM(MLClassifierBase):
"""Twin multi-Label Support Vector Machines
Parameters
----------
c_k : int
the empirical risk penalty parameter that determines the trade-off between the loss terms
sor_omega: float (default is 1.0)
the smoothing parameter
threshold : int (default is 1e-6)
threshold above which a label should be assigned
lambda_param : float (default is 1.0)
the regularization parameter
max_iteration : int (default is 500)
maximum number of iterations to use in successive overrelaxation
References
----------
If you use this classifier please cite the original paper introducing the method:
.. code :: bibtex
@article{chen2016mltsvm,
title={MLTSVM: a novel twin support vector machine to multi-label learning},
author={Chen, Wei-Jie and Shao, Yuan-Hai and Li, Chun-Na and Deng, Nai-Yang},
journal={Pattern Recognition},
volume={52},
pages={61--74},
year={2016},
publisher={Elsevier}
}
Examples
--------
Here's a very simple example of using MLTSVM with a fixed number of neighbors:
.. code :: python
from yyskmultilearn.adapt import MLTSVM
classifier = MLTSVM(c_k = 2**-1)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
You can also use :class:`~sklearn.model_selection.GridSearchCV` to find an optimal set of parameters:
.. code :: python
from yyskmultilearn.adapt import MLTSVM
from sklearn.model_selection import GridSearchCV
parameters = {'c_k': [2**i for i in range(-5, 5, 2)]}
score = 'f1_macro'
clf = GridSearchCV(MLTSVM(), parameters, scoring=score)
clf.fit(X, y)
print (clf.best_params_, clf.best_score_)
# output
{'c_k': 0.03125} 0.347518217573
"""
def __init__(self, c_k=0, sor_omega=1.0, threshold=1e-6, lambda_param=1.0, max_iteration=500):
super(MLTSVM, self).__init__()
self.max_iteration = max_iteration
self.threshold = threshold
self.lambda_param = lambda_param # TODO: possibility to add different lambda to different labels
self.c_k = c_k
self.sor_omega = sor_omega
self.copyable_attrs = ['c_k', 'sor_omega', 'lambda_param', 'threshold', 'max_iteration']
def fit(self, X, Y):
n_labels = Y.shape[1]
m = X.shape[1] # Count of features
self.wk_bk = np.zeros([n_labels, m + 1], dtype=float)
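# wk_bk[k] stores the augmented hyperplane [w_k, b_k] for label k
# (the bias is absorbed through the column of ones appended to X below)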
if sp.issparse(X):
identity_matrix = sp.identity(m + 1)
_inv = inv_sparse
else:
identity_matrix = np.identity(m + 1)
_inv = inv_dense
X_bias = _hstack(X, np.ones((X.shape[0], 1), dtype=X.dtype))
self.iteration_count = []
for label in range(0, n_labels):
# Calculate the parameter Q for overrelaxation
H_k = _get_x_class_instances(X_bias, Y, label)
G_k = _get_x_noclass_instances(X_bias, Y, label)
Q_knoPrefixGk = _inv((H_k.T).dot(H_k) + self.lambda_param * identity_matrix).dot(G_k.T)
Q_k = G_k.dot(Q_knoPrefixGk)
if sp.issparse(Q_k):
    Q_k = Q_k.A  # densify the sparse product; dense inputs already yield an ndarray
Q_k = (Q_k + Q_k.T) / 2.0
# Solve the dual problem for this label via successive overrelaxation
alpha_k = self._successive_overrelaxation(self.sor_omega, Q_k)
if sp.issparse(X):
self.wk_bk[label] = -Q_knoPrefixGk.dot(alpha_k).T
else:
self.wk_bk[label] = (-np.dot(Q_knoPrefixGk, alpha_k)).T
self.wk_norms = norm(self.wk_bk, axis=1)
self.threshold_ = 1.0 / np.max(self.wk_norms)  # decision threshold, distinct from the SOR stopping threshold
def predict(self, X):
X_with_bias = _hstack(X, np.ones((X.shape[0], 1), dtype=X.dtype))
wk_norms_multiplicated = self.wk_norms[np.newaxis, :] # change to form [[wk1, wk2, ..., wkk]]
all_distances = (-X_with_bias.dot(self.wk_bk.T)) / wk_norms_multiplicated
predicted_y = np.where(all_distances < self.threshold_, 1, 0)
# TODO: optionally assign the closest label when a row ends up with no labels.
return predicted_y
def _successive_overrelaxation(self, omegaW, Q):
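# coordinate-wise successive overrelaxation on the box-constrained dual:
# each alpha_j is moved against the residual Q[j, :].dot(alpha) - 1, scaled by omegaW / Q[j, j],
# then clipped to [0, c_k]; iteration stops when the change in alpha stagnates or max_iteration is hit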
# Initialization
D = np.diag(Q) # Only one dimension vector - is enough
D_inv = 1.0 / D # D-1 simplify form
small_l = Q.shape[1]
oldnew_alpha = np.zeros([small_l, 1]) # buffer
is_not_enough = True
was_going_down = False
last_alfa_norm_change = -1
nr_iter = 0
while is_not_enough: # do while
oldAlpha = oldnew_alpha
for j in range(0, small_l): # It's from last alpha to first
oldnew_alpha[j] = oldAlpha[j] - omegaW * D_inv[j] * (Q[j, :].T.dot(oldnew_alpha) - 1)
oldnew_alpha = oldnew_alpha.clip(0.0, self.c_k)
alfa_norm_change = norm(oldnew_alpha - oldAlpha)
if not was_going_down and last_alfa_norm_change > alfa_norm_change:
was_going_down = True
is_not_enough = alfa_norm_change > self.threshold and \
nr_iter < self.max_iteration \
and ((not was_going_down) or last_alfa_norm_change > alfa_norm_change)
# TODO: maybe add any(oldnew_alpha != oldAlpha)
last_alfa_norm_change = alfa_norm_change
nr_iter += 1
self.iteration_count.append(nr_iter)
return oldnew_alpha
def _get_x_noclass_instances(X, Y, label_class):
if sp.issparse(Y):
indices = np.where(Y[:, label_class].A == 0)[0]
else:
indices = np.where(Y[:, label_class] == 0)[0]
return X[indices, :]
def _get_x_class_instances(X, Y, label_class):
if sp.issparse(Y):
indices = Y[:, label_class].nonzero()[0]
else:
indices = np.nonzero(Y[:, label_class])[0]
return X[indices, :]
def _hstack(X, Y):
if sp.issparse(X):
return sp.hstack([X, Y], format=X.format)
else:
return np.hstack([X, Y]) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/adapt/mltsvm.py | mltsvm.py |
import os
import shlex
import subprocess
import sys
import tempfile
import zipfile
from builtins import filter
from builtins import map
from builtins import range
from builtins import str
import scipy.sparse as sparse
from ..base import MLClassifierBase
from ..dataset import save_to_arff, get_data_home, _download_single_file, _get_md5
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
SUPPORTED_VERSION = '1.9.2'
SUPPORTED_VERSION_MD5 = 'e909044b39513bbad451b8d71098b22c'
def download_meka(version=None):
"""Downloads a given version of the MEKA library and returns its classpath
Parameters
----------
version : str
the MEKA version to download, default falls back to currently supported version 1.9.2
Returns
-------
string
meka class path string for installed version
Raises
------
IOError
if unpacking the meka release file does not provide a proper setup
Exception
if MD5 mismatch happens after a download error
"""
version = version or SUPPORTED_VERSION
meka_release_string = "meka-release-{}".format(version)
file_name = meka_release_string + '-bin.zip'
meka_path = get_data_home(subdirectory='meka')
target_path = os.path.join(meka_path, file_name)
path_to_lib = os.path.join(meka_path, meka_release_string, 'lib')
if os.path.exists(target_path):
print("MEKA {} found, not downloading".format(version))
else:
print("MEKA {} not found, downloading".format(version))
release_url = "http://downloads.sourceforge.net/project/meka/meka-{}/".format(version)
_download_single_file(file_name, target_path, release_url)
found_md5 = _get_md5(target_path)
if SUPPORTED_VERSION_MD5 != found_md5:
raise Exception("MD5 mismatch - possible MEKA download error")
if not os.path.exists(path_to_lib):
with zipfile.ZipFile(target_path, 'r') as meka_zip:
print("Unzipping MEKA {} to {}".format(version, meka_path + os.path.sep))
meka_zip.extractall(path=meka_path + os.path.sep)
if not os.path.exists(os.path.join(path_to_lib, 'meka-{}.jar'.format(version))):
raise IOError("Something went wrong, MEKA files missing, please file a bug report")
return path_to_lib + os.path.sep
class Meka(MLClassifierBase):
"""Wrapper for the MEKA classifier
Allows using MEKA, WEKA and some of MULAN classifiers from scikit-compatible API. For more information on
how to use this class see the tutorial: :doc:`../meka`
Parameters
----------
meka_classifier : str
The MEKA classifier string and parameters from the MEKA API,
such as :code:`meka.classifiers.multilabel.MULAN -S RAkEL2`
weka_classifier : str
The WEKA classifier string and parameters from the WEKA API,
such as :code:`weka.classifiers.trees.J48`
java_command : str
Path to the java command used to run MEKA
meka_classpath: str
Path to the MEKA class path folder, usually the folder lib
in the directory MEKA was extracted into
Attributes
----------
output_ : str
the full text output of MEKA command
References
----------
If you use this wrapper please also cite:
.. code-block :: latex
@article{MEKA,
author = {Read, Jesse and Reutemann, Peter and Pfahringer, Bernhard and Holmes, Geoff},
title = {{MEKA}: A Multi-label/Multi-target Extension to {Weka}},
journal = {Journal of Machine Learning Research},
year = {2016},
volume = {17},
number = {21},
pages = {1--5},
url = {http://jmlr.org/papers/v17/12-164.html},
}
@article{Hall:2009:WDM:1656274.1656278,
author = {Hall, Mark and Frank, Eibe and Holmes, Geoffrey and Pfahringer, Bernhard and Reutemann, Peter and Witten, Ian H.},
title = {The WEKA Data Mining Software: An Update},
journal = {SIGKDD Explor. Newsl.},
issue_date = {June 2009},
volume = {11},
number = {1},
month = nov,
year = {2009},
issn = {1931-0145},
pages = {10--18},
numpages = {9},
url = {http://doi.acm.org/10.1145/1656274.1656278},
doi = {10.1145/1656274.1656278},
acmid = {1656278},
publisher = {ACM},
address = {New York, NY, USA},
}
Examples
--------
Here's an example of performing Label Powerset classification using MEKA with a WEKA Naive Bayes classifier.
.. code-block:: python
from yyskmultilearn.ext import Meka, download_meka
meka = Meka(
meka_classifier = "meka.classifiers.multilabel.LC",
weka_classifier = "weka.classifiers.bayes.NaiveBayes",
meka_classpath = download_meka(),
java_command = '/usr/bin/java')
meka.fit(X_train, y_train)
predictions = meka.predict(X_test)
"""
def __init__(self, meka_classifier=None, weka_classifier=None,
java_command=None, meka_classpath=None):
super(Meka, self).__init__()
self.java_command = java_command
if self.java_command is None:
# TODO: this will not be needed once we're python 3 ready - we will
# use it only in python 2.7 cases
from whichcraft import which
self.java_command = which("java")
if self.java_command is None:
raise ValueError("Java not found")
self.meka_classpath = meka_classpath
if self.meka_classpath is None:
self.meka_classpath = os.environ.get('MEKA_CLASSPATH')
if self.meka_classpath is None:
raise ValueError("No meka classpath defined")
self.meka_classifier = meka_classifier
self.weka_classifier = weka_classifier
self.copyable_attrs = [
'meka_classifier',
'weka_classifier',
'java_command',
'meka_classpath'
]
self.output_ = None
self._verbosity = 5
self._warnings = None
self.require_dense = [False, False]
self._clean()
def _clean(self):
"""Sets various attributes to :code:`None`"""
self._results = None
self._statistics = None
self.output_ = None
self._error = None
self._label_count = None
self._instance_count = None
def _remove_temporary_files(self, temporary_files):
"""Internal function for cleaning temporary files"""
for file_object in temporary_files:
file_name = file_object.name
file_object.close()
if os.path.exists(file_name):
os.remove(file_name)
arff_file_name = file_name + '.arff'
if os.path.exists(arff_file_name):
os.remove(arff_file_name)
def fit(self, X, y):
"""Fits classifier to training data
Internally this method dumps X and y to temporary arff files and
runs MEKA with relevant arguments using :meth:`_run`. It uses a
sparse DOK representation (:class:`scipy.sparse.dok_matrix`)
of the X matrix.
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
self._clean()
X = self._ensure_input_format(
X, sparse_format='dok', enforce_sparse=True)
y = self._ensure_output_format(
y, sparse_format='dok', enforce_sparse=True)
self._label_count = y.shape[1]
# we need this in case threshold needs to be recalibrated in meka
self.train_data_ = save_to_arff(X, y)
train_arff = tempfile.NamedTemporaryFile(delete=False)
classifier_dump_file = tempfile.NamedTemporaryFile(delete=False)
try:
with open(train_arff.name + '.arff', 'w') as fp:
fp.write(self.train_data_)
input_args = [
'-verbosity', "0",
'-split-percentage', "100",
'-t', '"{}"'.format(train_arff.name + '.arff'),
'-d', '"{}"'.format(classifier_dump_file.name),
]
self._run_meka_command(input_args)
self.classifier_dump = None
with open(classifier_dump_file.name, 'rb') as fp:
self.classifier_dump = fp.read()
finally:
self._remove_temporary_files([train_arff, classifier_dump_file])
return self
def predict(self, X):
"""Predict label assignments for X
Internally this method dumps X to temporary arff files and
runs MEKA with relevant arguments using :func:`_run`. It uses a
sparse DOK representation (:class:`scipy.sparse.dok_matrix`)
of the X matrix.
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
sparse matrix of integers with shape :code:`(n_samples, n_features)`
"""
X = self._ensure_input_format(
X, sparse_format='dok', enforce_sparse=True)
self._instance_count = X.shape[0]
if self.classifier_dump is None:
raise Exception('Not classified')
sparse_y = sparse.coo_matrix((X.shape[0], self._label_count), dtype=int)
try:
train_arff = tempfile.NamedTemporaryFile(delete=False)
test_arff = tempfile.NamedTemporaryFile(delete=False)
classifier_dump_file = tempfile.NamedTemporaryFile(delete=False)
with open(train_arff.name + '.arff', 'w') as fp:
fp.write(self.train_data_)
with open(classifier_dump_file.name, 'wb') as fp:
fp.write(self.classifier_dump)
with open(test_arff.name + '.arff', 'w') as fp:
fp.write(save_to_arff(X, sparse_y))
args = [
'-l', '"{}"'.format(classifier_dump_file.name)
]
self._run(train_arff.name + '.arff', test_arff.name + '.arff', args)
self._parse_output()
finally:
self._remove_temporary_files(
[train_arff, test_arff, classifier_dump_file]
)
return self._results
def _run(self, train_file, test_file, additional_arguments=[]):
"""Runs the meka classifiers
Parameters
----------
train_file : str
path to train :code:`.arff` file in meka format
(big endian, labels first in attributes list).
test_file : str
path to test :code:`.arff` file in meka format
(big endian, labels first in attributes list).
Returns
-------
predictions: sparse binary indicator matrix [n_test_samples, n_labels]
array of binary label vectors including label predictions of
shape :code:`(n_test_samples, n_labels)`
"""
self.output_ = None
self._warnings = None
# meka_command_string = 'java -cp "/home/niedakh/pwr/old/meka-1.5/lib/*" meka.classifiers.multilabel.MULAN -S RAkEL2
# -threshold 0 -t {train} -T {test} -verbosity {verbosity} -W weka.classifiers.bayes.NaiveBayes'
# meka.classifiers.multilabel.LC, weka.classifiers.bayes.NaiveBayes
args = [
'-t', '"{}"'.format(train_file),
'-T', '"{}"'.format(test_file),
'-verbosity', str(5),
] + additional_arguments
self._run_meka_command(args)
return self
def _parse_output(self):
"""Internal function for parsing MEKA output."""
if self.output_ is None:
self._results = None
self._statistics = None
return None
predictions_split_head = '==== PREDICTIONS'
predictions_split_foot = '|==========='
if self._label_count is None:
    # map() returns an iterator under Python 3, so collect the matches with a list comprehension
    self._label_count = [int(x.split(')')[1].strip())
                         for x in self.output_.split('\n') if 'Number of labels' in x][0]
if self._instance_count is None:
    self._instance_count = int(float(
        [x for x in self.output_.split('\n') if '==== PREDICTIONS (N=' in x][0]
        .split('(')[1].split('=')[1].split(')')[0]))
predictions = self.output_.split(predictions_split_head)[1].split(
predictions_split_foot)[0].split('\n')[1:-1]
predictions = [y.split(']')[0]
for y in [x.split('] [')[1] for x in predictions]]
predictions = [[a for a in [f.strip() for f in z.split(',')] if len(a) > 0]
for z in predictions]
predictions = [[int(a) for a in z] for z in predictions]
assert self._verbosity == 5
self._results = sparse.lil_matrix(
(self._instance_count, self._label_count), dtype='int')
for row in range(self._instance_count):
for label in predictions[row]:
self._results[row, label] = 1
statistics = [x for x in self.output_.split(
'== Evaluation Info')[1].split('\n') if len(x) > 0 and '==' not in x]
statistics = [y for y in [z.strip() for z in statistics] if ' ' in y]
array_data = [z for z in statistics if '[' in z]
non_array_data = [z for z in statistics if '[' not in z]
self._statistics = {}
for row in non_array_data:
r = row.strip().split(' ')
r = [z for z in r if len(z) > 0]
r = [z.strip() for z in r]
if len(r) < 2:
continue
try:
test_value = float(r[1])
except ValueError:
test_value = r[1]
r[1] = test_value
self._statistics[r[0]] = r[1]
for row in array_data:
r = row.strip().split('[')
r = [z.strip() for z in r]
r[1] = r[1].replace(', ', ' ').replace(
',', '.').replace(']', '').split(' ')
r[1] = [x for x in r[1] if len(x) > 0]
self._statistics[r[0]] = r[1]
def _run_meka_command(self, args):
command_args = [
self.java_command,
'-cp', '"{}*"'.format(self.meka_classpath),
self.meka_classifier,
]
if self.weka_classifier is not None:
command_args += ['-W', self.weka_classifier]
command_args += args
meka_command = " ".join(command_args)
if sys.platform != 'win32':
meka_command = shlex.split(meka_command)
pipes = subprocess.Popen(meka_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
self.output_, self._error = pipes.communicate()
if type(self.output_) == bytes:
self.output_ = self.output_.decode(sys.stdout.encoding)
if type(self._error) == bytes:
self._error = self._error.decode(sys.stdout.encoding)
if pipes.returncode != 0:
raise Exception(self.output_ + self._error) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/ext/meka.py | meka.py |
from ..problem_transform.br import BinaryRelevance
from scipy import sparse
class LabelSpacePartitioningClassifier(BinaryRelevance):
"""Partition label space and classify each subspace separately
This classifier performs classification by:
1. partitioning the label space into separate, smaller multi-label sub problems, using the supplied label
space clusterer
2. training an instance of the supplied base mult-label classifier for each label space subset in the partition
3. predicting with each of the subclassifiers and combining their results into the full label space
Parameters
----------
classifier : :class:`~sklearn.base.BaseEstimator`
the base classifier that will be used in a class, will be
automatically put under :code:`self.classifier`.
clusterer : :class:`~yyskmultilearn.cluster.LabelSpaceClustererBase`
object that partitions the output space, will be
automatically put under :code:`self.clusterer`.
require_dense : [bool, bool]
whether the base classifier requires [input, output] matrices
in dense representation, will be automatically
put under :code:`self.require_dense`.
Attributes
----------
model_count_ : int
number of trained models, in this classifier equal to the number of partitions
partition_ : List[List[int]], shape=(`model_count_`,)
list of lists of label indexes, used to index the output space matrix, set in :meth:`_generate_partition`
via :meth:`fit`
classifiers : List[:class:`~sklearn.base.BaseEstimator`], shape=(`model_count_`,)
list of classifiers trained per partition, set in :meth:`fit`
References
----------
If you use this clusterer please cite the clustering paper:
.. code:: latex
@Article{datadriven,
author = {Szymański, Piotr and Kajdanowicz, Tomasz and Kersting, Kristian},
title = {How Is a Data-Driven Approach Better than Random Choice in
Label Space Division for Multi-Label Classification?},
journal = {Entropy},
volume = {18},
year = {2016},
number = {8},
article_number = {282},
url = {http://www.mdpi.com/1099-4300/18/8/282},
issn = {1099-4300},
doi = {10.3390/e18080282}
}
Examples
--------
Here's an example of building a partitioned ensemble of Classifier Chains
.. code :: python
from yyskmultilearn.ensemble import MajorityVotingClassifier
from yyskmultilearn.cluster import FixedLabelSpaceClusterer
from yyskmultilearn.problem_transform import ClassifierChain
from sklearn.naive_bayes import GaussianNB
classifier = MajorityVotingClassifier(
clusterer = FixedLabelSpaceClusterer(clusters = [[1,3,4], [0, 2, 5]]),
classifier = ClassifierChain(classifier=GaussianNB())
)
classifier.fit(X_train,y_train)
predictions = classifier.predict(X_test)
More advanced examples can be found in `the label relations exploration guide <../labelrelations.ipynb>`_
"""
def __init__(self, classifier=None, clusterer=None, require_dense=None):
super(LabelSpacePartitioningClassifier, self).__init__(classifier, require_dense)
self.clusterer = clusterer
self.copyable_attrs = ['clusterer', 'classifier', 'require_dense']
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
"""
X = self._ensure_input_format(
X, sparse_format='csr', enforce_sparse=True)
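# Each sub-classifier only predicts its own label subset; its columns are mapped back to the
# global label indices stored in self.partition_ when filling the result matrix.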
result = sparse.lil_matrix((X.shape[0], self._label_count), dtype=int)
for model in range(self.model_count_):
predictions = self._ensure_output_format(self.classifiers_[model].predict(
X), sparse_format=None, enforce_sparse=True).nonzero()
for row, column in zip(predictions[0], predictions[1]):
result[row, self.partition_[model][column]] = 1
return result
def _generate_partition(self, X, y):
"""Cluster the label space
Saves the partition generated by the clusterer to :code:`self.partition_` and
sets :code:`self.model_count_` to the number of clusters and :code:`self._label_count`
to number of labels.
Parameters
-----------
X : numpy.ndarray or scipy.sparse
input features of shape :code:`(n_samples, n_features)`, passed to clusterer
y : numpy.ndarray or scipy.sparse
binary indicator matrix with label assigments of shape
:code:`(n_samples, n_labels)`
Returns
-------
LabelSpacePartitioningClassifier
returns an instance of itself
"""
self.partition_ = self.clusterer.fit_predict(X, y)
self.model_count_ = len(self.partition_)
self._label_count = y.shape[1]
return self | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/ensemble/partition.py | partition.py |
import numpy as np
from .partition import LabelSpacePartitioningClassifier
from ..cluster.random import RandomLabelSpaceClusterer
from ..problem_transform import LabelPowerset
from ..base import MLClassifierBase
class RakelD(MLClassifierBase):
"""Distinct RAndom k-labELsets multi-label classifier.
Divides the label space into equal partitions of size k, trains a Label Powerset
classifier per partition and predicts by summing the result of all trained classifiers.
Parameters
----------
base_classifier : sklearn.base
the base classifier that will be used in a class, will be
automatically put under :code:`self.classifier` for future
access.
base_classifier_require_dense : [bool, bool]
whether the base classifier requires [input, output] matrices
in dense representation, will be automatically
put under :code:`self.require_dense`
labelset_size : int
the desired size of each of the partitions, parameter k according to the paper.
Default is 3; according to the paper it gives the best results
Attributes
----------
_label_count : int
the number of labels the classifier is fit to, set by :meth:`fit`
model_count_ : int
the number of sub classifiers trained, set by :meth:`fit`
classifier_: :class:`yyskmultilearn.ensemble.LabelSpacePartitioningClassifier`
the underlying classifier that performs the label space partitioning using a
random clusterer :class:`yyskmultilearn.ensemble.RandomLabelSpaceClusterer`
References
----------
If you use this class please cite the paper introducing the method:
.. code :: latex
@ARTICLE{5567103,
author={G. Tsoumakas and I. Katakis and I. Vlahavas},
journal={IEEE Transactions on Knowledge and Data Engineering},
title={Random k-Labelsets for Multilabel Classification},
year={2011},
volume={23},
number={7},
pages={1079-1089},
doi={10.1109/TKDE.2010.164},
ISSN={1041-4347},
month={July},
}
Examples
--------
Here's a simple example of how to use this class with a base classifier from scikit-learn to teach
non-overlapping classifiers each trained on at most four labels:
.. code :: python
from sklearn.naive_bayes import GaussianNB
from yyskmultilearn.ensemble import RakelD
classifier = RakelD(
base_classifier=GaussianNB(),
base_classifier_require_dense=[True, True],
labelset_size=4
)
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
"""
def __init__(self, base_classifier=None, labelset_size=3, base_classifier_require_dense=None):
super(RakelD, self).__init__()
self.labelset_size = labelset_size
self.base_classifier = base_classifier
self.base_classifier_require_dense = base_classifier_require_dense
self.copyable_attrs = ['base_classifier', 'base_classifier_require_dense', 'labelset_size']
def fit(self, X, y):
"""Fit classifier to multi-label data
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features, can be a dense or sparse matrix of size
:code:`(n_samples, n_features)`
y : numpy.ndarray or scipy.sparse {0,1}
binary indicator matrix with label assignments, shape
:code:`(n_samples, n_labels)`
Returns
-------
fitted instance of self
"""
self._label_count = y.shape[1]
self.model_count_ = int(np.ceil(self._label_count / self.labelset_size))
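# Delegate to a partitioning classifier: the labels are split into disjoint random clusters of
# size labelset_size and a Label Powerset model is trained per cluster.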
self.classifier_ = LabelSpacePartitioningClassifier(
classifier=LabelPowerset(
classifier=self.base_classifier,
require_dense=self.base_classifier_require_dense
),
clusterer=RandomLabelSpaceClusterer(
cluster_size=self.labelset_size,
cluster_count=self.model_count_,
allow_overlap=False
),
require_dense=[False, False]
)
return self.classifier_.fit(X, y)
def predict(self, X):
"""Predict label assignments
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
"""
return self.classifier_.predict(X)
def predict_proba(self, X):
"""Predict label probabilities
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of float
binary indicator matrix with probability of label assignment with shape
:code:`(n_samples, n_labels)`
"""
return self.classifier_.predict_proba(X) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/ensemble/rakeld.py | rakeld.py |
import numpy as np
from builtins import range
from builtins import zip
from scipy import sparse
from .partition import LabelSpacePartitioningClassifier
class MajorityVotingClassifier(LabelSpacePartitioningClassifier):
"""Majority Voting ensemble classifier
Divides the label space using the provided clusterer class, trains an instance of the provided
base classifier for each subset and assigns a label to an instance
if more than half of all classifiers (majority) from clusters that contain the label
assigned the label to the instance.
Parameters
----------
classifier : :class:`~sklearn.base.BaseEstimator`
the base classifier that will be used in a class, will be
automatically put under :code:`self.classifier`.
clusterer : :class:`~yyskmultilearn.cluster.LabelSpaceClustererBase`
object that partitions the output space, will be
automatically put under :code:`self.clusterer`.
require_dense : [bool, bool]
whether the base classifier requires [input, output] matrices
in dense representation, will be automatically
put under :code:`self.require_dense`.
Attributes
----------
model_count_ : int
number of trained models, in this classifier equal to the number of partitions
partition_ : List[List[int]], shape=(`model_count_`,)
list of lists of label indexes, used to index the output space matrix, set in :meth:`_generate_partition`
via :meth:`fit`
classifiers : List[:class:`~sklearn.base.BaseEstimator`], shape=(`model_count_`,)
list of classifiers trained per partition, set in :meth:`fit`
Examples
--------
Here's an example of building an overlapping ensemble of chains
.. code :: python
from yyskmultilearn.ensemble import MajorityVotingClassifier
from yyskmultilearn.cluster import FixedLabelSpaceClusterer
from yyskmultilearn.problem_transform import ClassifierChain
from sklearn.naive_bayes import GaussianNB
classifier = MajorityVotingClassifier(
clusterer = FixedLabelSpaceClusterer(clusters = [[1,2,3], [0, 2, 5], [4, 5]]),
classifier = ClassifierChain(classifier=GaussianNB())
)
classifier.fit(X_train,y_train)
predictions = classifier.predict(X_test)
More advanced examples can be found in `the label relations exploration guide <../labelrelations.ipynb>`_
"""
def __init__(self, classifier=None, clusterer=None, require_dense=None):
super(MajorityVotingClassifier, self).__init__(
classifier=classifier, clusterer=clusterer, require_dense=require_dense
)
def predict(self, X):
"""Predict label assignments for X
Parameters
----------
X : numpy.ndarray or scipy.sparse.csc_matrix
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of float
binary indicator matrix with label assignments with shape
:code:`(n_samples, n_labels)`
"""
predictions = [
self._ensure_input_format(self._ensure_input_format(
c.predict(X)), sparse_format='csc', enforce_sparse=True)
for c in self.classifiers_
]
voters = np.zeros(self._label_count, dtype='int')
votes = sparse.lil_matrix(
(predictions[0].shape[0], self._label_count), dtype='int')
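# For every label, count how many sub-classifiers voted for it (votes) and how many could have
# voted for it because their cluster contains the label (voters); the label is assigned when the
# vote ratio rounds to 1, i.e. a majority of eligible classifiers agree.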
for model in range(self.model_count_):
for label in range(len(self.partition_[model])):
votes[:, self.partition_[model][label]] = votes[
:, self.partition_[model][label]] + predictions[model][:, label]
voters[self.partition_[model][label]] += 1
nonzeros = votes.nonzero()
for row, column in zip(nonzeros[0], nonzeros[1]):
votes[row, column] = np.round(
votes[row, column] / float(voters[column]))
return self._ensure_output_format(votes, enforce_sparse=False)
def predict_proba(self, X):
raise NotImplementedError("The voting scheme does not define a method for calculating probabilities")
from .voting import MajorityVotingClassifier
from ..cluster.random import RandomLabelSpaceClusterer
from ..problem_transform import LabelPowerset
from ..base import MLClassifierBase
class RakelO(MLClassifierBase):
"""Overlapping RAndom k-labELsets multi-label classifier
Divides the label space into m subsets of size k, trains a Label Powerset
classifier for each subset and assigns a label to an instance
if more than half of all classifiers (majority) from clusters that contain the label
assigned the label to the instance.
Parameters
----------
base_classifier: :class:`~sklearn.base.BaseEstimator`
scikit-learn compatible base classifier, will be set under `self.classifier.classifier`.
base_classifier_require_dense : [bool, bool]
whether the base classifier requires [input, output] matrices
in dense representation. Will be automatically
set under `self.classifier.require_dense`
labelset_size : int
the desired size of each of the partitions, parameter k according to the paper.
According to the paper, the best value is 3, so it is set as the default.
Will be automatically set under `self.labelset_size`
model_count : int
the desired number of classifiers, parameter m according to paper.
According to the paper, the best value for this parameter is 2M (where M is the number of labels)
Will be automatically set under :code:`self.model_count_`.
Attributes
----------
classifier : :class:`~yyskmultilearn.ensemble.MajorityVotingClassifier`
the voting classifier initialized with :class:`~yyskmultilearn.problem_transform.LabelPowerset` multi-label
classifier with `base_classifier` and :class:`~yyskmultilearn.cluster.random.RandomLabelSpaceClusterer`
References
----------
If you use this class please cite the paper introducing the method:
.. code :: latex
@ARTICLE{5567103,
author={G. Tsoumakas and I. Katakis and I. Vlahavas},
journal={IEEE Transactions on Knowledge and Data Engineering},
title={Random k-Labelsets for Multilabel Classification},
year={2011},
volume={23},
number={7},
pages={1079-1089},
doi={10.1109/TKDE.2010.164},
ISSN={1041-4347},
month={July},
}
Examples
--------
Here's a simple example of how to use this class with a base classifier from scikit-learn to teach 6 classifiers
each trained on a quarter of labels, which is sure to overlap:
.. code :: python
from sklearn.naive_bayes import GaussianNB
from yyskmultilearn.ensemble import RakelO
classifier = RakelO(
base_classifier=GaussianNB(),
base_classifier_require_dense=[True, True],
labelset_size=y_train.shape[1] // 4,
model_count=6
)
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
"""
def __init__(self, base_classifier=None, model_count=None, labelset_size=3, base_classifier_require_dense=None):
super(RakelO, self).__init__()
self.model_count = model_count
self.labelset_size = labelset_size
self.base_classifier = base_classifier
self.base_classifier_require_dense = base_classifier_require_dense
self.copyable_attrs = ['model_count', 'labelset_size',
'base_classifier_require_dense',
'base_classifier']
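# fit() below builds a majority-voting ensemble of Label Powerset classifiers, each trained on a
# random, possibly overlapping cluster of labelset_size labels.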
def fit(self, X, y):
"""Fits classifier to training data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
self.classifier = MajorityVotingClassifier(
classifier=LabelPowerset(
classifier=self.base_classifier,
require_dense=self.base_classifier_require_dense
),
clusterer=RandomLabelSpaceClusterer(
cluster_size=self.labelset_size,
cluster_count=self.model_count,
allow_overlap=True
),
require_dense=[False, False]
)
return self.classifier.fit(X, y)
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
return self.classifier.predict(X)
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `float in [0.0, 1.0]`, shape=(n_samples, n_labels)
matrix with label assignment probabilities
"""
return self.classifier.predict_proba(X) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/ensemble/rakelo.py | rakelo.py |
from sklearn.model_selection._split import _BaseKFold
import numpy as np
import scipy.sparse as sp
import itertools
from sklearn.utils import check_random_state
def iterative_train_test_split(X, y, test_size):
"""Iteratively stratified train/test split
Parameters
----------
X : array or sparse matrix of shape (n_samples, n_features)
input feature matrix
y : array or sparse matrix of shape (n_samples, n_labels)
binary indicator matrix with label assignments
test_size : float, [0,1]
the proportion of the dataset to include in the test split, the rest will be put in the train set
Returns
-------
X_train, y_train, X_test, y_test
stratified division into train/test split
"""
stratifier = IterativeStratification(n_splits=2, order=2, sample_distribution_per_fold=[test_size, 1.0-test_size])
train_indexes, test_indexes = next(stratifier.split(X, y))
X_train, y_train = X[train_indexes, :], y[train_indexes, :]
X_test, y_test = X[test_indexes, :], y[test_indexes, :]
return X_train, y_train, X_test, y_test
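# A minimal usage sketch (X and y are assumed to be a feature matrix and a binary label-indicator
# matrix with matching numbers of rows):
# X_train, y_train, X_test, y_test = iterative_train_test_split(X, y, test_size=0.2)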
def _fold_tie_break(desired_samples_per_fold, M):
"""Helper function to split a tie between folds with same desirability of a given sample
Parameters
----------
desired_samples_per_fold: np.array[Float], :code:`(n_splits)`
number of samples desired per fold
M : np.array(int)
List of folds between which to break the tie
Returns
-------
fold_number : int
The selected fold index to put samples into
"""
if len(M) == 1:
return M[0]
else:
max_val = max(desired_samples_per_fold[M])
M_prim = np.where(
np.array(desired_samples_per_fold) == max_val)[0]
M_prim = np.array([x for x in M_prim if x in M])
return np.random.choice(M_prim, 1)[0]
def _get_most_desired_combination(samples_with_combination):
"""Select the next most desired combination whose evidence should be split among folds
Parameters
----------
samples_with_combination : Dict[Combination, List[int]], :code:`(n_combinations)`
map from each label combination present in y to list of sample indexes that have this combination assigned
Returns
-------
combination: Combination
the combination to split next
"""
currently_chosen = None
best_number_of_combinations, best_support_size = None, None
for combination, evidence in samples_with_combination.items():
number_of_combinations, support_size = (len(set(combination)), len(evidence))
if support_size == 0:
continue
if currently_chosen is None or (
best_number_of_combinations < number_of_combinations and best_support_size > support_size
):
currently_chosen = combination
best_number_of_combinations, best_support_size = number_of_combinations, support_size
return currently_chosen
class IterativeStratification(_BaseKFold):
"""Iteratively stratify a multi-label data set into folds
Construct an interative stratifier that splits the data set into folds trying to maintain balanced representation
with respect to order-th label combinations.
Attributes
----------
n_splits : number of splits, int
the number of folds to stratify into
order : int, >= 1
the order of label relationship to take into account when balancing sample distribution across labels
sample_distribution_per_fold : None or List[float], :code:`(n_splits)`
desired percentage of samples in each of the folds, if None and equal distribution of samples per fold
is assumed i.e. 1/n_splits for each fold. The value is held in :code:`self.percentage_per_fold`.
random_state : int
the random state seed (optional)
"""
def __init__(self, n_splits=3, order=1, sample_distribution_per_fold = None, random_state=None):
self.order = order
super(
IterativeStratification,
self).__init__(n_splits,
shuffle=False,
random_state=random_state)
if sample_distribution_per_fold:
self.percentage_per_fold = sample_distribution_per_fold
else:
self.percentage_per_fold = [1 / float(self.n_splits) for _ in range(self.n_splits)]
def _prepare_stratification(self, y):
"""Prepares variables for performing stratification
For the purpose of clarity, the type Combination denotes List[int], :code:`(self.order)` and represents a
label combination of the order we want to preserve among folds in stratification. The total number of
combinations present in :code:`(y)` will be denoted as :code:`(n_combinations)`.
Sets
----
self.n_samples, self.n_labels : int, int
shape of y
self.desired_samples_per_fold: np.array[Float], :code:`(n_splits)`
number of samples desired per fold
self.desired_samples_per_combination_per_fold: Dict[Combination, np.array[Float]], :code:`(n_combinations, n_splits)`
number of samples evidencing each combination desired per each fold
Parameters
----------
y : output matrix or array of arrays (n_samples, n_labels)
Returns
-------
rows : List[List[int]], :code:`(n_samples, n_labels)`
list of label indices assigned to each sample
rows_used : Dict[int, bool], :code:`(n_samples)`
boolean map from a given sample index to whether it has already been assigned to a fold or not
all_combinations : List[Combination], :code:`(n_combinations)`
list of all label combinations of order self.order present in y
per_row_combinations : List[Combination], :code:`(n_samples)`
list of all label combinations of order self.order present in y per row
samples_with_combination : Dict[Combination, List[int]], :code:`(n_combinations)`
map from each label combination present in y to list of sample indexes that have this combination assigned
folds: List[List[int]] (n_splits)
list of lists to be populated with samples
"""
self.n_samples, self.n_labels = y.shape
self.desired_samples_per_fold = np.array([self.percentage_per_fold[i] * self.n_samples
for i in range(self.n_splits)])
rows = sp.lil_matrix(y).rows
rows_used = {i: False for i in range(self.n_samples)}
all_combinations = []
per_row_combinations = [[] for i in range(self.n_samples)]
samples_with_combination = {}
folds = [[] for _ in range(self.n_splits)]
# for every row
for sample_index, label_assignment in enumerate(rows):
# for every n-th order label combination
# register combination in maps and lists used later
for combination in itertools.combinations_with_replacement(label_assignment, self.order):
if combination not in samples_with_combination:
samples_with_combination[combination] = []
samples_with_combination[combination].append(sample_index)
all_combinations.append(combination)
per_row_combinations[sample_index].append(combination)
all_combinations = [list(x) for x in set(all_combinations)]
self.desired_samples_per_combination_per_fold = {
combination:
np.array([len(evidence_for_combination) * self.percentage_per_fold[j]
for j in range(self.n_splits)])
for combination, evidence_for_combination in samples_with_combination.items()
}
return rows, rows_used, all_combinations, per_row_combinations, samples_with_combination, folds
def _distribute_positive_evidence(self, rows_used, folds, samples_with_combination, per_row_combinations):
"""Internal method to distribute evidence for labeled samples across folds
For params, see documentation of :code:`self._prepare_stratification`. Does not return anything,
modifies params.
"""
l = _get_most_desired_combination(samples_with_combination)
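# Keep distributing samples while some label combination still has unassigned evidence: each sample
# is placed into the fold that currently desires that combination the most, with ties broken by the
# overall per-fold demand, and all per-fold / per-combination counters are updated accordingly.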
while l is not None:
while len(samples_with_combination[l]) > 0:
row = samples_with_combination[l].pop()
if rows_used[row]:
continue
max_val = max(self.desired_samples_per_combination_per_fold[l])
M = np.where(
np.array(self.desired_samples_per_combination_per_fold[l]) == max_val)[0]
m = _fold_tie_break(self.desired_samples_per_combination_per_fold[l], M)
folds[m].append(row)
rows_used[row] = True
for i in per_row_combinations[row]:
if row in samples_with_combination[i]:
samples_with_combination[i].remove(row)
self.desired_samples_per_combination_per_fold[i][m] -= 1
self.desired_samples_per_fold[m] -= 1
l = _get_most_desired_combination(samples_with_combination)
def _distribute_negative_evidence(self, rows_used, folds):
"""Internal method to distribute evidence for unlabeled samples across folds
For params, see documentation of :code:`self._prepare_stratification`. Does not return anything,
modifies params.
"""
available_samples = [
i for i, v in rows_used.items() if not v]
samples_left = len(available_samples)
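# Hand the remaining unassigned samples (typically rows without any labels) to folds that still
# have samples left to fill.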
while samples_left > 0:
row = available_samples.pop()
rows_used[row] = True
samples_left -= 1
fold_selected = np.random.choice(np.where(self.desired_samples_per_fold > 0)[0], 1)[0]
self.desired_samples_per_fold[fold_selected] -= 1
folds[fold_selected].append(row)
def _iter_test_indices(self, X, y=None, groups=None):
"""Internal method for providing scikit-learn's split with folds
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
fold : List[int]
indexes of test samples for a given fold, yielded for each of the folds
"""
if self.random_state:
check_random_state(self.random_state)
rows, rows_used, all_combinations, per_row_combinations, samples_with_combination, folds = \
self._prepare_stratification(y)
self._distribute_positive_evidence(rows_used, folds, samples_with_combination, per_row_combinations)
self._distribute_negative_evidence(rows_used, folds)
for fold in folds:
yield fold | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/model_selection/iterative_stratification.py | iterative_stratification.py |
import numpy as np
import itertools as it
def example_distribution(folds, desired_size):
"""Examples Distribution (ED) measure
Examples Distribution is a measure of how much a given fold's size deviates from the desired number
of samples in each of the folds.
Parameters:
-----------
folds : List[List[int]], shape = (n_folds)
list of indexes of samples assigned per fold
desired_size : List[int], shape = (n_folds)
desired number of samples in each fold
Returns
-------
example_distribution_score : float
The example distribution score
"""
n_splits = float(len(folds))
return np.sum(
[np.abs(len(fold) - desired_fold_size) for fold, desired_fold_size in zip(folds, desired_size)]
) / n_splits
def get_indicator_representation(row):
"""Convert binary indicator to list of assigned labels
Parameters:
-----------
row : List[{0,1}]
binary indicator list whether i-th label is assigned or not
Returns
-------
np.array[int]
list of assigned labels
"""
return np.where(row != 0)[0]
def get_combination_wise_output_matrix(y, order):
"""Returns label combinations of a given order that are assigned to each row
Parameters:
-----------
y : output matrix or array of arrays (n_samples, n_labels)
the binary-indicator label assignment per sample representation of the output space
order : int, >= 1
the order of label relationship to take into account when balancing sample distribution across labels
Returns
-------
combinations_per_row : List[Set[Tuple[int]]]
list of combination assignments per row
"""
return np.array([set(tuple(combination) for combination in
it.combinations_with_replacement(get_indicator_representation(row), order)) for row in y])
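# For example (a small illustration): a row with labels {0, 2} and order=2 yields the combination
# set {(0, 0), (0, 2), (2, 2)} via combinations_with_replacement.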
def get_unique_combinations(combinations_per_row):
"""Performs set.union on a list of sets
Parameters
----------
combinations_per_row : List[Set[Tuple[int]]]
list of combination assignments per row
Returns
-------
Set[Tuple[int]]
all unique label combinations
"""
return set.union(*combinations_per_row)
def folds_without_evidence_for_at_least_one_label_combination(y, folds, order=1):
"""Counts the number of folds without evidence for a given Label, Label Pair or Label Combination (FZ, FZLP, FZLC) measure
A general implementation of FZ - the number of folds that contain at least one label combination of order
:code:`order` with no positive examples. With :code:`order` = 1, it becomes the FZ measure from Katakis et al.'s
original paper.
Parameters:
-----------
y : output matrix or array of arrays (n_samples, n_labels)
the binary-indicator label assignment per sample representation of the output space
folds : List[List[int]], shape = (n_folds)
list of indexes of samples assigned per fold
order : int, >= 1
the order of label relationship to take into account when balancing sample distribution across labels
Returns
-------
score : float
the number of folds with missing evidence for at least one label combination
"""
combinations_per_row = get_combination_wise_output_matrix(y, order)
all_combinations = get_unique_combinations(combinations_per_row)
return np.sum([get_unique_combinations(combinations_per_row[[fold]]) != all_combinations for fold in folds])
def folds_label_combination_pairs_without_evidence(y, folds, order):
"""Fold - Label / Label Pair / Label Combination (FLZ, FLPZ, FLCZ) pair count measure
A general implementation of FLZ - the number of pairs of fold and label combination of a given order for which
there is no positive evidence in that fold for that combination. With :code:`order` = 1, it becomes the FLZ
measure from Katakis et al.'s original paper, with :code:`order` = 2, it becomes the FLPZ measure from
Szymański et al.'s paper.
Parameters:
-----------
y : output matrix or array of arrays (n_samples, n_labels)
the binary-indicator label assignment per sample representation of the output space
folds : List[List[int]], shape = (n_folds)
list of indexes of samples assigned per fold
order : int, >= 1
the order of label relationship to take into account when balancing sample distribution across labels
Returns
-------
score : float
the number of fold-label combination pairs with missing evidence
"""
combinations_per_row = get_combination_wise_output_matrix(y, order)
all_combinations = get_unique_combinations(combinations_per_row)
return np.sum(
[len(all_combinations.difference(get_unique_combinations(combinations_per_row[[fold]]))) for fold in folds])
def percentage_of_label_combinations_without_evidence_per_fold(y, folds, order):
"""Percentage of label combinations without evidence per fold
For each fold, computes the fraction of all label combinations of a given order for which that fold
contains no positive evidence. With :code:`order` = 1 it reports the fraction of labels missing from
each fold, with :code:`order` = 2 the fraction of label pairs missing from each fold.
Parameters:
-----------
y : output matrix or array of arrays (n_samples, n_labels)
the binary-indicator label assignment per sample representation of the output space
folds : List[List[int]], shape = (n_folds)
list of indexes of samples assigned per fold
order : int, >= 1
the order of label relationship to take into account when balancing sample distribution across labels
Returns
-------
scores : List[float], shape = (n_folds)
the per-fold percentage of label combinations with missing evidence
"""
combinations_per_row = get_combination_wise_output_matrix(y, order)
all_combinations = get_unique_combinations(combinations_per_row)
number_of_combinations = float(len(all_combinations))
return [
1.0 - len(get_unique_combinations(combinations_per_row[[fold]])) / number_of_combinations for fold in folds
]
def label_combination_distribution(y, folds, order):
"""Label / Label Pair / Label Combination Distribution (LD, LPD, LCZD) measure
A general implementation of Label / Label Pair / Label Combination Distribution - a measure that evaluates
how the proportion of positive evidence for a label / label pair / label combination to the negative evidence
for a label (pair/combination) deviates from the same proportion in the entire data set, averaged over all folds and labels.
With :code:`order` = 1, it becomes the LD measure from Katakis et.al's original paper, with :code:`order` = 2, it
becomes the LPD measure from Szymański et. al.'s paper.
Parameters:
-----------
y : output matrix or array of arrays (n_samples, n_labels)
the binary-indicator label assignment per sample representation of the output space
folds : List[List[int]], shape = (n_folds)
list of indexes of samples assigned per fold
order : int, >= 1
the order of label relationship to take into account when balancing sample distribution across labels
Returns
-------
score : float
the label / label pair / label combination distribution score
"""
def _get_proportion(x, y):
return y / float(x - y)
combinations_per_row = get_combination_wise_output_matrix(y, order)
all_combinations = get_unique_combinations(combinations_per_row)
number_of_samples = y.shape[0]
number_of_combinations = float(len(all_combinations))
number_of_folds = float(len(folds))
external_sum = 0
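# For every label combination, compare the proportion of positive to negative evidence within each
# fold (s) with the same proportion in the whole data set (d); the score averages |s - d| over folds
# and then over all combinations.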
for combination in all_combinations:
number_of_samples_with_combination = np.sum([
1 for combinations_in_row in combinations_per_row if combination in combinations_in_row
])
d = _get_proportion(number_of_samples, number_of_samples_with_combination)
internal_sum = 0
for fold in folds:
S_i_j = np.sum(
[1 for combinations_in_row in combinations_per_row[fold] if combination in combinations_in_row])
fold_size = len(fold)
s = _get_proportion(fold_size, S_i_j)
internal_sum += np.abs(s - d)
internal_sum /= number_of_folds
external_sum += internal_sum
return external_sum / number_of_combinations | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/model_selection/measures.py | measures.py |
from yyskmultilearn.base import ProblemTransformationBase
import numpy as np
import scipy.sparse as sp
from copy import copy
class EmbeddingClassifier(ProblemTransformationBase):
"""Embedding-based classifier
Implements a general scheme presented in LNEMLC: label network embeddings for multi-label classification. The
classifier embeds the label space with the embedder, trains either a set of single-variate regressors or one
multi-variate regressor for embedding unseen cases, and a base classifier to predict labels based on input features and the embeddings.
Parameters
----------
embedder : :class:`~sklearn.base.BaseEstimator`
the class to embed the label space
regressor : :class:`~sklearn.base.BaseEstimator`
the base regressor to predict embeddings from input features
classifier : :class:`~sklearn.base.BaseEstimator`
the base classifier to predict labels from input features and embeddings
regressor_per_dimension : bool
whether to train one joint multi-variate regressor (False) or per dimension single-variate regressor (True)
require_dense : [bool, bool], optional
whether the base classifier requires dense representations for input features and classes/labels
matrices in fit/predict.
Attributes
----------
n_regressors_ : int
number of trained regressors
regressors_ : List[:class:`~sklearn.base.BaseEstimator`], shape=(`n_regressors_`,)
per-dimension regressors trained when :code:`regressor_per_dimension` is True, set in :meth:`fit`
If you use this classifier please cite the relevant embedding method paper
and the label network embedding for multi-label classification paper:
.. code :: bibtex
@article{zhang2007ml,
title={ML-KNN: A lazy learning approach to multi-label learning},
author={Zhang, Min-Ling and Zhou, Zhi-Hua},
journal={Pattern recognition},
volume={40},
number={7},
pages={2038--2048},
year={2007},
publisher={Elsevier}
}
Example
-------
An example use case for EmbeddingClassifier:
.. code-block:: python
from yyskmultilearn.embedding import SKLearnEmbedder, EmbeddingClassifier
from sklearn.manifold import SpectralEmbedding
from sklearn.ensemble import RandomForestRegressor
from yyskmultilearn.adapt import MLkNN
clf = EmbeddingClassifier(
SKLearnEmbedder(SpectralEmbedding(n_components = 10)),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
"""
def __init__(self, embedder, regressor, classifier, regressor_per_dimension=False, require_dense=None):
super(EmbeddingClassifier, self).__init__()
self.embedder = embedder
self.regressor = regressor
self.classifier = classifier
self.regressor_per_dimension = regressor_per_dimension
if require_dense is None:
require_dense = [True, True]
self.require_dense = require_dense
self.copyable_attrs = ['embedder', 'regressor', 'classifier', 'regressor_per_dimension', 'require_dense']
def fit(self, X, y):
"""Fits classifier to training data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
X = self._ensure_input_format(X)
y = self._ensure_input_format(y)
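# Three-stage fit: embed the label space, train regressor(s) that map the input features to the
# embedding, then train the base classifier on the features concatenated with the embedding.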
y_embedded = self.embedder.fit_transform(X, y)[1]
X_y_embedded = self._concatenate_matrices(X, y_embedded)
if self.regressor_per_dimension:
self.n_regressors_ = y_embedded.shape[1]
self.regressors_ = [None for _ in range(self.n_regressors_)]
for i in range(self.n_regressors_):
self.regressors_[i] = copy(self.regressor)
self.regressors_[i].fit(X, y_embedded[:, i])
else:
self.n_regressors_ = 1
self.regressor.fit(X, y_embedded)
self.classifier.fit(X_y_embedded, y)
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
X = self._ensure_input_format(X)
X_y_embedded = self._predict_embedding(X)
return self.classifier.predict(X_y_embedded)
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `float in [0.0, 1.0]`, shape=(n_samples, n_labels)
matrix with label assignment probabilities
"""
X_y_embedded = self._predict_embedding(X)
return self.classifier.predict_proba(X_y_embedded)
def _concatenate_matrices(self, X, y_embedded):
X = self._ensure_input_format(X)
y_embedded = self._ensure_input_format(y_embedded)
if sp.issparse(X):
X_y_embedded = sp.hstack([X, y_embedded])
else:
X_y_embedded = np.hstack([X, y_embedded])
return X_y_embedded
def _predict_embedding(self, X):
if self.regressor_per_dimension:
y_embedded = [self.regressors_[i].predict(X) for i in range(self.n_regressors_)]
if sp.issparse(X):
y_embedded=sp.csr_matrix(y_embedded).T
else:
y_embedded=np.matrix(y_embedded).T
else:
y_embedded = self.regressor.predict(X)
return self._concatenate_matrices(X, y_embedded) | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/embedding/classifier.py | classifier.py |
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from copy import copy
from ._mdsw import _MDSW
import numpy as np
import scipy.sparse as sp
# inspired by implementation by Kuan-Hao Huang
# https://github.com/ej0cl6/csmlc
class CLEMS(BaseEstimator):
"""Embed the label space using Cost-Sensitive Label Embedding with Multidimensional Scaling (CLEMS)
Parameters
----------
measure: Callable
a cost function executed on two label vectors
is_score: boolean
set to True if measure is a score function (higher value is better), False if it is a loss function (lower is better)
params: dict or None
extra parameters passed to the underlying weighted MDS embedder; the n_components, n_uq, uq_weight and
dissimilarity parameters are set automatically at fit time
Example code for using this embedder looks like this:
.. code-block:: python
from yyskmultilearn.embedding import CLEMS, EmbeddingClassifier
from sklearn.ensemble import RandomForestRegressor
from yyskmultilearn.adapt import MLkNN
from sklearn.metrics import accuracy_score
clf = EmbeddingClassifier(
CLEMS(accuracy_score, True),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
"""
def __init__(self, measure, is_score=False, params=None):
self.measure = measure
if is_score:
self.measure = lambda x, y: 1 - measure(x, y)
if params is None:
params = {}
self.params = params
def fit(self, X, y):
"""Fits the embedder to data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
# get unique label combinations
self.fit_transform(X, y)
return self
def fit_transform(self, X, y):
"""Fit the embedder and transform the output space
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
X, y_embedded
results of the embedding, input and output space
"""
if sp.issparse(y):
idx = np.unique(y.tolil().rows, return_index=True)[1]
else:
idx = np.unique(y, axis=0, return_index=True)[1]
y_unique = y[idx]
n_unique = y_unique.shape[0]
self.knn_ = NearestNeighbors(n_neighbors=1)
self.knn_.fit(y_unique)
nearest_points = self.knn_.kneighbors(y)[1][:, 0]
nearest_points_counts = np.unique(nearest_points, return_counts=True)[1]
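# Each unique label combination is weighted by how many training rows map to it; these counts are
# later passed to the weighted MDS as uq_weight.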
# calculate delta matrix
delta = np.zeros((2 * n_unique, 2 * n_unique))
for i in range(n_unique):
for j in range(n_unique):
delta[i, n_unique + j] = np.sqrt(self.measure(y_unique[None, i], y_unique[None, j]))
delta[n_unique + j, i] = delta[i, n_unique + j]
# calculate MDS embedding
params = copy(self.params)
params['n_components'] = y.shape[1]
params['n_uq'] = n_unique
params['uq_weight'] = nearest_points_counts
params['dissimilarity'] = "precomputed"
self.embedder_ = _MDSW(**params)
y_unique_embedded = self.embedder_.fit(delta).embedding_
y_unique_limited_to_before_trick = y_unique_embedded[n_unique:]
knn_to_extend_embeddings_to_other_combinations = NearestNeighbors(n_neighbors=1)
knn_to_extend_embeddings_to_other_combinations.fit(y_unique_limited_to_before_trick)
neighboring_embeddings_indices = knn_to_extend_embeddings_to_other_combinations.kneighbors(y)[1][:, 0]
return X, y_unique_embedded[neighboring_embeddings_indices] | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/embedding/clems.py | clems.py |
from copy import copy
from openne.gf import GraphFactorization
from openne.graph import Graph
from openne.grarep import GraRep
from openne.hope import HOPE
from openne.lap import LaplacianEigenmaps
from openne.line import LINE
from openne.lle import LLE
import networkx as nx
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
class OpenNetworkEmbedder:
"""Embed the label space using a label network embedder from OpenNE
Implements an OpenNE based LNEMLC: label network embeddings for multi-label classification.
Parameters
----------
graph_builder: a GraphBuilderBase inherited transformer
the graph builder to provide the adjacency matrix and weight map for the underlying graph
embedding : string, one of {'GraphFactorization', 'GraRep', 'HOPE', 'LaplacianEigenmaps', 'LINE', 'LLE'}
the selected OpenNE_ embedding
+----------------------+--------------------------------------------------------------------------------+
| Method name string | Description |
+----------------------+--------------------------------------------------------------------------------+
| GraphFactorization_ | Graph factorization embeddings |
+----------------------+--------------------------------------------------------------------------------+
| GraRep_ | Graph representations with global structural information |
+----------------------+--------------------------------------------------------------------------------+
| HOPE_ | High-order Proximity Preserved Embedding |
+----------------------+--------------------------------------------------------------------------------+
| LaplacianEigenmaps_ | Detecting communities from multiple async label propagation on the graph |
+----------------------+--------------------------------------------------------------------------------+
| LINE_ | Large-scale information network embedding |
+----------------------+--------------------------------------------------------------------------------+
| LLE_ | Locally Linear Embedding |
+----------------------+--------------------------------------------------------------------------------+
.. _OpenNE: https://github.com/thunlp/OpenNE/
.. _GraphFactorization: https://github.com/thunlp/OpenNE/blob/master/src/openne/gf.py
.. _GraRep: https://github.com/thunlp/OpenNE/blob/master/src/openne/grarep.py
.. _HOPE: https://github.com/thunlp/OpenNE/blob/master/src/openne/hope.py
.. _LaplacianEigenmaps: https://github.com/thunlp/OpenNE/blob/master/src/openne/lap.py
.. _LINE: https://github.com/thunlp/OpenNE/blob/master/src/openne/line.py
.. _LLE: https://github.com/thunlp/OpenNE/blob/master/src/openne/lle.py
dimension: int
the dimension of the label embedding vectors
aggregation_function: 'add', 'multiply', 'average' or Callable
the function used to aggregate label vectors for all labels assigned to each of the samples
normalize_weights: boolean
whether to normalize weights in the label graph by the number of samples or not
param_dict
parameters passed to the embedder, don't use the dimension and graph parameters, this class will set them at fit
If you use this classifier please cite the relevant embedding method paper
and the label network embedding for multi-label classification paper:
.. code :: bibtex
@article{zhang2007ml,
title={ML-KNN: A lazy learning approach to multi-label learning},
author={Zhang, Min-Ling and Zhou, Zhi-Hua},
journal={Pattern recognition},
volume={40},
number={7},
pages={2038--2048},
year={2007},
publisher={Elsevier}
}
Example code for using this embedder looks like this:
.. code-block:: python
from yyskmultilearn.embedding import OpenNetworkEmbedder, EmbeddingClassifier
from sklearn.ensemble import RandomForestRegressor
from yyskmultilearn.adapt import MLkNN
from yyskmultilearn.cluster import LabelCooccurrenceGraphBuilder
graph_builder = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False)
openne_line_params = dict(batch_size=1000, negative_ratio=5)
clf = EmbeddingClassifier(
OpenNetworkEmbedder(graph_builder, 'LINE', 4, 'add', True, openne_line_params),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
"""
_EMBEDDINGS = {
'GraphFactorization': (GraphFactorization, 'rep_size'),
'GraRep': (GraRep, 'dim'),
'HOPE': (HOPE, 'd'),
'LaplacianEigenmaps': (LaplacianEigenmaps, 'rep_size'),
'LINE': (LINE, 'rep_size'),
'LLE': (LLE, 'd'),
}
_AGGREGATION_FUNCTIONS = {
'add': np.add.reduce,
'multiply': np.multiply.reduce,
'average': lambda x: np.average(x, axis=0),
}
def __init__(self, graph_builder, embedding, dimension, aggregation_function, normalize_weights, param_dict=None):
if embedding not in self._EMBEDDINGS:
raise ValueError('Embedding must be one of {}'.format(', '.join(self._EMBEDDINGS.keys())))
if aggregation_function in self._AGGREGATION_FUNCTIONS:
self.aggregation_function = self._AGGREGATION_FUNCTIONS[aggregation_function]
elif callable(aggregation_function):
self.aggregation_function = aggregation_function
else:
raise ValueError('Aggregation function must be callable or one of {}'.format(
', '.join(self._AGGREGATION_FUNCTIONS.keys()))
)
self.embedding = embedding
self.param_dict = param_dict if param_dict is not None else {}
self.dimension = dimension
self.graph_builder = graph_builder
self.normalize_weights = normalize_weights
def fit(self, X, y):
self.fit_transform(X, y)
def fit_transform(self, X, y):
tf.reset_default_graph()
self._init_openne_graph(y)
embedding_class, dimension_key = self._EMBEDDINGS[self.embedding]
param_dict = copy(self.param_dict)
param_dict['graph'] = self.graph_
param_dict[dimension_key] = self.dimension
self.embeddings_ = embedding_class(**param_dict)
return X, self._embedd_y(y)
def _init_openne_graph(self, y):
self.graph_ = Graph()
self.graph_.G = nx.DiGraph()
for (src, dst), w in self.graph_builder.transform(y).items():
self.graph_.G.add_edge(src, dst)
self.graph_.G.add_edge(dst, src)
if self.normalize_weights:
w = float(w) / y.shape[0]
self.graph_.G[src][dst]['weight'] = w
self.graph_.G[dst][src]['weight'] = w
self.graph_.encode_node()
def _embedd_y(self, y):
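# Aggregate the embedding vectors of all labels assigned to a sample using the configured
# aggregation function; samples without any labels receive a zero vector.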
empty_vector = np.zeros(shape=self.dimension)
if sp.issparse(y):
return np.array([
self.aggregation_function([self.embeddings_.vectors[node] for node in row])
if len(row) > 0 else empty_vector
for row in _iterate_over_sparse_matrix(y)
]).astype('float64')
return np.array([
self.aggregation_function([self.embeddings_.vectors[node] for node, v in enumerate(row) if v > 0])
if len(row) > 0 else empty_vector
for row in (y.A if isinstance(y, np.matrix) else y)
]).astype('float64')
def _iterate_over_sparse_matrix(y):
for r in range(y.shape[0]):
yield y[r,:].indices | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/embedding/openne.py | openne.py |
from __future__ import absolute_import
from sklearn.base import BaseEstimator
class SKLearnEmbedder(BaseEstimator):
"""Embed the label space using a scikit-compatible matrix-based embedder
Parameters
----------
embedder : sklearn.base.BaseEstimator
a clonable instance of a scikit-compatible embedder, will be automatically
put under :code:`self.embedder`.
pass_input_space : bool (default is False)
whether to take :code:`X` into consideration upon clustering,
use only if you know that the embedder can handle two
parameters for clustering, will be automatically
put under :code:`self.pass_input_space`.
Example code for using this embedder looks like this:
.. code-block:: python
from yyskmultilearn.embedding import SKLearnEmbedder, EmbeddingClassifier
from sklearn.manifold import SpectralEmbedding
from sklearn.ensemble import RandomForestRegressor
from yyskmultilearn.adapt import MLkNN
clf = EmbeddingClassifier(
SKLearnEmbedder(SpectralEmbedding(n_components = 10)),
RandomForestRegressor(n_estimators=10),
MLkNN(k=5)
)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
"""
def __init__(self, embedder=None, pass_input_space=False):
super(BaseEstimator, self).__init__()
self.embedder = embedder
self.pass_input_space = pass_input_space
def fit(self, X, y):
"""Fits the embedder to data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
self.embedder.fit(X, y)
return self
def fit_transform(self, X, y):
"""Fit the embedder and transform the output space
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
X, y_embedded
results of the embedding, input and output space
"""
if self.pass_input_space:
result = self.embedder.fit_transform(X, y)
else:
result = self.embedder.fit_transform(y)
return X, result | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/embedding/skembeddings.py | skembeddings.py |
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state, check_array, check_symmetric
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.isotonic import IsotonicRegression
def _smacof_single_w(similarities, n_uq, uq_weight, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None):
"""
Computes multidimensional scaling using SMACOF algorithm
Parameters
----------
similarities: symmetric ndarray, shape [n * n]
similarities between the points
metric: boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components: int, optional, default: 2
number of dimension in which to immerse the similarities
overwritten if initial array is provided.
init: {None or ndarray}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
max_iter: int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose: int, optional, default: 0
level of verbosity
eps: float, optional, default: 1e-3
relative tolerance w.r.t. stress to declare convergence
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
X: ndarray (n_samples, n_components), float
coordinates of the n_samples points in a n_components-space
stress_: float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
Number of iterations run.
"""
similarities = check_symmetric(similarities, raise_exception=True)
n_samples = similarities.shape[0]
random_state = check_random_state(random_state)
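# Build the SMACOF weight matrix: only pairs made of one of the first n_uq points and one of the
# remaining points contribute to the stress, optionally scaled by uq_weight.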
W = np.ones((n_samples, n_samples))
W[:n_uq, :n_uq] = 0.0
W[n_uq:, n_uq:] = 0.0
# W[np.arange(len(W)), np.arange(len(W))] = 0.0
if uq_weight is not None:
W[:n_uq, n_uq:] *= uq_weight.reshape((uq_weight.shape[0], -1))
W[n_uq:, :n_uq] *= uq_weight.reshape((-1, uq_weight.shape[0]))
V = -W
V[np.arange(len(V)), np.arange(len(V))] = W.sum(axis=1)
e = np.ones((n_samples, 1))
Vp = np.linalg.inv(V + np.dot(e, e.T) / n_samples) - np.dot(e, e.T) / n_samples
# Vp = np.linalg.pinv(V)
sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = similarities
else:
dis_flat = dis.ravel()
# similarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
# stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
_stress = (W.ravel() * ((dis.ravel() - disparities.ravel()) ** 2)).sum() / 2
# Update X using the Guttman transform
# dis[dis == 0] = 1e-5
# ratio = disparities / dis
# B = - ratio
# B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
# X = 1. / n_samples * np.dot(B, X)
# print (1. / n_samples * np.dot(B, X))[:5].T
dis[dis == 0] = 1e-5
ratio = disparities / dis
_B = - W * ratio
_B[np.arange(len(_B)), np.arange(len(_B))] += (W * ratio).sum(axis=1)
X = np.dot(Vp, np.dot(_B, X))
# print X[:5].T
dis = np.sqrt((X ** 2).sum(axis=1)).sum()
if verbose >= 2:
print('it: %d, stress %s' % (it, _stress))
if old_stress is not None:
if (old_stress - _stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it, _stress))
break
old_stress = _stress / dis
return X, _stress, it + 1
def _smacof_w(similarities, n_uq, uq_weight, metric=True, n_components=2, init=None, n_init=8,
n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None,
return_n_iter=False):
"""
Computes multidimensional scaling using SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes
an objective function, the *stress*, using a majorization technique. The
Stress Majorization, also known as the Guttman Transform, guarantees a
monotone convergence of Stress, and is more powerful than traditional
techniques such as gradient descent.
The SMACOF algorithm for metric MDS can be summarized by the following steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
similarities : symmetric ndarray, shape (n_samples, n_samples)
similarities between the points
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
init : {None or ndarray of shape (n_samples, n_components)}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
n_init : int, optional, default: 8
Number of time the smacof_p algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
eps : float, optional, default: 1e-3
relative tolerance w.r.t stress to declare convergence
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
X : ndarray (n_samples,n_components)
Coordinates of the n_samples points in a n_components-space
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
The number of iterations corresponding to the best stress.
Returned only if `return_n_iter` is set to True.
Notes
-----
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
similarities = check_array(similarities)
random_state = check_random_state(random_state)
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
'Explicit initial positions passed: '
'performing only one init of the MDS instead of %d'
% n_init)
n_init = 1
best_pos, best_stress = None, None
if n_jobs == 1:
for it in range(n_init):
pos, stress, n_iter_ = _smacof_single_w(
similarities, n_uq, uq_weight, metric=metric,
n_components=n_components, init=init,
max_iter=max_iter, verbose=verbose,
eps=eps, random_state=random_state)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(_smacof_single_w)(
similarities, n_uq, uq_weight, metric=metric, n_components=n_components,
init=init, max_iter=max_iter, verbose=verbose, eps=eps,
random_state=seed)
for seed in seeds)
positions, stress, n_iters = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if return_n_iter:
return best_pos, best_stress, best_iter
else:
return best_pos, best_stress
class _MDSW(BaseEstimator):
"""Multidimensional scaling
Parameters
----------
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
n_init : int, optional, default: 4
Number of time the smacof_p algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
eps : float, optional, default: 1e-3
relative tolerance w.r.t stress to declare convergence
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
dissimilarity : string
Which dissimilarity measure to use.
Supported are 'euclidean' and 'precomputed'.
Attributes
----------
embedding_ : array-like, shape [n_samples, n_components]
Stores the position of the dataset in the embedding space
stress_ : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
References
----------
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
def __init__(self, n_components=2, n_uq=1, uq_weight=None, metric=True, n_init=4,
max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
random_state=None, dissimilarity="euclidean"):
self.n_components = n_components
self.n_uq = n_uq
self.uq_weight = uq_weight
self.dissimilarity = dissimilarity
self.metric = metric
self.n_init = n_init
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
@property
def _pairwise(self):
return self.dissimilarity == "precomputed"
def fit(self, X, y=None, init=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
self.fit_transform(X, init=init)
return self
def fit_transform(self, X, y=None, init=None):
"""
Fit the data from X, and returns the embedded coordinates
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
X = check_array(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn("The MDS API has changed. ``fit`` now constructs a"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity='precomputed'``.")
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
else:
raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
" Got %s instead" % str(self.dissimilarity))
self.embedding_, self.stress_, self.n_iter_ = _smacof_w(
self.dissimilarity_matrix_, self.n_uq, self.uq_weight, metric=self.metric,
n_components=self.n_components, init=init, n_init=self.n_init,
n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
eps=self.eps, random_state=self.random_state,
return_n_iter=True)
return self.embedding_ | yy-scikit-multilearn | /yy_scikit_multilearn-0.2.2-py3-none-any.whl/yyskmultilearn/embedding/_mdsw.py | _mdsw.py |
import time
import traceback
# local imports
from yy_spider.message_bus import common
from pymongo import UpdateOne, ReplaceOne, InsertOne, UpdateMany
from pymongo.errors import BulkWriteError
from queue import Queue
from twisted.internet import defer
class DbHandler(object):
def __init__(self, logger, mdb, conf, write_queues, task_queues, count_queue):
self.logger = logger
self.mdb = mdb
self._conf = conf
self.write_queues = write_queues
self.task_queues = task_queues
self.count_queue = count_queue
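# Writes are not applied immediately: they are buffered per collection in
# write_queues (plus count_queue for statistics) and flushed in bulk by
# cleanup_handle_queue(), which the server calls on a timer.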
def init_write_queue(self, coll_name):
if not self.write_queues.get(coll_name, None):
self.write_queues[coll_name] = Queue(maxsize=self._conf.WRITE_QUEUE_SIZE)
def cleanup_handle_queue(self):
self.logger.debug("clear ... cleanup begin")
try:
self._handle_write_queue()
self._handle_count_queue()
except BulkWriteError as bwe:
self.logger.error(bwe.details)
werrors = bwe.details['writeErrors']
self.logger.error(werrors)
except Exception as e:
self.logger.error(str(e))
traceback.print_exc()
self.logger.debug("clear ... cleanup end")
def _handle_write_queue(self):
for coll_name, _queue in self.write_queues.items():
t0 = time.time()
requests = []
qsize = _queue.qsize()
while _queue.qsize() > 0:
try:
req = _queue.get_nowait()
_queue.task_done()
except Exception as e:
self.logger.error(str(e))
break
requests.append(req)
if len(requests) > 0:
self.mdb[coll_name].bulk_write(requests, ordered=False)
t_diff = time.time() - t0
info = "handle_write_queue,coll:{},size:{},t_diff:{}".format(coll_name, qsize, t_diff)
self.logger.info(info)
def _handle_count_queue(self):
if self.count_queue.qsize() > 0:
t0 = time.time()
requests = []
qsize = self.count_queue.qsize()
while self.count_queue.qsize() > 0:
try:
tmp = self.count_queue.get_nowait()
self.count_queue.task_done()
except Exception as e:
self.logger.error(str(e))
break
requests.append(tmp)
if len(requests) > 0:
self.mdb[self._conf.STATS_COLL].bulk_write(requests, ordered=False)
t_diff = time.time() - t0
info = "handle_count_queue,size:{},t_diff:{}".format(qsize, t_diff)
self.logger.info(info)
@defer.inlineCallbacks
def put_task_to_db(self, coll_name, data):
"""新加任务"""
t0 = time.time()
self.init_write_queue(coll_name)
# find which of these tasks already exist
res = yield self.mdb[coll_name].find({"_id": {"$in": list(set([t['_id'] for t in data]))}}, {'_id': 1})
exists = [r['_id'] for r in res]
self.save_stats_data(coll_name, common.NEW_TASK, len(data) - len(exists))
# queue inserts for the tasks that do not exist yet
for t in data:
if t[b"_id"] not in exists:
self.write_queues[coll_name].put(InsertOne(t))
t_diff = time.time() - t0
info = "{}, {}".format(coll_name, t_diff)
self.logger.debug(info)
defer.returnValue([])
@defer.inlineCallbacks
def get_task_from_db(self, coll_name, count, cond={}):
"""获取任务"""
t0 = time.time()
cond['status'] = common.NOT_CRAWL
requests, ts = [], []
tasks = yield self.mdb[coll_name].find(cond, limit=count)
for task in tasks:
requests.append(
UpdateMany({'_id': task['_id']}, {"$set": {"status": common.CRAWLING, "last_crawl_time": 0}})
)
task.pop('_id')
ts.append(task)
if len(requests) > 0:
yield self.mdb[coll_name].bulk_write(requests, ordered=False)
t_diff = time.time() - t0
info = "total, {}, return : {}, use time : {}".format(coll_name, len(ts), t_diff)
self.logger.debug(info)
return ts
def change_task_status(self, coll_name, data):
"""更新任务状态"""
t0 = time.time()
self.init_write_queue(coll_name)
# stats: record the number of successfully crawled tasks
success = [t['_id'] for t in data if t['status'] == common.CRAWL_SUCCESS]
self.save_stats_data(coll_name, common.ONE_TASK, len(success))
# queue the status updates
for t in data:
self.write_queues[coll_name].put(
UpdateMany({'_id': t['_id']}, {"$set": {'status': t['status']}})
)
t_diff = time.time() - t0
info = "{}, {}".format(coll_name, t_diff)
self.logger.debug(info)
def put_data_to_db(self, coll_name, data):
"""新增数据,如果已经存在则替换旧的数据"""
t0 = time.time()
self.init_write_queue(coll_name)
# stats: record the number of crawled documents
self.save_stats_data(coll_name, common.ONE_DATA, len(data))
for t in data:
self.write_queues[coll_name].put(ReplaceOne({'_id': t['_id']}, t, upsert=True))
t_diff = time.time() - t0
info = "{}, {}".format(coll_name, t_diff)
self.logger.debug(info)
def save_stats_data(self, coll_name, _type, count):
"""存储统计数据"""
date = time.strftime("%Y-%m-%d", time.localtime())
# per-collection counter
u1 = UpdateOne({'date': date, 'coll_name': coll_name, "_type": _type},
{'$inc': {'total': count}}, upsert=True)
# overall counter
u2 = UpdateOne({'date': date, 'coll_name': "all", "_type": _type}, {'$inc': {'total': count}},
upsert=True)
self.count_queue.put(u1)
self.count_queue.put(u2) | yy-spider | /yy-spider-1.0.0.tar.gz/yy-spider-1.0.0/yy_spider/database/db_handler.py | db_handler.py |
import struct
import threading
import time
import msgpack
from yy_spider import utils
from yy_spider.database import get_mongo_db
from yy_spider.database.db_handler import DbHandler
from yy_spider.message_bus import common
from queue import Queue
from twisted.internet.protocol import Factory
from twisted.internet.protocol import Protocol
class ServerProtocal(Protocol):
def __init__(self, users, logger, db, conf):
self.users = users
self.logger = logger
self._conf = conf
self.db = db
self.name = None
self.state = "FIRST"
self.buffer = b''
self.data_length = 0
def connectionMade(self):
info = "connection from", self.transport.getPeer()
self.logger.debug(info)
def connectionLost(self, reason):
info = "Lost connection from", self.transport.getPeer(), reason.getErrorMessage()
self.logger.warning(info)
if self.name in self.users:
del self.users[self.name]
def dataReceived(self, data):
if self.state == "FIRST":
self.handle_first(data)
else:
self.handle_data(data)
# verify the password
def handle_first(self, data):
data = data.decode('utf8')
tmp = data.split('@@@***')
if len(tmp) < 2:
self.transport.abortConnection()
else:
name = tmp[0]
pwd = tmp[1]
if utils.md5(name + self._conf.SOCKET_KEY) == pwd:
self.name = name
self.users[name] = self
self.state = "DATA"
self.transport.write(b"OK!!")
else:
self.transport.abortConnection()
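# Wire format: every message is a 4-byte big-endian unsigned length prefix
# followed by a msgpack-encoded payload of exactly that many bytes.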
def handle_data(self, data):
self.buffer += data
while True:
if self.data_length <= 0:
if len(self.buffer) >= 4:
self.data_length = struct.unpack('>I', self.buffer[:4])[0]
if self.data_length > 1024 * 1024:
utils.send_email("data length:%s" % self.data_length)
self.transport.abortConnection()
self.buffer = self.buffer[4:]
else:
return
if len(self.buffer) >= self.data_length:
tmp_data = self.buffer[:self.data_length]
self.buffer = self.buffer[self.data_length:]
self.data_length = 0
self.process_data(tmp_data)
return
else:
return
def process_data(self, data):
rj = msgpack.unpackb(data, encoding='utf-8')
if rj['type'] == common.REQUEST_MESSAGE:
coll_name = rj["coll_name"]
action = rj["action"]
data = rj["data"]
self.handle_request(coll_name, action, data)
elif rj['type'] == common.ECHO_MESSAGE:
pass
else:
info = "not support message:%s" % rj['type']
self.logger.warning(info)
self.transport.abortConnection()
def send_msg(self, msg):
msg = struct.pack('>I', len(msg)) + msg
self.my_send(msg)
def my_send(self, msg):
total_sent = 0
msg_len = len(msg)
while total_sent < msg_len:
if len(msg) > 4:
self.transport.write(msg[:4])
msg = msg[4:]
else:
self.transport.write(msg)
total_sent = total_sent + 4
def handle_request(self, coll_name, action, data):
db = self.db
if action == common.PUT_TASK:
d = db.put_task_to_db(coll_name, data)
d.addCallback(self.handle_success)
d.addErrback(self.handle_failure)
elif action == common.GET_TASK:
d = db.get_task_from_db(coll_name, data['count'], data.get('cond', {}))
d.addCallback(self.handle_success)
d.addErrback(self.handle_failure)
elif action == common.PUT_DATA:
db.put_data_to_db(coll_name, data)
self.handle_success([])
elif action == common.CHANGE_TASK_STATUS:
db.change_task_status(coll_name, data)
self.handle_success([])
def handle_success(self, res):
res = {
'type': common.RESPONSE_MESSAGE,
'status': common.OK,
'data': res,
}
_res = msgpack.packb(res)
self.send_msg(_res)
def handle_failure(self, err):
res = {
'type': common.RESPONSE_MESSAGE,
'status': common.FAIL,
'data': [],
}
_res = msgpack.packb(res)
self.send_msg(_res)
self.logger.error(err)
class ServerFactory(Factory):
def __init__(self, conf, logger):
self.users = {}
write_queues = {}
task_queues = {}
count_queue = Queue(maxsize=conf.COUNT_QUEUE_SIZE)
mdb = get_mongo_db(conf.MONGO_USER, conf.MONGO_PASSWORD, conf.MONGO_HOST, conf.MONGO_PORT, conf.MONGO_DB)
self._logger = logger
self._conf = conf
self.db = DbHandler(logger, mdb, conf, write_queues, task_queues, count_queue)
ts = []
# start a thread for periodic cleanup
t1 = threading.Thread(target=self.sched_cleanup, args=())
ts.append(t1)
for t in ts:
t.setDaemon(True)
t.start()
logger.info("__init__ finish")
def buildProtocol(self, addr):
return ServerProtocal(self.users, self._logger, self.db, self._conf)
def sched_cleanup(self):
"""定时清理"""
while True:
time.sleep(self._conf.CLEANUP_INTERVAL)
self.cleanup()
def cleanup(self):
self.db.cleanup_handle_queue() | yy-spider | /yy-spider-1.0.0.tar.gz/yy-spider-1.0.0/yy_spider/server/server.py | server.py |
import time
import traceback
import requests
from yy_spider.common.yy_exceptions import InvalidTaskException, Http404Exception
from yy_spider.message_bus import common
class BaseSpider(object):
""""""
def __init__(self, conf, logger, task_queue, result_queue):
self._conf = conf
self._logger = logger
self._task_queue = task_queue
self._result_queue = result_queue
self._change_session_limit = conf.CHANGE_SESSION_LIMIT
self._last_crawl_count = 0
self._session = None
self.init_session()
def run(self):
"""运行爬虫"""
self.init_seed()
while True:
try:
task = self._task_queue.get()
self._logger.debug("begin to do task {}".format(task['_id']))
self.crawl(task)
if self._conf.TASK_COLL:
self._result_queue.put((common.CHANGE_TASK_STATUS, self._conf.TASK_COLL,
[{'_id': task['_id'], 'status': common.CRAWL_SUCCESS}]))
self._logger.debug("finish to do task {}".format(task['_id']))
except InvalidTaskException as e:
if self._conf.TASK_COLL:
self._result_queue.put((common.CHANGE_TASK_STATUS, self._conf.TASK_COLL,
[{'_id': task['_id'], 'status': common.INVALID}]))
self._logger.debug("finish to do task {}".format(task['_id']))
except Exception as e:
trace = traceback.format_exc()
self._logger.error("error:{},trace:{}".format(str(e), trace))
if self._conf.TASK_COLL:
self._result_queue.put((common.CHANGE_TASK_STATUS, self._conf.TASK_COLL,
[{'_id': task['_id'], 'status': common.CRAWL_FAIL}]))
self._logger.debug("finish to do task {}".format(task['_id']))
def init_seed(self):
"""初始化种子"""
def _update_headers(self, headers):
self._conf.DEFAULT_HEADERS.update(headers)
return self._conf.DEFAULT_HEADERS
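# crawl_url() retries up to MAX_RETRY_TIME times; on failure the current
# identity is reported as unhealthy via _set_id_info_status() and the session
# is re-created with a fresh identity from the ID_INFO_SERVER.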
def crawl_url(self, url, method='get', headers={}, data={}, timeout=None):
"""抓取url, 尝试MAX_RETRY_TIME次"""
try_times = 0
self._logger.debug("begin to crawl url :{}".format(url))
t0 = time.time()
if not timeout:
timeout = self._conf.DEFAULT_HTTP_TIMEOUT
headers = self._update_headers(headers)
res = None
while try_times < self._conf.MAX_RETRY_TIME:
try_times += 1
if self._last_crawl_count >= self._change_session_limit:  # reset the session after a fixed number of requests
self.init_session()
try:
res = getattr(self._session, method)(url, headers=headers, params=data, timeout=timeout)
if res.status_code != 200: # http status
if res.status_code == 404:
raise Http404Exception
raise Exception("status_code != 200")
time.sleep(self._conf.DOWNLOAD_SLEEP_TIME)
break
except Exception as e:
err_info = 'download html failed, url: {}, error:{}'.format(url, str(e))
self._logger.error(err_info)
if str(e) in self._conf.INVALID_TASK_ERRORS:
raise InvalidTaskException
self._set_id_info_status(is_ok=0, err_info=err_info)
self.init_session()
if not res:
raise Exception("res is None")
if res.status_code != 200: # http status
raise Exception("status_code != 200")
t_diff = time.time() - t0
self._logger.debug("finish to crawl url :%s, use time:%s" % (url, t_diff))
return res
def init_session(self):
self._logger.warning('begin to reset session')
self._last_crawl_count = 0
self._session = requests.Session()
self._set_id_info()
self._logger.warning('reset session success')
def _set_cookie(self, cookies):
"""设置cookie,cookie是'k=v;k=v'格式的字符串"""
if not cookies:
return
for s in cookies.split(';'):
k = s.split('=')[0]
v = s.split('=')[1]
self._session.cookies.set(k, v)
def _set_proxy(self, proxy):
"""设置代理信息"""
if not proxy:
return
if proxy['schema'] == 'socks5':
schema = "{}h".format(proxy['schema'])
else:
schema = proxy['schema']
username = proxy['username']
password = proxy['password']
ip = proxy['ip']
port = proxy['port']
if username and password:
proxies = {'http': '{}://{}:{}@{}:{}'.format(schema, username, password, ip, port),
'https': '{}://{}:{}@{}:{}'.format(schema, username, password, ip, port),
}
else:
proxies = {'http': '{}://{}:{}'.format(schema, ip, port),
'https': '{}://{}:{}'.format(schema, ip, port),
}
self._session.proxies = proxies
def _set_id_info(self):
"""设置身份信息信息,可能包括cookie,账号,代理等"""
if not self._conf.ID_INFO_SERVER:
return
while True:
try:
url = 'http://{}/id_info'.format(self._conf.ID_INFO_SERVER)
res = requests.get(url, headers={'TOKEN': self._conf.ID_INFO_TOKEN})
d = res.json()['data']
self._id = d['_id']
self._set_cookie(d['cookies'])
self._username = d['username']
self._password = d['password']
self._id_extra = d['extra']
self._set_proxy(d['proxy'])
return
except Exception as e:
self._logger.warning("_set_id_info:{}".format(str(e)))
time.sleep(10)
def _set_id_info_status(self, is_ok, err_info):
"""设置身份信息信息,例如账号标记状态为不健康"""
if not self._conf.ID_INFO_SERVER:
return
while True:
try:
url = 'http://{}/id_info/{}'.format(self._conf.ID_INFO_SERVER, self._id)
data = {'is_ok': is_ok, 'err_info': err_info}
res = requests.put(url, headers={'TOKEN': self._conf.ID_INFO_TOKEN}, data=data)
if res.status_code != 200:
raise Exception('res.status_code!=200')
return
except Exception as e:
self._logger.warning("_set_id_info:{}".format(str(e)))
time.sleep(10) | yy-spider | /yy-spider-1.0.0.tar.gz/yy-spider-1.0.0/yy_spider/client/spiders/base_spider.py | base_spider.py |
import socket
import struct
import time
import msgpack
# local imports
from yy_spider import utils
from . import common
from .base_bus import BaseBus
class MessageBus(BaseBus):
def __init__(self, conf, task_queue, result_queue, logger):
self.host = conf.SOCKET_HOST
self.port = conf.SOCKET_PORT
self.key = conf.SOCKET_KEY
self.user_name = conf.SOCKET_USERNAME
self._logger = logger
self._task_queue = task_queue
self._result_queue = result_queue
self._conf = conf
self.connect()
def connect(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.connect((self.host, self.port))
self.sock.settimeout(60 * 10)
except Exception as e:
self._logger.error(str(e))
self.reconnect(sleep_time=60)
return
self.auth()
# the server requires authentication
def auth(self):
password = utils.md5(self.user_name + self.key)
try:
token = "%s@@@***%s" % (self.user_name, password)
self.sock.sendall(token.encode('utf8'))
res = self.recvall(4)
if res != b'OK!!':
self._logger.error("invalid password!!!")
self.reconnect(sleep_time=30)
return
except Exception as e:
self._logger.error(str(e))
self.reconnect(sleep_time=30)
return
def __del__(self):
del self.sock
def reconnect(self, sleep_time=15):
self.__del__()
time.sleep(sleep_time)
self.connect()
def send_msg(self, msg):
msg1 = struct.pack('>I', len(msg)) + msg
try:
self.mysend(msg1)
if len(msg) - 4 - 4 > 0:
self._logger.info("send:%s" % (len(msg) - 4))
return True
except Exception as e:
self._logger.error(str(e))
self.reconnect(sleep_time=60)
return False
def mysend(self, msg):
totalsent = 0
MSGLEN = len(msg)
while totalsent < MSGLEN:
if len(msg) > self._conf.SOCKET_ONE_TIME_SEND:
sent = self.sock.send(msg)
msg = msg[sent:]
else:
sent = self.sock.send(msg)
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
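# recv_msg() mirrors send_msg(): it first reads the 4-byte big-endian length
# prefix, then reads exactly that many payload bytes via recvall().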
def recv_msg(self):
# Read message length and unpack it into an integer
try:
raw_msglen = self.recvall(4)
except socket.timeout:
self._logger.error('recv msg timeout ......')
return None
except Exception as e:
self._logger.error(str(e))
self.reconnect(sleep_time=60)
return None
if not raw_msglen:
self._logger.warning("not raw_msglen")
self.reconnect(sleep_time=60)
return None
msglen = struct.unpack('>I', raw_msglen)[0]
# Read the message data
try:
return self.recvall(msglen)
except Exception as e:
self._logger.error(str(e))
self.reconnect(sleep_time=60)
return None
def recvall(self, n):
# Helper function to recv n bytes or return None if EOF is hit
data = b''
while len(data) < n:
if n - len(data) > 4:
packet = self.sock.recv(4)
else:
packet = self.sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
def get_tasks(self):
"""获取任务,放入任务队列"""
while True:
if self._task_queue.qsize() < self._conf.TASK_QUEUE_LIMIT:
tasks = self.get_task(self._conf.TASK_COLL, self._conf.THREAD_NUM)
self._logger.debug('get {} tasks'.format(len(tasks)))
[self._task_queue.put([t]) for t in tasks]
if len(tasks) == 0:
time.sleep(10)
else:
time.sleep(1)
def save_result(self):
"""读取_result_queue中的数据,发给server"""
while True:
self._logger.debug("current result_queue qsize {}".format(self._result_queue.qsize()))
if self._result_queue.qsize() > 0:
res = self._result_queue.get()
self.do_request(res[0], res[1], res[2])
else:
time.sleep(1)
def do_request(self, action, coll_name, data):
""""""
req = {'type': common.REQUEST_MESSAGE,
'action': action,
'coll_name': coll_name,
'data': data}
_req = msgpack.packb(req)
self.send_msg(_req)
while True:
res = self.recv_msg()
if res:
return msgpack.unpackb(res)
def get_task(self, coll_name, count=1):
data = {'count': count}
return self.do_request(common.GET_TASK, coll_name, data)[b'data']
def insert_data(self, coll_name, data):
return self.do_request(common.PUT_DATA, coll_name, data)
def update_data(self, coll_name, data):
return self.do_request(common.UPDATE_DATA, coll_name, data)
def insert_data_if_not_exist(self, coll_name, data):
return self.do_request(common.INSERT_DATA_IF_NOT_EXIST, coll_name, data)
def change_task_status(self, coll_name, data):
return self.do_request(common.CHANGE_TASK_STATUS, coll_name, data)
def put_task(self, coll_name, data):
return self.do_request(common.PUT_TASK, coll_name, data) | yy-spider | /yy-spider-1.0.0.tar.gz/yy-spider-1.0.0/yy_spider/message_bus/socket_bus.py | socket_bus.py |
import logging
import logging.handlers
import os
class ColoredFormatter(logging.Formatter):
def __init__(self, fmt=None):
logging.Formatter.__init__(self, fmt=fmt)
def format(self, record):
COLORS = {
'Black': '0;30',
'Red': '0;31',
'Green': '0;32',
'Brown': '0;33',
'Blue': '0;34',
'Purple': '0;35',
'Cyan': '0;36',
'Light_Gray': '0;37',
'Dark_Gray': '1;30',
'Light_Red': '1;31',
'Light_Green': '1;32',
'Yellow': '1;33',
'Light_Blue': '1;34',
'Light_Purple': '1;35',
'Light_Cyan': '1;36',
'White': '1;37',
}
COLOR_SEQ = "\033[%sm"
RESET_SEQ = "\033[0m"
message = logging.Formatter.format(self, record)
if record.levelno == logging.DEBUG:
message = COLOR_SEQ % COLORS['White'] + message + RESET_SEQ
elif record.levelno == logging.INFO:
message = COLOR_SEQ % COLORS['Green'] + message + RESET_SEQ
elif record.levelno == logging.WARNING:
message = COLOR_SEQ % COLORS['Brown'] + message + RESET_SEQ
elif record.levelno == logging.ERROR:
message = COLOR_SEQ % COLORS['Red'] + message + RESET_SEQ
elif record.levelno == logging.CRITICAL:
message = COLOR_SEQ % COLORS['Purple'] + message + RESET_SEQ
return message
def get_logger(log_name="",
log_path='/tmp/logs',
single_log_file_size=1024 * 1024 * 600,
log_to_file=True,
backup_count=3):
""":return a logger"""
if not os.path.exists(log_path):
try:
os.makedirs(log_path)
except Exception as e:
print(str(e))
logger = logging.getLogger("{}".format(log_name))
logger.setLevel(logging.DEBUG)
if log_name and log_to_file:
# file
log_file = "{}/{}.log".format(log_path, log_name)
fh = logging.handlers.RotatingFileHandler(log_file, maxBytes=single_log_file_size, backupCount=backup_count)
color_formatter = ColoredFormatter(fmt='%(asctime)s %(funcName)s[line:%(lineno)d] [%(levelname)s]: %(message)s')
fh.setFormatter(color_formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# stdout
sh = logging.StreamHandler()
color_formatter = ColoredFormatter(fmt='%(asctime)s %(funcName)s[line:%(lineno)d] [%(levelname)s]: %(message)s')
sh.setFormatter(color_formatter)
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
return logger | yy-spider | /yy-spider-1.0.0.tar.gz/yy-spider-1.0.0/yy_spider/utils/yy_logger.py | yy_logger.py |
```bash
usage: maker.py [-h] -i INPUT_PATH [-d DURATION_INTERVAL_SECOND] [-rw MAX_ROW_WIDTH] [-mp MIN_PARTITION]
options:
-h, --help show this help message and exit
-i INPUT_PATH, --input-path INPUT_PATH
The path of input files or the directory that keeps the inputs.
-d DURATION_INTERVAL_SECOND, --duration-interval-second DURATION_INTERVAL_SECOND
The interval that partition the video.
-rw MAX_ROW_WIDTH, --max-row-width MAX_ROW_WIDTH
The width of each row.
-mp MIN_PARTITION, --min-partition MIN_PARTITION
Minimum partition of the grids.
```
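The same processing can be driven from Python by calling `FrameCapture` from `maker.py`. A minimal sketch (the `./videos` path is only an example):

```python
from yy_vtm.maker import FrameCapture

# scan ./videos recursively, sample one frame roughly every 5 minutes,
# and write a thumbnail grid (4 per row) next to each video in a "frame" folder
FrameCapture("./videos", duration_intv_sec=300, max_row_width=4, min_partition=8)
```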
| yy-vtm | /yy_vtm-0.2.4.tar.gz/yy_vtm-0.2.4/README.md | README.md |
import cv2
import numpy as np
import os
import sys
import glob
from tqdm.auto import tqdm
import traceback
import math
import argparse
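# framing(): sample evenly spaced frames across one video, tile them into a
# grid (max_row_width thumbnails per row), and save the grid as
# "<video filename>.jpg" inside a "frame" folder next to the input file.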
def framing(input_path, duration_intv_sec=10, max_row_width=4, min_partition=8):
input_location, input_filename = os.path.split(input_path)
# print(input_location, input_filename)
frame_folder = os.path.join(input_location, 'frame')
if not os.path.exists(frame_folder):
os.mkdir(frame_folder)
imgs = []
cap = cv2.VideoCapture(input_path)
fps = cap.get(cv2.CAP_PROP_FPS)
frame_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frame_shape = None
duration = frame_len // fps
partitions = max(int(duration / duration_intv_sec), min_partition)
interval_in_frame = int(frame_len / partitions)
# print(duration, duration_intv_sec)
# print(frame_len, interval_in_frame)
try:
start = 0
while start < frame_len:
end = start + interval_in_frame if start + \
interval_in_frame < frame_len else frame_len
if frame_len - end < interval_in_frame / 2:
end = frame_len
current_frame = int((start + end) / 2)
# print((start, end), end - start, current_frame)
cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame)
cap.grab()
_, frame_image = cap.retrieve()
if frame_shape is None:
frame_shape = frame_image.shape
if frame_image is not None:
imgs.append(frame_image)
start = end
except Exception as e:
print(e)
finally:
cap.release()
imgs = np.array(imgs)
row_concats = []
s = 0
for row in range(math.ceil(imgs.shape[0] / max_row_width)):
e = s + max_row_width if s + \
max_row_width < imgs.shape[0] else imgs.shape[0]
curr = imgs[s:e, :, :, :]
if max_row_width - (e - s) > 0:
padding = np.zeros(
shape=(max_row_width - (e - s), *curr.shape[1:]), dtype=np.uint8)
curr = np.vstack((curr, padding))
row_concats.append(cv2.hconcat(curr))
s = e
grid_img = cv2.vconcat(row_concats)
frame_file_name = f"{input_filename}.jpg"
cv2.imwrite(os.path.join(frame_folder, frame_file_name), grid_img)
def FrameCapture(input_path, duration_intv_sec=600, max_row_width=3, min_partition=8):
accepted_video_extension = ['.mp4', '.mkv', '.avi', '.ts',
'.wmv', '.webm', '.mpeg', 'mpe', 'mpv', '.ogg', '.m4p', '.m4v']
all_video_extension = [*accepted_video_extension, '.rmvb']
input_path = os.path.abspath(input_path)
if os.path.isfile(input_path):
all_files = [input_path]
else:
all_files = []
for ext in all_video_extension:
files = []
files.extend(
glob.glob(input_path + f'/**/*{ext}', recursive=True))
files.extend(
glob.glob(input_path + f'/**/*{ext.upper()}', recursive=True))
if len(files) > 0:
if ext in accepted_video_extension:
all_files.extend(files)
else:
print(f'Not supported: {files}')
if len(all_files) > 0:
for file in tqdm(all_files, bar_format='{l_bar}{bar:30}{r_bar}{bar:-10b}'):
try:
framing(file, duration_intv_sec, max_row_width, min_partition)
except Exception as e:
traceback.print_exc()
def run():
class MyParser(argparse.ArgumentParser):
def error(self, message):
self.print_help()
sys.exit(2)
sys.path.append(os.getcwd())
parser = MyParser()
parser.add_argument(
"-i", "--input-path", type=str,
help="The path of input files or the directory that keeps the inputs.", required=True)
parser.add_argument(
"-d", "--duration-interval-second", type=int,
help="The interval that partition the video.", default=600)
parser.add_argument(
"-rw", "--max-row-width", type=int,
help="The width of each row.", default=3)
parser.add_argument(
"-mp", "--min-partition", type=int,
help="Miniumn partition of the grids.", default=8)
args = parser.parse_args()
input_path = args.input_path
duration_intv_sec = args.duration_interval_second
max_row_width = args.max_row_width
min_partition = args.min_partition
FrameCapture(input_path, duration_intv_sec=duration_intv_sec,
max_row_width=max_row_width, min_partition=min_partition)
if __name__ == '__main__':
run() | yy-vtm | /yy_vtm-0.2.4.tar.gz/yy_vtm-0.2.4/yy_vtm/maker.py | maker.py |
===========================
pytodos
===========================
Command line lightweight todos.
.. image:: https://travis-ci.org/chuanwu/PyTodos.svg?branch=master
.. image:: https://badge.fury.io/py/pytodos.svg
:target: https://badge.fury.io/py/pytodos
Install::
pip install pytodos
Usage::
add: add a task
list: show unfinished tasks plus tasks finished within the last week
listall: show unfinished tasks plus all finished tasks
got x: x is the number of an unfinished task; mark it as done
.. image:: https://ooo.0o0.ooo/2017/04/19/58f729d498fa7.png
Attention::
Data is stored in /tmp/data.txt. Please be careful not to delete it by mistake, or your data will be lost.
| yy | /yy-1.0.16.tar.gz/yy-1.0.16/README.rst | README.rst |
# YYeTsBot
[](https://github.com/tgbot-collection/YYeTsBot/actions/workflows/docker.yaml)
[](https://hub.docker.com/r/bennythink/yyetsbot)
* YYeTs (人人影视) bot, [click here to use it](https://t.me/yyets_bot)
* YYeTs resource sharing site, [click here to use it](https://yyets.dmesg.app/)
The bot and the website are maintained by me over the long term; if you run into problems, feel free to open an issue.

👉 The frontend lives [here](https://github.com/tgbot-collection/YYeTsFE) 👈
# Usage
Just send the name of the show you want to watch; the bot replies with a share page or links (ed2k and magnet links).
Zimuxia (字幕侠) and YYeTs offline resources are supported.
When searching, resources are looked up in my predefined priority order (YYeTs offline, then Zimuxia); you can also force a specific subtitle group with a command, e.g. `/yyets_offline 逃避可耻`.
**Because translated titles differ, it is recommended to enter part of the title and then pick from the list. For example, to watch Game of Thrones season 4, just search for "权力的游戏" (Game of Thrones).**
## Commands
```
start - get started
help - show help
credits - credits
ping - runtime status
settings - get announcements
zimuxia_online - Zimuxia online data
newzmz_online - NewZMZ online data
yyets_offline - YYeTs offline data
```
# Screenshots
## Regular search

## Resource sharing site
The website is free forever and has no restrictions.


Favorites are supported and synced across devices.

## Search within a specific subtitle group
Currently only YYeTsOffline, ZimuxiaOnline and NewzmzOnline are supported.

# How to download magnet and eD2k resources? Thunder (迅雷) flags the resource as sensitive
## eD2k resources
Download and use [eMule](https://www.emule-project.net/home/perl/general.cgi?l=42), then add the following two server lists:
* [server.met](http://www.server-met.de/)
* [server list for emule](https://www.emule-security.org/serverlist/)

The speed is decent.
## Magnet links
Use an offline download service such as Baidu Netdisk or 115, or a client such as uTorrent, and remember to update the [tracker list](https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all.txt).
# For non-programmers
Want to keep a copy of the resources for yourself but don't know how to code? No problem! Two options are provided; pick whichever suits you.
"Offline" means it works without an internet connection, but resources are not updated automatically and the database has to be refreshed manually; "online" means an internet connection is required.
## Offline: full runnable package
This version has the new UI and all the latest features. It runs on your local machine and does not depend on any external service.
[Reference documentation](https://github.com/tgbot-collection/YYeTsBot/blob/master/DEVELOPMENT.md#%E4%B8%80%E9%94%AE%E8%84%9A%E6%9C%AC)
## Offline: one-click package
A one-click runnable package with a fairly new UI, but only the most basic search and browse features. Steps:
1. Go to [GitHub Release](https://github.com/tgbot-collection/YYeTsBot/releases) and find the latest `YYeTsBot 离线一键运行包` (offline one-click package)
2. Windows: double-click the exe downloaded in step 1; macOS/Linux: cd to your directory and run `chmod +x yyetsweb ; ./yyetsweb`
3. The program downloads the database automatically and starts. When the startup message appears, open http://127.0.0.1:8888 in your browser and you will see the familiar search page!
## Online: native application
A web page wrapped with tauri. The content is the same as `https://yyets.dmesg.app`, just packaged as a native app. Steps:
1. Go to [GitHub Release](https://github.com/tgbot-collection/YYeTsBot/releases) and find the latest `YYeTsBot App`
2. Windows: download the msi; macOS: download the dmg or tar.gz; Linux: download the AppImage or deb (Debian based)
3. After installation, open the app and you will see the familiar search page!
# Development
## Website development
For deployment, contributing, and the detailed API, see [this document](DEVELOPMENT.md).
## Python Library
It can also be used as a Python library:
`pip3 install yyets`
```
>>> from yyets import YYeTs
>>> yy=YYeTs("逃避")
[2021-09-21 19:22:32 __init__.py:54 I] Fetching 逃避可耻却有用...https://yyets.dmesg.app/api/resource?id=34812
[2021-09-21 19:22:33 __init__.py:54 I] Fetching 无法逃避...https://yyets.dmesg.app/api/resource?id=29540
[2021-09-21 19:22:35 __init__.py:54 I] Fetching 逃避者...https://yyets.dmesg.app/api/resource?id=37089
>>> yy.result
[<yyets.Resource object at 0x10cc7b130>, <yyets.Resource object at 0x10ca0e880>, <yyets.Resource object at 0x10cc7b040>]
>>> for y in yy.result:
print(y)
逃避可耻却有用 - NIGERUHA HAJIDAGA YAKUNITATSU
无法逃避 - Inescapable
逃避者 - Shirkers
>>> yy.result[0].cnname
'逃避可耻却有用'
>>> yy.result[0].list
[{'season_num': '101', 'season_cn': '单剧', 'items': {'APP': [{'ite
```
# Credits
* [人人影视](http://www.zmz2019.com/)
* [追新番](http://www.fanxinzhui.com/)
* [FIX字幕侠](https://www.zimuxia.cn/)
* [new字幕组](https://newzmz.com/)
# Support me
Find this project helpful? You can show your appreciation in the following ways:
* Thank the subtitle groups
* Star 🌟 and fork 🍴 the repo
* Spread the word, use it, and file issue reports
* Bookmark [my blog](https://dmesg.app/)
* [Telegram Channel](https://t.me/mikuri520)
* Donate: [buy me a coffee?](https://www.buymeacoffee.com/bennythink)
* Donate: [Afdian (爱发电)?](https://afdian.net/@BennyThink)
* Donate: [GitHub Sponsor](https://github.com/sponsors/BennyThink)
* Donate: [Stripe](https://buy.stripe.com/dR67vU4p13Ox73a6oq)
<img src="./assets/CNY.png" width = 30% alt="stripe" />
# Acknowledgements
Thanks to everyone who [supports this project](SPONSOR.md)!
# License
[MIT](LICENSE)
| yyets | /yyets-1.0.1.tar.gz/yyets-1.0.1/README.md | README.md |
<p align="center">
<img width="65%" src="https://user-images.githubusercontent.com/30433053/68877902-c5bd2f00-0741-11ea-8cac-af227a77bb14.png" style="max-width:65%;">
</a>
</p>
# Introduction
yyimg is a high-level image-processing tool, written in Python and using [OpenCV](https://github.com/opencv/opencv) as the backend. This repo helps you with processing images for your deep learning projects.
# Installation
Commands to install from pip or download the source code from our website https://pypi.org/project/yyimg
```bashrc
$ pip3 install yyimg==1.0.0rc
```
# Example Useage
Take one image in Kitti dataset for example:
```python
import cv2
import yyimg
from PIL import Image
image, boxes, classes = yyimg.load_data()
```
|Items|Description|
|---|---|
|image|a numpy array of shape (height, width, #channels)|
|boxes|a numpy array of shape (N, 5), representing N 2D boxes of `[class_index, xmin, ymin, xmax, ymax]`|
|classes|a list of class names|
```python
print(classes)
['Car', 'Truck', 'Van', 'Pedestrian']
```
## visualize 2D boxes
```python
draw_image = yyimg.draw_2Dbox(image, boxes, class_category=classes)
draw_image = cv2.cvtColor(draw_image, cv2.COLOR_BGR2RGB) # BGR -> RGB
Image.fromarray(draw_image).show()
```

## data augmentation
### - horizontal_flip
with 2D bounding boxes:
```python
aug_image, boxes = yyimg.horizontal_flip(image, boxes)
```
without 2D bounding boxes:
```python
aug_image = yyimg.horizontal_flip(image)
```

### - add_rain
```python
aug_image = yyimg.add_rain(image)
```

### - shift_gama
```python
aug_image = yyimg.shift_gamma(image)
```

### - shift_brightness
```python
aug_image = yyimg.shift_brightness(image)
```

### - shift_color
```python
aug_image = yyimg.shift_color(image)
```

| yyimg | /yyimg-1.0.2.tar.gz/yyimg-1.0.2/README.md | README.md |
## Doc for Developers
```bash
# clone
git clone https://github.com/openatx/weditor
pip install -e yyperf
```
The `-e` option links the code in the weditor directory directly into python's `site-packages`.
After making changes, just run `python -m weditor` to debug.
## Basic page layout
```
----------------------------
NAV
----------------------------
Screen | Properties | Tree
----------------------------
```
The following code is written in pug (renamed from jade)
```pug
body
nav
#upper
#left
section#screen
section#footer
#horizon-gap
#console
#vertical-gap1
#middle
.panel
.panel-body
table
input(type="text")
pre.editor-container
.vertical-gap
#right
.panel
.panel-heading
div(class=["input-group", "input-group-sm"])
.input-group-btn
input#jstree-search
span.input-group-btn
.box
#jstree-hierarchy
```
See example: https://codepen.io/codeskyblue/pen/mYdjGb
## Publishing to PyPI
Currently the flow is to create a `git tag`, push it to GitHub, and then publish to PyPI via Travis.
## References
- https://www.jstree.com/
- fontawesome icons: https://fontawesome.com/v4.7.0/icons/
- element-ui 组件:https://element.eleme.cn
- bootstrap v3: https://v3.bootcss.com/
# LocalStorage
store keys:
- windowHierarchy: JSON.stringified data
- screenshotBase64
- code
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/DEVELOP.md | DEVELOP.md |
# yyperf
[](https://pypi.python.org/pypi/weditor)
[](https://github.com/alibaba/web-editor)
[](https://travis-ci.org/alibaba/web-editor)
[中文文档](README.md)
This project is a subproject of the smartphone test framework [openatx](https://github.com/openatx),
making it easy to edit UI scripts in a web browser.
Screenshot

## Installation
Dependencies
- Python3.6+
- [uiautomator2](https://github.com/openatx/uiautomator2)
- [facebook-wda](https://github.com/openatx/facebook-wda)
- [weditor](https://github.com/openatx/weditor)
- [tidevice](https://github.com/alibaba/taobao-iphone-device)
- [py-ios-device](https://github.com/YueChen-C/py-ios-device)
> Only tested in `Google Chrome`; _IE_ does not seem to work well.
```bash
git clone https://github.com/mrx1203
pip3 install -r requirements.txt
python3 setup.py install --user
```
## Usage
Create a desktop shortcut (Windows only)
```
yyperf --shortcut
```
Launch it by double-clicking the shortcut, or run it from the command line:
```
yyperf
```
This command will start a local server on port 17310,
and then open a browser tab for you to edit your code.
Port 17310 commemorates the creation date -- 2017/03/10
To see more usage run `yyperf -h`
## Hotkeys(Both Mac and Win)
- Right click screen: `Dump Hierarchy`
### Hotkeys(only Mac)
- Command+Enter: Run the whole code
- Command+Shift+Enter: Run selected code or current line if not selected
### Hotkeys(only Win)
- Ctrl+Enter: Run the whole code
- Ctrl+Shift+Enter: Run selected code or current line if not selected
## LICENSE
[MIT](LICENSE)
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/README_EN.md | README_EN.md |
# yyperf
编辑器能够提供辅助编写脚本,查看组件信息,调试代码,采集性能数据等功能。
## 安装
依赖项目
- Python3.6+
- [uiautomator2](https://github.com/openatx/uiautomator2)
- [facebook-wda](https://github.com/openatx/facebook-wda)
- [weditor](https://github.com/openatx/weditor)
- [tidevice](https://github.com/alibaba/taobao-iphone-device)
- [py-ios-device](https://github.com/YueChen-C/py-ios-device)
> Only tested in `Google Chrome`; _IE_ does not seem to work well.
```bash
git clone https://github.com/mrx1203
pip3 install -r requirements.txt
python3 setup.py install  # add --user if you get a permission error
```
## Usage
```bash
yyperf  # start the server and open the browser
```
Create a desktop shortcut (Windows only)
```bash
yyperf --shortcut
```
See more options with `yyperf --help`
If the browser does not open automatically, visit <http://localhost:17310> manually
## Usage on Windows
On Windows, if the package is installed into the user-level site-packages, add the python3.x\Scripts directory under your user directory to the PATH environment variable.
To collect iOS data on Windows, iTunes must be installed.
## Running Android test cases
1. In the widget inspector, select Android, enter the device id (see `adb devices`), and click Connect.
2. Record the test case.
3. Click the triangle button on the right toolbar to run the test case.
## Running iOS test cases
1. Install WDA first. If you are not sure how, contact 周云鹏.
2. Get the WDA bundle id: run `tidevice applist` and find the bundle id of WebDriverAgentRunner-Runner in the output.
3. Start WDA: `tidevice wdaproxy -B com.yy.perftest.WebDriverAgentRunner.xctrunner -p 6103`, where com.yy.perftest.WebDriverAgentRunner.xctrunner is the bundle id.
4. In the widget inspector, select iOS, enter http://localhost:6103 and click Connect.
5. Record the test case.
6. Click the triangle button on the right toolbar to run the test case.
## Performance data collection
1. Select the test device (click "refresh device list" to pick up devices plugged in later).
2. Select the app under test (if the app list is empty, try switching the test device).
3. Click "start collecting data". After a short wait (about 10s) the data is shown in real time; the save path can also be found in the log of the yyperf startup window.
## Common shortcuts
**Mac**
- Command+Enter: run all code in the editor
- Command+Shift+Enter: run the selected code, or the current line if nothing is selected
**Windows**
- CTRL+Enter: run all code in the editor
- CTRL+Shift+Enter: run the selected code, or the current line if nothing is selected
## LICENSE
[MIT](LICENSE)
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/README.md | README.md |
# WEditor
Editor Driver for ATX
Port: 17310
To memorize the create day: 2017/03/10
# Installation
```
pip install weditor
```
# Usage
```
python -m weditor
```
# API
### Get Version
This method returns local server version
```
GET /api/v1/version
```
#### Response
Status: 200
```json
{
"name": "0.0.2"
}
```
## File API
### Get contents
This method returns the contents of a file or directory in a repository
```
GET /api/v1/contents/:path
```
#### Response if content is a file
Status: 200 OK
```json
{
"type": "file",
"encoding": "base64",
"size": 5362,
"name": "README_EN.md",
"path": "README_EN.md",
"content": "encoded content ...",
"sha": "3d21ec53a331a6f037a91c368710b99387d012c1"
}
```
#### Response if content is a directory
Status: 200 OK
```json
[
{
"type": "file",
"size": 5362,
"name": "README_EN.md",
"path": "README_EN.md",
"sha": "3d21ec53a331a6f037a91c368710b99387d012c1"
},
{
"type": "dir",
"size": 0,
"name": "foo",
"path": "foo",
}
]
```
### Create a file
This method creates a new file in repository
```
POST /api/v1/contents/:path
```
#### Example Input
```json
{
"content": "bXkgbmV3IGZpbGUgY29udGVudHM="
}
```
#### Response
Status: 201 Created
```json
{
"content": {
"type": "file",
"name": "hello.txt",
"path": "notes/hello.txt",
"sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
"size": 9
}
}
```
## Device API
### Get device list
This method returns the devices connected to the PC
```
GET /api/v1/devices
```
### Get current using device
```
GET /api/v1/user/device
```
### Set current using device
```
PUT /api/v1/user/device
```
#### Example Input
```json
{
"serial": "cff12345"
}
```
### Get device screenshot
```
GET /api/v1/devices/:serial/screenshot
```
#### Response
```json
{
"type": "jpeg",
"data": "bXkgbmV3IGZpbGUgY29udGVudHM"
}
```
#### Response if error
Status: 403
```json
{
"description": "Some reason"
}
```
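A minimal sketch of consuming this endpoint with the `requests` library (the serial `cff12345` is just the example value used above, and the server is assumed to be running locally on the default port 17310):

```python
import base64
import requests

# fetch the screenshot and decode the base64 payload into an image file
resp = requests.get("http://localhost:17310/api/v1/devices/cff12345/screenshot")
payload = resp.json()
with open("screenshot." + payload["type"], "wb") as f:
    f.write(base64.b64decode(payload["data"]))
```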
### Get UIView
Get uiview with json response
```
GET /api/v1/devices/{serial}/uiview
```
#### Response
Status: 200
Every node will always has an `id` field. iOS and Android got the some response structure.
```json
{
"nodes": [{
"id": 0,
"text": "Hello",
"description": "hello",
"other...": "..."
}, {
"id": 1,
"other...": ".."
}]
}
```
## Python Debug WebSocket API
### Run code
This method run and get the live output
```
WebSocket CONNECT /api/v1/build
```
SEND json data
```json
{
"content": "print('hello')"
}
```
RECV json data when running
```json
{
"buffer": "hello"
}
```
RECV json data when finished. __duration unit is ms.__
```json
{
"buffer": "end ...",
"result": {
"duration": 1002,
"exitCode": 1
}
}
```
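A minimal client sketch for this endpoint (assumes the server is running locally on port 17310 and that the third-party `websocket-client` package is installed; error handling is omitted):

```python
import json
import websocket  # pip install websocket-client

# connect to the build endpoint, run a snippet, and stream its output
ws = websocket.create_connection("ws://localhost:17310/api/v1/build")
ws.send(json.dumps({"content": "print('hello')"}))
while True:
    msg = json.loads(ws.recv())
    print(msg.get("buffer", ""), end="")
    if "result" in msg:  # the final message carries exitCode and duration (ms)
        break
ws.close()
```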
# LICENSE
[MIT](LICENSE)
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/API.md | API.md |
define("ace/snippets",["require","exports","module","ace/lib/oop","ace/lib/event_emitter","ace/lib/lang","ace/range","ace/anchor","ace/keyboard/hash_handler","ace/tokenizer","ace/lib/dom","ace/editor"], function(require, exports, module) {
"use strict";
var oop = require("./lib/oop");
var EventEmitter = require("./lib/event_emitter").EventEmitter;
var lang = require("./lib/lang");
var Range = require("./range").Range;
var Anchor = require("./anchor").Anchor;
var HashHandler = require("./keyboard/hash_handler").HashHandler;
var Tokenizer = require("./tokenizer").Tokenizer;
var comparePoints = Range.comparePoints;
var SnippetManager = function() {
this.snippetMap = {};
this.snippetNameMap = {};
};
(function() {
oop.implement(this, EventEmitter);
this.getTokenizer = function() {
function TabstopToken(str, _, stack) {
str = str.substr(1);
if (/^\d+$/.test(str) && !stack.inFormatString)
return [{tabstopId: parseInt(str, 10)}];
return [{text: str}];
}
function escape(ch) {
return "(?:[^\\\\" + ch + "]|\\\\.)";
}
SnippetManager.$tokenizer = new Tokenizer({
start: [
{regex: /:/, onMatch: function(val, state, stack) {
if (stack.length && stack[0].expectIf) {
stack[0].expectIf = false;
stack[0].elseBranch = stack[0];
return [stack[0]];
}
return ":";
}},
{regex: /\\./, onMatch: function(val, state, stack) {
var ch = val[1];
if (ch == "}" && stack.length) {
val = ch;
}else if ("`$\\".indexOf(ch) != -1) {
val = ch;
} else if (stack.inFormatString) {
if (ch == "n")
val = "\n";
else if (ch == "t")
val = "\n";
else if ("ulULE".indexOf(ch) != -1) {
val = {changeCase: ch, local: ch > "a"};
}
}
return [val];
}},
{regex: /}/, onMatch: function(val, state, stack) {
return [stack.length ? stack.shift() : val];
}},
{regex: /\$(?:\d+|\w+)/, onMatch: TabstopToken},
{regex: /\$\{[\dA-Z_a-z]+/, onMatch: function(str, state, stack) {
var t = TabstopToken(str.substr(1), state, stack);
stack.unshift(t[0]);
return t;
}, next: "snippetVar"},
{regex: /\n/, token: "newline", merge: false}
],
snippetVar: [
{regex: "\\|" + escape("\\|") + "*\\|", onMatch: function(val, state, stack) {
stack[0].choices = val.slice(1, -1).split(",");
}, next: "start"},
{regex: "/(" + escape("/") + "+)/(?:(" + escape("/") + "*)/)(\\w*):?",
onMatch: function(val, state, stack) {
var ts = stack[0];
ts.fmtString = val;
val = this.splitRegex.exec(val);
ts.guard = val[1];
ts.fmt = val[2];
ts.flag = val[3];
return "";
}, next: "start"},
{regex: "`" + escape("`") + "*`", onMatch: function(val, state, stack) {
stack[0].code = val.slice(1, -1);
return "";
}, next: "start"},
{regex: "\\?", onMatch: function(val, state, stack) {
if (stack[0])
stack[0].expectIf = true;
}, next: "start"},
{regex: "([^:}\\\\]|\\\\.)*:?", token: "", next: "start"}
],
formatString: [
{regex: "/(" + escape("/") + "+)/", token: "regex"},
{regex: "", onMatch: function(val, state, stack) {
stack.inFormatString = true;
}, next: "start"}
]
});
SnippetManager.prototype.getTokenizer = function() {
return SnippetManager.$tokenizer;
};
return SnippetManager.$tokenizer;
};
this.tokenizeTmSnippet = function(str, startState) {
return this.getTokenizer().getLineTokens(str, startState).tokens.map(function(x) {
return x.value || x;
});
};
this.$getDefaultValue = function(editor, name) {
if (/^[A-Z]\d+$/.test(name)) {
var i = name.substr(1);
return (this.variables[name[0] + "__"] || {})[i];
}
if (/^\d+$/.test(name)) {
return (this.variables.__ || {})[name];
}
name = name.replace(/^TM_/, "");
if (!editor)
return;
var s = editor.session;
switch(name) {
case "CURRENT_WORD":
var r = s.getWordRange();
case "SELECTION":
case "SELECTED_TEXT":
return s.getTextRange(r);
case "CURRENT_LINE":
return s.getLine(editor.getCursorPosition().row);
case "PREV_LINE": // not possible in textmate
return s.getLine(editor.getCursorPosition().row - 1);
case "LINE_INDEX":
return editor.getCursorPosition().column;
case "LINE_NUMBER":
return editor.getCursorPosition().row + 1;
case "SOFT_TABS":
return s.getUseSoftTabs() ? "YES" : "NO";
case "TAB_SIZE":
return s.getTabSize();
case "FILENAME":
case "FILEPATH":
return "";
case "FULLNAME":
return "Ace";
}
};
this.variables = {};
this.getVariableValue = function(editor, varName) {
if (this.variables.hasOwnProperty(varName))
return this.variables[varName](editor, varName) || "";
return this.$getDefaultValue(editor, varName) || "";
};
this.tmStrFormat = function(str, ch, editor) {
var flag = ch.flag || "";
var re = ch.guard;
re = new RegExp(re, flag.replace(/[^gi]/, ""));
var fmtTokens = this.tokenizeTmSnippet(ch.fmt, "formatString");
var _self = this;
var formatted = str.replace(re, function() {
_self.variables.__ = arguments;
var fmtParts = _self.resolveVariables(fmtTokens, editor);
var gChangeCase = "E";
for (var i = 0; i < fmtParts.length; i++) {
var ch = fmtParts[i];
if (typeof ch == "object") {
fmtParts[i] = "";
if (ch.changeCase && ch.local) {
var next = fmtParts[i + 1];
if (next && typeof next == "string") {
if (ch.changeCase == "u")
fmtParts[i] = next[0].toUpperCase();
else
fmtParts[i] = next[0].toLowerCase();
fmtParts[i + 1] = next.substr(1);
}
} else if (ch.changeCase) {
gChangeCase = ch.changeCase;
}
} else if (gChangeCase == "U") {
fmtParts[i] = ch.toUpperCase();
} else if (gChangeCase == "L") {
fmtParts[i] = ch.toLowerCase();
}
}
return fmtParts.join("");
});
this.variables.__ = null;
return formatted;
};
this.resolveVariables = function(snippet, editor) {
var result = [];
for (var i = 0; i < snippet.length; i++) {
var ch = snippet[i];
if (typeof ch == "string") {
result.push(ch);
} else if (typeof ch != "object") {
continue;
} else if (ch.skip) {
gotoNext(ch);
} else if (ch.processed < i) {
continue;
} else if (ch.text) {
var value = this.getVariableValue(editor, ch.text);
if (value && ch.fmtString)
value = this.tmStrFormat(value, ch);
ch.processed = i;
if (ch.expectIf == null) {
if (value) {
result.push(value);
gotoNext(ch);
}
} else {
if (value) {
ch.skip = ch.elseBranch;
} else
gotoNext(ch);
}
} else if (ch.tabstopId != null) {
result.push(ch);
} else if (ch.changeCase != null) {
result.push(ch);
}
}
function gotoNext(ch) {
var i1 = snippet.indexOf(ch, i + 1);
if (i1 != -1)
i = i1;
}
return result;
};
this.insertSnippetForSelection = function(editor, snippetText) {
var cursor = editor.getCursorPosition();
var line = editor.session.getLine(cursor.row);
var tabString = editor.session.getTabString();
var indentString = line.match(/^\s*/)[0];
if (cursor.column < indentString.length)
indentString = indentString.slice(0, cursor.column);
snippetText = snippetText.replace(/\r/g, "");
var tokens = this.tokenizeTmSnippet(snippetText);
tokens = this.resolveVariables(tokens, editor);
tokens = tokens.map(function(x) {
if (x == "\n")
return x + indentString;
if (typeof x == "string")
return x.replace(/\t/g, tabString);
return x;
});
var tabstops = [];
tokens.forEach(function(p, i) {
if (typeof p != "object")
return;
var id = p.tabstopId;
var ts = tabstops[id];
if (!ts) {
ts = tabstops[id] = [];
ts.index = id;
ts.value = "";
}
if (ts.indexOf(p) !== -1)
return;
ts.push(p);
var i1 = tokens.indexOf(p, i + 1);
if (i1 === -1)
return;
var value = tokens.slice(i + 1, i1);
var isNested = value.some(function(t) {return typeof t === "object"});
if (isNested && !ts.value) {
ts.value = value;
} else if (value.length && (!ts.value || typeof ts.value !== "string")) {
ts.value = value.join("");
}
});
tabstops.forEach(function(ts) {ts.length = 0});
var expanding = {};
function copyValue(val) {
var copy = [];
for (var i = 0; i < val.length; i++) {
var p = val[i];
if (typeof p == "object") {
if (expanding[p.tabstopId])
continue;
var j = val.lastIndexOf(p, i - 1);
p = copy[j] || {tabstopId: p.tabstopId};
}
copy[i] = p;
}
return copy;
}
for (var i = 0; i < tokens.length; i++) {
var p = tokens[i];
if (typeof p != "object")
continue;
var id = p.tabstopId;
var i1 = tokens.indexOf(p, i + 1);
if (expanding[id]) {
if (expanding[id] === p)
expanding[id] = null;
continue;
}
var ts = tabstops[id];
var arg = typeof ts.value == "string" ? [ts.value] : copyValue(ts.value);
arg.unshift(i + 1, Math.max(0, i1 - i));
arg.push(p);
expanding[id] = p;
tokens.splice.apply(tokens, arg);
if (ts.indexOf(p) === -1)
ts.push(p);
}
var row = 0, column = 0;
var text = "";
tokens.forEach(function(t) {
if (typeof t === "string") {
var lines = t.split("\n");
if (lines.length > 1){
column = lines[lines.length - 1].length;
row += lines.length - 1;
} else
column += t.length;
text += t;
} else {
if (!t.start)
t.start = {row: row, column: column};
else
t.end = {row: row, column: column};
}
});
var range = editor.getSelectionRange();
var end = editor.session.replace(range, text);
var tabstopManager = new TabstopManager(editor);
var selectionId = editor.inVirtualSelectionMode && editor.selection.index;
tabstopManager.addTabstops(tabstops, range.start, end, selectionId);
};
this.insertSnippet = function(editor, snippetText) {
var self = this;
if (editor.inVirtualSelectionMode)
return self.insertSnippetForSelection(editor, snippetText);
editor.forEachSelection(function() {
self.insertSnippetForSelection(editor, snippetText);
}, null, {keepOrder: true});
if (editor.tabstopManager)
editor.tabstopManager.tabNext();
};
this.$getScope = function(editor) {
var scope = editor.session.$mode.$id || "";
scope = scope.split("/").pop();
if (scope === "html" || scope === "php") {
if (scope === "php" && !editor.session.$mode.inlinePhp)
scope = "html";
var c = editor.getCursorPosition();
var state = editor.session.getState(c.row);
if (typeof state === "object") {
state = state[0];
}
if (state.substring) {
if (state.substring(0, 3) == "js-")
scope = "javascript";
else if (state.substring(0, 4) == "css-")
scope = "css";
else if (state.substring(0, 4) == "php-")
scope = "php";
}
}
return scope;
};
this.getActiveScopes = function(editor) {
var scope = this.$getScope(editor);
var scopes = [scope];
var snippetMap = this.snippetMap;
if (snippetMap[scope] && snippetMap[scope].includeScopes) {
scopes.push.apply(scopes, snippetMap[scope].includeScopes);
}
scopes.push("_");
return scopes;
};
this.expandWithTab = function(editor, options) {
var self = this;
var result = editor.forEachSelection(function() {
return self.expandSnippetForSelection(editor, options);
}, null, {keepOrder: true});
if (result && editor.tabstopManager)
editor.tabstopManager.tabNext();
return result;
};
this.expandSnippetForSelection = function(editor, options) {
var cursor = editor.getCursorPosition();
var line = editor.session.getLine(cursor.row);
var before = line.substring(0, cursor.column);
var after = line.substr(cursor.column);
var snippetMap = this.snippetMap;
var snippet;
this.getActiveScopes(editor).some(function(scope) {
var snippets = snippetMap[scope];
if (snippets)
snippet = this.findMatchingSnippet(snippets, before, after);
return !!snippet;
}, this);
if (!snippet)
return false;
if (options && options.dryRun)
return true;
editor.session.doc.removeInLine(cursor.row,
cursor.column - snippet.replaceBefore.length,
cursor.column + snippet.replaceAfter.length
);
this.variables.M__ = snippet.matchBefore;
this.variables.T__ = snippet.matchAfter;
this.insertSnippetForSelection(editor, snippet.content);
this.variables.M__ = this.variables.T__ = null;
return true;
};
this.findMatchingSnippet = function(snippetList, before, after) {
for (var i = snippetList.length; i--;) {
var s = snippetList[i];
if (s.startRe && !s.startRe.test(before))
continue;
if (s.endRe && !s.endRe.test(after))
continue;
if (!s.startRe && !s.endRe)
continue;
s.matchBefore = s.startRe ? s.startRe.exec(before) : [""];
s.matchAfter = s.endRe ? s.endRe.exec(after) : [""];
s.replaceBefore = s.triggerRe ? s.triggerRe.exec(before)[0] : "";
s.replaceAfter = s.endTriggerRe ? s.endTriggerRe.exec(after)[0] : "";
return s;
}
};
this.snippetMap = {};
this.snippetNameMap = {};
this.register = function(snippets, scope) {
var snippetMap = this.snippetMap;
var snippetNameMap = this.snippetNameMap;
var self = this;
if (!snippets)
snippets = [];
function wrapRegexp(src) {
if (src && !/^\^?\(.*\)\$?$|^\\b$/.test(src))
src = "(?:" + src + ")";
return src || "";
}
function guardedRegexp(re, guard, opening) {
re = wrapRegexp(re);
guard = wrapRegexp(guard);
if (opening) {
re = guard + re;
if (re && re[re.length - 1] != "$")
re = re + "$";
} else {
re = re + guard;
if (re && re[0] != "^")
re = "^" + re;
}
return new RegExp(re);
}
function addSnippet(s) {
if (!s.scope)
s.scope = scope || "_";
scope = s.scope;
if (!snippetMap[scope]) {
snippetMap[scope] = [];
snippetNameMap[scope] = {};
}
var map = snippetNameMap[scope];
if (s.name) {
var old = map[s.name];
if (old)
self.unregister(old);
map[s.name] = s;
}
snippetMap[scope].push(s);
if (s.tabTrigger && !s.trigger) {
if (!s.guard && /^\w/.test(s.tabTrigger))
s.guard = "\\b";
s.trigger = lang.escapeRegExp(s.tabTrigger);
}
if (!s.trigger && !s.guard && !s.endTrigger && !s.endGuard)
return;
s.startRe = guardedRegexp(s.trigger, s.guard, true);
s.triggerRe = new RegExp(s.trigger, "", true);
s.endRe = guardedRegexp(s.endTrigger, s.endGuard, true);
s.endTriggerRe = new RegExp(s.endTrigger, "", true);
}
if (snippets && snippets.content)
addSnippet(snippets);
else if (Array.isArray(snippets))
snippets.forEach(addSnippet);
this._signal("registerSnippets", {scope: scope});
};
this.unregister = function(snippets, scope) {
var snippetMap = this.snippetMap;
var snippetNameMap = this.snippetNameMap;
function removeSnippet(s) {
var nameMap = snippetNameMap[s.scope||scope];
if (nameMap && nameMap[s.name]) {
delete nameMap[s.name];
var map = snippetMap[s.scope||scope];
var i = map && map.indexOf(s);
if (i >= 0)
map.splice(i, 1);
}
}
if (snippets.content)
removeSnippet(snippets);
else if (Array.isArray(snippets))
snippets.forEach(removeSnippet);
};
this.parseSnippetFile = function(str) {
str = str.replace(/\r/g, "");
var list = [], snippet = {};
var re = /^#.*|^({[\s\S]*})\s*$|^(\S+) (.*)$|^((?:\n*\t.*)+)/gm;
var m;
while (m = re.exec(str)) {
if (m[1]) {
try {
snippet = JSON.parse(m[1]);
list.push(snippet);
} catch (e) {}
} if (m[4]) {
snippet.content = m[4].replace(/^\t/gm, "");
list.push(snippet);
snippet = {};
} else {
var key = m[2], val = m[3];
if (key == "regex") {
var guardRe = /\/((?:[^\/\\]|\\.)*)|$/g;
snippet.guard = guardRe.exec(val)[1];
snippet.trigger = guardRe.exec(val)[1];
snippet.endTrigger = guardRe.exec(val)[1];
snippet.endGuard = guardRe.exec(val)[1];
} else if (key == "snippet") {
snippet.tabTrigger = val.match(/^\S*/)[0];
if (!snippet.name)
snippet.name = val;
} else {
snippet[key] = val;
}
}
}
return list;
};
this.getSnippetByName = function(name, editor) {
var snippetMap = this.snippetNameMap;
var snippet;
this.getActiveScopes(editor).some(function(scope) {
var snippets = snippetMap[scope];
if (snippets)
snippet = snippets[name];
return !!snippet;
}, this);
return snippet;
};
}).call(SnippetManager.prototype);
var TabstopManager = function(editor) {
if (editor.tabstopManager)
return editor.tabstopManager;
editor.tabstopManager = this;
this.$onChange = this.onChange.bind(this);
this.$onChangeSelection = lang.delayedCall(this.onChangeSelection.bind(this)).schedule;
this.$onChangeSession = this.onChangeSession.bind(this);
this.$onAfterExec = this.onAfterExec.bind(this);
this.attach(editor);
};
(function() {
this.attach = function(editor) {
this.index = 0;
this.ranges = [];
this.tabstops = [];
this.$openTabstops = null;
this.selectedTabstop = null;
this.editor = editor;
this.editor.on("change", this.$onChange);
this.editor.on("changeSelection", this.$onChangeSelection);
this.editor.on("changeSession", this.$onChangeSession);
this.editor.commands.on("afterExec", this.$onAfterExec);
this.editor.keyBinding.addKeyboardHandler(this.keyboardHandler);
};
this.detach = function() {
this.tabstops.forEach(this.removeTabstopMarkers, this);
this.ranges = null;
this.tabstops = null;
this.selectedTabstop = null;
this.editor.removeListener("change", this.$onChange);
this.editor.removeListener("changeSelection", this.$onChangeSelection);
this.editor.removeListener("changeSession", this.$onChangeSession);
this.editor.commands.removeListener("afterExec", this.$onAfterExec);
this.editor.keyBinding.removeKeyboardHandler(this.keyboardHandler);
this.editor.tabstopManager = null;
this.editor = null;
};
this.onChange = function(delta) {
var changeRange = delta;
var isRemove = delta.action[0] == "r";
var start = delta.start;
var end = delta.end;
var startRow = start.row;
var endRow = end.row;
var lineDif = endRow - startRow;
var colDiff = end.column - start.column;
if (isRemove) {
lineDif = -lineDif;
colDiff = -colDiff;
}
if (!this.$inChange && isRemove) {
var ts = this.selectedTabstop;
var changedOutside = ts && !ts.some(function(r) {
return comparePoints(r.start, start) <= 0 && comparePoints(r.end, end) >= 0;
});
if (changedOutside)
return this.detach();
}
var ranges = this.ranges;
for (var i = 0; i < ranges.length; i++) {
var r = ranges[i];
if (r.end.row < start.row)
continue;
if (isRemove && comparePoints(start, r.start) < 0 && comparePoints(end, r.end) > 0) {
this.removeRange(r);
i--;
continue;
}
if (r.start.row == startRow && r.start.column > start.column)
r.start.column += colDiff;
if (r.end.row == startRow && r.end.column >= start.column)
r.end.column += colDiff;
if (r.start.row >= startRow)
r.start.row += lineDif;
if (r.end.row >= startRow)
r.end.row += lineDif;
if (comparePoints(r.start, r.end) > 0)
this.removeRange(r);
}
if (!ranges.length)
this.detach();
};
this.updateLinkedFields = function() {
var ts = this.selectedTabstop;
if (!ts || !ts.hasLinkedRanges)
return;
this.$inChange = true;
var session = this.editor.session;
var text = session.getTextRange(ts.firstNonLinked);
for (var i = ts.length; i--;) {
var range = ts[i];
if (!range.linked)
continue;
var fmt = exports.snippetManager.tmStrFormat(text, range.original);
session.replace(range, fmt);
}
this.$inChange = false;
};
this.onAfterExec = function(e) {
if (e.command && !e.command.readOnly)
this.updateLinkedFields();
};
this.onChangeSelection = function() {
if (!this.editor)
return;
var lead = this.editor.selection.lead;
var anchor = this.editor.selection.anchor;
var isEmpty = this.editor.selection.isEmpty();
for (var i = this.ranges.length; i--;) {
if (this.ranges[i].linked)
continue;
var containsLead = this.ranges[i].contains(lead.row, lead.column);
var containsAnchor = isEmpty || this.ranges[i].contains(anchor.row, anchor.column);
if (containsLead && containsAnchor)
return;
}
this.detach();
};
this.onChangeSession = function() {
this.detach();
};
this.tabNext = function(dir) {
var max = this.tabstops.length;
var index = this.index + (dir || 1);
index = Math.min(Math.max(index, 1), max);
if (index == max)
index = 0;
this.selectTabstop(index);
if (index === 0)
this.detach();
};
this.selectTabstop = function(index) {
this.$openTabstops = null;
var ts = this.tabstops[this.index];
if (ts)
this.addTabstopMarkers(ts);
this.index = index;
ts = this.tabstops[this.index];
if (!ts || !ts.length)
return;
this.selectedTabstop = ts;
if (!this.editor.inVirtualSelectionMode) {
var sel = this.editor.multiSelect;
sel.toSingleRange(ts.firstNonLinked.clone());
for (var i = ts.length; i--;) {
if (ts.hasLinkedRanges && ts[i].linked)
continue;
sel.addRange(ts[i].clone(), true);
}
if (sel.ranges[0])
sel.addRange(sel.ranges[0].clone());
} else {
this.editor.selection.setRange(ts.firstNonLinked);
}
this.editor.keyBinding.addKeyboardHandler(this.keyboardHandler);
};
this.addTabstops = function(tabstops, start, end) {
if (!this.$openTabstops)
this.$openTabstops = [];
if (!tabstops[0]) {
var p = Range.fromPoints(end, end);
moveRelative(p.start, start);
moveRelative(p.end, start);
tabstops[0] = [p];
tabstops[0].index = 0;
}
var i = this.index;
var arg = [i + 1, 0];
var ranges = this.ranges;
tabstops.forEach(function(ts, index) {
var dest = this.$openTabstops[index] || ts;
for (var i = ts.length; i--;) {
var p = ts[i];
var range = Range.fromPoints(p.start, p.end || p.start);
movePoint(range.start, start);
movePoint(range.end, start);
range.original = p;
range.tabstop = dest;
ranges.push(range);
if (dest != ts)
dest.unshift(range);
else
dest[i] = range;
if (p.fmtString) {
range.linked = true;
dest.hasLinkedRanges = true;
} else if (!dest.firstNonLinked)
dest.firstNonLinked = range;
}
if (!dest.firstNonLinked)
dest.hasLinkedRanges = false;
if (dest === ts) {
arg.push(dest);
this.$openTabstops[index] = dest;
}
this.addTabstopMarkers(dest);
}, this);
if (arg.length > 2) {
if (this.tabstops.length)
arg.push(arg.splice(2, 1)[0]);
this.tabstops.splice.apply(this.tabstops, arg);
}
};
this.addTabstopMarkers = function(ts) {
var session = this.editor.session;
ts.forEach(function(range) {
if (!range.markerId)
range.markerId = session.addMarker(range, "ace_snippet-marker", "text");
});
};
this.removeTabstopMarkers = function(ts) {
var session = this.editor.session;
ts.forEach(function(range) {
session.removeMarker(range.markerId);
range.markerId = null;
});
};
this.removeRange = function(range) {
var i = range.tabstop.indexOf(range);
range.tabstop.splice(i, 1);
i = this.ranges.indexOf(range);
this.ranges.splice(i, 1);
this.editor.session.removeMarker(range.markerId);
if (!range.tabstop.length) {
i = this.tabstops.indexOf(range.tabstop);
if (i != -1)
this.tabstops.splice(i, 1);
if (!this.tabstops.length)
this.detach();
}
};
this.keyboardHandler = new HashHandler();
this.keyboardHandler.bindKeys({
"Tab": function(ed) {
if (exports.snippetManager && exports.snippetManager.expandWithTab(ed)) {
return;
}
ed.tabstopManager.tabNext(1);
},
"Shift-Tab": function(ed) {
ed.tabstopManager.tabNext(-1);
},
"Esc": function(ed) {
ed.tabstopManager.detach();
},
"Return": function(ed) {
return false;
}
});
}).call(TabstopManager.prototype);
var changeTracker = {};
changeTracker.onChange = Anchor.prototype.onChange;
changeTracker.setPosition = function(row, column) {
this.pos.row = row;
this.pos.column = column;
};
changeTracker.update = function(pos, delta, $insertRight) {
this.$insertRight = $insertRight;
this.pos = pos;
this.onChange(delta);
};
var movePoint = function(point, diff) {
if (point.row == 0)
point.column += diff.column;
point.row += diff.row;
};
var moveRelative = function(point, start) {
if (point.row == start.row)
point.column -= start.column;
point.row -= start.row;
};
require("./lib/dom").importCssString("\
.ace_snippet-marker {\
-moz-box-sizing: border-box;\
box-sizing: border-box;\
background: rgba(194, 193, 208, 0.09);\
border: 1px dotted rgba(211, 208, 235, 0.62);\
position: absolute;\
}");
exports.snippetManager = new SnippetManager();
var Editor = require("./editor").Editor;
(function() {
this.insertSnippet = function(content, options) {
return exports.snippetManager.insertSnippet(this, content, options);
};
this.expandSnippet = function(options) {
return exports.snippetManager.expandWithTab(this, options);
};
}).call(Editor.prototype);
});
define("ace/autocomplete/popup",["require","exports","module","ace/virtual_renderer","ace/editor","ace/range","ace/lib/event","ace/lib/lang","ace/lib/dom"], function(require, exports, module) {
"use strict";
var Renderer = require("../virtual_renderer").VirtualRenderer;
var Editor = require("../editor").Editor;
var Range = require("../range").Range;
var event = require("../lib/event");
var lang = require("../lib/lang");
var dom = require("../lib/dom");
var $singleLineEditor = function(el) {
var renderer = new Renderer(el);
renderer.$maxLines = 4;
var editor = new Editor(renderer);
editor.setHighlightActiveLine(false);
editor.setShowPrintMargin(false);
editor.renderer.setShowGutter(false);
editor.renderer.setHighlightGutterLine(false);
editor.$mouseHandler.$focusWaitTimout = 0;
editor.$highlightTagPending = true;
return editor;
};
var AcePopup = function(parentNode) {
var el = dom.createElement("div");
var popup = new $singleLineEditor(el);
if (parentNode)
parentNode.appendChild(el);
el.style.display = "none";
popup.renderer.content.style.cursor = "default";
popup.renderer.setStyle("ace_autocomplete");
popup.setOption("displayIndentGuides", false);
popup.setOption("dragDelay", 150);
var noop = function(){};
popup.focus = noop;
popup.$isFocused = true;
popup.renderer.$cursorLayer.restartTimer = noop;
popup.renderer.$cursorLayer.element.style.opacity = 0;
popup.renderer.$maxLines = 8;
popup.renderer.$keepTextAreaAtCursor = false;
popup.setHighlightActiveLine(false);
popup.session.highlight("");
popup.session.$searchHighlight.clazz = "ace_highlight-marker";
popup.on("mousedown", function(e) {
var pos = e.getDocumentPosition();
popup.selection.moveToPosition(pos);
selectionMarker.start.row = selectionMarker.end.row = pos.row;
e.stop();
});
var lastMouseEvent;
var hoverMarker = new Range(-1,0,-1,Infinity);
var selectionMarker = new Range(-1,0,-1,Infinity);
selectionMarker.id = popup.session.addMarker(selectionMarker, "ace_active-line", "fullLine");
popup.setSelectOnHover = function(val) {
if (!val) {
hoverMarker.id = popup.session.addMarker(hoverMarker, "ace_line-hover", "fullLine");
} else if (hoverMarker.id) {
popup.session.removeMarker(hoverMarker.id);
hoverMarker.id = null;
}
};
popup.setSelectOnHover(false);
popup.on("mousemove", function(e) {
if (!lastMouseEvent) {
lastMouseEvent = e;
return;
}
if (lastMouseEvent.x == e.x && lastMouseEvent.y == e.y) {
return;
}
lastMouseEvent = e;
lastMouseEvent.scrollTop = popup.renderer.scrollTop;
var row = lastMouseEvent.getDocumentPosition().row;
if (hoverMarker.start.row != row) {
if (!hoverMarker.id)
popup.setRow(row);
setHoverMarker(row);
}
});
popup.renderer.on("beforeRender", function() {
if (lastMouseEvent && hoverMarker.start.row != -1) {
lastMouseEvent.$pos = null;
var row = lastMouseEvent.getDocumentPosition().row;
if (!hoverMarker.id)
popup.setRow(row);
setHoverMarker(row, true);
}
});
popup.renderer.on("afterRender", function() {
var row = popup.getRow();
var t = popup.renderer.$textLayer;
var selected = t.element.childNodes[row - t.config.firstRow];
if (selected == t.selectedNode)
return;
if (t.selectedNode)
dom.removeCssClass(t.selectedNode, "ace_selected");
t.selectedNode = selected;
if (selected)
dom.addCssClass(selected, "ace_selected");
});
var hideHoverMarker = function() { setHoverMarker(-1) };
var setHoverMarker = function(row, suppressRedraw) {
if (row !== hoverMarker.start.row) {
hoverMarker.start.row = hoverMarker.end.row = row;
if (!suppressRedraw)
popup.session._emit("changeBackMarker");
popup._emit("changeHoverMarker");
}
};
popup.getHoveredRow = function() {
return hoverMarker.start.row;
};
event.addListener(popup.container, "mouseout", hideHoverMarker);
popup.on("hide", hideHoverMarker);
popup.on("changeSelection", hideHoverMarker);
popup.session.doc.getLength = function() {
return popup.data.length;
};
popup.session.doc.getLine = function(i) {
var data = popup.data[i];
if (typeof data == "string")
return data;
return (data && data.value) || "";
};
var bgTokenizer = popup.session.bgTokenizer;
bgTokenizer.$tokenizeRow = function(row) {
var data = popup.data[row];
var tokens = [];
if (!data)
return tokens;
if (typeof data == "string")
data = {value: data};
if (!data.caption)
data.caption = data.value || data.name;
var last = -1;
var flag, c;
for (var i = 0; i < data.caption.length; i++) {
c = data.caption[i];
flag = data.matchMask & (1 << i) ? 1 : 0;
if (last !== flag) {
tokens.push({type: data.className || "" + ( flag ? "completion-highlight" : ""), value: c});
last = flag;
} else {
tokens[tokens.length - 1].value += c;
}
}
if (data.meta) {
var maxW = popup.renderer.$size.scrollerWidth / popup.renderer.layerConfig.characterWidth;
var metaData = data.meta;
if (metaData.length + data.caption.length > maxW - 2) {
metaData = metaData.substr(0, maxW - data.caption.length - 3) + "\u2026"
}
tokens.push({type: "rightAlignedText", value: metaData});
}
return tokens;
};
bgTokenizer.$updateOnChange = noop;
bgTokenizer.start = noop;
popup.session.$computeWidth = function() {
return this.screenWidth = 0;
};
popup.$blockScrolling = Infinity;
popup.isOpen = false;
popup.isTopdown = false;
popup.data = [];
popup.setData = function(list) {
popup.setValue(lang.stringRepeat("\n", list.length), -1);
popup.data = list || [];
popup.setRow(0);
};
popup.getData = function(row) {
return popup.data[row];
};
popup.getRow = function() {
return selectionMarker.start.row;
};
popup.setRow = function(line) {
line = Math.max(0, Math.min(this.data.length, line));
if (selectionMarker.start.row != line) {
popup.selection.clearSelection();
selectionMarker.start.row = selectionMarker.end.row = line || 0;
popup.session._emit("changeBackMarker");
popup.moveCursorTo(line || 0, 0);
if (popup.isOpen)
popup._signal("select");
}
};
popup.on("changeSelection", function() {
if (popup.isOpen)
popup.setRow(popup.selection.lead.row);
popup.renderer.scrollCursorIntoView();
});
popup.hide = function() {
this.container.style.display = "none";
this._signal("hide");
popup.isOpen = false;
};
popup.show = function(pos, lineHeight, topdownOnly) {
var el = this.container;
var screenHeight = window.innerHeight;
var screenWidth = window.innerWidth;
var renderer = this.renderer;
var maxH = renderer.$maxLines * lineHeight * 1.4;
var top = pos.top + this.$borderSize;
var allowTopdown = top > screenHeight / 2 && !topdownOnly;
if (allowTopdown && top + lineHeight + maxH > screenHeight) {
renderer.$maxPixelHeight = top - 2 * this.$borderSize;
el.style.top = "";
el.style.bottom = screenHeight - top + "px";
popup.isTopdown = false;
} else {
top += lineHeight;
renderer.$maxPixelHeight = screenHeight - top - 0.2 * lineHeight;
el.style.top = top + "px";
el.style.bottom = "";
popup.isTopdown = true;
}
el.style.display = "";
this.renderer.$textLayer.checkForSizeChanges();
var left = pos.left;
if (left + el.offsetWidth > screenWidth)
left = screenWidth - el.offsetWidth;
el.style.left = left + "px";
this._signal("show");
lastMouseEvent = null;
popup.isOpen = true;
};
popup.getTextLeftOffset = function() {
return this.$borderSize + this.renderer.$padding + this.$imageSize;
};
popup.$imageSize = 0;
popup.$borderSize = 1;
return popup;
};
dom.importCssString("\
.ace_editor.ace_autocomplete .ace_marker-layer .ace_active-line {\
background-color: #CAD6FA;\
z-index: 1;\
}\
.ace_editor.ace_autocomplete .ace_line-hover {\
border: 1px solid #abbffe;\
margin-top: -1px;\
background: rgba(233,233,253,0.4);\
}\
.ace_editor.ace_autocomplete .ace_line-hover {\
position: absolute;\
z-index: 2;\
}\
.ace_editor.ace_autocomplete .ace_scroller {\
background: none;\
border: none;\
box-shadow: none;\
}\
.ace_rightAlignedText {\
color: gray;\
display: inline-block;\
position: absolute;\
right: 4px;\
text-align: right;\
z-index: -1;\
}\
.ace_editor.ace_autocomplete .ace_completion-highlight{\
color: #000;\
text-shadow: 0 0 0.01em;\
}\
.ace_editor.ace_autocomplete {\
width: 280px;\
z-index: 200000;\
background: #fbfbfb;\
color: #444;\
border: 1px lightgray solid;\
position: fixed;\
box-shadow: 2px 3px 5px rgba(0,0,0,.2);\
line-height: 1.4;\
}");
exports.AcePopup = AcePopup;
});
define("ace/autocomplete/util",["require","exports","module"], function(require, exports, module) {
"use strict";
exports.parForEach = function(array, fn, callback) {
var completed = 0;
var arLength = array.length;
if (arLength === 0)
callback();
for (var i = 0; i < arLength; i++) {
fn(array[i], function(result, err) {
completed++;
if (completed === arLength)
callback(result, err);
});
}
};
var ID_REGEX = /[a-zA-Z_0-9\$\-\u00A2-\uFFFF]/;
exports.retrievePrecedingIdentifier = function(text, pos, regex) {
regex = regex || ID_REGEX;
var buf = [];
for (var i = pos-1; i >= 0; i--) {
if (regex.test(text[i]))
buf.push(text[i]);
else
break;
}
return buf.reverse().join("");
};
exports.retrieveFollowingIdentifier = function(text, pos, regex) {
regex = regex || ID_REGEX;
var buf = [];
for (var i = pos; i < text.length; i++) {
if (regex.test(text[i]))
buf.push(text[i]);
else
break;
}
return buf;
};
exports.getCompletionPrefix = function (editor) {
var pos = editor.getCursorPosition();
var line = editor.session.getLine(pos.row);
var prefix;
editor.completers.forEach(function(completer) {
if (completer.identifierRegexps) {
completer.identifierRegexps.forEach(function(identifierRegex) {
if (!prefix && identifierRegex)
prefix = this.retrievePrecedingIdentifier(line, pos.column, identifierRegex);
}.bind(this));
}
}.bind(this));
return prefix || this.retrievePrecedingIdentifier(line, pos.column);
};
});
define("ace/autocomplete",["require","exports","module","ace/keyboard/hash_handler","ace/autocomplete/popup","ace/autocomplete/util","ace/lib/event","ace/lib/lang","ace/lib/dom","ace/snippets"], function(require, exports, module) {
"use strict";
var HashHandler = require("./keyboard/hash_handler").HashHandler;
var AcePopup = require("./autocomplete/popup").AcePopup;
var util = require("./autocomplete/util");
var event = require("./lib/event");
var lang = require("./lib/lang");
var dom = require("./lib/dom");
var snippetManager = require("./snippets").snippetManager;
var Autocomplete = function() {
this.autoInsert = false;
this.autoSelect = true;
this.exactMatch = false;
this.gatherCompletionsId = 0;
this.keyboardHandler = new HashHandler();
this.keyboardHandler.bindKeys(this.commands);
this.blurListener = this.blurListener.bind(this);
this.changeListener = this.changeListener.bind(this);
this.mousedownListener = this.mousedownListener.bind(this);
this.mousewheelListener = this.mousewheelListener.bind(this);
this.changeTimer = lang.delayedCall(function() {
this.updateCompletions(true);
}.bind(this));
this.tooltipTimer = lang.delayedCall(this.updateDocTooltip.bind(this), 50);
};
(function() {
this.$init = function() {
this.popup = new AcePopup(document.body || document.documentElement);
this.popup.on("click", function(e) {
this.insertMatch();
e.stop();
}.bind(this));
this.popup.focus = this.editor.focus.bind(this.editor);
this.popup.on("show", this.tooltipTimer.bind(null, null));
this.popup.on("select", this.tooltipTimer.bind(null, null));
this.popup.on("changeHoverMarker", this.tooltipTimer.bind(null, null));
return this.popup;
};
this.getPopup = function() {
return this.popup || this.$init();
};
this.openPopup = function(editor, prefix, keepPopupPosition) {
if (!this.popup)
this.$init();
this.popup.setData(this.completions.filtered);
editor.keyBinding.addKeyboardHandler(this.keyboardHandler);
var renderer = editor.renderer;
this.popup.setRow(this.autoSelect ? 0 : -1);
if (!keepPopupPosition) {
this.popup.setTheme(editor.getTheme());
this.popup.setFontSize(editor.getFontSize());
var lineHeight = renderer.layerConfig.lineHeight;
var pos = renderer.$cursorLayer.getPixelPosition(this.base, true);
pos.left -= this.popup.getTextLeftOffset();
var rect = editor.container.getBoundingClientRect();
pos.top += rect.top - renderer.layerConfig.offset;
pos.left += rect.left - editor.renderer.scrollLeft;
pos.left += renderer.gutterWidth;
this.popup.show(pos, lineHeight);
} else if (keepPopupPosition && !prefix) {
this.detach();
}
};
this.detach = function() {
this.editor.keyBinding.removeKeyboardHandler(this.keyboardHandler);
this.editor.off("changeSelection", this.changeListener);
this.editor.off("blur", this.blurListener);
this.editor.off("mousedown", this.mousedownListener);
this.editor.off("mousewheel", this.mousewheelListener);
this.changeTimer.cancel();
this.hideDocTooltip();
this.gatherCompletionsId += 1;
if (this.popup && this.popup.isOpen)
this.popup.hide();
if (this.base)
this.base.detach();
this.activated = false;
this.completions = this.base = null;
};
this.changeListener = function(e) {
var cursor = this.editor.selection.lead;
if (cursor.row != this.base.row || cursor.column < this.base.column) {
this.detach();
}
if (this.activated)
this.changeTimer.schedule();
else
this.detach();
};
this.blurListener = function(e) {
var el = document.activeElement;
var text = this.editor.textInput.getElement();
var fromTooltip = e.relatedTarget && e.relatedTarget == this.tooltipNode;
var container = this.popup && this.popup.container;
if (el != text && el.parentNode != container && !fromTooltip
&& el != this.tooltipNode && e.relatedTarget != text
) {
this.detach();
}
};
this.mousedownListener = function(e) {
this.detach();
};
this.mousewheelListener = function(e) {
this.detach();
};
this.goTo = function(where) {
var row = this.popup.getRow();
var max = this.popup.session.getLength() - 1;
switch(where) {
case "up": row = row <= 0 ? max : row - 1; break;
case "down": row = row >= max ? -1 : row + 1; break;
case "start": row = 0; break;
case "end": row = max; break;
}
this.popup.setRow(row);
};
this.insertMatch = function(data, options) {
if (!data)
data = this.popup.getData(this.popup.getRow());
if (!data)
return false;
if (data.completer && data.completer.insertMatch) {
data.completer.insertMatch(this.editor, data);
} else {
if (this.completions.filterText) {
var ranges = this.editor.selection.getAllRanges();
for (var i = 0, range; range = ranges[i]; i++) {
range.start.column -= this.completions.filterText.length;
this.editor.session.remove(range);
}
}
if (data.snippet)
snippetManager.insertSnippet(this.editor, data.snippet);
else
this.editor.execCommand("insertstring", data.value || data);
}
this.detach();
};
this.commands = {
"Up": function(editor) { editor.completer.goTo("up"); },
"Down": function(editor) { editor.completer.goTo("down"); },
"Ctrl-Up|Ctrl-Home": function(editor) { editor.completer.goTo("start"); },
"Ctrl-Down|Ctrl-End": function(editor) { editor.completer.goTo("end"); },
"Esc": function(editor) { editor.completer.detach(); },
"Return": function(editor) { return editor.completer.insertMatch(); },
"Shift-Return": function(editor) { editor.completer.insertMatch(null, {deleteSuffix: true}); },
"Tab": function(editor) {
var result = editor.completer.insertMatch();
if (!result && !editor.tabstopManager)
editor.completer.goTo("down");
else
return result;
},
"PageUp": function(editor) { editor.completer.popup.gotoPageUp(); },
"PageDown": function(editor) { editor.completer.popup.gotoPageDown(); }
};
this.gatherCompletions = function(editor, callback) {
var session = editor.getSession();
var pos = editor.getCursorPosition();
var line = session.getLine(pos.row);
var prefix = util.getCompletionPrefix(editor);
this.base = session.doc.createAnchor(pos.row, pos.column - prefix.length);
this.base.$insertRight = true;
var matches = [];
var total = editor.completers.length;
editor.completers.forEach(function(completer, i) {
completer.getCompletions(editor, session, pos, prefix, function(err, results) {
if (!err && results)
matches = matches.concat(results);
var pos = editor.getCursorPosition();
var line = session.getLine(pos.row);
callback(null, {
prefix: prefix,
matches: matches,
finished: (--total === 0)
});
});
});
return true;
};
this.showPopup = function(editor) {
if (this.editor)
this.detach();
this.activated = true;
this.editor = editor;
if (editor.completer != this) {
if (editor.completer)
editor.completer.detach();
editor.completer = this;
}
editor.on("changeSelection", this.changeListener);
editor.on("blur", this.blurListener);
editor.on("mousedown", this.mousedownListener);
editor.on("mousewheel", this.mousewheelListener);
this.updateCompletions();
};
this.updateCompletions = function(keepPopupPosition) {
if (keepPopupPosition && this.base && this.completions) {
var pos = this.editor.getCursorPosition();
var prefix = this.editor.session.getTextRange({start: this.base, end: pos});
if (prefix == this.completions.filterText)
return;
this.completions.setFilter(prefix);
if (!this.completions.filtered.length)
return this.detach();
if (this.completions.filtered.length == 1
&& this.completions.filtered[0].value == prefix
&& !this.completions.filtered[0].snippet)
return this.detach();
this.openPopup(this.editor, prefix, keepPopupPosition);
return;
}
var _id = this.gatherCompletionsId;
this.gatherCompletions(this.editor, function(err, results) {
var detachIfFinished = function() {
if (!results.finished) return;
return this.detach();
}.bind(this);
var prefix = results.prefix;
var matches = results && results.matches;
if (!matches || !matches.length)
return detachIfFinished();
if (prefix.indexOf(results.prefix) !== 0 || _id != this.gatherCompletionsId)
return;
this.completions = new FilteredList(matches);
if (this.exactMatch)
this.completions.exactMatch = true;
this.completions.setFilter(prefix);
var filtered = this.completions.filtered;
if (!filtered.length)
return detachIfFinished();
if (filtered.length == 1 && filtered[0].value == prefix && !filtered[0].snippet)
return detachIfFinished();
if (this.autoInsert && filtered.length == 1 && results.finished)
return this.insertMatch(filtered[0]);
this.openPopup(this.editor, prefix, keepPopupPosition);
}.bind(this));
};
this.cancelContextMenu = function() {
this.editor.$mouseHandler.cancelContextMenu();
};
this.updateDocTooltip = function() {
var popup = this.popup;
var all = popup.data;
var selected = all && (all[popup.getHoveredRow()] || all[popup.getRow()]);
var doc = null;
if (!selected || !this.editor || !this.popup.isOpen)
return this.hideDocTooltip();
this.editor.completers.some(function(completer) {
if (completer.getDocTooltip)
doc = completer.getDocTooltip(selected);
return doc;
});
if (!doc)
doc = selected;
if (typeof doc == "string")
doc = {docText: doc};
if (!doc || !(doc.docHTML || doc.docText))
return this.hideDocTooltip();
this.showDocTooltip(doc);
};
this.showDocTooltip = function(item) {
if (!this.tooltipNode) {
this.tooltipNode = dom.createElement("div");
this.tooltipNode.className = "ace_tooltip ace_doc-tooltip";
this.tooltipNode.style.margin = 0;
this.tooltipNode.style.pointerEvents = "auto";
this.tooltipNode.tabIndex = -1;
this.tooltipNode.onblur = this.blurListener.bind(this);
}
var tooltipNode = this.tooltipNode;
if (item.docHTML) {
tooltipNode.innerHTML = item.docHTML;
} else if (item.docText) {
tooltipNode.textContent = item.docText;
}
if (!tooltipNode.parentNode)
document.body.appendChild(tooltipNode);
var popup = this.popup;
var rect = popup.container.getBoundingClientRect();
tooltipNode.style.top = popup.container.style.top;
tooltipNode.style.bottom = popup.container.style.bottom;
if (window.innerWidth - rect.right < 320) {
tooltipNode.style.right = window.innerWidth - rect.left + "px";
tooltipNode.style.left = "";
} else {
tooltipNode.style.left = (rect.right + 1) + "px";
tooltipNode.style.right = "";
}
tooltipNode.style.display = "block";
};
this.hideDocTooltip = function() {
this.tooltipTimer.cancel();
if (!this.tooltipNode) return;
var el = this.tooltipNode;
if (!this.editor.isFocused() && document.activeElement == el)
this.editor.focus();
this.tooltipNode = null;
if (el.parentNode)
el.parentNode.removeChild(el);
};
}).call(Autocomplete.prototype);
Autocomplete.startCommand = {
name: "startAutocomplete",
exec: function(editor) {
if (!editor.completer)
editor.completer = new Autocomplete();
editor.completer.autoInsert = false;
editor.completer.autoSelect = true;
editor.completer.showPopup(editor);
editor.completer.cancelContextMenu();
},
bindKey: "Ctrl-Space|Ctrl-Shift-Space|Alt-Space"
};
var FilteredList = function(array, filterText) {
this.all = array;
this.filtered = array;
this.filterText = filterText || "";
this.exactMatch = false;
};
(function(){
this.setFilter = function(str) {
if (str.length > this.filterText && str.lastIndexOf(this.filterText, 0) === 0)
var matches = this.filtered;
else
var matches = this.all;
this.filterText = str;
matches = this.filterCompletions(matches, this.filterText);
matches = matches.sort(function(a, b) {
return b.exactMatch - a.exactMatch || b.score - a.score;
});
var prev = null;
matches = matches.filter(function(item){
var caption = item.snippet || item.caption || item.value;
if (caption === prev) return false;
prev = caption;
return true;
});
this.filtered = matches;
};
this.filterCompletions = function(items, needle) {
var results = [];
var upper = needle.toUpperCase();
var lower = needle.toLowerCase();
loop: for (var i = 0, item; item = items[i]; i++) {
var caption = item.value || item.caption || item.snippet;
if (!caption) continue;
var lastIndex = -1;
var matchMask = 0;
var penalty = 0;
var index, distance;
if (this.exactMatch) {
if (needle !== caption.substr(0, needle.length))
continue loop;
}else{
for (var j = 0; j < needle.length; j++) {
var i1 = caption.indexOf(lower[j], lastIndex + 1);
var i2 = caption.indexOf(upper[j], lastIndex + 1);
index = (i1 >= 0) ? ((i2 < 0 || i1 < i2) ? i1 : i2) : i2;
if (index < 0)
continue loop;
distance = index - lastIndex - 1;
if (distance > 0) {
if (lastIndex === -1)
penalty += 10;
penalty += distance;
}
matchMask = matchMask | (1 << index);
lastIndex = index;
}
}
item.matchMask = matchMask;
item.exactMatch = penalty ? 0 : 1;
item.score = (item.score || 0) - penalty;
results.push(item);
}
return results;
};
}).call(FilteredList.prototype);
exports.Autocomplete = Autocomplete;
exports.FilteredList = FilteredList;
});
define("ace/autocomplete/text_completer",["require","exports","module","ace/range"], function(require, exports, module) {
var Range = require("../range").Range;
var splitRegex = /[^a-zA-Z_0-9\$\-\u00C0-\u1FFF\u2C00-\uD7FF\w]+/;
function getWordIndex(doc, pos) {
var textBefore = doc.getTextRange(Range.fromPoints({row: 0, column:0}, pos));
return textBefore.split(splitRegex).length - 1;
}
function wordDistance(doc, pos) {
var prefixPos = getWordIndex(doc, pos);
var words = doc.getValue().split(splitRegex);
var wordScores = Object.create(null);
var currentWord = words[prefixPos];
words.forEach(function(word, idx) {
if (!word || word === currentWord) return;
var distance = Math.abs(prefixPos - idx);
var score = words.length - distance;
if (wordScores[word]) {
wordScores[word] = Math.max(score, wordScores[word]);
} else {
wordScores[word] = score;
}
});
return wordScores;
}
exports.getCompletions = function(editor, session, pos, prefix, callback) {
var wordScore = wordDistance(session, pos, prefix);
var wordList = Object.keys(wordScore);
callback(null, wordList.map(function(word) {
return {
caption: word,
value: word,
score: wordScore[word],
meta: "local"
};
}));
};
});
define("ace/ext/language_tools",["require","exports","module","ace/snippets","ace/autocomplete","ace/config","ace/lib/lang","ace/autocomplete/util","ace/autocomplete/text_completer","ace/editor","ace/config"], function(require, exports, module) {
"use strict";
var snippetManager = require("../snippets").snippetManager;
var Autocomplete = require("../autocomplete").Autocomplete;
var config = require("../config");
var lang = require("../lib/lang");
var util = require("../autocomplete/util");
var textCompleter = require("../autocomplete/text_completer");
var keyWordCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
if (session.$mode.completer) {
return session.$mode.completer.getCompletions(editor, session, pos, prefix, callback);
}
var state = editor.session.getState(pos.row);
var completions = session.$mode.getCompletions(state, session, pos, prefix);
callback(null, completions);
}
};
var snippetCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var snippetMap = snippetManager.snippetMap;
var completions = [];
snippetManager.getActiveScopes(editor).forEach(function(scope) {
var snippets = snippetMap[scope] || [];
for (var i = snippets.length; i--;) {
var s = snippets[i];
var caption = s.name || s.tabTrigger;
if (!caption)
continue;
completions.push({
caption: caption,
snippet: s.content,
meta: s.tabTrigger && !s.name ? s.tabTrigger + "\u21E5 " : "snippet",
type: "snippet"
});
}
}, this);
callback(null, completions);
},
getDocTooltip: function(item) {
if (item.type == "snippet" && !item.docHTML) {
item.docHTML = [
"<b>", lang.escapeHTML(item.caption), "</b>", "<hr></hr>",
lang.escapeHTML(item.snippet)
].join("");
}
}
};
var completers = [snippetCompleter, textCompleter, keyWordCompleter];
exports.setCompleters = function(val) {
completers.length = 0;
if (val) completers.push.apply(completers, val);
};
exports.addCompleter = function(completer) {
completers.push(completer);
};
exports.textCompleter = textCompleter;
exports.keyWordCompleter = keyWordCompleter;
exports.snippetCompleter = snippetCompleter;
var expandSnippet = {
name: "expandSnippet",
exec: function(editor) {
return snippetManager.expandWithTab(editor);
},
bindKey: "Tab"
};
var onChangeMode = function(e, editor) {
loadSnippetsForMode(editor.session.$mode);
};
var loadSnippetsForMode = function(mode) {
var id = mode.$id;
if (!snippetManager.files)
snippetManager.files = {};
loadSnippetFile(id);
if (mode.modes)
mode.modes.forEach(loadSnippetsForMode);
};
var loadSnippetFile = function(id) {
if (!id || snippetManager.files[id])
return;
var snippetFilePath = id.replace("mode", "snippets");
snippetManager.files[id] = {};
config.loadModule(snippetFilePath, function(m) {
if (m) {
snippetManager.files[id] = m;
if (!m.snippets && m.snippetText)
m.snippets = snippetManager.parseSnippetFile(m.snippetText);
snippetManager.register(m.snippets || [], m.scope);
if (m.includeScopes) {
snippetManager.snippetMap[m.scope].includeScopes = m.includeScopes;
m.includeScopes.forEach(function(x) {
loadSnippetFile("ace/mode/" + x);
});
}
}
});
};
var doLiveAutocomplete = function(e) {
var editor = e.editor;
var hasCompleter = editor.completer && editor.completer.activated;
if (e.command.name === "backspace") {
if (hasCompleter && !util.getCompletionPrefix(editor))
editor.completer.detach();
}
else if (e.command.name === "insertstring") {
var prefix = util.getCompletionPrefix(editor);
if (prefix && !hasCompleter) {
if (!editor.completer) {
editor.completer = new Autocomplete();
}
editor.completer.autoInsert = false;
editor.completer.showPopup(editor);
}
}
};
var Editor = require("../editor").Editor;
require("../config").defineOptions(Editor.prototype, "editor", {
enableBasicAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
this.commands.addCommand(Autocomplete.startCommand);
} else {
this.commands.removeCommand(Autocomplete.startCommand);
}
},
value: false
},
enableLiveAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
this.commands.on('afterExec', doLiveAutocomplete);
} else {
this.commands.removeListener('afterExec', doLiveAutocomplete);
}
},
value: false
},
enableSnippets: {
set: function(val) {
if (val) {
this.commands.addCommand(expandSnippet);
this.on("changeMode", onChangeMode);
onChangeMode(null, this);
} else {
this.commands.removeCommand(expandSnippet);
this.off("changeMode", onChangeMode);
}
},
value: false
}
});
});
(function() {
window.require(["ace/ext/language_tools"], function() {});
})();
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/samples/ace/ext-language_tools.js | ext-language_tools.js |
define("ace/theme/monokai",["require","exports","module","ace/lib/dom"], function(require, exports, module) {
exports.isDark = true;
exports.cssClass = "ace-monokai";
exports.cssText = ".ace-monokai .ace_gutter {\
background: #2F3129;\
color: #8F908A\
}\
.ace-monokai .ace_print-margin {\
width: 1px;\
background: #555651\
}\
.ace-monokai {\
background-color: #272822;\
color: #F8F8F2\
}\
.ace-monokai .ace_cursor {\
color: #F8F8F0\
}\
.ace-monokai .ace_marker-layer .ace_selection {\
background: #49483E\
}\
.ace-monokai.ace_multiselect .ace_selection.ace_start {\
box-shadow: 0 0 3px 0px #272822;\
}\
.ace-monokai .ace_marker-layer .ace_step {\
background: rgb(102, 82, 0)\
}\
.ace-monokai .ace_marker-layer .ace_bracket {\
margin: -1px 0 0 -1px;\
border: 1px solid #49483E\
}\
.ace-monokai .ace_marker-layer .ace_active-line {\
background: #202020\
}\
.ace-monokai .ace_gutter-active-line {\
background-color: #272727\
}\
.ace-monokai .ace_marker-layer .ace_selected-word {\
border: 1px solid #49483E\
}\
.ace-monokai .ace_invisible {\
color: #52524d\
}\
.ace-monokai .ace_entity.ace_name.ace_tag,\
.ace-monokai .ace_keyword,\
.ace-monokai .ace_meta.ace_tag,\
.ace-monokai .ace_storage {\
color: #F92672\
}\
.ace-monokai .ace_punctuation,\
.ace-monokai .ace_punctuation.ace_tag {\
color: #fff\
}\
.ace-monokai .ace_constant.ace_character,\
.ace-monokai .ace_constant.ace_language,\
.ace-monokai .ace_constant.ace_numeric,\
.ace-monokai .ace_constant.ace_other {\
color: #AE81FF\
}\
.ace-monokai .ace_invalid {\
color: #F8F8F0;\
background-color: #F92672\
}\
.ace-monokai .ace_invalid.ace_deprecated {\
color: #F8F8F0;\
background-color: #AE81FF\
}\
.ace-monokai .ace_support.ace_constant,\
.ace-monokai .ace_support.ace_function {\
color: #66D9EF\
}\
.ace-monokai .ace_fold {\
background-color: #A6E22E;\
border-color: #F8F8F2\
}\
.ace-monokai .ace_storage.ace_type,\
.ace-monokai .ace_support.ace_class,\
.ace-monokai .ace_support.ace_type {\
font-style: italic;\
color: #66D9EF\
}\
.ace-monokai .ace_entity.ace_name.ace_function,\
.ace-monokai .ace_entity.ace_other,\
.ace-monokai .ace_entity.ace_other.ace_attribute-name,\
.ace-monokai .ace_variable {\
color: #A6E22E\
}\
.ace-monokai .ace_variable.ace_parameter {\
font-style: italic;\
color: #FD971F\
}\
.ace-monokai .ace_string {\
color: #E6DB74\
}\
.ace-monokai .ace_comment {\
color: #75715E\
}\
.ace-monokai .ace_indent-guide {\
background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAEklEQVQImWPQ0FD0ZXBzd/wPAAjVAoxeSgNeAAAAAElFTkSuQmCC) right repeat-y\
}";
var dom = require("../lib/dom");
dom.importCssString(exports.cssText, exports.cssClass);
});
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/samples/ace/theme-monokai.js | theme-monokai.js |
define("ace/mode/python_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var PythonHighlightRules = function() {
var keywords = (
"and|as|assert|break|class|continue|def|del|elif|else|except|exec|" +
"finally|for|from|global|if|import|in|is|lambda|not|or|pass|print|" +
"raise|return|try|while|with|yield"
);
var builtinConstants = (
"True|False|None|NotImplemented|Ellipsis|__debug__"
);
var builtinFunctions = (
"abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" +
"eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" +
"binfile|iter|property|tuple|bool|filter|len|range|type|bytearray|" +
"float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" +
"chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" +
"cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" +
"__import__|complex|hash|min|set|apply|delattr|help|next|setattr|" +
"buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern"
);
var keywordMapper = this.createKeywordMapper({
"invalid.deprecated": "debugger",
"support.function": builtinFunctions,
"constant.language": builtinConstants,
"keyword": keywords
}, "identifier");
var strPre = "(?:r|u|ur|R|U|UR|Ur|uR)?";
var decimalInteger = "(?:(?:[1-9]\\d*)|(?:0))";
var octInteger = "(?:0[oO]?[0-7]+)";
var hexInteger = "(?:0[xX][\\dA-Fa-f]+)";
var binInteger = "(?:0[bB][01]+)";
var integer = "(?:" + decimalInteger + "|" + octInteger + "|" + hexInteger + "|" + binInteger + ")";
var exponent = "(?:[eE][+-]?\\d+)";
var fraction = "(?:\\.\\d+)";
var intPart = "(?:\\d+)";
var pointFloat = "(?:(?:" + intPart + "?" + fraction + ")|(?:" + intPart + "\\.))";
var exponentFloat = "(?:(?:" + pointFloat + "|" + intPart + ")" + exponent + ")";
var floatNumber = "(?:" + exponentFloat + "|" + pointFloat + ")";
var stringEscape = "\\\\(x[0-9A-Fa-f]{2}|[0-7]{3}|[\\\\abfnrtv'\"]|U[0-9A-Fa-f]{8}|u[0-9A-Fa-f]{4})";
this.$rules = {
"start" : [ {
token : "comment",
regex : "#.*$"
}, {
token : "string", // multi line """ string start
regex : strPre + '"{3}',
next : "qqstring3"
}, {
token : "string", // " string
regex : strPre + '"(?=.)',
next : "qqstring"
}, {
token : "string", // multi line ''' string start
regex : strPre + "'{3}",
next : "qstring3"
}, {
token : "string", // ' string
regex : strPre + "'(?=.)",
next : "qstring"
}, {
token : "constant.numeric", // imaginary
regex : "(?:" + floatNumber + "|\\d+)[jJ]\\b"
}, {
token : "constant.numeric", // float
regex : floatNumber
}, {
token : "constant.numeric", // long integer
regex : integer + "[lL]\\b"
}, {
token : "constant.numeric", // integer
regex : integer + "\\b"
}, {
token : keywordMapper,
regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
}, {
token : "keyword.operator",
regex : "\\+|\\-|\\*|\\*\\*|\\/|\\/\\/|%|<<|>>|&|\\||\\^|~|<|>|<=|=>|==|!=|<>|="
}, {
token : "paren.lparen",
regex : "[\\[\\(\\{]"
}, {
token : "paren.rparen",
regex : "[\\]\\)\\}]"
}, {
token : "text",
regex : "\\s+"
} ],
"qqstring3" : [ {
token : "constant.language.escape",
regex : stringEscape
}, {
token : "string", // multi line """ string end
regex : '"{3}',
next : "start"
}, {
defaultToken : "string"
} ],
"qstring3" : [ {
token : "constant.language.escape",
regex : stringEscape
}, {
token : "string", // multi line ''' string end
regex : "'{3}",
next : "start"
}, {
defaultToken : "string"
} ],
"qqstring" : [{
token : "constant.language.escape",
regex : stringEscape
}, {
token : "string",
regex : "\\\\$",
next : "qqstring"
}, {
token : "string",
regex : '"|$',
next : "start"
}, {
defaultToken: "string"
}],
"qstring" : [{
token : "constant.language.escape",
regex : stringEscape
}, {
token : "string",
regex : "\\\\$",
next : "qstring"
}, {
token : "string",
regex : "'|$",
next : "start"
}, {
defaultToken: "string"
}]
};
};
oop.inherits(PythonHighlightRules, TextHighlightRules);
exports.PythonHighlightRules = PythonHighlightRules;
});
define("ace/mode/folding/pythonic",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(markers) {
this.foldingStartMarker = new RegExp("([\\[{])(?:\\s*)$|(" + markers + ")(?:\\s*)(?:#.*)?$");
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
this.getFoldWidgetRange = function(session, foldStyle, row) {
var line = session.getLine(row);
var match = line.match(this.foldingStartMarker);
if (match) {
if (match[1])
return this.openingBracketBlock(session, match[1], row, match.index);
if (match[2])
return this.indentationBlock(session, row, match.index + match[2].length);
return this.indentationBlock(session, row);
}
}
}).call(FoldMode.prototype);
});
define("ace/mode/python",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/python_highlight_rules","ace/mode/folding/pythonic","ace/range"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var PythonHighlightRules = require("./python_highlight_rules").PythonHighlightRules;
var PythonFoldMode = require("./folding/pythonic").FoldMode;
var Range = require("../range").Range;
var Mode = function() {
this.HighlightRules = PythonHighlightRules;
this.foldingRules = new PythonFoldMode("\\:");
};
oop.inherits(Mode, TextMode);
(function() {
this.lineCommentStart = "#";
this.getNextLineIndent = function(state, line, tab) {
var indent = this.$getIndent(line);
var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
var tokens = tokenizedLine.tokens;
if (tokens.length && tokens[tokens.length-1].type == "comment") {
return indent;
}
if (state == "start") {
var match = line.match(/^.*[\{\(\[\:]\s*$/);
if (match) {
indent += tab;
}
}
return indent;
};
var outdents = {
"pass": 1,
"return": 1,
"raise": 1,
"break": 1,
"continue": 1
};
this.checkOutdent = function(state, line, input) {
if (input !== "\r\n" && input !== "\r" && input !== "\n")
return false;
var tokens = this.getTokenizer().getLineTokens(line.trim(), state).tokens;
if (!tokens)
return false;
do {
var last = tokens.pop();
} while (last && (last.type == "comment" || (last.type == "text" && last.value.match(/^\s+$/))));
if (!last)
return false;
return (last.type == "keyword" && outdents[last.value]);
};
this.autoOutdent = function(state, doc, row) {
row += 1;
var indent = this.$getIndent(doc.getLine(row));
var tab = doc.getTabString();
if (indent.slice(-tab.length) == tab)
doc.remove(new Range(row, indent.length-tab.length, row, indent.length));
};
this.$id = "ace/mode/python";
}).call(Mode.prototype);
exports.Mode = Mode;
});
| yyperf | /yyperf-0.6.9.tar.gz/yyperf-0.6.9/samples/ace/mode-python.js | mode-python.js |
# yypget
For my beautiful baby Teacher Yang.
```console
@#@@@#######&&#######@@@@@@@@@###@@@@&%!%@@$
@@&&########@############@@@@@@####@@&%!$@@%
@###@##############@####@&##@@@#@##@@$||$@@%
#######################@@@&&@@@@@@@@@@$|&@@$
&@@@#@@###@@##############@&$&&&&&&@&@@$&#@$
%$#####@@######@#########@@@&$$%|%$&@&&@@@@$
###@@################@@#@@&&$$$|!|&&@#@&@@#$
@@@@@@@############@#@&&&$&@@@&%;:!&#@@&@##$
&@$%@####&&####@@@@&&&&&&&&$|!:'''!@@@&&@@@$
@@@@########@||&&@&%%%||%$$@$%!'``!@@&$$&@@%
&@@##@######@!:;!%%||;'`:!||;`...`!@@&%%$&&%
@&&&&@@#####@!:'''::;'. .''``.. `%@&$%$!';!
#@&@#########$:::':!;'..```````. !#@&%$&&&&%
##@&@####$&###$;::!;:'. .':'```.;@@&%$@@@@&%
###############$;;!||;''`..```.:@#&$|$##@@@$
@###############@|!%$%|||;'`.`|##$%|%@#@#@@%
@@@@@@@@@#########@%|%||!;;:%##@$$%|$&@@@@@$
&&&&&&&&&@##########@|!;:;|&@#@%|$@@@&%||&#$
&&&&&@@@@@##########@$|!!!|%@@@$!|@#@!''`. :
&&&&&&&&@@########@$|!;!!!||$@###@$|!;!!:``:
$@&&&&&&&@########@%;;;;::':''````;!;|!;''`:
!@&&&&&$&@#######@%|!;::::'::''`':::;!;::'`:
'$&$$&&&@@######@%!;::::'''''''':'':!::;:'':
.|&$$$$$&@#####&%!':':::'''''';:`'':;;::::';
:&&$$$$&@####&%!:''':'''''''::'''';;;;;;::;
.|@&$%$&&##@&%!!!:::::'''';:''`:'':;;;;;;;;
:$&&$%&&@@$%%|:;|::;;;::``':``'':::!!!|!!!
`|&&$%%%&@$%|;:'``':'':``''``''`':;!!||||!
:$&&$%$&&@%!!:'``'::``'''`````':;;;!;:;;;
```
## Installation
### Prerequisites
The following dependencies are required and must be installed separately.
* **[Python 3](https://www.python.org/downloads/)**
### Install via pip
```console
$ pip3 install yypget
```
## Upgrading
```console
$ pip3 install --upgrade yypget
```
## Supported Sites
### Video
| Site | URL | Video |
| :------: | :------ | :------: |
| 好看视频 | <https://sv.baidu.com/> | * |
| 搜狐视频 | <https://tv.sohu.com/> | * |
### Document
| Site | URL | DOC | TXT | PPT | PDF |
| :------: | :------ | :------: | :------: | :------: | :------: |
| 百度文库 | <https://wenku.baidu.com/> | * | * | | |
| yypget | /yypget-0.0.4.tar.gz/yypget-0.0.4/README.md | README.md |
# YYSUT
### plist
#### chain call
I implemented chained calls on lists in the `plist` class.
Currently, I have implemented the `map`, `filter`, `reduce`, `any`, and `all` methods.
You can use the `log()` method to print intermediate results.
```python
from yysut import plist
ans=plist(range(10)).log(lambda x:print(f"origin : {x}"))\
.filter(lambda x: x % 2 == 0).log(lambda x:print(f"filter ans : {x}"))\
.map(lambda x: x * 2).log(lambda x:print(f"map ans : {x}"))\
.reduce(lambda x, y: x + y)
print(ans)
"""
origin : 0 ,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9
filter ans : 0 ,2 ,4 ,6 ,8
map ans : 0 ,4 ,8 ,12 ,16
40
"""
```
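The `any` and `all` methods listed above are not shown in the snippet. Below is a minimal sketch of how they would fit the same chaining style; it assumes they accept a predicate the way `filter` does, which is only an assumption — check the package source for the exact signature.
```python
from yysut import plist

# Assumed signature: any/all take a predicate, like filter does (not verified).
has_even = plist(range(10)).any(lambda x: x % 2 == 0)   # True if any element matches
all_small = plist(range(10)).all(lambda x: x < 100)     # True if every element matches
print(has_even, all_small)
```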
#### indexes
```python
from yysut import plist
ans=plist(range(100))
# get item
print(ans[2,5,7]) # [2, 5, 7]
print(ans[2:5]) # [2, 3, 4]
# set item
ans[2,5,7]=0
print(ans[:10]) # [0, 1, 0, 3, 4, 0, 6, 0, 8, 9]
```
#### groupby
```python
from yysut import plist
# 1. return dict type
ans=plist(range(10)).groupby(lambda x:x%2)
print(ans) # {0: [0, 2, 4, 6, 8], 1: [1, 3, 5, 7, 9]}
# 2. return list type
ans=plist(range(10)).groupby(lambda x:x%2,return_type="list")
print(ans) # [[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]
```
#### sort
The `sort` method works the same way as Python 3's built-in `sorted`: it returns a new list.
```python
from yysut import plist
ans=plist(range(10)).sort(lambda x:x%2)
print(ans) # [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]
```
#### parallel
This method is similar to `map`, but it can run the work across multiple processes.
Attention: the `parallel` method cannot be used with lambda functions.
```python
from yysut import plist
import time
def compute_func(x):
time.sleep(1)
return x**2
# parallel
t=time.time()
ans=plist(range(5)).parallel(
    # n is the number of processes; -1 means use all available processes
compute_func,n=-1
).log(lambda x:print(x,"time:",time.time()-t))
# [0, 1, 4, 9, 16] time: 2.0206313133239746
# normal
t=time.time()
ans=plist(range(5)).map(
compute_func
).log(lambda x:print(x,"time:",time.time()-t))
# [0, 1, 4, 9, 16] time: 5.004805564880371
```
| yysut | /yysut-0.0.2.tar.gz/yysut-0.0.2/README.md | README.md |
# yyutils: provides some frequently used Python tools
<br>
All tools are be provided as decorators.
## Installation
Not provide using `pip` install yet.
## Usage
### Counter
A decorator to count the number of times the function is called.
```
from yyutils import Counter
@Counter
def foo(*args,**kwargs):
pass
```
### Timer
A decorator to calculate function execution time.
```
from yyutils import Timer
@Timer
def foo(*args,**kwargs):
pass
```
### Retry_timer
A decorator that retries a function when its execution fails; you can configure how many times to retry and the interval between retries.
```
from yyutils import Retry_timer
@Retry_timer()
def foo(*arg,**kwargs):
pass
```
with parameters:
```
@Retry_timer(interval=1, retry_times=10)
def foo(*arg,**kwargs):
pass
```
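As a rough illustration, a flaky function decorated this way might look like the sketch below. It assumes only what the description above states: on failure the call is retried up to `retry_times` times, waiting `interval` seconds between attempts.
```
from yyutils import Retry_timer
import random

@Retry_timer(interval=1, retry_times=3)
def flaky_call():
    # Fails roughly half the time; the decorator retries up to 3 times,
    # waiting 1 second between attempts.
    if random.random() < 0.5:
        raise RuntimeError("transient failure")
    return "ok"

flaky_call()
```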
### Schedule
A decorator to schedule the function execution time.
```
from yyutils import Schedule
@Schedule()
def foo(*arg,**kwargs):
pass
```
with parameters:
```
@Schedule(interval=10)
def foo(*arg,**kwargs):
pass
```
### Error_Log
A decorator that logs exceptions without stopping the program.
```
from yyutils import Error_Log
@Error_Log
def foo(*args,**kwargs):
pass
```
### TypePrints
A decorator that prints `func.__doc__` as if it were being typed (typewriter-style).
```
from yyutils import TypePrints
@TypePrints
def foo(*args,**kwargs):
pass
```
| yyutils | /yyutils-0.0.1.tar.gz/yyutils-0.0.1/README.md | README.md |
import datetime
import json
from pymysql.cursors import Cursor, DictCursor
from yyxx_game_pkg.conf import settings
from yyxx_game_pkg.dbops.mysql_op import MysqlOperation
from yyxx_game_pkg.helpers.mysql_helper import get_dbpool
from yyxx_game_pkg.helpers.redis_helper import get_redis
class OPHelper:
# --------------- mysql start ---------------
@classmethod
def connection(cls, mysql_alias="default", dict_cursor=True):
db_settings = {}
for k, v in settings.DATABASES[mysql_alias].items():
if k == "PORT" and isinstance(v, str) and v.isdigit(): # PORT 必须为数字
v = int(v)
db_settings[k.lower()] = v
if k == "NAME":
db_settings["db"] = db_settings.pop("name")
db_settings["cursor"] = DictCursor if dict_cursor else Cursor
return get_dbpool(db_settings).get_connection()
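    # Illustration of the mapping performed above (the DATABASES entry shown is hypothetical):
    #   {"HOST": "127.0.0.1", "PORT": "3306", "USER": "root", "PASSWORD": "pwd", "NAME": "game_db"}
    # becomes
    #   {"host": "127.0.0.1", "port": 3306, "user": "root", "password": "pwd",
    #    "db": "game_db", "cursor": DictCursor}
    # i.e. keys are lower-cased, a numeric string PORT is cast to int,
    # NAME is renamed to db, and the cursor class is appended.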
@classmethod
def mp(cls):
return MysqlOperation()
@classmethod
def sql_func_get_one(cls):
return cls.mp().get_one
@classmethod
def sql_func_get_all(cls):
return cls.mp().get_all
# --------------- mysql end ---------------
# --------------- redis start ---------------
@classmethod
def redis(cls, redis_alias="default"):
return get_redis(settings.REDIS_SERVER[redis_alias])
# --------------- redis end ---------------
# --------------- redis cache start ---------------
@classmethod
def cache(
cls,
sql="",
sql_func=None,
redis_key="",
ex=None,
redis_alias="default",
mysql_alias="default",
):
"""
        :param sql: SQL statement to run
        :param sql_func: SQL helper to use: execute, get_one, get_all or insert
        :param redis_key: cache key
        :param ex: cache expiration time in seconds; None means the key never expires
        :param redis_alias: name of the Redis config entry to look up
        :param mysql_alias: name of the MySQL config entry to look up
"""
_redis = cls.redis(redis_alias)
data = _redis.get_data(redis_key)
if not data:
data = sql_func(sql, cls.connection(mysql_alias))
if data:
_redis.set_data(redis_key, json.dumps(str(data)), ex)
if isinstance(data, bytes):
data = eval(json.loads(data))
return data
@classmethod
def cache_sql_one(
cls,
sql,
redis_key,
ex=None,
redis_alias="default",
mysql_alias="default",
):
sql_func = cls.mp().get_one
return cls.cache(sql, sql_func, redis_key, ex, redis_alias, mysql_alias)
@classmethod
def cache_sql_all(
cls,
sql,
redis_key,
ex=None,
redis_alias="default",
mysql_alias="default",
):
sql_func = cls.mp().get_all
return cls.cache(sql, sql_func, redis_key, ex, redis_alias, mysql_alias)
# --------------- redis cache end ---------------
redis = OPHelper.redis()
mp = OPHelper.mp() | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/helpers/op_helper.py | op_helper.py |
import redis
from yyxx_game_pkg.logger.log import root_log
from yyxx_game_pkg.utils.decorator import singleton_unique_obj_args
class RedisConfig:
"""
redis config
"""
HOST = None
PORT = None
DB = None
PASSWORD = None
OVERDUE_SECOND = 86400
def __str__(self):
return "host:{}, port:{}, db:{}, OVERDUE_SECOND:{}".format(
self.HOST, self.PORT, self.DB, self.OVERDUE_SECOND
)
@singleton_unique_obj_args
class RedisHelper:
def __init__(self, config: RedisConfig):
connection_pool = redis.ConnectionPool(
host=config.HOST, port=config.PORT, db=config.DB, password=config.PASSWORD
)
self.__r = redis.Redis(connection_pool=connection_pool)
root_log(f"<RedisHelper> init, info:{config}")
@property
def redis_cli(self):
return self.__r
def get_data(self, key):
return self.__r.get(key)
def set_data(self, key, value, ex=None, _px=None):
return self.__r.set(key, value, ex, _px)
def list_keys(self, pattern="*"):
return self.__r.keys(pattern)
def delete(self, key):
return self.__r.delete(key)
def hset(self, name, key, value):
return self.__r.hset(name, key, value)
def hget(self, name, key):
return self.__r.hget(name, key)
def hdel(self, name, *keys):
return self.__r.hdel(name, *keys)
def hgetall(self, name):
return self.__r.hgetall(name)
def hlen(self, name):
return self.__r.hlen(name)
def incr(self, name, amount=1):
return self.__r.incr(name, amount)
def expire(self, key, ex):
"""
        Set the expiry time of a key
:param key:
:param ex:
:return:
"""
return self.__r.expire(key, ex)
def lpush(self, key, *val):
"""
        Push elements onto the list stored at key; each new element is added to the left (head) of the list
:param key:
:param val:
:return:
"""
return self.__r.lpush(key, *val)
def rpush(self, key, *val):
"""
        Same as lpush, but each new element is appended to the right (tail) of the list
:param key:
:param val:
:return:
"""
return self.__r.rpush(key, *val)
def lrange(self, key, start=0, end=-1):
"""
        Get a slice of the list
:param key:
:param start:
:param end:
:return:
"""
return self.__r.lrange(key, start, end)
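# Usage sketch (connection values are illustrative; config keys follow get_redis below):
#   redis_cli = get_redis({"host": "127.0.0.1", "port": 6379, "db": 0, "password": ""})
#   redis_cli.set_data("demo:key", "value", ex=60)
#   print(redis_cli.get_data("demo:key"))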
def get_redis(config: dict) -> RedisHelper:
"""
    Build a RedisHelper instance from a config dict
:return:
"""
class Config(RedisConfig):
"""
redis config
"""
HOST = config["host"]
PORT = config["port"]
DB = config["db"]
PASSWORD = config["password"]
OVERDUE_SECOND = config.get("overdue_second", 86400)
return RedisHelper(Config()) | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/helpers/redis_helper.py | redis_helper.py |
import pymysql
from dbutils.pooled_db import PooledDB
from pymysql.cursors import Cursor
from yyxx_game_pkg.logger.log import root_log
from yyxx_game_pkg.utils.decorator import (
except_monitor,
log_execute_time_monitor,
singleton_unique_obj_args,
)
# ####################################################
class MysqlConfig:
HOST = None
PORT = None
USER = None
PASSWD = None
DB = None
USE_UNICODE = None
CHARSET = None
MAX_CACHED = None
MAX_CONNECTIONS = None
CURSOR = None
def __str__(self):
        # Must not return non-serializable data here, otherwise the singleton (keyed on this string) breaks
return "host:{},port:{},db:{},use_unicode:{},charset:{},max_cache:{},max_connections:{}".format(
self.HOST,
self.PORT,
self.DB,
self.USE_UNICODE,
self.CHARSET,
self.MAX_CACHED,
self.MAX_CONNECTIONS
)
@singleton_unique_obj_args
class MysqlDbPool(object):
def __init__(self, config: MysqlConfig):
self.DB_POOL = PooledDB(
creator=pymysql,
maxcached=config.MAX_CACHED,
maxconnections=config.MAX_CONNECTIONS,
host=config.HOST,
port=config.PORT,
user=config.USER,
passwd=config.PASSWD,
db=config.DB,
use_unicode=config.USE_UNICODE,
charset=config.CHARSET,
cursorclass=config.CURSOR,
)
root_log(f"<MysqlDbPool> init, info:{config}")
@except_monitor
@log_execute_time_monitor()
def get_connection(self):
return self.DB_POOL.connection()
def close_connection(self):
"""
        Close the connection pool. A pool holds at least 1 connection, so 100 processes serving
        1000 game servers on the same DB host could produce up to ~100k connections; the pool must
        be closed to release them all.
        Possible optimization: servers on the same host could share one pool (shelved for now,
        since game-DB SQL does not specify a database name and the change would be invasive).
:return:
"""
self.DB_POOL.close()
# #################### module public interface ####################
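# Usage sketch (connection values are illustrative):
#   pool = get_dbpool({"host": "127.0.0.1", "port": 3306, "user": "root",
#                      "password": "secret", "db": "demo"})
#   conn = pool.get_connection()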
def get_dbpool(config: dict) -> MysqlDbPool:
class Config(MysqlConfig):
HOST = config["host"]
PORT = config["port"]
USER = config["user"]
PASSWD = config["password"]
DB = config["db"]
USE_UNICODE = config.get("use_unicode", True)
CHARSET = config.get("charset", "utf8")
MAX_CACHED = config.get("maxcached", 0)
MAX_CONNECTIONS = config.get("maxconnections", 0)
CURSOR = config.get("cursor", Cursor)
return MysqlDbPool(Config()) | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/helpers/mysql_helper.py | mysql_helper.py |
from abc import ABC, abstractmethod
from dataclasses import dataclass
from yyxx_game_pkg.center_api.sdk.map_core import MapCore
from yyxx_game_pkg.conf import settings
@dataclass
class Params:
"""
@param extra: 拓参
@param cp_order_id: 厂商订单ID, 由厂商生成
@param channel_order_id: 渠道方订单ID
@param player_id: 角色ID
@param is_check_username: 是否验证帐号与玩家ID
@param channel_username: 渠道帐号
@param is_test: 是否测试订单
"""
extra: str = "extra"
cp_order_id: str = "billno"
channel_order_id: str = "order_id"
player_id: str = "role_id"
channel_username: str = "openid"
money: str = "amount"
is_check_username: int = 1
is_test: int = 0
class BaseRecharge(MapCore, ABC):
"""
    Note:
        modify_params is used to change the values in self.params
        Subclasses need to implement get_params_helper and feedback
        get_params_helper supplements the parameters built by get_params
        feedback builds the response returned to the caller
"""
params = Params()
def modify_params(self):
"""
        Modify attributes of self.params
"""
pass
def get_params(self, data) -> dict:
self.modify_params()
extra = data.get(self.params.extra, "")
if not extra:
return {}
ext_ary = extra.split(",")
data_ary = {"extra": extra}
self.get_params_core(data, data_ary, ext_ary)
self.get_params_helper(data, data_ary)
return data_ary
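    # Illustrative `extra` layout (an assumption based on how it is used below and in make_sign_helper):
    # a comma-separated string where index 0 is plat_code, index 1 is game_channel_id and,
    # when present, index 5 is the recharge_id.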
def get_params_core(self, data, data_ary, ext_ary):
data_ary["cp_order_id"] = data.get(self.params.cp_order_id, "")
data_ary["channel_order_id"] = data.get(self.params.channel_order_id, "")
data_ary["player_id"] = data.get(self.params.player_id)
data_ary["is_check_username"] = self.params.is_check_username
data_ary["channel_username"] = data.get(self.params.channel_username, "")
if len(ext_ary) > 6:
data_ary["recharge_id"] = int(ext_ary[5])
def get_params_helper(self, data, data_ary) -> None:
"""
        Supplement data_ary with extra parameters.
        This complements the data_ary built in get_params; parameters that cannot be derived
        in a generic way inside get_params_core are handled here.
        --------------------------------
        money            recharge amount
        real_money       actual amount paid
        extra_gold       bonus gold (channel rebate)
        extra_gold_bind  bonus bound gold (channel rebate)
        pay_dt           recharge time (seconds)
        --------------------------------
"""
amount = int(data.get(self.params.money, 0))
data_ary["real_money"] = int(amount / 100)
data_ary["money"] = amount / 100
def make_sign_helper(self, values) -> (dict, str):
ext_ary = values[self.params.extra].split(",")
plat_code = ext_ary[0]
game_channel_id = ext_ary[1]
sdk_data = self.operator.get_key(plat_code, game_channel_id)
pay_key = sdk_data.get("pay_key", "")
return values, pay_key
def make_sign(self, values) -> str:
values, pay_key = self.make_sign_helper(values)
return self.channel_make_sign(values, pay_key)
@abstractmethod
def feedback(self, error_code, data: dict = None, msg="", *args, **kwargs):
"""
        Return the response data required by the channel
"""
return error_code | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/center_api/sdk/recharge.py | recharge.py |
import time
from abc import ABC, abstractmethod
from typing import Callable, NewType
from urllib.parse import unquote
from yyxx_game_pkg.center_api.sdk.map_core import MapCore
from yyxx_game_pkg.utils.error_code import ErrorCode
from yyxx_game_pkg.utils.xhttp import http_request
from yyxx_game_pkg.utils.xstring import parse_json
SDK_HELPER = NewType("SDK_HELPER", Callable[..., None])
RESPONSE_HELPER = NewType("RESPONSE_HELPER", Callable[..., None])
class BaseCheckToken(MapCore, ABC):
"""
    Note: subclasses must implement response_helper.
    @func response_helper: process the response data
    @func sdk_check_token: token verification method
    @func sdk_helper: SDK parameter handling
    @func channel_make_sign: defaults to md5 over sorted(params)
    Fill in the following attributes according to the channel's requirements:
    @param is_https: whether the request uses https; default True
    @param method: request method, POST or GET; default POST
    @param params: field-name mapping, key = field sent, value = field received
"""
is_https = True # True False
method = "POST"
# params = {}
sdk_exclude = ()
def run_check_token(self, *args, **kwargs) -> dict:
"""
run check token
"""
sdk_helper, response_helper = self.sdk_version_choice(**kwargs)
if sdk_helper is None:
return self.sdk_rechfeed(ErrorCode.ERROR_INVALID_PARAM)
channel_data, post_data = sdk_helper(self.sdk_exclude, **kwargs)
response = self.sdk_check_token(channel_data, post_data)
return response_helper(response, **kwargs)
@abstractmethod
def response_helper(self, response: dict | None, **kwargs) -> dict:
"""
        Return the response data required by the caller
:return: {"ret": 1, "user_id": "any_user_id"}
"""
return self.sdk_rechfeed(ErrorCode.ERROR_INVALID_PARAM, "验证失败")
@property
def _params(self):
"""
params = {
"appId": "sdk_appId",
"accountId": "sdk_accountId",
"token": "sdk_token",
}
"""
if self.params is None:
raise ValueError("params must be specified as a dict")
return self.params
def sdk_helper(self, sdk_exclude=(), **kwargs) -> (dict, dict):
"""
        Build the SDK request data
        :param sdk_exclude: keys to exclude while sdk_helper builds the data;
            possible values: time (self.Time), sign (self.Flag)
"""
channel_data = kwargs.get("channel_data", {})
post_data = {}
for k, v in self._params.items():
post_data[k] = kwargs.get(v)
if self.Time not in sdk_exclude:
post_data[self.Time] = int(time.time())
if self.Flag not in sdk_exclude:
post_data[self.Flag] = self.channel_make_sign(
post_data, channel_data.get("app_key", "")
)
return channel_data, post_data
def sdk_check_token(self, channel_data, post_data) -> dict | None:
"""
        Override this method when the default handling does not fit the channel.
        By default the token verification result is fetched via an HTTP request.
"""
url = channel_data.get("api_url", "")
if not url:
return None
result = http_request(
url=url,
data=post_data,
is_https=self.is_https,
method=self.method,
)
return parse_json(unquote(result))
@property
def sdk_version_map(self) -> dict:
"""
sdk version map
        If multiple SDK versions exist, add a mapping entry for each version
"""
return {
"1.0.0": {
"sdk_helper": self.sdk_helper,
"response_helper": self.response_helper,
},
}
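    # Subclass sketch (illustrative, not a real channel implementation):
    #   class MyChannelCheckToken(BaseCheckToken):
    #       params = {"appId": "sdk_appId", "accountId": "sdk_accountId", "token": "sdk_token"}
    #
    #       def response_helper(self, response, **kwargs):
    #           if response and response.get("code") == 0:
    #               return {"ret": 1, "user_id": response.get("accountId")}
    #           return self.sdk_rechfeed(ErrorCode.ERROR_INVALID_PARAM, "token check failed")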
def sdk_version_choice(self, **kwargs) -> (SDK_HELPER, RESPONSE_HELPER):
"""
        Select the sdk_helper and response_helper that match the given SDK version
"""
sdk_version = kwargs.get("sdk_version", "1.0.0")
version_map = self.sdk_version_map.get(sdk_version, None)
if version_map is None:
return None, None
sdk_helper = version_map["sdk_helper"]
response_helper = version_map["response_helper"]
return sdk_helper, response_helper | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/center_api/sdk/check_token.py | check_token.py |
import json
import time
from abc import abstractmethod
from flask import request
from yyxx_game_pkg.center_api.model.Operator import Operator
from yyxx_game_pkg.center_api.model.OperatorServer import OperatorServer
from yyxx_game_pkg.conf import settings
from yyxx_game_pkg.crypto.basic import RANDOM_STRING_CHARS_LOWER, get_random_string, md5
from yyxx_game_pkg.crypto.make_sign import make_sign
from yyxx_game_pkg.helpers.op_helper import OPHelper
class MapCore(OPHelper):
Flag = "sign"
Time = "time"
Gmip = None
Imei = None
Callback = None
OutTime = 0
make_sign_exclude = {"gmip", "cp_platform", "ch_conter", "opts"}
API_KEY = settings.API_KEY
params = None
_plat_code = None
_operator = None
_game_channel_id = None
    # large-amount recharge limit
max_money_limit = 5000
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def init_ip_imei(self, values):
self.Gmip = values.get("gmip", "")
self.Imei = values.get("imei", "")
def get_params(self, data):
return data
def get_params_helper(self, data, data_ary) -> None:
pass
def check_sign(self, values):
sign = values.get(self.Flag, None)
if sign is None:
return False
_sign = self.make_sign(values)
if sign != _sign:
return False
return True
def make_sign(self, values) -> str:
return make_sign(
values, self.api_key, exclude=self.make_sign_exclude, time_key=self.Time
)
def channel_make_sign(self, values, sign_key) -> str:
return make_sign(
values, sign_key, exclude=self.make_sign_exclude, time_key=None
)
def check_time_out(self, values):
_time = int(values.get(self.Time, 0))
t = time.time()
if self.OutTime != 0 and int(t) - _time > self.OutTime:
return False
return True
def check_public(self, values) -> bool:
return True
def sdk_rechfeed(self, error_code, msg="") -> dict:
if not msg:
msg = str(error_code.get("msg", ""))
code = int(error_code.get("code", 0))
return {"ret": code, "msg": msg}
def feedback(
self, error_code, msg_data: dict | list = None, msg="", *args, **kwargs
):
        if isinstance(error_code, dict):
if not msg:
msg = str(error_code.get("msg", ""))
code = int(error_code.get("code", 0))
else:
code = error_code
result = {
f"{get_random_string(5, RANDOM_STRING_CHARS_LOWER)}_myzd_a": str(
int(time.time())
),
f"{get_random_string(5, RANDOM_STRING_CHARS_LOWER)}_myzd_b": str(
int(time.time())
),
"server_time": int(time.time()),
}
if msg_data or msg_data == 0:
receive_data = request.values
receive_path = request.path
receive_oid = receive_data.get("oid", "")
receive_gcid = receive_data.get("gcid", "")
receive_action = ""
if not receive_gcid:
receive_gcid = receive_data.get("game_channel_id", "")
receive_path_list = receive_path.split("/")
if receive_oid and receive_gcid:
if len(receive_path_list) > 2:
receive_action = receive_path_list[2]
else:
receive_action = receive_path_list[1]
oid_data = OperatorServer.get_oid_data(receive_oid, receive_gcid)
if oid_data.get("is_close_check", None):
result["close_check"] = "yesyes"
data_str = json.dumps(msg_data)
data_str = "\\/".join(data_str.split("/"))
data_sign = md5(f"{data_str}{receive_action}{self.API_KEY}")
result["code"] = code
result["msg"] = msg
result["data"] = msg_data
result["data_sign"] = data_sign
result = "\\\n".join(json.dumps(result, ensure_ascii=False).split("\n"))
else:
result = json.dumps({"code": code, "msg": msg}, ensure_ascii=False)
if self.Callback:
result = "{}({})".format(self.Callback, result)
return result
def is_open_ip(self, gmip=""):
pass
@property
def operator(self):
return Operator
@property
def api_key(self):
print(self.API_KEY)
if self.API_KEY is None:
raise ValueError("API_KEY must be specified")
return self.API_KEY
class MapCoreMinix:
def get_params(self, data):
data_ary = {
"cp_platform": data.get("cp_platform", ""),
"page_size": 10000,
"page": 1,
}
self.get_params_helper(data, data_ary)
return data_ary
def make_sign(self, values):
sdk_data = self.operator.get_key(self._plat_code, self._game_channel_id)
pay_key = sdk_data.get("pay_key", "")
return self.channel_make_sign(values, pay_key)
@abstractmethod
def get_params_helper(self, data, data_ary) -> None:
"""
        Supplement the data, e.g.:
for k, v in self.params.items():
if v:
data_ary[k] = data.get(v, "")
"""
@abstractmethod
def feedback_helper(self, data_list, error_code, ex=None):
"""
if data_list:
code = 1
message = "success"
else:
code = 2
message = error_code.get("msg", "")
return {"code": code, "message": message, "data": data_list}
""" | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/center_api/sdk/map_core.py | map_core.py |
import json
import time
from yyxx_game_pkg.helpers.op_helper import OPHelper
from yyxx_game_pkg.utils.xstring import parse_json
class TableFieldConf(OPHelper):
@classmethod
def get_field_config_by_table(cls, table_name):
result = {}
cache_key = f"sys_table_field_config_{table_name}"
sql = """
SELECT
*
FROM
sys_table_field_config
WHERE
table_name='{}'
""".format(
table_name
)
        data = cls.cache(sql, cls.sql_func_get_all(), cache_key)  # the loop below expects a list of rows
if data:
for value in data:
result[value["field_name"]] = value
return result
@classmethod
def filter_table_config(cls, table_name, field_name, filter_data):
"""
        Filter filter_data: if a field config exists for the table, only configured keys are kept
        and missing keys are filled with type-appropriate defaults
:param table_name:
:param field_name:
:param filter_data:
:return:
"""
if not table_name:
return filter_data
cache_data = cls.get_field_config_by_table(table_name)
if not cache_data:
return filter_data
if isinstance(cache_data, dict):
field_data = cache_data.get(field_name, None)
if field_data is None:
return filter_data
field_config = field_data.get("field_config", "{}")
res = parse_json(field_config)
if not res:
return {}
result = {}
df_time = int(time.time())
df_json = json.dumps({})
for key, val in res.items():
fdv = filter_data.get(key, None)
if fdv is None:
val_d = val.get("default", "")
val_t = val.get("type", "")
if val_t == "int":
val_d = int(val_d)
elif val_t == "json" or val_t == "jsons":
val_d = df_json
elif val_t == "time":
val_d = df_time
elif val_t == "times":
val_d = [df_time, df_time]
elif val_t == "switch":
val_d = 0
else:
val_d = 0
fdv = val_d
result[key] = fdv
return result
else:
return filter_data | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/center_api/model/TableFieldConf.py | TableFieldConf.py |
import json
from redis import AuthenticationError
from yyxx_game_pkg.helpers.op_helper import OPHelper, mp, redis
from yyxx_game_pkg.utils.xstring import parse_json
class Operator(OPHelper):
"""
    Note: the MySQL connection and Redis handle must be configured first
"""
@classmethod
def get_key(cls, operator, game_channel_id):
try:
cache_key = "api_operator_channel_%s_%s_key" % (
operator,
game_channel_id,
)
package = {}
subpackage = {}
sdk_data = redis.get_data(cache_key)
if not sdk_data:
sdk_data = {}
sql = """
SELECT
t1.alias as operator, t2.game_channel_id, t2.group_id, t2.iw_id, t2.sdk_config, t3.alias as iw_alias
FROM
svr_operator t1, svr_channel t2 left join svr_inter_working_group t3 on t2.iw_id = t3.id
WHERE
((t1.alias = '%s' AND t2.game_channel_id = '%s') OR (t1.alias = '%s' AND t2.game_channel_id='0'))
AND t1.oid = t2.oid
ORDER BY t2.id
DESC
""" % (
operator,
game_channel_id,
operator,
)
data = mp.get_all(sql, cls.connection())
if data:
for item in data:
if (
item["game_channel_id"] == "0"
or item["game_channel_id"] == 0
):
                            # base (parent) package config
package = item
else:
                            # sub-package config
subpackage = item
if subpackage.get("sdk_config", "") or package.get(
"sdk_config", ""
):
sdk_data["operator"] = (
subpackage["operator"]
if subpackage.get("operator", "")
else package.get("operator", "")
)
sdk_data["game_channel_id"] = (
subpackage["game_channel_id"]
if subpackage.get("game_channel_id", "")
else package.get("game_channel_id", "")
)
sdk_data["group_id"] = (
subpackage["group_id"]
if subpackage.get("group_id", "")
else package.get("group_id", "")
)
sdk_data["iw_id"] = (
subpackage["iw_id"]
if subpackage.get("iw_id", "")
else package.get("iw_id", "")
)
sdk_data["iw_alias"] = (
subpackage["iw_alias"]
if subpackage.get("iw_alias", "")
else package.get("iw_alias", "")
)
try:
if subpackage.get("sdk_config", ""):
sdk_subpackage = json.loads(
subpackage.get("sdk_config", "{}")
)
sdk_package = json.loads(
package.get("sdk_config", "{}")
)
for index, ist in sdk_subpackage.items():
if sdk_subpackage.get(index, ""):
sdk_package[index] = sdk_subpackage.get(
index, ""
)
subpackage["sdk_config"] = json.dumps(sdk_package)
except (TypeError, json.decoder.JSONDecodeError):
subpackage["sdk_config"] = {}
sdk_config = (
subpackage["sdk_config"]
if subpackage.get("sdk_config", "")
else package.get("sdk_config", "")
)
sdk_config = parse_json(sdk_config) if sdk_config else {}
sdk_data.update(sdk_config)
redis.set_data(cache_key, json.dumps(sdk_data))
else:
sdk_data = {}
else:
sdk_data = {}
else:
sdk_data = parse_json(sdk_data)
return sdk_data
except AuthenticationError:
print("redis error")
return {}
except Exception as e:
print(e, type(e))
return {} | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/center_api/model/Operator.py | Operator.py |
import json
from yyxx_game_pkg.helpers.op_helper import OPHelper
from yyxx_game_pkg.utils.xstring import parse_json
class RechargeConfig(OPHelper):
@classmethod
def get_mapping_config(cls, oid="", gcid=""):
try:
sql = """
SELECT
t1.id,
IFNULL(t4.json, '{}') json
FROM
svr_channel t1
LEFT JOIN svr_channel_group t2 ON t1.group_id = t2.id
LEFT JOIN svr_operator t3 ON t1.oid = t3.oid
LEFT JOIN api_recharge_mapping t4 ON t1.id = t4.channel_auto_id
WHERE
t3.alias ='%s'
AND t1.game_channel_id = '%s'
ORDER BY
t1.id DESC
""" % (
oid,
gcid,
)
result = cls.mp().get_one(sql, cls.connection())
if result and result.get("json", ""):
return parse_json(result["json"])
return {}
        except Exception:
return False
@classmethod
def get_recharge_config(cls):
try:
sql = "SELECT * FROM api_recharge_config"
res = cls.mp().get_all(sql, cls.connection())
result = {}
if res:
for v in res:
vid = v["id"]
result[str(vid)] = v
return result
        except Exception:
return {}
@classmethod
def get_check_recharge_config(cls, param_server_id):
try:
sql = (
f"SELECT * FROM api_check_recharge_config where sid = {param_server_id}"
)
res = cls.mp().get_all(sql, cls.connection())
result = {}
if res:
for v in res:
vid = v["recharge_id"]
result[str(vid)] = v
return result
        except Exception:
return False
@classmethod
def recharge_config(cls):
redis_key = "api_recharge_platform"
recharge_config = cls.redis().get_data(redis_key)
if not recharge_config:
recharge_config = cls.get_recharge_config()
if recharge_config:
cls.redis().set_data(redis_key, json.dumps(recharge_config))
if not isinstance(recharge_config, dict):
recharge_config = json.loads(recharge_config)
return recharge_config | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/center_api/model/RechargeConfig.py | RechargeConfig.py |
class LogConfig:
"""
log config class
    Subclass this to adjust the configuration for different projects
"""
DEBUG_LOGGER_NAME = "py_debug"
LOCAL_LOGGER_NAME = "py_local"
LOCAL_LOG_FILE = "/tmp/local.log"
DEBUG_LOG_FILE = "/tmp/debug.log"
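    # Subclass sketch (paths are illustrative):
    #   class MyLogConfig(LogConfig):
    #       LOCAL_LOG_FILE = "/data/logs/my_service/local.log"
    #       DEBUG_LOG_FILE = "/data/logs/my_service/debug.log"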
@classmethod
def dict_config(cls):
"""
LOG_CONFIG DICT
"""
log_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"def_fmt": {
"datefmt": "%Y-%m-%d %H:%M:%S",
"class": "yyxx_game_pkg.logger.formatters.TraceFormatter",
"format": (
"[%(asctime)s,%(msecs)d: %(levelname)s/%(process)d][%(filename)s:%(funcName)s:%(lineno)d]"
"[%(trace_id)s] %(message)s"
),
},
},
"handlers": {
"rotate_file_handler": {
"level": "INFO",
"formatter": "def_fmt",
"class": "yyxx_game_pkg.logger.handlers.MultiProcessTimedRotatingFileHandler",
"filename": cls.LOCAL_LOG_FILE,
"when": "MIDNIGHT",
"backupCount": 7,
},
"debug_file_handler": {
"level": "DEBUG",
"formatter": "def_fmt",
"class": "logging.FileHandler",
"filename": cls.DEBUG_LOG_FILE,
},
"console_handler": {
"level": "INFO",
"formatter": "def_fmt",
"class": "logging.StreamHandler",
},
},
"loggers": {
"": { # root logger
"handlers": ["rotate_file_handler", "console_handler"],
"level": "WARNING",
"propagate": False,
},
cls.LOCAL_LOGGER_NAME: {
"handlers": ["rotate_file_handler", "console_handler"],
"level": "INFO",
"propagate": False,
},
cls.DEBUG_LOGGER_NAME: {
"handlers": ["debug_file_handler", "console_handler"],
"level": "DEBUG",
"propagate": False,
},
},
}
return log_config | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/logger/config.py | config.py |
import logging.config
import traceback
# from typing import Literal, Type, TypeVar
from pathlib import Path
from .config import LogConfig
# log日志级别
# LogLevelTyping = Literal["critical", "error", "warning", "info", "debug"]
# LogConfig类及其子类
# LogConfigTyping = TypeVar("LogConfigTyping", bound=LogConfig)
def root_log(msg, level="warning", stacklevel=2, addstacklevel=0):
"""
root logger
    :param msg: message text
    :param level: message level
    :param stacklevel: number of stack frames to look up for caller info (default 2, i.e. the caller of this function)
    :param addstacklevel: extra frames to look up on top of stacklevel (stacklevel + addstacklevel in total);
        with this parameter you only need to care about frames above the caller, not below it
"""
getattr(logging.getLogger(), level.lower())(msg)
class Log:
"""
singleton Log
"""
_instance = None
_init = False
config = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self, log_config=LogConfig):
if self._init:
return
self._init = True
        # initialize the logging configuration
self.init_config(log_config)
@classmethod
def init_config(cls, log_config=LogConfig):
"""应用新配置"""
self = cls()
if log_config == self.config:
return
try:
self.config = log_config
self.make_path()
logging.config.dictConfig(log_config.dict_config())
root_log("logger init")
except ValueError as _e:
traceback.print_exc()
def make_path(self):
"""
        Check the log output file paths and create them if they do not exist
"""
handlers_config = self.config.dict_config().get("handlers", {})
if not handlers_config:
return
file_paths = []
for _, configs in handlers_config.items():
for cfg_key, val in configs.items():
if cfg_key != "filename":
continue
file_paths.append(val)
try:
for path in file_paths:
path_obj = Path(path)
path_obj.parent.mkdir(parents=True, exist_ok=True)
path_obj.touch(exist_ok=True)
except OSError as _e:
traceback.print_exc()
def root_logger(self) -> logging.Logger:
"""
local_logger
:return:
"""
return logging.getLogger()
def local_logger(self) -> logging.Logger:
"""
local_logger
:return:
"""
return logging.getLogger(self.config.LOCAL_LOGGER_NAME)
def debug_logger(self) -> logging.Logger:
"""
debug_logger
:return:
"""
return logging.getLogger(self.config.DEBUG_LOGGER_NAME)
def local_log(self, msg: str, level="info", stacklevel=2, addstacklevel=0, **kwargs):
"""
        Normal rotating log; output path is config.LOCAL_LOG_FILE
        :param msg: message text
        :param level: message level
        :param stacklevel: number of stack frames to look up for caller info (default 2, i.e. the caller of this function)
        :param addstacklevel: extra frames to look up on top of stacklevel (stacklevel + addstacklevel in total);
            with this parameter you only need to care about frames above the caller, not below it
        :param kwargs: extra parameters
:return:
"""
if kwargs:
self.root_logger().warning(f"[yyxx-Log] Unexpected parameters => {kwargs}")
getattr(self.local_logger(), level.lower())(msg, stacklevel=stacklevel + addstacklevel)
def debug_log(self, msg: str, level="info", stacklevel=2, addstacklevel=0, **kwargs):
"""
        Debug log, not rotated; output path is config.DEBUG_LOG_FILE
        :param msg: message text
        :param level: message level
        :param stacklevel: number of stack frames to look up for caller info (default 2, i.e. the caller of this function)
        :param addstacklevel: extra frames to look up on top of stacklevel (stacklevel + addstacklevel in total);
            with this parameter you only need to care about frames above the caller, not below it
        :param kwargs: extra parameters
:return:
"""
if kwargs:
self.root_logger().warning(f"[yyxx-Log] Unexpected parameters => {kwargs}")
getattr(self.debug_logger(), level.lower())(msg, stacklevel=stacklevel + addstacklevel)
logger = Log()
local_logger = logger.local_logger()
local_log = logger.local_log
debug_logger = logger.debug_logger()
debug_log = logger.debug_log | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/logger/log.py | log.py |
import os
import time
import fcntl
import traceback
import logging.handlers
class MultiProcessTimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
"""
    Custom TimedRotatingFileHandler safe for multi-process use
"""
def rollover_at(self):
"""
        Compute the next rollover time
"""
current_time = int(time.time())
dst_now = time.localtime(current_time)[-1]
new_rollover_at = self.computeRollover(current_time)
while new_rollover_at <= current_time:
new_rollover_at = new_rollover_at + self.interval
# If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == "MIDNIGHT" or self.when.startswith("W")) and not self.utc:
dst_at_rollover = time.localtime(new_rollover_at)[-1]
if dst_now != dst_at_rollover:
if (
not dst_now
): # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
dst_at_rollover += addend
self.rolloverAt = new_rollover_at
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
current_time = int(time.time())
dst_now = time.localtime(current_time)[-1]
diff_t = self.rolloverAt - self.interval
if self.utc:
time_tuple = time.gmtime(diff_t)
else:
time_tuple = time.localtime(diff_t)
dst_then = time_tuple[-1]
if dst_now != dst_then:
if dst_now:
addend = 3600
else:
addend = -3600
time_tuple = time.localtime(diff_t + addend)
dfn = self.baseFilename + "." + time.strftime(self.suffix, time_tuple)
if os.path.exists(dfn):
self.rollover_at()
return
# Issue 18940: A file may not have been created if delay is True.
if not os.path.exists(dfn) and os.path.exists(self.baseFilename):
# lock rename file
try:
with open(self.baseFilename, "a", encoding="utf-8") as file:
                    # LOCK_EX: exclusive lock
                    # LOCK_NB: non-blocking
                    fcntl.flock(file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)  # acquire the file lock
                    os.rename(self.baseFilename, dfn)  # rename the file
                    fcntl.flock(file.fileno(), fcntl.LOCK_UN)  # release the file lock
except IOError:
traceback.print_exc()
return
if self.backupCount > 0:
for _d in self.getFilesToDelete():
os.remove(_d)
if not self.delay:
self.stream = self._open()
self.rollover_at() | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/logger/handlers.py | handlers.py |
import pandas as pd
from yyxx_game_pkg.dbops.base import DatabaseOperation
from yyxx_game_pkg.utils import xListStr
class MysqlOperation(DatabaseOperation):
"""
    MySQL database operations
"""
def execute(self, sql, conn, params=None):
"""
        Execute the SQL statement and commit
:param sql:
:param conn:
:param params:
:return:
"""
sql = self.check_sql(sql)
with conn:
with conn.cursor() as cursor:
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
conn.commit()
def get_one(self, sql, conn, params=None):
"""
        Query a single row; returns a tuple (or a dict when DictCursor is used)
:param sql:
:param conn:
:param params:
:return:
"""
sql = self.check_sql(sql)
with conn:
with conn.cursor() as cursor:
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
return cursor.fetchone()
def get_all(self, sql, conn, params=None):
"""
        Query multiple rows; returns a list of tuples
:param sql:
:param conn:
:param params:
:return:
"""
sql = self.check_sql(sql)
with conn:
with conn.cursor() as cursor:
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
return cursor.fetchall()
def get_one_df(self, *args, **kwargs):
"""
        Fetch a single row of data
:param args:
:param kwargs:
:return:
"""
def get_all_df(self, sql, connection):
"""
        Fetch all rows as a DataFrame
:param sql:
:param connection:
:return:
"""
return pd.read_sql(sql, connection)
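    # Usage sketch (table name and rows are illustrative; conn is a connection from get_dbpool):
    #   op = MysqlOperation()
    #   rows = op.get_all("SELECT id, name FROM demo_table", conn)
    #   op.insert(conn, "demo_table", [(1, "a"), (2, "b")])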
def insert(self, conn, save_table, results):
"""
:param conn:
:param save_table:
:param results:
:return:
"""
def get_field_str(_data):
"""
            Build the "%s" placeholder string according to the row length
:param _data:
:return:
"""
_size = len(_data[0])
_list = []
for _ in range(_size):
_list.append("%s")
_str = ",".join(_list)
return _str
def get_table_desc(_table_name, _data_list, _cs):
"""
:param _table_name:
:param _data_list:
:return:
"""
sql = f"describe {_table_name}"
_cs.execute(sql)
_desc = _cs.fetchall()
_column = []
for _data in _desc:
                if _data[0] in ("id", "create_time"):  # skip auto-increment id and default create_time
continue
_column.append(_data[0])
_size = len(_data_list[0])
table_column = _column[:_size]
return ",".join(table_column)
insert_sql_template = (
"INSERT INTO {save_table} ({column_value}) VALUES({data_value})"
)
results = xListStr.split_list(results)
with conn:
with conn.cursor() as cursor:
for result in results:
if not result:
continue
field_str = get_field_str(result)
column_value = get_table_desc(save_table, result, cursor)
insert_sql = insert_sql_template.format(
save_table=save_table, column_value=column_value, data_value=field_str
)
cursor.executemany(insert_sql, result)
conn.commit() | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/dbops/mysql_op.py | mysql_op.py |
import re
import requests
import numpy as np
import pandas as pd
import ujson as json
def trans_unsupported_types(val):
"""
    Convert types unsupported by json.dumps: int64, bytes, ...
:param val:
:return:
"""
if isinstance(val, dict):
new_dict = {}
for k, _v in val.items():
k = trans_unsupported_types(k)
_v = trans_unsupported_types(_v)
new_dict[k] = _v
return new_dict
if isinstance(val, list):
for idx, _v in enumerate(val):
_v = trans_unsupported_types(_v)
val[idx] = _v
elif isinstance(val, np.int64):
val = int(val)
elif isinstance(val, bytes):
val = val.decode(encoding="utf8")
return val
class DasApiException(Exception):
pass
class DasApiChQueryException(DasApiException):
pass
class DasApiChExecuteException(DasApiException):
pass
class DasApiMongoQueryException(DasApiException):
pass
class DasApiEsQueryException(DasApiException):
pass
class DasApiEsInsertException(DasApiException):
pass
class DasApi:
"""
DasApi py
"""
@staticmethod
def _post(das_url, post_type, post_data):
url = f"{das_url}/{post_type}"
post_data = trans_unsupported_types(post_data)
res = requests.post(json=post_data, url=url, timeout=600)
return res.ok, res.content
@staticmethod
def mongo_query(das_url, post_data):
"""
        Query a MongoDB database with a SQL statement
        :param das_url: das_http_url
        :param post_data: {
            'sql': sql,          # SQL statement; both sql and js_sql are supported
            'server': mongo_url  # MongoDB connection string
}
:return:
"""
b_ok, res = DasApi._post(das_url, "das/mgo/query", post_data=post_data)
if not b_ok:
raise DasApiMongoQueryException(res)
res = re.sub(
r'{\\"\$numberLong\\": \\"\d+\\"}',
lambda m: re.search(r"\d+", m.group()).group(),
res.decode("utf-8"),
)
data = json.loads(res)
data_list = data["data"]
res_list = []
if data_list:
for data in data_list:
res_list.append(json.loads(data))
res_df = pd.DataFrame(res_list)
return res_df
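    # Usage sketch (URL and connection string are illustrative):
    #   df = DasApi.mongo_query(
    #       "http://das.example.com",
    #       {"sql": "select * from player limit 10", "server": "mongodb://127.0.0.1:27017"},
    #   )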
@staticmethod
def es_query(das_url, post_data):
"""
        Query an Elasticsearch cluster with a SQL statement
        :param das_url: das_http_url
        :param post_data: {
            "sql": sql,                  # SQL statement
            "engine": 1,                 # ES engine version: 1 = official, 2 = Open Distro
            "search_from": search_from,  # pagination offset, max 50k
            "fetch_size": fetch_size     # number of rows per query
}
:return:
"""
b_ok, res = DasApi._post(das_url, "das/es/query", post_data=post_data)
if not b_ok:
raise DasApiEsQueryException(res)
engine = post_data.get("engine", 0)
use_search = post_data.get("search_from", -1) >= 0
data = json.loads(res)
if engine == 0:
# opendistro
col_dict_lst = data["schema"]
data_rows = data["datarows"]
# total = data["total"]
# size = data["size"]
# status = data["status"]
else:
# origin
if use_search:
data_rows = data["map_rows"]
return pd.DataFrame(data_rows)
col_dict_lst = data["columns"]
data_rows = data["rows"]
df_cols = [col_dict["name"] for col_dict in col_dict_lst]
if not data_rows:
return pd.DataFrame(columns=df_cols)
res_df = pd.DataFrame(np.array(data_rows), columns=df_cols)
return res_df
@staticmethod
def es_insert(das_url, post_data):
"""
        Insert data into Elasticsearch (via Kafka)
        :param das_url: das_http_url
        :param post_data = {
            "kafka_addr": kafka_addr,  # Kafka address
            "topic": topic,            # Kafka topic
            "data_rows": data_rows     # data rows
}
:return:
"""
b_ok, res = DasApi._post(das_url, "das/es/insert", post_data=post_data)
if not b_ok:
raise DasApiEsInsertException(res)
return res
@staticmethod
def ch_query(das_url, post_data):
"""
        Query a ClickHouse database with a SQL statement
        :param das_url: das_http_url
        :param post_data: {
            "sql": sql,  # SQL statement
}
:return:
"""
b_ok, res = DasApi._post(das_url, "/das/ch/query", post_data=post_data)
if not b_ok:
raise DasApiChQueryException(res)
data = json.loads(res)
res_df = pd.DataFrame(data["datarows"], columns=data["columns"])
return res_df
@staticmethod
def ch_execute(das_url, post_data):
"""
clickhouse 执行 sql (数据插入)
:param das_url: das_http_url
:param post_data: {
"sql": sql, # sql语句
}
:return:
"""
b_ok, res = DasApi._post(das_url, "/das/ch/exec", post_data=post_data)
if not b_ok:
raise DasApiChExecuteException(res)
return b_ok
# if __name__ == '__main__':
# post_type = "das/mgo/query"
# post_data_ = dict()
# post_data_['js_sql'] = 'db.getSiblingDB("fumo_test").getCollection("player").find({})'
# post_data_['server'] = 'test'
#
# # DasApi.post(post_type=post_type, post_data=post_data)
# res_ = DasApi.mongo_query(post_data_)
#
# post_data_ = dict()
# post_data_['sql'] = 'SELECT * FROM log_money LIMIT 1'
# post_data_['engine'] = 1
# res_ = DasApi.es_query(post_data_)
# post_data = dict()
# post_data['sql'] = 'select * from main_test.log_player_op limit 10;'
# res_ = DasApi.ch_query(post_data)
#
# print (res_) | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/dbops/das_api.py | das_api.py |
from abc import abstractmethod
import pandas as pd
from pymongo import MongoClient
from yyxx_game_pkg.dbops.base import DatabaseOperation
from yyxx_game_pkg.dbops.mongo_op.sql2mongo import sql_to_mongo_spec
from yyxx_game_pkg.utils.decorator import (
except_monitor,
log_execute_time_monitor,
singleton_unique,
)
@singleton_unique
class SingletonMongoClient(MongoClient):
"""
SingletonMongo
    The singleton instance is keyed on the Mongo connection URI
"""
def __init__(self, mongo_uri):
super().__init__(mongo_uri)
def query_sql(self, sql, collection=None):
"""
        SQL query interface. Only SELECT is supported; JOIN is not supported yet.
        Aliases must use the "as" keyword (recognizes "name as player_name", not "name player_name").
        Supported comparison keywords: = > < != in like
        Supported aggregate keywords: [group by [cols]] sum, count, avg, min, max
        Supported ordering keyword: order by desc[asc]
        Supported paging keyword: limit 0 [,30]
:param sql:
:param collection:
:return:
"""
assert collection is not None
mongo_spec = sql_to_mongo_spec(sql)
pipeline = []
for k, val in mongo_spec.items():
if k == "documents":
continue
if not val:
continue
pipeline.append({k: val})
docs = mongo_spec.get("documents")
cursor = self[collection][docs].aggregate(pipeline)
return pd.DataFrame(list(cursor))
class PyMongoClient:
"""
PyMongoClient
"""
def __init__(self, mongo_uri, db_name):
self.db_name = db_name
self.mgo_client = SingletonMongoClient(mongo_uri)
def __getattr__(self, item):
return self.mgo_client.__getattr__(item)
def __getitem__(self, item):
return self.mgo_client.__getitem__(item)
@property
def game_db(self):
"""
:return:
"""
return self.mgo_client[self.db_name]
def query(self, sql):
"""
:param sql:
:return:
"""
return self.mgo_client.query_sql(sql, self.db_name)
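    # Usage sketch (URI and db name are illustrative):
    #   client = PyMongoClient("mongodb://127.0.0.1:27017", "game_1001")
    #   df = client.query("select name, level from player where level > 10 order by level desc limit 0,30")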
class MongoOperation(DatabaseOperation):
"""
MongoOperation
"""
@abstractmethod
def get_mongo_info(self, *args, **kwargs) -> {str, str}:
"""
:param args:
:param kwargs:
:return:
"""
@staticmethod
def new_client(mongo_url, game_db) -> PyMongoClient:
"""
:param mongo_url:
:param game_db:
:return:
"""
mgo_client = PyMongoClient(mongo_url, game_db)
return mgo_client
@except_monitor
@log_execute_time_monitor()
def get_one_df(self, sql, *args, **kwargs):
"""
:param sql:
:param args:
:param kwargs:
:return:
"""
mongo_url, game_db = self.get_mongo_info(*args, **kwargs)
res_df = self.new_client(mongo_url, game_db).query(sql)
return res_df.iloc[0] if not res_df.empty else res_df
@except_monitor
@log_execute_time_monitor()
def get_all_df(self, sql, *args, **kwargs):
"""
:param sql:
:param args:
:param kwargs:
:return:
"""
mongo_url, game_db = self.get_mongo_info(*args, **kwargs)
res_df = self.new_client(mongo_url, game_db).query(sql)
return res_df | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/dbops/mongo_op/mongo_op.py | mongo_op.py |
from pyparsing import (
Word,
alphas,
CaselessKeyword,
Group,
Optional,
ZeroOrMore,
Forward,
Suppress,
alphanums,
OneOrMore,
quotedString,
Combine,
Keyword,
Literal,
replaceWith,
oneOf,
nums,
removeQuotes,
QuotedString,
Dict,
)
# keyword declare
LPAREN, RPAREN = map(Suppress, "()")
EXPLAIN = CaselessKeyword("EXPLAIN").setParseAction(lambda t: {"explain": True})
SELECT = Suppress(CaselessKeyword("SELECT"))
DISTINCT = CaselessKeyword("distinct")
COUNT = CaselessKeyword("count")
WHERE = Suppress(CaselessKeyword("WHERE"))
FROM = Suppress(CaselessKeyword("FROM"))
CONDITIONS = oneOf("= != < > <= >= like", caseless=True)
AND = CaselessKeyword("and")
OR = CaselessKeyword("or")
ORDER_BY = Suppress(CaselessKeyword("ORDER BY"))
GROUP_BY = Suppress(CaselessKeyword("GROUP BY"))
DESC = CaselessKeyword("desc")
ASC = CaselessKeyword("asc")
LIMIT = Suppress(CaselessKeyword("LIMIT"))
SKIP = Suppress(CaselessKeyword("SKIP"))
# aggregate func
AGG_SUM = CaselessKeyword("sum")
AGG_AVG = CaselessKeyword("avg")
AGG_MAX = CaselessKeyword("max")
AGG_MIN = CaselessKeyword("min")
AGG_WORDS = AGG_SUM | AGG_AVG | AGG_MIN | AGG_MAX
def sql_to_spec(query_sql):
"""
Convert a SQL query to a spec dict for parsing.
Support Sql Statement [select, from ,where, limit, count(*), order by, group by]
param query_sql: string. standard sql
return: None or a dictionary
"""
# morphology
word_match = Word(alphanums + "._") | quotedString
optional_as = Optional(Suppress(CaselessKeyword("as")) + word_match)
word_as_match = Group(word_match + optional_as)
number = Word(nums)
# select
select_word = word_as_match | Group(Keyword("*"))
count_ = Group(COUNT + LPAREN + Keyword("*") + RPAREN)
count_word = Group(count_ + optional_as)
select_agg = Group(AGG_WORDS + Suppress(LPAREN) + word_match + Suppress(RPAREN))
select_agg_word = Group(select_agg + optional_as)
select_complex = count_word | select_agg_word | select_word
select_clause = (
SELECT + select_complex + ZeroOrMore(Suppress(",") + select_complex)
).setParseAction(lambda matches: {"select": matches.asList()})
# from
from_clause = (FROM + word_match).setParseAction(
lambda matches: {"from": matches[0]}
)
# where
in_condition = (
word_match
+ CaselessKeyword("in")
+ LPAREN
+ (word_match + ZeroOrMore(Suppress(",") + word_match))
+ RPAREN
)
def condition_prefix(matches=None):
vals = matches[2:]
fix_vals = []
for val in vals:
if val.find("'") == -1 and val.isdigit():
val = int(val)
else:
val = val.strip("'")
fix_vals.append(val)
return [matches[0:2] + fix_vals]
condition = (in_condition | (word_match + CONDITIONS + word_match)).setParseAction(
condition_prefix
)
def condition_combine(matches=None):
if not matches:
return {}
if len(matches) == 1:
return matches
res = {f"{matches[1]}": [matches[0], matches[2]]}
left_ = matches[3:]
for i in range(0, len(left_), 2):
key_word, cond = left_[i], left_[i + 1]
res = {f"{key_word}": [res, cond]}
return res
term = (
OneOrMore(condition) + ZeroOrMore((AND + condition) | (OR + condition))
).setParseAction(condition_combine)
where_clause = (WHERE + term).setParseAction(
lambda matches: {"where": matches.asList()}
)
# group by
group_by_clause = (
GROUP_BY + word_match + ZeroOrMore(Suppress(",") + word_match)
).setParseAction(lambda matches: {"group": matches.asList()})
# order by
order_by_word = Group(word_match + Optional(DESC | ASC))
order_by_clause = (
ORDER_BY + order_by_word + ZeroOrMore(Suppress(",") + order_by_word)
).setParseAction(lambda matches: {"order": matches.asList()})
# limit
def limit_prefix(matches=None):
matches = list(map(int, matches))
return {"limit": matches}
limit_clause = (LIMIT + number + Optional(Suppress(",") + number)).setParseAction(
limit_prefix
)
list_term = (
Optional(EXPLAIN)
+ select_clause
+ from_clause
+ Optional(where_clause)
+ Optional(group_by_clause)
+ Optional(order_by_clause)
+ Optional(limit_clause)
)
expr = Forward()
expr << list_term
ret = expr.parseString(query_sql.strip())
spec_dict = {}
for d in ret:
spec_dict.update(d)
return spec_dict
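# Example (illustrative): sql_to_spec("select name from player where _id = 1 limit 5")
# yields roughly {"select": [["name"]], "from": "player", "where": [["_id", "=", 1]], "limit": [5]},
# which create_mongo_spec() below turns into an aggregation pipeline spec.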
COND_KEYWORDS = {
"=": "$eq",
"!=": "$ne",
">": "$gt",
">=": "$gte",
"<": "$lt",
"<=": "$lte",
"like": "$regex",
"or": "$or",
"and": "$and",
"in": "$in",
}
def create_mongo_spec(spec_dict):
"""
param sql: string. standard sql
return: dict mongo aggregate pipeline params
"""
# parsing from
from_spec = spec_dict.get("from")
if not from_spec:
raise ValueError(f"Error 'from' spec {spec_dict}")
spec_parse_results = {}
# parsing select
op_func_map = {
"count": "$sum",
"sum": "$sum",
"avg": "$avg",
"max": "$max",
"min": "$min",
}
select_spec = spec_dict.get("select")
select_results = {
"$project": {},
"$addFields": {},
"$group": {},
"documents": from_spec,
}
drop_id = True
for lst_field in select_spec:
if len(lst_field) == 2:
real_field, as_field = lst_field
else:
real_field, as_field = lst_field[0], None
if isinstance(real_field, str):
if real_field == "*":
drop_id = False
break
if real_field == "_id":
drop_id = False
if as_field:
select_results["$project"].update({f"{as_field}": f"${real_field}"})
else:
select_results["$project"].update({real_field: 1})
elif isinstance(real_field, list):
# [count, sum ,avg, ...]
select_results["$group"].update({"_id": None})
agg_func, agg_key = real_field
real_field = f"{agg_func}({agg_key})"
op_func = op_func_map[agg_func]
op_val = 1 if agg_key == "*" else f"${agg_key}"
if as_field:
select_results["$group"].update({as_field: {op_func: op_val}})
else:
select_results["$group"].update({real_field: {op_func: op_val}})
if drop_id:
select_results["$project"].update({"_id": 0})
# where parsing
where_spec = spec_dict.get("where")
where_results = {}
if where_spec:
where_spec = where_spec[0]
where_results.update({"$match": combine_where(where_spec)})
# limit parsing
limit_spec = spec_dict.get("limit")
limit_results = {}
if limit_spec:
if len(limit_spec) == 1:
limit_results["$limit"] = limit_spec[0]
else:
limit_results["$skip"] = limit_spec[0]
limit_results["$limit"] = limit_spec[1]
# group by parsing
group_spec = spec_dict.get("group")
group_id = {}
if group_spec:
for group_key in group_spec:
group_id[group_key] = f"${group_key}"
select_results["$group"].update({"_id": group_id})
# order by parsing
order_spec = spec_dict.get("order")
order_results = {}
if order_spec:
order_results["$sort"] = {}
for order_lst in order_spec:
if len(order_lst) == 1:
order_results["$sort"].update({order_lst[0]: 1})
else:
asc = 1 if order_lst[1] == "asc" else -1
order_results["$sort"].update({order_lst[0]: asc})
spec_parse_results.update(select_results)
spec_parse_results.update(where_results)
spec_parse_results.update(limit_results)
spec_parse_results.update(order_results)
return spec_parse_results
def combine_where(where_spec):
if isinstance(where_spec, list):
if isinstance(where_spec[0], str):
key, op_word = where_spec[:2]
vals = where_spec[2:]
op_word = COND_KEYWORDS[op_word]
if op_word == "$in":
val = vals
else:
val = vals[0]
if op_word == "$regex":
val = val.strip("'")
if val[0] == "%":
val = val[1:]
else:
val = f"^{val}"
if val[-1] == "%":
val = val[:-1]
else:
val = f"{val}$"
return {key: {op_word: val}}
else:
res = []
for spec in where_spec:
res.append(combine_where(spec))
return res
else:
for op_word, vals in where_spec.items():
val_res = combine_where(vals)
return {COND_KEYWORDS[op_word]: val_res}
if __name__ == "__main__":
# sql = """
# select gid, name, leader_name, level, nMember, power, create_tm
# from test_2999999.guild
# where create_tm > 1664431200.0
# AND create_tm <= 1666799999.0
# AND name like '%吃啥%'
# OR leader_name like '999'
# gid in (1001, '1002', '12223')
# order by level, power limit 0,30
# """
# sql = """
# SELECT * FROM player WHERE _id = 2079 and name = 'c是的' and pid='2079'
# """
#
# sql = """
# select count(*) as a
# from player
# group by online, _id
# """
# sql = """
# select *
# from test_999999.player
# where _id = 1146 and max_power >= 3000 or pid > 1010
# limit 10, 10
# """
# sql = """
# select gid, name, leader, level, nMember, power, create_tm
# from guild
# where create_tm > 1684396800
# and create_tm <= 1688486399
#
# order by level desc, power
# limit 0,30
# """
# todo unit test
sql = """
select sum(online) as online_cnt
from player
"""
sql_spec = sql_to_spec(sql)
print(sql_spec)
mongo_spec = create_mongo_spec(sql_spec)
print(mongo_spec) | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/dbops/mongo_op/sql2mongo/sql2mongo.py | sql2mongo.py |
import argparse
from celery import Celery
from yyxx_game_pkg.stat.log import root_log
class CeleryInstance:
"""
    Celery interface
"""
# region external
@staticmethod
def get_celery_instance():
"""
        Load the celery configuration
        and return a Celery instance
:return:
"""
celery_name = CeleryInstance._args().name
        _app = Celery(celery_name)  # initialize celery
        _app.config_from_envvar("CELERY_CONFIG_MODULE")  # load the configuration
conf_jaeger = _app.conf.get("JAEGER")
if conf_jaeger:
from opentelemetry.instrumentation.celery import CeleryInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from yyxx_game_pkg.xtrace.helper import register_to_jaeger
if celery_name:
conf_jaeger["service_name"] += f"-{celery_name}"
register_to_jaeger(**conf_jaeger)
CeleryInstrumentor().instrument()
RequestsInstrumentor().instrument()
root_log(f"<CeleryInstance> tracer on, jaeger:{conf_jaeger}")
log_str = (
f"<CeleryInstance> get_celery_instance, app_name:{celery_name}, config:{_app.conf}, publish_flag:"
f"{_app.conf.get('PUBLISH_FLAG')}"
)
root_log(log_str)
return _app
@staticmethod
def get_current_task_id():
"""
        Current task id (if any)
:return:
"""
from celery import current_task
try:
return current_task.request.id
        except Exception:
return -1
# endregion
# region inner
@staticmethod
def _args():
"""
argparse
        -n service name
        -c config file
:return:
"""
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("-n", "--name")
args = parser.parse_known_args()
return args[0]
# endregion
# region celery instantiation
"""
app.conf.get('worker_max_tasks_per_child', 0)
"""
# app = CeleryInstance.get_celery_instance()
# endregion | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/stat/xcelery/instance.py | instance.py |
import traceback
from yyxx_game_pkg.stat.dispatch.common.common import fastapi_except_monitor
from yyxx_game_pkg.stat.log import local_log
from yyxx_game_pkg.stat.dispatch.core.manager import RuleManager
from yyxx_game_pkg.stat.dispatch.core.structs import ProtoSchedule
from yyxx_game_pkg.stat.dispatch.core.workflows import WorkFlowMethods
# region logic入口
from yyxx_game_pkg.xtrace.helper import get_current_trace_id
@fastapi_except_monitor
def task_logic(msg):
    # parse the command and build the task signature list
task_sig_list = parse_task(msg)
if not task_sig_list:
err_msg = f"<task_logic> main_dispatch_logic, parse task failed: {traceback.format_exc()}"
local_log(err_msg)
return []
    # dispatch the tasks
return dispatch_tasks(task_sig_list)
# endregion
# region 任务解析
def parse_task(schedule):
"""
    Parse a schedule command into task signatures
:param schedule:
:return:
"""
task_sig_list = []
    # deserialize
    schedule = ProtoSchedule().to_schedule(schedule)
    instance_name = schedule.SCHEDULE_DISPATCH_RULE_INSTANCE_NAME
    # validate the queue name
if schedule.SCHEDULE_QUEUE_NAME is None:
local_log(
f"<parse_command_data> SCHEDULE_QUEUE_NAME is None, schedule:{schedule}"
)
return task_sig_list
    # get the parsing rule for this schedule
rule = RuleManager().rules.get(instance_name)
if not rule:
local_log(f"<parse_command_data> rule is None, instance_name:{instance_name}")
return task_sig_list
    # build the signature list
schedule_sig = rule.build(schedule)
if not schedule_sig:
return task_sig_list
# link
if isinstance(schedule_sig, list):
task_sig_list.extend(schedule_sig)
else:
task_sig_list.append(schedule_sig)
return task_sig_list
# endregion
# region 任务分发
def _dispatch_one_task(task_sig, queue_priority, queue_name=None):
common_options = {
"priority": queue_priority,
# 'serializer': 'pickle'
"headers": {"X-Trace-ID": get_current_trace_id()},
}
    if queue_name is not None:
        # a queue name is forced explicitly
        res = task_sig.apply_async(queue=queue_name, **common_options)
    else:
        # dynamic queue name
        res = task_sig.apply_async(**common_options)
    # collect the task ids from res
task_id_list = []
WorkFlowMethods.fill_res_task_id_list(res, task_id_list)
return res.id, task_id_list
def dispatch_tasks(task_sig_list):
    task_id_list = []  # list of task ids
    task_type_list = []  # list of task types (for logging)
    task_queue_flag_list = []  # list of task queue flags (for logging)
    task_cnt = 0  # number of tasks (for logging)
    max_sig_cnt = 0  # peak signature count in a single submission (for logging)
for task_sig in task_sig_list:
task_type_list.append(type(task_sig))
queue_flag = WorkFlowMethods.get_task_sig_queue_name(task_sig)
task_queue_flag_list.append(queue_flag)
        # parse queue_flag to get the queue name and priority
        queue_name, queue_priority = _parse_queue_flag(queue_flag)
        # count the tasks
        WorkFlowMethods.reset_max_sig_cnt()
        task_cnt += WorkFlowMethods.calculate_sig_cnt(task_sig)
        max_sig_cnt = max(WorkFlowMethods.get_max_sig_cnt(), max_sig_cnt)
        # submit the task
m_task_id, s_task_id_list = _dispatch_one_task(task_sig, queue_priority)
task_id_list.append(m_task_id)
local_log(
f"<dispatch_tasks> record_task_id, queue:{queue_name}, "
f"priority:{queue_priority}, m_task_id:{m_task_id}, "
f"s_task_len:{len(s_task_id_list)}, s_task_id_list:{s_task_id_list}"
)
local_log(
f"<dispatch_tasks> dispatch_tasks, queue_name:{task_queue_flag_list} "
f"task_cnt:{task_cnt}, max_sig_cnt:{max_sig_cnt}"
)
return task_id_list
def _parse_queue_flag(queue_flag):
"""
    Parse the queue flag ("queue_name[@priority]")
:param queue_flag:
:return:
"""
    default_priority = 3  # default queue priority
if queue_flag is None:
# assert False
return [None], default_priority
res_list = queue_flag.split("@")
queue_name = res_list[0]
priority = min(int(res_list[1]), 10) if len(res_list) > 1 else default_priority
return queue_name, priority
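    # Examples (based on the parsing above): "queue_stat@5" -> ("queue_stat", 5); "queue_stat" -> ("queue_stat", 3).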
# endregion | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/stat/dispatch/logic/task_logic.py | task_logic.py |
from yyxx_game_pkg.stat.log import local_log
from yyxx_game_pkg.stat.dispatch.core.manager import rule_register
from yyxx_game_pkg.stat.dispatch.core.workflows import WorkFlowMethods
from yyxx_game_pkg.stat.dispatch.logic.task_logic import parse_task
from yyxx_game_pkg.stat.dispatch.rules.rule_base import RuleBase
@rule_register(inst_name_list=["work_flow_instance"])
class DispatchRuleWorkFlow(RuleBase):
def __init__(self):
super(self.__class__, self).__init__()
# region 继承方法
def build(self, schedule):
"""
        Build the dispatch task signatures
:return: [group, chord, chain, signature]
"""
return self.__logic_make_sig(schedule)
# endregion
# region 内部方法
def __logic_make_sig(self, schedule):
flow_content_dict = schedule.SCHEDULE_CONTENT
assert isinstance(flow_content_dict, dict)
sig_list = []
for _, flow_content in flow_content_dict.items():
sig = self.__make_sig_by_content(schedule, flow_content)
if not sig:
continue
sig_list.append(sig)
return sig_list
def __parse_flow_content(self, flow_content):
assert isinstance(flow_content, dict)
dict_step_sig_list = dict()
min_step = 65535
max_step = -1
for step, content_list in flow_content.items():
step = int(step)
min_step = min(step, min_step)
max_step = max(step, max_step)
for schedule_str in content_list:
if schedule_str == self.inst_name:
                    # a workflow's sub-schedules must not contain another workflow
local_log(
"[ERROR] <DispatchRuleWorkFlow> __parse_flow_content, "
"workflow can not contain workflow, schedule:{}".format(
schedule_str
)
)
return None, -1, -1
sub_sig_list = parse_task(schedule_str)
if not sub_sig_list:
                    # a failed signature must not be skipped
local_log(
"[ERROR] <DispatchRuleWorkFlow> __parse_flow_content, "
"parse_schedule_str_to_signature, schedule:{}".format(
schedule_str
)
)
return None, -1, -1
if not dict_step_sig_list.get(step):
dict_step_sig_list[step] = []
if isinstance(sub_sig_list, list):
dict_step_sig_list[step].extend(sub_sig_list)
else:
dict_step_sig_list[step].append(sub_sig_list)
return dict_step_sig_list, min_step, max_step
def __make_sig_by_content(self, schedule, flow_content):
dict_step_sig_list, min_step, max_step = self.__parse_flow_content(flow_content)
if dict_step_sig_list is None:
local_log(
"[ERROR] <DispatchRuleWorkFlow>dict_step_sig_list is None, content:{}".format(
flow_content
)
)
return None
queue_name = dict_step_sig_list[min_step][0].options.get("queue")
        # merge the steps
        step_sig_list = []
        for step in range(min_step, max_step + 1):
            # build the sig list in step order
            sig_list = dict_step_sig_list.get(step)
            if not sig_list:
                continue
            res_sig = WorkFlowMethods.merge_sig_list(sig_list)  # merge sigs belonging to the same step
            step_sig_list.append(res_sig)
        # build the chord
ch = WorkFlowMethods.link_signatures(step_sig_list)
if ch is None:
local_log(
"[ERROR] <DispatchRuleWorkFlow>__make_sig_by_content, make chord error, content:{}".format(
flow_content
)
)
else:
local_log(
"<DispatchRuleWorkFlow>__make_sig_by_content, queue:{} steps:{}".format(
queue_name, max_step
)
)
return ch
# endregion | yyxx-game-pkg-compat | /yyxx_game_pkg_compat-2023.8.31.2-py3-none-any.whl/yyxx_game_pkg/stat/dispatch/rules/rule_workflow.py | rule_workflow.py |