code | package | path | filename |
---|---|---|---|
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas._typing import TimedeltaConvertibleTypes, Axis
from .common import get_real_n
def simple_moving_average(data: pd.Series | pd.DataFrame,
window: timedelta = timedelta(hours=5),
min_periods: int | None = None,
center: bool = False,
win_type: str | None = None,
on: str | None = None,
axis: Axis = 0,
closed: str | None = None,
method: str = "single",
) -> pd.Series:
"""
calculate simple moving average, Note: window is based on time span
docs for other params, see https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.rolling.html
:param data: data
:type data: Series
:param window: window width
:type window: timedelta
:return: simple moving average data
:rtype: Series
"""
return data.rolling(window=get_real_n(data, window),
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
method=method,
).mean()
def exponential_moving_average(data: pd.Series | pd.DataFrame,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | pd.DataFrame | pd.Series | None = None,
method: str = "single",
):
"""
calculate exponential moving average, just a shortcut for pandas ewm().mean()
docs for params, see: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.ewm.html
"""
return data.ewm(com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
method=method,
).mean() | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/indicator/ma.py | ma.py |
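A minimal usage sketch (not part of the package) of the helpers above, assuming a fixed-interval, datetime-indexed price series and that get_real_n converts the timedelta window into a row count:
# hypothetical usage sketch -- the price data below is made up
from datetime import timedelta
import numpy as np
import pandas as pd

index = pd.date_range("2022-08-20", periods=1440, freq="min")
price = pd.Series(np.random.uniform(1500, 1700, size=len(index)), index=index)
sma = simple_moving_average(price, window=timedelta(hours=1))  # rolling mean over a 1-hour window
ema = exponential_moving_average(price, span=60)               # ewm mean with a 60-row span
print(sma.tail(), ema.tail())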
import csv
import datetime
import operator
import os.path
import pickle
import time
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from operator import itemgetter
from typing import List, Dict
from .eth_req import EthRequestClient, GetLogsParam
from tqdm import tqdm # process bar
import json
from ._typing import DownloadParam, ChainType
from .swap_contract import Constant
from .utils import get_file_name
height_cache_file_name = "_height_timestamp.pkl"
# preserved for future extension. such as special logic for each chain.
CHAINS = {
ChainType.Polygon: {
"is_poa": True
},
ChainType.Arbitrum: {
"is_poa": True
},
ChainType.Optimism: {
"is_poa": True
},
ChainType.Ethereum: {
"is_poa": False
},
}
@dataclass
class ContractConfig:
address: str
topics: List[str]
batch_size: int
one_by_one: bool
def save_block_dict(height_cache_path, block_dict):
"""
save pickled block data
:param height_cache_path:
:param block_dict:
:return:
"""
with open(height_cache_path, 'wb') as f:
pickle.dump(block_dict, f)
# def dict_factory(cursor, row):
# d = {}
# for idx, col in enumerate(cursor.description):
# d[col[0]] = row[idx]
# return d
def cut(obj, sec):
"""
get slice of object
:param obj: list of object
:param sec: step of slice
:return:
"""
return [obj[i:i + sec] for i in range(0, len(obj), sec)]
def fill_block_info(log, client: EthRequestClient, block_dict):
"""
fill block info with timestamp and height
:param log: log json data
:param client: client of eth
:param block_dict: block json data
:return:
"""
height = log['block_number']
if height not in block_dict:
block_dt = client.get_block_timestamp(height)
block_dict[height] = block_dt
log['block_timestamp'] = block_dict[height].isoformat()
log['block_dt'] = block_dict[height]
return log
def query_event_log(client: EthRequestClient, contract_config: ContractConfig, start_height: int, end_height: int,
save_path: str,
block_dict, chain):
"""
query event logs via JSON-RPC
:param client: EthRequestClient
:param contract_config: ContractConfig
:param start_height: start from block height
:param end_height: end with block height
:param save_path: save data path
:param block_dict: block json data
:param chain: the chain to get data
:return:
"""
collect_dt, log_by_day_dict, collect_start = None, OrderedDict(), None  # collect date, logs grouped by day, collect start time
start_tp = time.time()
downloaded_day = []
with tqdm(total=(end_height - start_height + 1), ncols=150) as pbar:
for height_slice in cut([i for i in range(start_height, end_height + 1)], contract_config.batch_size):
start = height_slice[0]
end = height_slice[-1]
if contract_config.one_by_one:
logs = []
for topic_hex in contract_config.topics:
tmp_logs = client.get_logs(GetLogsParam(contract_config.address,
start,
end,
[topic_hex]))
logs.extend(tmp_logs)
else:
logs = client.get_logs(GetLogsParam(contract_config.address, start, end, None))
log_lst = []
for log in logs:
log['blockNumber'] = int(log['blockNumber'], 16)
if len(log['topics']) > 0 and (log['topics'][0] in contract_config.topics):
if log["removed"]:
continue
log_lst.append({
'block_number': log['blockNumber'],
'transaction_hash': log['transactionHash'],
'transaction_index': log['transactionIndex'],
'log_index': log['logIndex'],
'DATA': log["data"],
'topics': json.dumps(log['topics'])
})
with ThreadPoolExecutor(max_workers=10) as t:
obj_lst = []
for data in log_lst:
obj = t.submit(fill_block_info, data, client, block_dict)
obj_lst.append(obj)
for future in as_completed(obj_lst):
data = future.result()
block_time_by_day = data['block_dt'].date()
data.pop('block_dt')
if block_time_by_day not in log_by_day_dict:
log_by_day_dict[block_time_by_day] = [data]
else:
log_by_day_dict[block_time_by_day].append(data)
pbar.update(n=len(height_slice))
if (len(height_slice) < contract_config.batch_size) or (
len(height_slice) >= contract_config.batch_size and len(log_by_day_dict) >= 2):
# collect_dt/collect_start
if len(log_by_day_dict) > 0:
log_by_day_dict = OrderedDict(sorted(log_by_day_dict.items(), key=operator.itemgetter(0)))
collect_dt, one_day_data = log_by_day_dict.popitem(last=False)
one_day_data = sorted(one_day_data,
key=itemgetter('block_number', 'transaction_index', 'log_index'))
collect_dt = collect_dt.strftime('%Y-%m-%d')
collect_start = one_day_data[0]['block_number']
# write to csv
save_one_day(save_path, collect_dt, contract_config, one_day_data, chain)
downloaded_day.append(collect_dt)
print(f'\nsaved date: {collect_dt}, start height: {collect_start}, '
f'length: {len(one_day_data)}, time: {time.time() - start_tp} s')
start_tp = time.time()
return downloaded_day
def save_one_day(save_path, collect_dt, contract_config, one_day_data, chain: ChainType):
"""
save daily data to file
:param save_path: save data file path
:param collect_dt: the date
:param contract_config: contract config
:param one_day_data: one day's data
:param chain: the chain to get data
:return: None
"""
with open(get_file_name(save_path, chain.name, contract_config.address, collect_dt, True), 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=['block_number', 'block_timestamp', 'transaction_hash',
'transaction_index',
'log_index', 'DATA', 'topics'])
writer.writeheader()
for item in one_day_data:
writer.writerow(item)
def download_and_save_by_day(config: DownloadParam):
"""
download and save data by day
:param config:
:return:
"""
chain_config = CHAINS[config.chain]
height_cache_path = config.chain.name + height_cache_file_name
if os.path.exists(height_cache_path):
with open(height_cache_path, 'rb') as f:
block_dict = pickle.load(f)
print(f"Height cache loaded, length: {len(block_dict)}")
else:
block_dict: Dict[int,datetime.datetime] = {}
client = EthRequestClient(config.rpc.end_point, config.rpc.proxy, config.rpc.auth_string)
downloaded_day = []  # ensure the variable is defined even if the query below raises
try:
downloaded_day = query_event_log(client,
ContractConfig(config.pool_address,
[Constant.SWAP_KECCAK,
Constant.BURN_KECCAK,
Constant.COLLECT_KECCAK,
Constant.MINT_KECCAK],
config.rpc.batch_size,
False),
config.rpc.start_height,
config.rpc.end_height,
config.save_path,
block_dict,
config.chain)
except Exception as e:
print(e)
import traceback
print(traceback.format_exc())
print(f"saving height cache, length {len(block_dict)}")
save_block_dict(height_cache_path, block_dict)
return downloaded_day if downloaded_day else [] | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/source_rpc.py | source_rpc.py |
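A hedged usage sketch (not part of the package); the endpoint, pool address and block range below are placeholders, and RpcParam/DownloadParam are assumed to be importable from demeter.download._typing:
# hypothetical usage sketch -- values are placeholders
config = DownloadParam(chain=ChainType.Polygon,
                       pool_address="0x1111111111111111111111111111111111111111",
                       save_path="./data/")
config.rpc = RpcParam(end_point="http://localhost:8545",
                      start_height=30000000,
                      end_height=30005000,
                      batch_size=500)
days = download_and_save_by_day(config)
print(days)  # dates whose raw logs were written to csv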
from typing import List
from ._typing import OnchainTxType
from .utils import HexUtil
class Constant(object):
MINT_KECCAK = "0x7a53080ba414158be7ec69b987b5fb7d07dee101fe85488f0853ae16239d0bde"
SWAP_KECCAK = "0xc42079f94a6350d7e6235f29174924f928cc2ac818eb64fed8004e115fbcca67"
BURN_KECCAK = "0x0c396cd989a39f4459b5fa1aed6a9a8dcdbc45908acfd67e028cd568da98982c"
COLLECT_KECCAK = "0x70935338e69775456a85ddef226c395fb668b63fa0115f5f20610b388e6ca9c0"
type_dict = {Constant.MINT_KECCAK: OnchainTxType.MINT,
Constant.SWAP_KECCAK: OnchainTxType.SWAP,
Constant.BURN_KECCAK: OnchainTxType.BURN,
Constant.COLLECT_KECCAK: OnchainTxType.COLLECT}
def decode_address_from_topic(topic_str):
"""
decode an address from a 32-byte topic string
:param topic_str: topic string; the last 40 hex characters are the address, returned with a "0x" prefix
:return:
"""
return "0x" + topic_str[26:]
def split_topic(topic: str) -> List[str]:
"""
split topic with \n or ,
:param topic:
:return:
"""
splitter = "\n" if "\n" in topic else ","
topic_list = topic.strip("[]").replace("'", "").replace("\"", "").replace(" ", "").split(splitter)
return topic_list
def handle_event(topics_str, data_hex):
"""
handle event data: decode tx type, sender, recipient, amounts and so on
:param topics_str: topic info
:param data_hex: hex data str
:return:
"""
# preprocess topics string -> topic list
# topics_str = topics.values[0]
sqrtPriceX96 = receipt = amount1 = current_liquidity = current_tick = tick_lower = tick_upper = delta_liquidity = None
if isinstance(topics_str, str):
topic_list = split_topic(topics_str)
else:
topic_list = topics_str
# data_hex = data.values[0]
type_topic = topic_list[0]
tx_type = type_dict[type_topic]
no_0x_data = data_hex[2:]
chunk_size = 64
chunks = len(no_0x_data)
if tx_type == OnchainTxType.SWAP:
sender = decode_address_from_topic(topic_list[1])
receipt = decode_address_from_topic(topic_list[2])
split_data = ["0x" + no_0x_data[i:i + chunk_size] for i in range(0, chunks, chunk_size)]
amount0, amount1, sqrtPriceX96, current_liquidity, current_tick = [HexUtil.to_signed_int(onedata) for onedata in
split_data]
elif tx_type == OnchainTxType.BURN:
sender = decode_address_from_topic(topic_list[1])
tick_lower = HexUtil.to_signed_int(topic_list[2])
tick_upper = HexUtil.to_signed_int(topic_list[3])
split_data = ["0x" + no_0x_data[i:i + chunk_size] for i in range(0, chunks, chunk_size)]
delta_liquidity, amount0, amount1 = [HexUtil.to_signed_int(onedata) for onedata in split_data]
delta_liquidity = -delta_liquidity
elif tx_type == OnchainTxType.MINT:
# sender = topic_str_to_address(topic_list[1])
owner = decode_address_from_topic(topic_list[1])
tick_lower = HexUtil.to_signed_int(topic_list[2])
tick_upper = HexUtil.to_signed_int(topic_list[3])
split_data = ["0x" + no_0x_data[i:i + chunk_size] for i in range(0, chunks, chunk_size)]
sender = decode_address_from_topic(split_data[0])
delta_liquidity, amount0, amount1 = [HexUtil.to_signed_int(onedata) for onedata in split_data[1:]]
elif tx_type == OnchainTxType.COLLECT:
tick_lower = HexUtil.to_signed_int(topic_list[2])
tick_upper = HexUtil.to_signed_int(topic_list[3])
split_data = ["0x" + no_0x_data[i:i + chunk_size] for i in range(0, chunks, chunk_size)]
sender = decode_address_from_topic(split_data[0])
amount0, amount1 = [HexUtil.to_signed_int(onedata) for onedata in split_data[1:]]
else:
raise ValueError("unsupported tx type")
return tx_type, sender, receipt, amount0, amount1, sqrtPriceX96, current_liquidity, current_tick, tick_lower, tick_upper, delta_liquidity | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/swap_contract.py | swap_contract.py |
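A small illustrative sketch (not part of the package) of the decoding helpers above; the second topic value is made up:
# hypothetical usage sketch -- the address topic is made up
topics_str = '["0xc42079f94a6350d7e6235f29174924f928cc2ac818eb64fed8004e115fbcca67","0x0000000000000000000000001111111111111111111111111111111111111111"]'
topic_list = split_topic(topics_str)
print(topic_list[0] == Constant.SWAP_KECCAK)     # True: topic0 identifies a Swap event
print(decode_address_from_topic(topic_list[1]))  # -> 0x1111111111111111111111111111111111111111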
import json
from datetime import date
import pandas
from ._typing import ChainType
from .swap_contract import Constant
def download_bigquery_pool_event_oneday(chain: ChainType, contract_address: str, one_date: date) -> "pandas.DataFrame":
"""
query log data from big_query
sample response
[{
"log_index": "38",
"transaction_hash": "0xb013afdc4272ccf59a19cfa3943d2af9e818dd3a88981fc0e31e043233d31d1a",
"transaction_index": "1",
"address": "0x8ef34625aec17541b6147a40a3840c918263655e",
"data": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002022f034f3ae810000000000000000000000000000000000000000000000c30329a44dfdbf0de60000000000000000000000000000000000000000000000000000000000000000",
"topics": ["0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822", "0x0000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488d", "0x000000000000000000000000700000f7c2c71caab6b250ca85237117ff702ebb"],
"block_timestamp": "2022-09-01T01:01:52Z",
"block_number": "15449884",
"block_hash": "0x48268cc49b9a68fa08a5954a4a841f36205f8ba5a957cb632165a817c6817b05"
}]
:param chain:
:param contract_address:
:param one_date:
:return:
"""
from google.cloud import bigquery
client = bigquery.Client()
# BigQuery has no index on block number, so filtering by block number in WHERE is slow; filter by date instead.
query = f"""SELECT
block_timestamp,
block_number,
transaction_hash,
transaction_index,
log_index,
topics,
DATA
FROM
{ModuleUtils.get_table_name(chain)}
WHERE
(topics[SAFE_OFFSET(0)] = '{Constant.MINT_KECCAK}'
OR topics[SAFE_OFFSET(0)] = '{Constant.BURN_KECCAK}'
OR topics[SAFE_OFFSET(0)] = '{Constant.SWAP_KECCAK}' OR topics[SAFE_OFFSET(0)] = '{Constant.COLLECT_KECCAK}')
AND DATE(block_timestamp) >= DATE("{one_date}")
AND DATE(block_timestamp) <= DATE("{one_date}")
AND address = "{contract_address}" order by block_number asc,log_index asc"""
# print(query);
query_job = client.query(query) # Make an API request.
result: pandas.DataFrame = query_job.to_dataframe(create_bqstorage_client=False)
return result
class ModuleUtils(object):
@staticmethod
def get_table_name(chain_type: ChainType) -> str:
"""
according to ChainType return bigquery table name
:param chain_type:
:return:
"""
match chain_type:
case ChainType.Polygon:
return "public-data-finance.crypto_polygon.logs"
case ChainType.Ethereum:
return "bigquery-public-data.crypto_ethereum.logs"
case _:
raise RuntimeError(f"chain type {chain_type} is not supported by BigQuery") | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/source_bigquery.py | source_bigquery.py |
import datetime
import random
from dataclasses import dataclass
from typing import List
import requests
from .._typing import EthError
@dataclass
class GetLogsParam:
address: str
fromBlock: int
toBlock: int
topics: List[str] | None
class EthRequestClient:
def __init__(self, endpoint: str, proxy="", auth=""):
"""
init EthRequestClient
:param endpoint: endpoint like http://*.*.*.*:*
:param proxy: proxy address used for the connection
:param auth: auth info
"""
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=5, pool_maxsize=20)
self.session.mount("https://", adapter)
self.session.mount("http://", adapter)
self.headers = {}
self.endpoint = endpoint
if auth:
self.headers["Authorization"] = auth
self.proxies = {"http": proxy, "https": proxy, } if proxy else {}
def __del__(self):
self.session.close()
@staticmethod
def __encode_json_rpc(method: str, params: list):
"""
encode json rpc request body
:param method: json rpc method
:param params: request params
:return:
"""
return {"jsonrpc": "2.0", "method": method, "params": params, "id": random.randint(1, 2147483648)}
@staticmethod
def __decode_json_rpc(response: requests.Response):
"""
decode json rpc response
:param response: response with json data
:return:
"""
content = response.json()
if "error" in content:
raise EthError(content["error"]["code"], content["error"]["message"])
return content["result"]
def do_post(self, param):
"""
send post request
:param param: json param
:return: json response
"""
return self.session.post(self.endpoint,
json=param,
proxies=self.proxies,
headers=self.headers)
def get_block(self, height):
"""
get block rpc data
:param height:
:return:
"""
response = self.do_post(EthRequestClient.__encode_json_rpc("eth_getBlockByNumber", [hex(height), False]))
return EthRequestClient.__decode_json_rpc(response)
def get_block_timestamp(self, height):
"""
get block timestamp
:param height:
:return:
"""
resp = self.get_block(height)
if resp:
timestamp = int(resp["timestamp"], 16)
return datetime.datetime.utcfromtimestamp(timestamp)
else:
return None
def get_logs(self, param: GetLogsParam):
"""
get logs from jrpc json
:param param:
:return:
"""
if param.toBlock:
param.toBlock = hex(param.toBlock)
if param.fromBlock:
param.fromBlock = hex(param.fromBlock)
response = self.do_post(EthRequestClient.__encode_json_rpc("eth_getLogs", [vars(param)]))
return EthRequestClient.__decode_json_rpc(response) | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/eth_req.py | eth_req.py |
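A hedged usage sketch (not part of the package); the endpoint, address and block numbers are placeholders for any JSON-RPC compatible node:
# hypothetical usage sketch -- values are placeholders
client = EthRequestClient("http://localhost:8545")
ts = client.get_block_timestamp(15449884)  # block timestamp as a datetime, or None
logs = client.get_logs(GetLogsParam(address="0x1111111111111111111111111111111111111111",
                                    fromBlock=15449884,
                                    toBlock=15449984,
                                    topics=None))  # None means no topic filter
print(ts, len(logs))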
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
DEFAULT_SAVE_PATH = "data"
class DataSource(Enum):
BigQuery = 1
RPC = 2
class ChainType(Enum):
Ethereum = 1
Polygon = 2
Optimism = 3
Arbitrum = 4
Celo = 5
@dataclass
class RpcParam:
end_point: str
start_height: int
end_height: int
auth_string: str = ""
proxy: str = ""
batch_size: int = 500
@dataclass
class BigQueryParam:
auth_file: str
start: str
end: str = datetime.now().strftime("%Y-%m-%d")
@dataclass
class DownloadParam:
chain: ChainType = ChainType.Ethereum
source: DataSource = DataSource.BigQuery
pool_address: str = ""
save_path: str = DEFAULT_SAVE_PATH
rpc = RpcParam("", 0, 0)
big_query = BigQueryParam("", "")
class OnchainTxType(Enum):
MINT = 0
SWAP = 2
BURN = 1
COLLECT = 3
MarketDataNames = [
"timestamp",
"netAmount0",
"netAmount1",
"closeTick",
"openTick",
"lowestTick",
"highestTick",
"inAmount0",
"inAmount1",
"currentLiquidity",
]
class MarketData(object):
def __init__(self):
self.timestamp = None
self.netAmount0 = 0
self.netAmount1 = 0
self.closeTick = None
self.openTick = None
self.lowestTick = None
self.highestTick = None
self.inAmount0 = 0
self.inAmount1 = 0
self.currentLiquidity = None
def to_array(self):
return [
self.timestamp,
self.netAmount0,
self.netAmount1,
self.closeTick,
self.openTick,
self.lowestTick,
self.highestTick,
self.inAmount0,
self.inAmount1,
self.currentLiquidity
]
def __str__(self):
"""
print MarketData info
:return:
"""
return str(self.timestamp)
def fill_missing_field(self, prev_data) -> bool:
"""
fill missing field with previous data
:param prev_data:
:return: data is available or not
"""
if prev_data is None:
prev_data = MarketData()
self.closeTick = self.closeTick if self.closeTick is not None else prev_data.closeTick
self.openTick = self.openTick if self.openTick is not None else prev_data.closeTick
self.lowestTick = self.lowestTick if self.lowestTick is not None else prev_data.closeTick
self.highestTick = self.highestTick if self.highestTick is not None else prev_data.closeTick
self.currentLiquidity = self.currentLiquidity if self.currentLiquidity is not None \
else prev_data.currentLiquidity
return False if (self.closeTick is None or self.currentLiquidity is None) else True | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/_typing.py | _typing.py |
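A small illustrative sketch (not part of the package) of how a gap minute is forward-filled from the previous minute; the tick and liquidity values are made up:
# hypothetical usage sketch
prev = MarketData()
prev.closeTick = 200000
prev.currentLiquidity = 10 ** 18
gap = MarketData()
print(gap.fill_missing_field(prev))  # -> True: tick and liquidity copied from the previous minute
print(gap.openTick, gap.closeTick)   # -> 200000 200000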
from datetime import datetime, timedelta
from ._typing import MarketData
class TextUtil(object):
@staticmethod
def cut_after(text: str, symbol: str) -> 'str':
"""
truncate the string at the first occurrence of symbol and return the part before it
:param text:
:param symbol:
:return:
"""
index = text.find(symbol)
return text[0:index]
class TimeUtil(object):
@staticmethod
def get_minute(time: datetime) -> datetime:
"""
truncate a datetime to minute precision (seconds set to zero)
:param time:
:return:
"""
return datetime(time.year, time.month, time.day, time.hour, time.minute, 0)
class HexUtil(object):
@staticmethod
def to_signed_int(h):
"""
Converts hex values to signed integers.
"""
s = bytes.fromhex(h[2:])
i = int.from_bytes(s, 'big', signed=True)
return i
def get_file_name(path, chain, addr, day, raw: bool):
"""
get file name
:param path: file path
:param chain: chain name
:param addr: address
:param day: day
:param raw: if raw data
:return:
"""
raw_str = "raw-" if raw else ""
return f"{path}{raw_str}{chain}-{addr.lower()}-{day}.csv"
class DataUtil(object):
@staticmethod
def fill_missing(data_list: [MarketData]) -> list:
"""
fill missing data
:param data_list:
:return:
"""
if len(data_list) < 1:
return data_list
# start from the first minute present in data, instead of 00:00:00
# as a result, if the first record is at 0:03:00, the minutes before it will be left blank,
# because there is no previous data to follow
# those empty rows will be filled in the loading stage
index_minute = data_list[0].timestamp
new_list = []
data_list_index = 0
start_day = data_list[0].timestamp.day
while index_minute.day == start_day:
if (data_list_index < len(data_list)) and (index_minute == data_list[data_list_index].timestamp):
item = data_list[data_list_index]
data_list_index += 1
else:
item = MarketData()
item.timestamp = index_minute
prev_data = new_list[len(new_list) - 1] if len(new_list) - 1 >= 0 else None
# if no previous(this might happen in the first minutes) data, this row will be discarded
if item.fill_missing_field(prev_data):
new_list.append(item)
index_minute = index_minute + timedelta(minutes=1)
return new_list | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/utils.py | utils.py |
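A few worked examples (not part of the package) of the small helpers above; the address string is made up:
# hypothetical usage sketch
from datetime import datetime
print(HexUtil.to_signed_int("0x" + "ff" * 32))         # -> -1 (two's complement, all bits set)
print(HexUtil.to_signed_int("0x" + "00" * 31 + "64"))  # -> 100
print(TimeUtil.get_minute(datetime(2022, 8, 20, 1, 2, 33)))  # -> 2022-08-20 01:02:00
print(get_file_name("./data/", "Polygon", "0xABCD", "2022-08-20", raw=True))
# -> ./data/raw-Polygon-0xabcd-2022-08-20.csv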
import datetime
import pandas
from pandas import Timestamp
from ._typing import MarketData, OnchainTxType, MarketDataNames
from .swap_contract import handle_event
from .utils import TextUtil, TimeUtil, DataUtil
class ModuleUtils(object):
@staticmethod
def get_datetime(date_str: str) -> datetime.datetime:
"""
get datetime from date_str
:param date_str:
:return:
"""
if type(date_str) == Timestamp:
return date_str.to_pydatetime()
else:
return datetime.datetime.strptime(TextUtil.cut_after(str(date_str), "+").replace("T", " "),
"%Y-%m-%d %H:%M:%S")
def process_raw_data(raw_data: pandas.DataFrame) -> "pandas.DataFrame":
"""
aggregate raw event-log data (from RPC or BigQuery) into minute-level market data
:param raw_data:
:return:
"""
if raw_data.size <= 0:
return raw_data
start_time = TimeUtil.get_minute(ModuleUtils.get_datetime(raw_data.loc[0, "block_timestamp"]))
minute_rows = []
data = []
total_index = 1
for index, row in raw_data.iterrows():
current_time = TimeUtil.get_minute(ModuleUtils.get_datetime(row["block_timestamp"]))
if start_time == current_time: # middle of a minute
minute_rows.append(row)
else: #
data.append(sample_data_to_one_minute(start_time, minute_rows))
total_index += 1
# start a new minute
start_time = current_time
minute_rows = [row]
data = DataUtil.fill_missing(data)
df = pandas.DataFrame(columns=MarketDataNames, data=map(lambda d: d.to_array(), data))
return df
def sample_data_to_one_minute(current_time, minute_rows) -> MarketData:
"""
aggregate data to minute data
:param current_time: current time
:param minute_rows: row data in this minute
:return:
"""
data = MarketData()
data.timestamp = current_time
i = 1
for r in minute_rows:
tx_type, sender, receipt, amount0, amount1, sqrtPriceX96, current_liquidity, current_tick, tick_lower, tick_upper, delta_liquidity = handle_event(
r.topics, r.DATA)
# print(tx_type, sender, receipt, amount0, amount1, sqrtPriceX96, current_liquidity, current_tick, tick_lower,
# tick_upper, delta_liquidity)
match tx_type:
case OnchainTxType.MINT:
pass
case OnchainTxType.BURN:
pass
case OnchainTxType.COLLECT:
pass
case OnchainTxType.SWAP:
data.netAmount0 += amount0
data.netAmount1 += amount1
if amount0 > 0:
data.inAmount0 += amount0
if amount1 > 0:
data.inAmount1 += amount1
if data.openTick is None: # first
data.openTick = current_tick
data.highestTick = current_tick
data.lowestTick = current_tick
if data.highestTick < current_tick:
data.highestTick = current_tick
if data.lowestTick > current_tick:
data.lowestTick = current_tick
if i == len(minute_rows): # last
data.closeTick = current_tick
data.currentLiquidity = current_liquidity
i += 1
return data | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/process.py | process.py |
import os
from datetime import date, timedelta
from typing import List
import json
import pandas as pd
from tqdm import tqdm # process bar
from ._typing import ChainType, DownloadParam
from .process import process_raw_data
from .source_bigquery import download_bigquery_pool_event_oneday
from .source_rpc import download_and_save_by_day
from .utils import get_file_name
def download_from_rpc(config: DownloadParam):
"""
download from rpc
:param config: DownloadParam
:return:
"""
if config.rpc.end_height <= config.rpc.start_height:
raise RuntimeError("start height should be less than end height")
downloaded_day = download_and_save_by_day(config)
# downloaded_day = ["2022-06-30", "2022-07-01", "2022-07-02"]
if len(downloaded_day) <= 2:
raise RuntimeError("As the first day and the last day will be dropped, "
"day count should be at least 3, current count is " + str(len(downloaded_day)))
print(f"now will drop data in {downloaded_day[0]} and {downloaded_day[len(downloaded_day) - 1]} "
f"as they are highly likely to be incomplete for a whole day")
downloaded_day = downloaded_day[1:len(downloaded_day) - 1]
for day in downloaded_day:
day_df = pd.read_csv(get_file_name(config.save_path, config.chain.name, config.pool_address, day, True))
processed_day_data = process_raw_data(day_df)
processed_day_data.to_csv(get_file_name(config.save_path, config.chain.name, config.pool_address, day, False),
header=True, index=False)
def download_from_bigquery(chain: ChainType, pool_address: str, start: date, end: date,
save_path=os.getcwd(), save_raw_file=False, skip_exist=True):
"""
Download transfer data by day
:param chain: which chain
:param pool_address: contract address of swap pool
:param start: start date
:param end: end date
:param save_path: save to path
:param save_raw_file: save raw data or not
:param skip_exist: if file exist, skip.
:return:
"""
pool_address = pool_address.lower()
end = end + timedelta(days=1) # make date range is [a,b], instead of [a,b)
if start > end:
raise RuntimeError("start date should be earlier than end date")
date_array = split_date_range_to_array(start, end)
for i in tqdm(range(len(date_array)), ncols=150):
day = date_array[i]
date_str = day.strftime("%Y-%m-%d")
file_name = get_file_name(save_path, chain.name, pool_address, date_str, False)
if skip_exist and os.path.exists(file_name):
continue
raw_day_data = download_bigquery_pool_event_oneday(chain, pool_address, day)
if save_raw_file:
raw_day_data['topics'] = raw_day_data['topics'].apply(lambda x: json.dumps(x.tolist()))
raw_day_data.to_csv(get_file_name(save_path, chain.name, pool_address, date_str, True),
header=True,
index=False)
processed_day_data = process_raw_data(raw_day_data)
# save processed
processed_day_data.to_csv(file_name, header=True, index=False)
# time.sleep(1)
def split_date_range_to_array(start: date, end: date) -> List[date]:
"""
split date range to array
:param start: start date
:param end: end date
:return:
"""
return [start + timedelta(days=x) for x in range(0, (end - start).days)] | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/download/downloader.py | downloader.py |
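A hedged usage sketch (not part of the package); the pool address is a placeholder, and BigQuery credentials are assumed to be configured (e.g. via the GOOGLE_APPLICATION_CREDENTIALS environment variable):
# hypothetical usage sketch -- pool address is made up
from datetime import date
download_from_bigquery(chain=ChainType.Polygon,
                       pool_address="0x1111111111111111111111111111111111111111",
                       start=date(2022, 8, 20),
                       end=date(2022, 8, 22),
                       save_path="./data/",
                       save_raw_file=False,
                       skip_exist=True)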
from decimal import Decimal
from typing import Dict
import pandas as pd
from ._typing import Asset, TokenInfo, AccountStatus, MarketDict, AssetDict
from .market import Market
from .._typing import DemeterError, UnitDecimal
from ..utils import get_formatted_from_dict, get_formatted_predefined, STYLE, \
float_param_formatter
class Broker:
def __init__(self, allow_negative_balance=False, record_action_callback=None):
"""
init Broker
:param allow_negative_balance:
:param record_action_callback:
"""
self.allow_negative_balance = allow_negative_balance
self._assets: AssetDict[Asset] = AssetDict()
self._markets: MarketDict[Market] = MarketDict()
self._record_action_callback = record_action_callback
# region properties
@property
def markets(self) -> MarketDict[Market]:
"""
markets property
:return:
"""
return self._markets
@property
def assets(self) -> AssetDict[Asset]:
"""
asset property
:return:
"""
return self._assets
# endregion
def __str__(self):
"""
Broker raw info
:return:
"""
return "assets: " + ",".join([f"({v})" for k, v in self._assets.items()]) + \
"; markets: " + ",".join([f"({v})" for k, v in self.markets.items()])
def add_market(self, market: Market):
"""
Add a new market to the broker.
The market should be initialized before being added, because it has too many initial parameters.
:param market: market
:type market: Market
:return:
:rtype:
"""
if market.market_info in self._markets:
raise DemeterError("market already exists")
self._markets[market.market_info] = market
market.broker = self
market._record_action_callback = self._record_action_callback
@float_param_formatter
def add_to_balance(self, token: TokenInfo, amount: Decimal | float):
"""
add amount to the balance of a token
:param token: which token to set
:type token: TokenInfo
:param amount: balance, eg: 1.2345
:type amount: Decimal | float
"""
if token in self._assets:
asset: Asset = self._assets[token]
else:
asset: Asset = self.__add_asset(token)
asset.add(amount)
return asset
@float_param_formatter
def set_balance(self, token: TokenInfo, amount: Decimal | float):
"""
set the balance value
:param token: the token of asset, TokenInfo(name='usdc', decimal=6)
:param amount: amount of token, 10000
:return: Asset instance, usdc: 10000
"""
asset: Asset = self.__add_asset(token)
asset.balance = amount
return asset
@float_param_formatter
def subtract_from_balance(self, token: TokenInfo, amount: Decimal | float):
"""
:param token: TokenInfo object
:param amount: Decimal or float type
:return:
"""
if token in self._assets:
asset: Asset = self._assets[token]
asset.sub(amount, allow_negative_balance=self.allow_negative_balance)
else:
if self.allow_negative_balance:
asset: Asset = self.__add_asset(token)
asset.balance = 0 - amount
else:
raise DemeterError(f"{token.name} doesn't exist in assets dict")
return asset
def __add_asset(self, token: TokenInfo) -> Asset:
"""
set Asset with Token
:param token: TokenInfo
:return: Asset with TokenInfo
"""
self._assets[token] = Asset(token, 0)
return self._assets[token]
def get_token_balance(self, token: TokenInfo):
"""
get balance of token
:param token:
:return:
"""
if token in self.assets:
return self._assets[token].balance
else:
raise DemeterError(f"{token.name} doesn't exist in assets dict")
def get_token_balance_with_unit(self, token: TokenInfo):
"""
get balance of token with unit
:param token: TokenInfo
:return: UnitDecimal balance
"""
return UnitDecimal(self.get_token_balance(token), token.name)
def get_account_status(self, prices: pd.Series | Dict[str, Decimal], timestamp=None) -> AccountStatus:
"""
get account status
:param prices: price series
('eth', Decimal('1610.553895752868641174609110')) ('usdc', 1)
:param timestamp: optional timestamp, 2022-08-20 00:00:00
:return: account status
AccountStatus(timestamp=None, net_value=Decimal('26105.53895752868641174609110'), asset_balances=<demeter.broker._typing.AssetDict object at 0x11842f7c0>, market_status=<demeter.broker._typing.MarketDict object at 0x11842e020>)
"""
account_status = AccountStatus(timestamp=timestamp)
for k, v in self.markets.items():
account_status.market_status[k] = v.get_market_balance(prices)
account_status.market_status.set_default_key(self.markets.get_default_key())
for k, v in self.assets.items():
account_status.asset_balances[k] = v.balance
asset_sum = sum([v * prices[k.name] for k, v in account_status.asset_balances.items()])
market_sum = sum([v.net_value for v in account_status.market_status.values()])
account_status.net_value = asset_sum + market_sum
return account_status
def formatted_str(self):
"""
get formatted broker info
:return: str info of broker
"""
str_to_print = get_formatted_predefined("Broker", STYLE["header1"]) + "\n"
str_to_print += get_formatted_predefined("Asset amounts", STYLE["header2"]) + "\n"
balances = {}
for asset in self._assets.values():
balances[asset.name] = asset.balance
str_to_print += get_formatted_from_dict(balances) + "\n"
str_to_print += get_formatted_predefined("Markets", STYLE["header2"]) + "\n"
for market in self._markets.values():
str_to_print += market.formatted_str() + "\n"
return str_to_print | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/broker/broker.py | broker.py |
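A hedged usage sketch (not part of the package) of basic balance management with the Broker above; TokenInfo fields follow the docstring example TokenInfo(name='usdc', decimal=6):
# hypothetical usage sketch -- tokens and amounts are made up
usdc = TokenInfo(name="usdc", decimal=6)
eth = TokenInfo(name="eth", decimal=18)
broker = Broker()
broker.set_balance(usdc, 10000)
broker.add_to_balance(eth, 5)
broker.subtract_from_balance(usdc, 2500)
print(broker.get_token_balance(usdc))           # -> 7500
print(broker.get_token_balance_with_unit(eth))  # balance with the token name as unit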
import logging
from datetime import datetime
from decimal import Decimal
from typing import Dict
import pandas as pd
from ._typing import BaseAction, MarketBalance, MarketStatus, MarketInfo
from .._typing import DECIMAL_0, DemeterError
DEFAULT_DATA_PATH = "./data"
class Market:
"""
note: only getter properties are allowed in this base class
"""
def __init__(self,
market_info: MarketInfo,
data: pd.DataFrame = None,
data_path=DEFAULT_DATA_PATH):
"""
init Market
:param market_info: uni_market
:param data: None or dataframe data
:param data_path: default ./data dir
"""
self._data: pd.DataFrame = data
self._market_info: MarketInfo = market_info
self.broker = None
self._record_action_callback = lambda x: x # default value, will be set by broker
self.data_path: str = data_path
self.logger = logging.getLogger(__name__)
self._market_status = MarketStatus(None)
def __str__(self):
return f"{self._market_info.name}:{type(self).__name__}"
@property
def market_info(self) -> MarketInfo:
return self._market_info
@property
def data(self):
"""
data got from uniswap pool
:return:
:rtype:
"""
return self._data
@data.setter
def data(self, value):
if isinstance(value, pd.DataFrame):
self._data = value
else:
raise ValueError()
def record_action(self, action: BaseAction):
if self._record_action_callback is not None:
self._record_action_callback(action)
# region for subclass to override
def check_asset(self):
pass
def update(self):
"""
update various statuses in the market, e.g. liquidity fees of uniswap
:return:
:rtype:
"""
pass
@property
def market_status(self):
return self._market_status
def set_market_status(self, timestamp: datetime, data: pd.Series | MarketStatus):
"""
set up market status, such as liquidity, price
:param timestamp: current timestamp
:type timestamp: datetime
:param data: market status
:type data: pd.Series | MarketStatus
"""
if isinstance(data, MarketStatus):
self._market_status = data
else:
self._market_status = MarketStatus(timestamp)
def get_market_balance(self, prices: pd.Series | Dict[str, Decimal]) -> MarketBalance:
"""
get market asset balance
:param prices: current price of each token
:type prices: pd.Series | Dict[str, Decimal]
:return:
:rtype:
"""
return MarketBalance(DECIMAL_0)
def check_before_test(self):
"""
do some check for this market before back test start
:return:
:rtype:
"""
if not isinstance(self.data, pd.DataFrame):
raise DemeterError("data must be type of data frame")
if not isinstance(self.data.index, pd.core.indexes.datetimes.DatetimeIndex):
raise DemeterError("date index must be datetime")
def formatted_str(self):
return ""
# endregion | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/broker/market.py | market.py |
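A minimal subclass sketch (not part of the package) showing the hooks a concrete market overrides; the market name and fixed net value are made up, and real markets such as UniLpMarket implement much more:
# hypothetical sketch
class DummyMarket(Market):
    def get_market_balance(self, prices):
        return MarketBalance(Decimal(100))  # made-up net value

    def update(self):
        pass  # e.g. accrue fees here

dummy = DummyMarket(MarketInfo("dummy_market"))
print(dummy)  # -> dummy_market:DummyMarket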
from dataclasses import dataclass, field
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Generic, TypeVar
from typing import NamedTuple, List, Dict
import pandas as pd
from .._typing import DemeterError, TokenInfo
class Rule(NamedTuple):
"""
Rule properties
"""
agg: str | None
fillna_method: str | None
fillna_value: int | None
@dataclass
class RowData:
"""
Row properties
"""
timestamp: datetime = None
row_id: int = None
class MarketInfo(NamedTuple):
"""
MarketInfo properties
"""
name: str # uni_market
class Asset(object):
"""
Wallet of broker, manage balance of an asset.
It prevents spending more than the available balance.
"""
def __init__(self, token: TokenInfo, init_amount=Decimal(0)):
"""
initialization of Asset
:param token: token info
:param init_amount: initialization amount, default 0
"""
self.token_info = token
self.name = token.name
self.decimal = token.decimal
self.balance = init_amount
def __str__(self):
"""
return Asset info
:return: Asset info with name && balance
"""
return f"{self.name}: {self.balance}"
def add(self, amount=Decimal(0)):
"""
add amount to balance
:param amount: amount to add
:type amount: Decimal
:return: entity itself
:rtype: BrokerAsset
"""
self.balance += amount
return self
def sub(self, amount=Decimal(0), allow_negative_balance=False):
"""
subtract amount from balance. if balance is not enough, an error will be raised.
:param amount: amount to subtract
:type amount: Decimal
:param allow_negative_balance: allow balance is negative
:type allow_negative_balance: bool
:return:
:rtype:
"""
base = self.balance if self.balance != Decimal(0) else Decimal(amount)
if base == Decimal(0): # amount and balance is both 0
return self
if allow_negative_balance:
self.balance -= amount
else:
# if the difference between amount and balance is within a tiny tolerance, deduct the whole balance.
# That's because the amount calculated by v3_core has some acceptable error.
if abs((self.balance - amount) / base) < 0.00001:
self.balance = Decimal(0)
elif self.balance - amount < Decimal(0):
raise DemeterError(f"insufficient balance, balance is {self.balance}{self.name}, "
f"but sub amount is {amount}{self.name}")
else:
self.balance -= amount
return self
def amount_in_wei(self):
"""
return balance in wei, i.e. balance * 10 ** decimal
:return: self.balance * 10 ** self.decimal
"""
return self.balance * Decimal(10 ** self.decimal)
class ActionTypeEnum(Enum):
"""
Trade types
* add_liquidity,
* remove_liquidity,
* buy,
* sell,
* collect_fee
"""
uni_lp_add_liquidity = "add_liquidity"
uni_lp_remove_liquidity = "remove_liquidity"
uni_lp_buy = "buy"
uni_lp_sell = "sell"
uni_lp_collect = "collect"
@dataclass
class BaseAction(object):
"""
Parent class of broker actions,
:param market: market
:type market: MarketInfo
:param action_type: action type
:type action_type: ActionTypeEnum
:param timestamp: action time
:type timestamp: datetime
"""
market: MarketInfo
action_type: ActionTypeEnum = field(default=False, init=False)
timestamp: datetime = field(default=False, init=False)
def get_output_str(self):
return str(self)
@dataclass
class MarketBalance:
"""
MarketBalance properties
:type net_value: Decimal
"""
net_value: Decimal
@dataclass
class AccountStatusCommon:
"""
AccountStatusCommon properties
:type timestamp: datetime
:type net_value: Decimal, default 0
"""
timestamp: datetime
net_value: Decimal = Decimal(0)
@dataclass
class MarketStatus:
"""
MarketStatus properties
:type timestamp: datetime
"""
timestamp: datetime | None
T = TypeVar('T')
class MarketDict(Generic[T]):
"""
Market Dict with get/set function
"""
def __init__(self):
self.data: Dict[MarketInfo, T] = {}
self._default: MarketInfo | None = None
def __getitem__(self, item) -> T:
return self.data[item]
def __setitem__(self, key: MarketInfo, value: T):
if len(self.data) == 0:
self._default = key
self.data[key] = value
setattr(self, key.name, value)
@property
def default(self) -> T:
"""
get default value in MarketDict
:return:
"""
return self.data[self._default]
def get_default_key(self):
"""
get default key
:return:
"""
return self._default
def set_default_key(self, value: MarketInfo):
"""
set default key
:param value:
:return:
"""
self._default = value
def items(self) -> (List[MarketInfo], List[T]):
"""
get dict items
:return:
"""
return self.data.items()
def keys(self) -> List[MarketInfo]:
"""
get dict keys
:return:
"""
return self.data.keys()
def values(self) -> List[T]:
"""
get dict values
:return:
"""
return self.data.values()
def __contains__(self, item):
"""
check if item in dict
:param item:
:return:
"""
return item in self.data
def __len__(self):
"""
len of dict
:return:
"""
return len(self.data)
class AssetDict(Generic[T]):
def __init__(self):
"""
init AssetDict
"""
self.data: Dict[TokenInfo, T] = {}
def __getitem__(self, item) -> T:
"""
get item magic method
:param item:
:return:
"""
return self.data[item]
def __setitem__(self, key: TokenInfo, value: T):
"""
set item magic method
:param key:
:param value:
:return:
"""
self.data[key] = value
setattr(self, key.name, value)
def items(self) -> (List[TokenInfo], List[T]):
"""
get items from dict
:return:
"""
return self.data.items()
def keys(self) -> List[TokenInfo]:
"""
get keys from dict
:return:
"""
return self.data.keys()
def values(self) -> List[T]:
"""
get values from dict
:return:
"""
return self.data.values()
def __contains__(self, item):
"""
check if item in dict
:param item:
:return:
"""
return item in self.data
def __len__(self):
"""
length of dict
:return:
"""
return len(self.data)
@dataclass
class AccountStatus(AccountStatusCommon):
"""
Account Status
:param asset_balances: balance of asset
:param market_status:
"""
asset_balances: AssetDict[Decimal] = field(default_factory=AssetDict)
market_status: MarketDict[MarketBalance] = field(default_factory=MarketDict)
def to_array(self) -> List:
"""
market_status value to list
:return:
"""
result = [self.net_value]
for balance in self.asset_balances.values():
result.append(balance)
for market, status in self.market_status.items():
for k, v in vars(status).items():
result.append(v)
return result
def get_names(self) -> List:
"""
get market_status market name
:return:
"""
result = ["net_value"]
for asset in self.asset_balances.keys():
result.append(asset.name)
for market, status in self.market_status.items():
base_name = market.name
for k, v in vars(status).items():
result.append(f"{base_name}_{k}")
return result
@staticmethod
def to_dataframe(status_list: []) -> pd.DataFrame:
"""
status list convert to dataframe
:param status_list:
:return:
"""
index = [i.timestamp for i in status_list]
if len(index) > 0:
return pd.DataFrame(columns=status_list[0].get_names(),
index=index,
data=map(lambda d: d.to_array(), status_list))
else:
return pd.DataFrame() | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/broker/_typing.py | _typing.py |
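A small illustrative sketch (not part of the package) of the Asset arithmetic and the MarketDict default-key behaviour defined above; the token and market names are made up:
# hypothetical usage sketch
usdc = TokenInfo(name="usdc", decimal=6)
asset = Asset(usdc, Decimal(100))
asset.add(Decimal(50)).sub(Decimal(30))
print(asset)                  # -> usdc: 120
print(asset.amount_in_wei())  # -> 120 * 10 ** 6

market_a, market_b = MarketInfo("uni_market"), MarketInfo("other_market")
markets: MarketDict[int] = MarketDict()
markets[market_a] = 1  # the first key becomes the default key
markets[market_b] = 2
print(markets.default, markets.uni_market)  # -> 1 1 (values are also exposed as attributes)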
from typing import Dict, List
import pandas as pd
from .trigger import Trigger
from .. import Broker, MarketDict, AccountStatus, AssetDict, Asset
from .._typing import DemeterError
from ..broker import MarketInfo, RowData, BaseAction, Market
class Strategy(object):
"""
strategy parent class, all user strategy should inherit this class
"""
def __init__(self):
self.broker: Broker = None
self.data: MarketDict[pd.DataFrame] = MarketDict()
self.markets: MarketDict[Market] = MarketDict()
self.number_format = ".8g"
self.prices: pd.DataFrame = None
self.triggers: [Trigger] = []
self.account_status: List[AccountStatus] = []
self.assets: AssetDict[Asset] = AssetDict()
self.actions: List[BaseAction] = []
def initialize(self):
"""
initialize your strategy, this will be called before self.on_bar()
"""
pass
def before_bar(self, row_data: MarketDict[RowData]):
"""
called before triggers on each row; at this time, fees are not updated yet. You can add indicators or actions here.
:param row_data: row data, including columns loaded from data, converted data (price, volume, timestamp, index) and indicators (such as ma)
:type row_data: Union[{MarketInfo:RowData}, pd.Series]
"""
pass
def on_bar(self, row_data: MarketDict[RowData]):
"""
called after triggers on each row; at this time, fees and account status are not updated yet. You can add actions here.
:param row_data: row data, including columns loaded from data, converted data (price, volume, timestamp, index) and indicators (such as ma)
:type row_data: Union[{MarketInfo:RowData}, pd.Series]
"""
pass
def after_bar(self, row_data: MarketDict[RowData]):
"""
called after fees and account status are updated on each row. You can add statistic logic here.
:param row_data: row data, including columns loaded from data, converted data (price, volume, timestamp, index) and indicators (such as ma)
:type row_data: Union[{MarketInfo:RowData}, pd.Series]
"""
pass
def finalize(self):
"""
this will run after all the data processed.
"""
pass
def notify(self, action: BaseAction):
"""
notify if non-basic action happens
:param action: action
:type action: BaseAction
"""
pass
def _add_column(self, market: MarketInfo | Market, name: str, line: pd.Series):
"""
add a column to data
:param name: column name, sma
:type name: str
:param market: market1
:type market: MarketInfo
:param line: data,
2022-08-20 00:00:00 NaN
2022-08-20 00:01:00 NaN
2022-08-20 00:02:00 NaN
2022-08-20 00:03:00 NaN
2022-08-20 00:04:00 NaN
...
2022-08-20 23:55:00 1568.069688
2022-08-20 23:56:00 1568.036998
2022-08-20 23:57:00 1568.004837
2022-08-20 23:58:00 1567.990103
2022-08-20 23:59:00 1567.975368
Freq: T, Name: price, Length: 1440, dtype: float64
:type line: pd.Series
"""
if not isinstance(line.index, pd.core.indexes.datetimes.DatetimeIndex):
raise DemeterError("date index must be datetime")
if isinstance(market, MarketInfo):
self.broker.markets[market].data[name] = line
elif isinstance(market, Market):
market.data[name] = line
else:
raise DemeterError(f"{market} is not a valid market") | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/strategy/strategy.py | strategy.py |
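A minimal user-strategy sketch (not part of the package); the "price" column and the 60-minute window are assumptions about the market data, not guarantees of the library:
# hypothetical sketch
class MyStrategy(Strategy):
    def initialize(self):
        market = self.markets.default
        # assumes the market data has a "price" column with a datetime index
        self._add_column(market, "price_ma", market.data["price"].rolling(60).mean())

    def on_bar(self, row_data):
        pass  # place trading logic here, e.g. rebalance when price crosses price_ma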
from dataclasses import dataclass
from datetime import datetime, timedelta
import pandas as pd
from .. import MarketDict
from .._typing import DemeterError
from ..broker import RowData
def to_minute(time: datetime) -> datetime:
"""
truncate a datetime to minute precision (seconds set to zero)
:param time:
:return:
"""
return datetime(time.year, time.month, time.day, time.hour, time.minute)
"""
Note: in current version, all the market data should have the same index, which means the same timestamp range and
interval, so we choose timestamp in default market to trigger actions.
"""
class Trigger:
def __init__(self, do, *args, **kwargs):
self._do = do if do is not None else self.do_nothing
self.kwargs = kwargs
self.args = args
def when(self, row_data: MarketDict[RowData]) -> bool:
"""
whether this row of data should trigger the action
:param row_data: data in row
:return:
"""
return False
def do_nothing(self, row_data: MarketDict[RowData], *args, **kwargs):
pass
def do(self, row_data: MarketDict[RowData]):
"""
operation to perform with the row data
:param row_data:
:return:
"""
return self._do(row_data, *self.args, **self.kwargs)
class AtTimeTrigger(Trigger):
"""
trigger action at a specific time
"""
def __init__(self, time: datetime, do, *args, **kwargs):
self._time = to_minute(time)
super().__init__(do, *args, **kwargs)
def when(self, row_data: MarketDict[RowData]) -> bool:
return row_data.default.timestamp == self._time
class AtTimesTrigger(Trigger):
"""
trigger action at some specific time
"""
def __init__(self, time: [datetime], do, *args, **kwargs):
self._time = [to_minute(t) for t in time]
super().__init__(do, *args, **kwargs)
def when(self, row_data: MarketDict[RowData]) -> bool:
return row_data.default.timestamp in self._time
@dataclass
class TimeRange:
start: datetime
end: datetime
class TimeRangeTrigger(Trigger):
"""
trigger action at a time range
"""
def __init__(self, time_range: TimeRange, do, *args, **kwargs):
self._time_range = TimeRange(to_minute(time_range.start), to_minute(time_range.end))
super().__init__(do, *args, **kwargs)
def when(self, row_data: MarketDict[RowData]) -> bool:
return self._time_range.start <= row_data.default.timestamp < self._time_range.end
class TimeRangesTrigger(Trigger):
"""
trigger action at some time range
"""
def __init__(self, time_range: [TimeRange], do, *args, **kwargs):
self._time_range: [TimeRange] = [TimeRange(to_minute(t.start), to_minute(t.end)) for t in time_range]
super().__init__(do, *args, **kwargs)
def when(self, row_data: MarketDict[RowData]) -> bool:
for r in self._time_range:
if r.start <= row_data.default.timestamp < r.end:
return True
return False
def check_time_delta(delta: timedelta):
if delta.total_seconds() % 60 != 0:
raise DemeterError("min time span is 1 minute")
class PeriodTrigger(Trigger):
"""
trigger period action
"""
def __init__(self, time_delta: timedelta, do, trigger_immediately=False, *args, **kwargs):
self._next_match = None
self._delta = time_delta
self._trigger_immediately = trigger_immediately
check_time_delta(time_delta)
super().__init__(do, *args, **kwargs)
def reset(self):
self._next_match = None
def when(self, row_data: MarketDict[RowData]) -> bool:
if self._next_match is None:
self._next_match = row_data.default.timestamp + self._delta
return self._trigger_immediately
if self._next_match == row_data.default.timestamp:
self._next_match = self._next_match + self._delta
return True
return False
class PeriodsTrigger(Trigger):
"""
trigger some period actions
"""
def __init__(self, time_delta: [timedelta], do, trigger_immediately=False, *args, **kwargs):
self._next_matches = [None for _ in time_delta]
self._deltas = time_delta
self._trigger_immediately = trigger_immediately
for td in time_delta:
check_time_delta(td)
super().__init__(do, *args, **kwargs)
def reset(self):
self._next_matches = [None for _ in self._deltas]
def when(self, row_data: MarketDict[RowData]) -> bool:
if self._next_matches[0] is None:
self._next_matches = [row_data.default.timestamp + d for d in self._deltas]
return self._trigger_immediately
for i in range(len(self._deltas)):
if self._next_matches[i] == row_data.default.timestamp:
self._next_matches[i] = self._next_matches[i] + self._deltas[i]
return True
return False | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/strategy/trigger.py | trigger.py |
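A hedged usage sketch (not part of the package) of the trigger API above; the callback and times are made up, and the wiring of triggers into a Strategy/back test loop is assumed:
# hypothetical usage sketch
from datetime import datetime, timedelta

def rebalance(row_data, ratio=0.5):
    print("rebalance at", row_data.default.timestamp, "ratio", ratio)

triggers = [
    AtTimeTrigger(datetime(2022, 8, 20, 12, 0), rebalance, ratio=0.3),       # fire once at 12:00
    PeriodTrigger(timedelta(hours=6), rebalance, trigger_immediately=True),  # fire every 6 hours
]
# inside a back test loop, each trigger is checked per row:
#     if trigger.when(row_data): trigger.do(row_data)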
from typing import Dict, List
import pandas as pd
from .._typing import UnitDecimal, DemeterError, EvaluatorEnum
from ..broker import AccountStatus, AccountStatusCommon
from .math_helper import max_draw_down_fast, annualized_returns, get_benchmark_returns
class Evaluator(object):
"""
calculate evaluator indicator for strategy.
"""
def __init__(self, init_status: AccountStatus, data: pd.DataFrame | AccountStatusCommon, prices: pd.DataFrame):
"""
init Evaluator
:param init_status:
:param data:
:param prices:
"""
self.init_status: AccountStatus = init_status
self.end_status: AccountStatusCommon = data.iloc[-1]
self.prices: pd.DataFrame = prices
self.data: pd.DataFrame = data
if len(data) < 2:
raise DemeterError("not enough data")
self.time_span_in_day = len(data.index) * (data.index[1] - data.index[0]).seconds / (60 * 60 * 24)
self._result = None
def run(self, enables: List[EvaluatorEnum]):
"""
run evaluator
:param enables:
:return: result_dict
"""
if EvaluatorEnum.ALL in enables:
enables = [x for x in EvaluatorEnum]
enables = filter(lambda x: x.value > 0, enables)
result_dict: Dict[EvaluatorEnum, UnitDecimal] = {}
for request in enables:
match request:
case EvaluatorEnum.ANNUALIZED_RETURNS:
result = UnitDecimal(annualized_returns(self.init_status.net_value,
self.end_status.net_value,
self.time_span_in_day), "")
case EvaluatorEnum.BENCHMARK_RETURNS:
result = UnitDecimal(get_benchmark_returns(self.init_status.net_value,
self.prices.iloc[0],
self.prices.iloc[-1],
self.time_span_in_day), "")
case EvaluatorEnum.MAX_DRAW_DOWN:
result = UnitDecimal(max_draw_down_fast(self.data.net_value), "")
case _:
raise DemeterError(f"{request} has not been implemented")
result_dict[request] = result
self._result = result_dict
return result_dict
@property
def result(self) -> Dict[EvaluatorEnum, UnitDecimal]:
"""
return Evaluator._result property
:return:
"""
return self._result
def __str__(self):
"""
Evaluator print function
:return:
"""
str_array = []
for k, v in self._result.items():
str_array.append(f"{k.name}:{v}")
return "; ".join(str_array) | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/core/evaluating_indicator.py | evaluating_indicator.py |
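A hedged usage sketch (not part of the package); the Evaluator is normally driven by the Actuator after a back test, so the actuator object below is assumed to hold finished results:
# hypothetical usage sketch -- assumes a finished back test in `actuator`
account_df = actuator.get_account_status_dataframe()
evaluator = Evaluator(actuator.account_status[0], account_df, actuator.token_prices)
evaluator.run([EvaluatorEnum.MAX_DRAW_DOWN, EvaluatorEnum.ANNUALIZED_RETURNS])
print(evaluator)  # e.g. MAX_DRAW_DOWN:...; ANNUALIZED_RETURNS:...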
from decimal import Decimal
import pandas as pd
def annualized_returns(init_value, final_value, timespan_in_day):
"""
annualized return rate over a time span given in days
:param init_value:
:param final_value:
:param timespan_in_day:
:return:
"""
return (final_value / init_value) ** Decimal(365 / timespan_in_day) - 1
def get_benchmark_returns(init_value: Decimal,
init_price: pd.Series, final_price: pd.Series, timespan_in_day: Decimal):
"""
Annualized benchmark return rate
algorithm: swap token balances to 1:1 value, and hold those positions to the end.
:return:
"""
split_value = init_value / len(init_price)
amounts = []
for price in init_price:
amounts.append(split_value / price)
final_value = 0
i = 0
for price in final_price:
final_value += amounts[i] * price
i += 1
return (final_value / init_value) ** Decimal(365 / timespan_in_day) - 1
def __get_benchmark_asset(net_value, price):
"""
get benchmark of asset
:param net_value: Decimal or float
:param price:
:return:
"""
base_amount = net_value / 2
quote_amount = (net_value - base_amount) / price
return base_amount, quote_amount
def max_draw_down(value: pd.Series):
"""
get max draw down
:param value: value to calculate
:type value: pd.Series
:return:
:rtype:
"""
value.index = range(len(value.index)) # reset index for faster access
result = 0
for index, row in value.items(): # iteritems() was removed in newer pandas versions
current_max = value[index:].apply(lambda nv: 1 - nv / row).max()
if current_max > result:
result = current_max
return result
def max_draw_down_fast(value: pd.Series):
"""
get max draw down in a fast algorithm.
:param value: value to calculate
:type value: pd.Series
:return:
:rtype:
"""
max_value, idx_h, idx_l = _withdraw_with_high_low(value.to_list())
return (value.iloc[idx_h] - value.iloc[idx_l]) / value.iloc[idx_h]
def _withdraw_with_high_low(arr: list):
"""
from : https://blog.csdn.net/Spade_/article/details/112341428
"""
# given an array, return the max drawdown and the indexes of the corresponding high and low points
_dp = 0 # _dp holds the drawdown measured at point i
i_high = 0 # index of the highest point among 0 ~ i - 1 while iterating (an index, not a value)
# global max drawdown and the indexes of its high and low points
g_withdraw, g_high, g_low = float('-inf'), -1, -1
for i in range(1, len(arr)):
if arr[i_high] < arr[i - 1]: # if the previous point is higher than the recorded high
i_high = i - 1 # update the index of the highest point among 0 ~ i - 1
_dp = arr[i_high] - arr[i] # drawdown at point i
if _dp > g_withdraw: # found a new max drawdown, update all three values
g_withdraw = _dp
g_high = i_high
g_low = i
return g_withdraw, g_high, g_low | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/core/math_helper.py | math_helper.py |
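A worked example (not part of the package) of the drawdown and return helpers above on a made-up net-value series:
# hypothetical usage sketch
from decimal import Decimal
import pandas as pd

nv = pd.Series([100, 120, 90, 110, 80, 130])
# the peak 120 followed by the trough 80 gives (120 - 80) / 120 = 1/3
print(max_draw_down_fast(nv))  # -> 0.333...
print(annualized_returns(Decimal(100), Decimal(130), 30))  # a 30% return over 30 days, annualized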
import logging
import os
import pickle
import time
from datetime import datetime
from typing import List, Dict
import orjson
import pandas as pd
from tqdm import tqdm # process bar
from .evaluating_indicator import Evaluator
from .. import Broker, RowData, Asset
from .._typing import DemeterError, EvaluatorEnum, UnitDecimal, PositionInfo
from ..broker import BaseAction, AccountStatus, MarketInfo, MarketDict
from ..uniswap import UniLpMarket
from ..strategy import Strategy
from ..utils import get_formatted_predefined, STYLE
class Actuator(object):
"""
Core component of a back test. Manages the resources in a test, including broker/strategy/data/indicator.
"""
def __init__(self, allow_negative_balance=False):
"""
init Actuator
:param allow_negative_balance: whether balances are allowed to go below 0
"""
# all the actions during the test(buy/sell/add liquidity)
self._action_list: List[BaseAction] = []
self._currents = {
"actions": [],
"timestamp": None
}
# broker status in every bar, use array for performance
self._account_status_list: List[AccountStatus] = []
self._account_status_df: pd.DataFrame = None
# broker
self._broker: Broker = Broker(allow_negative_balance, self._record_action_list)
# strategy
self._strategy: Strategy = Strategy()
self._token_prices: pd.DataFrame = None
# path of source data, which is saved by downloader
# evaluating indicator calculator
self._evaluator: Evaluator = None
self._enabled_evaluator: [] = []
# logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
self.logger = logging.getLogger(__name__)
# internal var
self.__backtest_finished = False
def _record_action_list(self, action):
"""
record action list
:param action: action
:return: None
"""
action.timestamp = self._currents["timestamp"]
action.set_type()
self._action_list.append(action)
self._currents["actions"].append(action)
# region property
@property
def account_status(self) -> List[AccountStatus]:
"""
account status of all markets
"""
return self._account_status_list
@property
def token_prices(self):
"""
price of all token
:return: None
"""
return self._token_prices
@property
def final_status(self) -> AccountStatus:
"""
Get status after back test finish.
If test has not run, an error will be raised.
:return: Final state of broker
:rtype: AccountStatus
"""
if self.__backtest_finished:
return self._account_status_list[len(self._account_status_list) - 1]
else:
raise DemeterError("please run strategy first")
def reset(self):
"""
reset all the status variables
"""
self._evaluator: Evaluator = None
self._enabled_evaluator: [] = []
self._action_list = []
self._currents = {"actions": [], "timestamp": None}
self._account_status_list = []
self.__backtest_finished = False
self._account_status_df: pd.DataFrame = None
@property
def actions(self) -> [BaseAction]:
"""
all the actions during the test(buy/sell/add liquidity)
:return: action list
:rtype: [BaseAction]
"""
return self._action_list
@property
def evaluating_indicator(self) -> Dict[EvaluatorEnum, UnitDecimal]:
"""
evaluating indicator result
:return: evaluating indicator
:rtype: EvaluatingIndicator
"""
return self._evaluator.result if self._evaluator is not None else None
@property
def broker(self) -> Broker:
"""
        Broker manages assets (balances and positions) during back testing, and provides operations on positions.
"""
return self._broker
@property
def strategy(self) -> Strategy:
"""
strategy,
:return: strategy
:rtype: Strategy
"""
return self._strategy
@strategy.setter
def strategy(self, value):
"""
set strategy
:param value: strategy
:type value: Strategy
"""
if isinstance(value, Strategy):
self._strategy = value
else:
raise ValueError()
@property
def number_format(self) -> str:
"""
number format for console output, eg: ".8g", ".5f"
:return: number format
:rtype: str
"""
return self._number_format
@number_format.setter
def number_format(self, value: str):
"""
number format for console output, eg: ".8g", ".5f",
follow the document here: https://python-reference.readthedocs.io/en/latest/docs/functions/format.html
:param value: number format,
:type value:str
"""
self._number_format = value
# endregion
def get_account_status_dataframe(self) -> pd.DataFrame:
"""
get account status dataframe
:return: dataframe
"""
return AccountStatus.to_dataframe(self._account_status_list)
def set_assets(self, assets: List[Asset]):
"""
set initial balance for token
:param assets: assets to set.
:type assets: [Asset]
"""
for asset in assets:
self._broker.set_balance(asset.token_info, asset.balance)
def set_price(self, prices: pd.DataFrame | pd.Series):
"""
set price
:param prices: dataframe or series
eth usdc
2022-08-20 00:00:00 1610.553895752868641174609110 1
2022-08-20 00:01:00 1612.487623747744872677867817 1
2022-08-20 00:02:00 1615.715664560742874527210287 1
2022-08-20 00:03:00 1615.715664560742874527210287 1
2022-08-20 00:04:00 1615.554109149827891738036484 1
... ... ...
2022-08-20 23:55:00 1577.086574012079067849553855 1
2022-08-20 23:56:00 1576.928881123966671182435611 1
2022-08-20 23:57:00 1576.928881123966671182435611 1
2022-08-20 23:58:00 1576.613542649301384412539259 1
2022-08-20 23:59:00 1576.613542649301384412539259 1
[1440 rows x 2 columns]
:return: None
"""
if isinstance(prices, pd.DataFrame):
if self._token_prices is None:
self._token_prices = prices
else:
self._token_prices = pd.concat([self._token_prices, prices])
else:
if self._token_prices is None:
self._token_prices = pd.DataFrame(data=prices, index=prices.index)
else:
self._token_prices[prices.name] = prices
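    # A hedged usage sketch (kept as a comment): build a price dataframe indexed by timestamp and
    # feed it to set_price. The token names, values and the actuator instance name are hypothetical.
    #
    # prices = pd.DataFrame(index=pd.date_range("2022-8-20 0:0:0", periods=1440, freq="1min"),
    #                       data={"eth": 1600, "usdc": 1})
    # actuator.set_price(prices)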
def notify(self, strategy: Strategy, actions: List[BaseAction]):
"""
notify user when new action happens.
:param strategy: Strategy
:type strategy: Strategy
:param actions: action list
:type actions: [BaseAction]
"""
if len(actions) < 1:
return
# last_time = datetime(1970, 1, 1)
for action in actions:
# if last_time != action.timestamp:
# print(f"\033[7;34m{action.timestamp} \033[0m")
# last_time = action.timestamp
strategy.notify(action)
def _check_backtest(self):
"""
check backtest result
:return:
"""
# ensure a market exist
if len(self._broker.markets) < 1:
raise DemeterError("No market assigned")
# ensure all token has price list.
if self._token_prices is None:
# if price is not set and market is uni_lp_market, get price from market automatically
for market in self.broker.markets.values():
if isinstance(market, UniLpMarket):
self.set_price(market.get_price_from_data())
if self._token_prices is None:
raise DemeterError("token prices is not set")
for token in self._broker.assets.keys(): # dict_keys([TokenInfo(name='usdc', decimal=6), TokenInfo(name='eth', decimal=18)])
if token.name not in self._token_prices:
raise DemeterError(f"Price of {token.name} has not set yet")
[market.check_before_test() for market in self._broker.markets.values()]
data_length = [] # [1440]
for market in self._broker.markets.values():
data_length.append(len(market.data.index))
market.check_asset() # check each market, including assets
# ensure data length same
        if data_length.count(data_length[0]) != len(data_length):
            raise DemeterError("data length among markets are not same")
if len(self._token_prices.index) != data_length[0]:
raise DemeterError("price length and data length are not same")
length = data_length[0]
# ensure data interval same
data_interval = []
if length > 1:
for market in self._broker.markets.values():
data_interval.append(market.data.index[1] - market.data.index[0])
            if data_interval.count(data_interval[0]) != len(data_interval):
                raise DemeterError("data interval among markets are not same")
price_interval = self._token_prices.index[1] - self._token_prices.index[0]
if price_interval != data_interval[0]:
raise DemeterError("price list interval and data interval are not same")
def __get_market_row_dict(self, index, row_id) -> MarketDict:
"""
get market row dict info
:param index:
:param row_id:
:return: Market dict
"""
market_dict = MarketDict()
for market_key, market in self._broker.markets.items():
market_row = RowData(index.to_pydatetime(), row_id)
df_row = market.data.loc[index]
for column_name in df_row.index:
setattr(market_row, column_name, df_row[column_name])
market_dict[market_key] = market_row
market_dict.set_default_key(self.broker.markets.get_default_key())
return market_dict
def __set_row_to_markets(self, timestamp, market_row_dict: dict):
"""
set markets row data
:param timestamp:
:param market_row_dict:
:return:
"""
for market_key, market_row_data in market_row_dict.items():
self._broker.markets[market_key].set_market_status(timestamp, market_row_data)
def run(self,
evaluator: List[EvaluatorEnum] = [],
output: bool = True):
"""
start back test, the whole process including:
* reset actuator
* initialize strategy (set object to strategy, then run strategy.initialize())
* process each bar in data
* prepare data in each row
* run strategy.on_bar()
* calculate fee earned
* get latest account status
* notify actions
* run evaluator indicator
* run strategy.finalize()
        :param evaluator: enabled evaluating indicators; if empty, no evaluating indicator will be calculated
        :type evaluator: List[EvaluatorEnum]
        :param output: print the back test result to console when finished
        :type output: bool
"""
run_begin_time = time.time() # 1681718968.267463
self.reset()
self._enabled_evaluator = evaluator
self._check_backtest()
index_array: pd.DatetimeIndex = list(self._broker.markets.values())[0].data.index
self.logger.info("init strategy...")
# set initial status for strategy, so user can run some calculation in initial function.
init_row_data = self.__get_market_row_dict(index_array[0], 0)
self.__set_row_to_markets(index_array[0], init_row_data)
# keep initial balance for evaluating
init_account_status = self._broker.get_account_status(self._token_prices.iloc[0])
self.init_strategy()
row_id = 0
data_length = len(index_array)
self.logger.info("start main loop...")
with tqdm(total=data_length, ncols=150) as pbar:
for timestamp_index in index_array:
# prepare data of a row
market_row_dict = self.__get_market_row_dict(timestamp_index, row_id)
row_id += 1
self.__set_row_to_markets(timestamp_index, market_row_dict)
# execute strategy, and some calculate
self._currents["timestamp"] = timestamp_index.to_pydatetime()
self._strategy.before_bar(market_row_dict)
if self._strategy.triggers:
for trigger in self._strategy.triggers:
if trigger.when(market_row_dict):
trigger.do(market_row_dict)
self._strategy.on_bar(market_row_dict)
# update broker status, eg: re-calculate fee
# and read the latest status from broker
for market in self._broker.markets.values():
market.update()
self._strategy.after_bar(market_row_dict)
self._account_status_list.append(
self._broker.get_account_status(self._token_prices.loc[timestamp_index],
timestamp_index))
# notify actions in current loop
self.notify(self.strategy, self._currents["actions"])
self._currents["actions"] = []
# move forward for process bar and index
pbar.update()
self.logger.info("main loop finished")
self._account_status_df: pd.DataFrame = self.get_account_status_dataframe()
if len(self._enabled_evaluator) > 0:
self.logger.info("Start calculate evaluating indicator...")
self._evaluator = Evaluator(init_account_status, self._account_status_df, self._token_prices)
self._evaluator.run(self._enabled_evaluator)
self.logger.info("Evaluating indicator has finished it's job.")
self._strategy.finalize()
self.__backtest_finished = True
if output:
self.output()
self.logger.info(f"Backtesting finished, execute time {time.time() - run_begin_time}s")
def output(self):
"""
output back test result to console
"""
if not self.__backtest_finished:
raise DemeterError("Please run strategy first")
self.logger.info(self.broker.formatted_str())
self.logger.info(get_formatted_predefined("Account Status", STYLE["header1"]))
self.logger.info(self._account_status_df)
if len(self._enabled_evaluator) > 0:
self.logger.info("Evaluating indicator")
self.logger.info(self._evaluator)
def save_result(self, path: str, account=True, actions=True) -> List[str]:
"""
save back test result
:param path: path to save
:type path: str
:param account: Save account status or not
:type account: bool
:param actions: Save actions or not
:type actions: bool
:return:
:rtype:
"""
if not self.__backtest_finished:
raise DemeterError("Please run strategy first")
file_name_head = "backtest-" + datetime.now().strftime('%Y%m%d-%H%M%S')
if not os.path.exists(path):
os.mkdir(path)
file_list = []
if account:
file_name = os.path.join(path, file_name_head + ".account.csv")
self._account_status_df.to_csv(file_name)
file_list.append(file_name)
if actions:
# save pkl file to load again
pkl_name = os.path.join(path, file_name_head + ".action.pkl")
with open(pkl_name, "wb") as outfile1:
pickle.dump(self._action_list, outfile1)
# save json to read
actions_json_str = orjson.dumps(self._action_list,
option=orjson.OPT_INDENT_2,
default=json_default)
json_name = os.path.join(path, file_name_head + ".action.json")
with open(json_name, "wb") as outfile:
outfile.write(actions_json_str)
file_list.append(json_name)
file_list.append(pkl_name)
self.logger.info("files have saved to", file_list)
return file_list
def init_strategy(self):
"""
initialize strategy, set property to strategy. and run strategy.initialize()
"""
if not isinstance(self._strategy, Strategy):
raise DemeterError("strategy must be inherit from Strategy")
self._strategy.broker = self._broker
self._strategy.markets = self._broker.markets
market_datas = MarketDict()
for k, v in self.broker.markets.items():
market_datas[k] = v.data
market_datas.set_default_key(self.broker.markets.get_default_key())
self._strategy.data = market_datas
self._strategy.prices = self._token_prices
self._strategy.account_status = self._account_status_list
self._strategy.actions = self._action_list
self._strategy.assets = self.broker.assets
self._strategy.get_account_status_dataframe = self.get_account_status_dataframe
for k, v in self.broker.markets.items():
setattr(self._strategy, k.name, v)
for k, v in self.broker.assets.items():
setattr(self._strategy, k.name, v)
self._strategy.initialize()
def __str__(self):
return f"Demeter Actuator (broker:{self._broker})\n"
def json_default(obj):
"""
format json data
:param obj:
:return:
"""
if isinstance(obj, UnitDecimal):
return obj.to_str()
elif isinstance(obj, MarketInfo):
return {"name": obj.name}
elif isinstance(obj, PositionInfo):
return {"lower_tick": obj.lower_tick,
"upper_tick": obj.upper_tick}
else:
raise TypeError | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/core/actuator.py | actuator.py |
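# A minimal end-to-end sketch of a back test with Actuator, kept as a comment so importing this
# module has no side effects. Exact import paths, constructor signatures and the market
# registration call may differ from the sketch below; it only illustrates the order of the calls
# defined in this module (set strategy/assets/prices, then run and save).
#
# from datetime import date
# from demeter import Actuator, Asset, TokenInfo, Strategy   # import paths are assumptions
#
# usdc = TokenInfo(name="usdc", decimal=6)
# eth = TokenInfo(name="eth", decimal=18)
# actuator = Actuator()
# # build a UniLpMarket for the usdc/eth pool, register it on actuator.broker and load its data
# market = ...
# actuator.strategy = Strategy()                     # usually a custom subclass of Strategy
# actuator.set_assets([Asset(usdc, 10000)])          # assumed Asset(token_info, balance) order
# actuator.set_price(market.get_price_from_data())
# actuator.run()
# actuator.save_result("./result")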
from enum import Enum
from typing import Dict
class ForColorEnum(Enum):
default = 0
black = 30
red = 31
green = 32
yellow = 33
blue = 34
purple = 35
cyan = 36
white = 37
class BackColorEnum(Enum):
default = 0
black = 40
red = 41
green = 42
yellow = 43
blue = 44
purple = 45
cyan = 46
white = 47
class ModeEnum(Enum):
normal = 0
bold = 1
underline = 4
blink = 5
invert = 7
hide = 8
DEFAULT_END = 0
STYLE = {
"header1": {
"mode": ModeEnum.invert,
"fore": ForColorEnum.red,
"back": BackColorEnum.default,
"width": 50
},
"header2": {
"mode": ModeEnum.invert,
"fore": ForColorEnum.purple,
"back": BackColorEnum.default,
"width": 30
},
"header3": {
"mode": ModeEnum.underline,
"fore": ForColorEnum.yellow,
"back": BackColorEnum.default,
"width": -1
},
"key": {
"mode": ModeEnum.normal,
"fore": ForColorEnum.blue,
"back": BackColorEnum.default,
"width": 10
},
"value": {
"mode": ModeEnum.normal,
"fore": ForColorEnum.default,
"back": BackColorEnum.default,
"width": 25
}
}
def get_formatted(string: str,
mode: ModeEnum = ModeEnum.normal,
fore: ForColorEnum = ForColorEnum.default,
back: BackColorEnum = BackColorEnum.default,
width=-1) -> str:
    mode = '{}'.format(mode.value if mode != ModeEnum.normal else '')
fore = '{}'.format(fore.value if fore != ForColorEnum.default else "")
back = '{}'.format(back.value if back != BackColorEnum.default else "")
style = ';'.join([s for s in [mode, fore, back] if s])
end = ""
if style != "":
style = """\033[{}m""".format(style)
end = """\033[0m"""
if width > 0:
string = '{}{:<{}}{}'.format(style, string, width, end)
else:
string = '{}{}{}'.format(style, string, end)
return string
def get_formatted_predefined(string: str, style):
return get_formatted(string, style["mode"], style["fore"], style["back"], style["width"])
def get_formatted_from_dict(values: Dict[str, str]) -> str:
str_array = []
for k, v in values.items():
str_array.append(
f"{get_formatted_predefined(k, STYLE['key'])}:{get_formatted_predefined(str(v), STYLE['value'])}")
return "".join(str_array)
# def TestColor():
#     print(get_formatted('normal display'))
#     print('')
#     print("test display modes")
#     print(get_formatted('bold', mode=ModeEnum.bold), )
#     print(get_formatted('underline', mode=ModeEnum.underline), )
#     print(get_formatted('blink', mode=ModeEnum.blink), )
#     print(get_formatted('invert', mode=ModeEnum.invert), )
#     print(get_formatted('hidden', mode=ModeEnum.hide))
#     print('')
#     print("test foreground colors")
#     print(get_formatted('black', fore=ForColorEnum.black), )
#     print(get_formatted('red', fore=ForColorEnum.red), )
#     print(get_formatted('green', fore=ForColorEnum.green), )
#     print(get_formatted('yellow', fore=ForColorEnum.yellow), )
#     print(get_formatted('blue', fore=ForColorEnum.blue), )
#     print(get_formatted('purple', fore=ForColorEnum.purple), )
#     print(get_formatted('cyan', fore=ForColorEnum.cyan), )
#     print(get_formatted('white', fore=ForColorEnum.white))
#     print('')
#     print("test background colors")
#     print(get_formatted('black', back=BackColorEnum.black), )
#     print(get_formatted('red', back=BackColorEnum.red), )
#     print(get_formatted('green', back=BackColorEnum.green), )
#     print(get_formatted('yellow', back=BackColorEnum.yellow), )
#     print(get_formatted('blue', back=BackColorEnum.blue), )
#     print(get_formatted('purple', back=BackColorEnum.purple), )
#     print(get_formatted('cyan', back=BackColorEnum.cyan), )
#     print(get_formatted('white', back=BackColorEnum.white))
#     print('')
#     print(get_formatted("combined", ModeEnum.invert, ForColorEnum.red))
#
#
# if __name__ == '__main__':
#     TestColor()
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Dict
import pandas as pd
from pandas import _typing as pd_typing
from ..broker import Rule, RowData
DEFAULT_AGG_METHOD = "first"
EMPTY_RULE = Rule(None, None, None)
@dataclass
class UniLPDataRaw:
"""
data types in csv file saved by download module
"""
timestamp: datetime = None
netAmount0: int = None
netAmount1: int = None
closeTick: int = None
openTick: int = None
lowestTick: int = None
highestTick: int = None
inAmount0: int = None
inAmount1: int = None
currentLiquidity: int = None
@dataclass
class UniLPData(RowData):
"""
data type used in back test, extended from UniLPDataRaw
"""
netAmount0: int = None
netAmount1: int = None
closeTick: int = None
openTick: int = None
lowestTick: int = None
highestTick: int = None
inAmount0: int = None
inAmount1: int = None
currentLiquidity: int = None
open: Decimal = None
price: Decimal = None
low: Decimal = None
high: Decimal = None
volume0: Decimal = None
volume1: Decimal = None
class LineTypeEnum(Enum):
"""
predefined column, used to define fillna method.
"""
timestamp = 1
netAmount0 = 2
netAmount1 = 3
closeTick = 4
openTick = 5
lowestTick = 6
highestTick = 7
inAmount0 = 8
inAmount1 = 9
currentLiquidity = 10
other = 100
LINE_RULES = {
LineTypeEnum.timestamp.name: EMPTY_RULE,
LineTypeEnum.netAmount0.name: Rule("sum", None, 0),
LineTypeEnum.netAmount1.name: Rule("sum", None, 0),
LineTypeEnum.closeTick.name: Rule("last", "ffill", None),
LineTypeEnum.openTick.name: Rule("first", "ffill", None),
LineTypeEnum.lowestTick.name: Rule("min", "ffill", None),
LineTypeEnum.highestTick.name: Rule("max", "ffill", None),
LineTypeEnum.inAmount0.name: Rule("sum", None, 0),
LineTypeEnum.inAmount1.name: Rule("sum", None, 0),
LineTypeEnum.currentLiquidity.name: Rule("sum", "ffill", None),
}
def get_line_rules_safe(key: str) -> Rule:
if key in LINE_RULES:
return LINE_RULES[key]
else:
return EMPTY_RULE
def resample(df: pd.DataFrame,
rule,
axis=0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
loffset=None,
base: int | None = None,
on=None,
level=None,
origin: str | pd_typing.TimestampConvertibleTypes = "start_day",
offset: pd_typing.TimedeltaConvertibleTypes | None = None,
agg: Dict[str, str] = None) -> pd.DataFrame:
"""
resample data
:param df: data in dataframe
:param rule: resample rule, see Dataframe.resample doc
:param axis: resample axis, see Dataframe.resample doc
:param closed: resample closed, see Dataframe.resample doc
:param label: resample label, see Dataframe.resample doc
:param convention: resample convention, see Dataframe.resample doc
:param kind: resample kind, see Dataframe.resample doc
:param loffset: resample loffset, see Dataframe.resample doc
:param base: resample base, see Dataframe.resample doc
:param on: resample on, see Dataframe.resample doc
:param level: resample level, see Dataframe.resample doc
:param origin: resample origin, see Dataframe.resample doc
:param offset: resample offset, see Dataframe.resample doc
:param agg: aggregate method
:return: aggregated dataframe
"""
agg = agg if agg else {}
resampler = df.resample(rule, axis, closed, label, convention, kind, loffset, base, on, level, origin, offset)
agg_dict = {}
for column_name in df.columns:
rule = get_line_rules_safe(column_name)
agg_method = rule.agg
if agg_method is None:
if column_name in agg:
agg_method = agg[column_name]
else:
agg_method = DEFAULT_AGG_METHOD
agg_dict[column_name] = agg_method
df_new = resampler.agg(agg_dict)
return df_new
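# A hedged usage sketch (kept as a comment): down-sample minute-level pool data to hourly bars.
# Predefined columns (netAmount0/1, ticks, inAmount0/1, currentLiquidity) follow LINE_RULES;
# any other column, e.g. a price column, falls back to the agg mapping or to DEFAULT_AGG_METHOD.
# minute_df is assumed to be indexed by timestamp, as produced by the downloader.
#
# hourly_df = resample(minute_df, "1H", agg={"price": "last"})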
def fillna(
df: pd.DataFrame,
value: object | pd_typing.ArrayLike | None = None,
method: pd_typing.FillnaOptions | None = None,
axis: pd_typing.Axis | None = None,
inplace: bool = False,
limit=None,
downcast=None) -> pd.DataFrame | None:
"""
fill empty item. param is the same to pandas.Series.fillna
if column name is predefined, method and value will be omitted, and data will be filled as predefined
"""
new_df = df.copy(False)
# fill close tick first, it will be used later.
if LineTypeEnum.closeTick.name in new_df.columns:
new_df[LineTypeEnum.closeTick.name] = \
new_df[LineTypeEnum.closeTick.name].fillna(value=None,
method=get_line_rules_safe(
LineTypeEnum.closeTick.name).fillna_method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast)
for column_name in new_df.columns:
if column_name == LineTypeEnum.closeTick.name:
continue
rule = get_line_rules_safe(column_name)
if not rule.fillna_method and rule.fillna_value is None:
new_df[column_name] = new_df[column_name].fillna(value, method, axis, inplace, limit, downcast)
else:
current_method = rule.fillna_method if rule.fillna_method else method
current_value = rule.fillna_value if rule.fillna_value is not None else value
# all tick related field will be filled with close_tick.
if column_name in [LineTypeEnum.openTick.name,
LineTypeEnum.highestTick.name,
LineTypeEnum.lowestTick.name] and LineTypeEnum.closeTick.name in new_df.columns:
current_method = None
current_value = new_df[LineTypeEnum.closeTick.name]
new_df[column_name] = new_df[column_name].fillna(value=current_value, method=current_method, axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast)
return new_df | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/data.py | data.py |
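# A hedged usage sketch (kept as a comment): fill the gaps of freshly loaded pool data.
# Predefined columns are filled according to LINE_RULES (tick columns filled from closeTick,
# amount columns filled with 0); any other column uses the value/method passed here.
#
# filled_df = fillna(raw_df, value=0)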
import math
from decimal import Decimal
from .liquitidy_math import get_sqrt_ratio_at_tick
def _x96_to_decimal(number: int):
"""
decimal divide 2 ** 96
:param number:
:return:
"""
return Decimal(number) / 2 ** 96
def decimal_to_x96(number: Decimal):
"""
decimal multiple 2 ** 96
:param number:
:return:
"""
return int(Decimal(number) * 2 ** 96)
def _x96_sqrt_to_decimal(sqrt_priceX96, token_decimal_diff=12):
"""
sqrt_priceX96 divide token decimal
:param sqrt_priceX96:
:param token_decimal_diff:
:return:
"""
price = _x96_to_decimal(sqrt_priceX96)
return (price ** 2) / 10 ** token_decimal_diff
## can round by spacing?
def sqrt_price_to_tick(sqrt_priceX96: int) -> int:
"""
convert sqrt_priceX96 to tick data
:param sqrt_priceX96:
:return:
"""
decimal_price = _x96_to_decimal(sqrt_priceX96)
return pool_price_to_tick(decimal_price)
def pool_price_to_tick(price_decimal: Decimal):
"""
pool price to tick data
:param price_decimal:
:return:
"""
return int(math.log(price_decimal, math.sqrt(1.0001)))
def tick_to_sqrtPriceX96(tick: int):
"""
convert tick data to sqrtPriceX96
:param tick:
:return:
"""
return get_sqrt_ratio_at_tick(tick)
def tick_to_quote_price(tick: int, token_0_decimal, token_1_decimal, is_token0_base: bool):
"""
tick data get quote price
:param tick: tick data
:param token_0_decimal: token0 decimal
:param token_1_decimal: token1 decimal
:param is_token0_base: base on token0
:return: quote price
"""
sqrt_price = get_sqrt_ratio_at_tick(tick)
decimal_price = _x96_to_decimal(sqrt_price) ** 2
pool_price = decimal_price * Decimal(10 ** (token_0_decimal - token_1_decimal))
return Decimal(1 / pool_price) if is_token0_base else pool_price
def quote_price_to_tick(based_price: Decimal, token_0_decimal: int, token_1_decimal: int, is_token_base) -> int:
"""
quote price to tick data
:param based_price: base price
:param token_0_decimal: token0 decimal
:param token_1_decimal: token1 decimal
:param is_token_base: base on token
:return: tick data
"""
# quote price->add decimal pool price->sqrt_price ->ticker
sqrt_price = quote_price_to_sqrt(based_price, token_0_decimal, token_1_decimal, is_token_base)
tick = sqrt_price_to_tick(sqrt_price)
return tick
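# A hedged sanity-check sketch (kept as a comment): converting a quote price to a tick and back
# should reproduce the price up to tick rounding. Decimals 6/18 correspond to a usdc/eth-like pair
# with token0 as base.
#
# tick = quote_price_to_tick(Decimal("1600"), 6, 18, True)
# price = tick_to_quote_price(tick, 6, 18, True)  # approximately 1600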
def quote_price_to_sqrt(based_price: Decimal, token_0_decimal: int, token_1_decimal: int, is_token_base) -> int:
"""
convert quote price to sqrt
:param based_price: price of base token
:param token_0_decimal: token 0 decimal
:param token_1_decimal: token 1 decimal
:param is_token_base: if is base token
:return: sqrt price
"""
# quote price->add decimal pool price->sqrt_price ->ticker
price = 1 / based_price if is_token_base else based_price
pool_price = price / Decimal(10 ** (token_0_decimal - token_1_decimal))
decimal_price = Decimal.sqrt(pool_price)
return decimal_to_x96(decimal_price)
def from_wei(token_amt: int, decimal: int) -> Decimal:
"""
    convert raw integer token amount to a human readable Decimal: token_amt / 10 ** decimal
    :param token_amt: raw token amount (wei-like integer)
    :param decimal: token decimals
    :return: token amount as Decimal
"""
return Decimal(int(token_amt)) / Decimal(10 ** decimal)
def get_delta_gamma(lower_price: float, upper_price: float, price: float,
liquidity: int, decimal0: int, decimal1: int, is_0_base: bool):
"""
get delta gamma
:param lower_price: lower price
:param upper_price: upper price
:param price: price
:param liquidity: liquidity
:param decimal0: decimal 0
:param decimal1: decimal 1
:param is_0_base: check if token 0 is base
:return:
"""
lower_price_sqrtX96 = quote_price_to_sqrt(Decimal(lower_price), decimal0, decimal1, is_0_base)
upper_price_sqrtX96 = quote_price_to_sqrt(Decimal(upper_price), decimal0, decimal1, is_0_base)
if lower_price_sqrtX96 > upper_price_sqrtX96:
(lower_price_sqrtX96, upper_price_sqrtX96) = (upper_price_sqrtX96, lower_price_sqrtX96)
return get_delta_gamma_sqrtX96(lower_price,
lower_price_sqrtX96,
upper_price,
upper_price_sqrtX96,
price,
liquidity,
decimal0, decimal1,
is_0_base)
def get_delta_gamma_sqrtX96(lower_price, sqrtA: int,
upper_price, sqrtB: int,
price,
liquidity: int,
d0: int,
d1: int,
is_0_base: bool):
"""
k = 2 ** 96
a0 = k * (10**(-d)) * Liquidity * (1/SqrtPrice - 1/upper_price_sqrtX96)
a1= Liquidity / k / 10**d * (SqrtPrice - lower_price_sqrtX96)
if 0 base:
SqrtPrice=k / (10 ** (d/2)) / (p**0.5)
net_value = a1 * p price <= lower, a0 is constant
a0 + a1 * p lower < price < upper
a0 price >= upper
a0 + a1 * p = liquidity * 10 ** (0.5 * d) / 10 ** d0 * price_float ** 0.5 - \
k / upper_sqrt * liquidity / 10 ** d0 + \
liquidity* price_float ** 0.5 / 10 ** d1 / 10 ** (0.5 * d) - \
lower_sqrt / k * price_float * liquidity / 10 ** d1
if 1 base
SqrtPrice = k * p**0.5 / (10 ** (d/2))
net_value = a0 * p price <= lower, a0 is constant
a0 * p + a1 lower < price < upper
a1 price >= upper, a1 is constant
a0 * p + a1 = liquidity * price_float ** 0.5 * 10 ** (0.5 * d) / 10 ** d0 - \
k / upper_sqrt * liquidity * price_float / 10 ** d0 + \
liquidity * price_float ** 0.5 / 10 ** d1 / 10 ** (0.5 * d) - \
lower_sqrt / k * liquidity / 10 ** d1
a0 + p * a1 = Liquidity / 10**(d/2) / p**(1/2) + Liquidity * p**(1.5) / 10 ** (1.5*d) -
Liquidity * lower_price_sqrtX96 * p / 2**96 / 10**d
"""
k = 2 ** 96
d = d0 - d1
if is_0_base:
if price <= lower_price:
delta = liquidity / 2 ** 96 / 10 ** d1 * (sqrtB - sqrtA)
gamma = 0
elif lower_price < price < upper_price:
m = 10 ** (0.5 * d)
delta = liquidity * (0.5 * m / price ** 0.5 / 10 ** d0 + \
0.5 / 10 ** d1 / m / price ** 0.5 - \
sqrtA / k / 10 ** d1)
gamma = -0.25 * liquidity / price ** 1.5 * (m / 10 ** d0 + 1 / 10 ** d1 / m)
else:
delta = 0
gamma = 0
else:
if price <= lower_price:
delta = liquidity / 10 ** d0 * (k / sqrtA - k / sqrtB)
gamma = 0
elif lower_price < price < upper_price:
m = 10 ** (0.5 * d)
delta = liquidity * (0.5 * m / price ** 0.5 / 10 ** d0 + \
0.5 / 10 ** d1 / m / price ** 0.5 - \
k / sqrtB / 10 ** d0)
gamma = -0.25 * liquidity / price ** 1.5 * (m / 10 ** d0 + 1 / m / 10 ** d1)
else:
delta = 0
gamma = 0
return delta, gamma | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/helper.py | helper.py |
from datetime import datetime, date, timedelta
from decimal import Decimal
from typing import Dict
import pandas as pd
from ._typing import UniV3Pool, TokenInfo, BrokerAsset, Position, UniV3PoolStatus, UniLpBalance, \
AddLiquidityAction, RemoveLiquidityAction, CollectFeeAction, BuyAction, SellAction, position_dict_to_dataframe, \
PositionInfo
from .core import V3CoreLib
from .data import fillna
from .helper import tick_to_quote_price, quote_price_to_tick, quote_price_to_sqrt, tick_to_sqrtPriceX96
from .liquitidy_math import get_sqrt_ratio_at_tick
from .._typing import DemeterError, DECIMAL_0, UnitDecimal
from ..broker import MarketBalance, Market, MarketInfo
from ..utils import get_formatted_from_dict, get_formatted_predefined, STYLE, float_param_formatter, to_decimal
class UniLpMarket(Market):
"""
    Market of a uniswap v3 pool. It manages LP positions and provides operations on them,
    such as add/remove liquidity and swapping assets.
    UniLpMarket does not save historical state.
    For the sake of calculation efficiency, the back test does not simulate every add/remove liquidity in the pool.
    Therefore pool-wide values such as current_tick and sqrtPriceX96 cannot be derived internally;
    they have to be provided from outside during the back test (set into the market_status variable).
:param pool_info: pool information
:type pool_info: UniV3Pool
"""
def __init__(self, market_info: MarketInfo, pool_info: UniV3Pool, data: pd.DataFrame = None):
"""
init UniLpMarket
:param market_info: uni_market
:param pool_info: pool info with token0, token1, fee, base token
:param data: default None, dataframe data
"""
super().__init__(market_info=market_info, data=data)
self._pool: UniV3Pool = pool_info
# init balance
self._is_token0_base = pool_info.is_token0_base
# reference for super().assets dict.
self.base_token, self.quote_token = self._convert_pair(self.pool_info.token0, self.pool_info.token1)
# status
self._positions: Dict[PositionInfo, Position] = {}
self._market_status = UniV3PoolStatus(None, 0, 0, 0, 0, DECIMAL_0)
# In order to distinguish price in pool and to u, we call former one "pool price"
self._pool_price_unit = f"{self.base_token.name}/{self.quote_token.name}"
self.history_recorder = None
# internal temporary variable
# self.action_buffer = []
# region properties
def __str__(self):
return f"{self._market_info.name}:{type(self).__name__}, positions: {len(self._positions)}, " \
f"total liquidity: {sum([p.liquidity for p in self._positions.values()])}"
@property
def positions(self) -> Dict[PositionInfo, Position]:
"""
current positions in broker
:return: all positions
:rtype: dict[PositionInfo:Position]
"""
return self._positions
@property
def pool_info(self) -> UniV3Pool:
"""
Get pool info.
:return: pool info
:rtype: UniV3Pool
"""
return self._pool
@property
def token0(self) -> TokenInfo:
"""
get asset 0 info, including balance
:return: BrokerAsset
:rtype: BrokerAsset
"""
return self._pool.token0
@property
def token1(self) -> TokenInfo:
"""
get asset 1 info, including balance
:return: BrokerAsset
:rtype: BrokerAsset
"""
return self._pool.token1
def position(self, position_info: PositionInfo) -> Position:
"""
get position by position information
:param position_info: position information
:type position_info: PositionInfo
:return: Position entity
:rtype: Position
"""
return self._positions[position_info]
@property
def market_status(self) -> UniV3PoolStatus:
return self._market_status
# endregion
def set_market_status(self, timestamp: datetime | None, data: pd.Series | UniV3PoolStatus):
# update price tick
if isinstance(data, UniV3PoolStatus):
self._market_status = data
else:
self._market_status = UniV3PoolStatus(timestamp,
int(data.closeTick),
data.currentLiquidity,
data.inAmount0,
data.inAmount1,
data.price)
def get_price_from_data(self) -> pd.DataFrame:
if self.data is None:
raise DemeterError("data has not set")
price_series: pd.Series = self.data.price
df = pd.DataFrame(index=price_series.index,
data={self.quote_token.name: price_series})
df[self.base_token.name] = 1
return df
def _convert_pair(self, any0, any1):
"""
convert order of token0/token1 to base_token/quote_token, according to self.is_token0_base.
Or convert order of base_token/quote_token to token0/token1
:param any0: token0 or any property of token0, eg. balance...
:param any1: token1 or any property of token1, eg. balance...
        :return: (base, quote) or (token0, token1)
"""
return (any0, any1) if self._is_token0_base else (any1, any0)
def check_asset(self):
"""
"""
if not self._pool:
raise DemeterError("set up pool info first")
if self.base_token not in self.broker.assets:
raise DemeterError(f"base token {self.base_token.name} not exist in asset dict")
if self.quote_token not in self.broker.assets:
raise DemeterError(f"quote token {self.quote_token.name} not exist in asset dict")
def update(self):
"""
re-calculate status
"""
self.__update_fee()
def __update_fee(self):
"""
update fee in all positions according to current status
fee will be calculated by liquidity
"""
for position_info, position in self._positions.items():
V3CoreLib.update_fee(self.pool_info, position_info, position, self.market_status)
def get_market_balance(self, prices: pd.Series | Dict[str, Decimal] = None) -> MarketBalance:
"""
get current status, including positions, balances
:param prices: current price, used for calculate position value and net value, if set to None, will use price in current status
:type prices: pd.Series | Dict[str, Decimal]
:return: MarketBalance
"""
if prices is None:
pool_price = self._market_status.price
prices = {
self.base_token.name: Decimal(1),
self.quote_token.name: self._market_status.price
}
else:
pool_price = prices[self.quote_token.name] / prices[self.base_token.name]
sqrt_price = quote_price_to_sqrt(pool_price,
self._pool.token0.decimal,
self._pool.token1.decimal,
self._is_token0_base)
base_fee_sum = Decimal(0)
quote_fee_sum = Decimal(0)
deposit_amount0 = Decimal(0)
deposit_amount1 = Decimal(0)
for position_info, position in self._positions.items():
base_fee, quote_fee = self._convert_pair(position.pending_amount0, position.pending_amount1)
base_fee_sum += base_fee
quote_fee_sum += quote_fee
amount0, amount1 = V3CoreLib.get_token_amounts(self._pool, position_info, sqrt_price, position.liquidity)
deposit_amount0 += amount0
deposit_amount1 += amount1
base_deposit_amount, quote_deposit_amount = self._convert_pair(deposit_amount0, deposit_amount1)
# net value here is calculated by external price, because we usually want a net value with usd base,
net_value = (base_fee_sum + base_deposit_amount) * prices[self.base_token.name] + \
(quote_fee_sum + quote_deposit_amount) * prices[self.quote_token.name]
val = UniLpBalance(net_value=net_value,
base_uncollected=UnitDecimal(base_fee_sum, self.base_token.name),
quote_uncollected=UnitDecimal(quote_fee_sum, self.quote_token.name),
base_in_position=UnitDecimal(base_deposit_amount, self.base_token.name),
quote_in_position=UnitDecimal(quote_deposit_amount, self.quote_token.name),
position_count=len(self._positions))
return val
def tick_to_price(self, tick: int) -> Decimal:
"""
convert tick to price
:param tick: tick
:type tick: int
:return: price
:rtype: Decimal
"""
return tick_to_quote_price(int(tick),
self._pool.token0.decimal,
self._pool.token1.decimal,
self._is_token0_base)
@float_param_formatter
def price_to_tick(self, price: Decimal | float) -> int:
"""
convert price to tick
:param price: price
:type price: Decimal | float
:return: tick
:rtype: int
"""
return quote_price_to_tick(price,
self._pool.token0.decimal,
self._pool.token1.decimal,
self._is_token0_base)
def _add_liquidity_by_tick(self, token0_amount: Decimal,
token1_amount: Decimal,
lower_tick: int,
upper_tick: int,
sqrt_price_x96: int = -1):
lower_tick = int(lower_tick)
upper_tick = int(upper_tick)
sqrt_price_x96 = int(sqrt_price_x96)
if sqrt_price_x96 == -1:
# self.current_tick must be initialed
sqrt_price_x96 = get_sqrt_ratio_at_tick(self.market_status.current_tick)
if lower_tick > upper_tick:
raise DemeterError("lower tick should be less than upper tick")
token0_used, token1_used, liquidity, position_info = V3CoreLib.new_position(self._pool,
token0_amount,
token1_amount,
lower_tick,
upper_tick,
sqrt_price_x96)
if position_info in self._positions:
self._positions[position_info].liquidity += liquidity
else:
self._positions[position_info] = Position(DECIMAL_0, DECIMAL_0, liquidity)
self.broker.subtract_from_balance(self.token0, token0_used)
self.broker.subtract_from_balance(self.token1, token1_used)
return position_info, token0_used, token1_used, liquidity
def __remove_liquidity(self, position: PositionInfo, liquidity: int = None, sqrt_price_x96: int = -1):
sqrt_price_x96 = int(sqrt_price_x96) if sqrt_price_x96 != -1 else \
get_sqrt_ratio_at_tick(self.market_status.current_tick)
delta_liquidity = liquidity if (liquidity is not None) and liquidity < self.positions[position].liquidity \
else self.positions[position].liquidity
token0_get, token1_get = V3CoreLib.close_position(self._pool, position, delta_liquidity, sqrt_price_x96)
self._positions[position].liquidity = self.positions[position].liquidity - delta_liquidity
self._positions[position].pending_amount0 += token0_get
self._positions[position].pending_amount1 += token1_get
return token0_get, token1_get, delta_liquidity
def __collect_fee(self, position: Position, max_collect_amount0: Decimal = None,
max_collect_amount1: Decimal = None):
"""
collect fee
:param position: position
:param max_collect_amount0: max collect amount0
:param max_collect_amount1: max collect amount1
:return:
"""
token0_fee = max_collect_amount0 if \
max_collect_amount0 is not None and max_collect_amount0 < position.pending_amount0 else \
position.pending_amount0
token1_fee = max_collect_amount1 if \
max_collect_amount1 is not None and max_collect_amount1 < position.pending_amount1 else \
position.pending_amount1
position.pending_amount0 -= token0_fee
position.pending_amount1 -= token1_fee
# add un_collect fee to current balance
self.broker.add_to_balance(self.token0, token0_fee)
self.broker.add_to_balance(self.token1, token1_fee)
return token0_fee, token1_fee
# action for strategy
@float_param_formatter
def add_liquidity(self,
lower_quote_price: Decimal | float,
upper_quote_price: Decimal | float,
base_max_amount: Decimal | float = None,
quote_max_amount: Decimal | float = None) -> (PositionInfo, Decimal, Decimal, int):
"""
add liquidity, then get a new position
:param lower_quote_price: lower price base on quote token.
:type lower_quote_price: Decimal | float
:param upper_quote_price: upper price base on quote token.
:type upper_quote_price: Decimal | float
:param base_max_amount: inputted base token amount, also the max amount to deposit, if is None, will use all the balance of base token
:type base_max_amount: Decimal | float
        :param quote_max_amount: inputted quote token amount, also the max amount to deposit, if is None, will use all the balance of quote token
:type quote_max_amount: Decimal | float
:return: added position, base token used, quote token used
:rtype: (PositionInfo, Decimal, Decimal)
"""
base_max_amount = self.broker.get_token_balance(self.base_token) if base_max_amount is None else \
base_max_amount
quote_max_amount = self.broker.get_token_balance(self.quote_token) if quote_max_amount is None else \
quote_max_amount
token0_amt, token1_amt = self._convert_pair(base_max_amount, quote_max_amount)
lower_tick, upper_tick = V3CoreLib.quote_price_pair_to_tick(self._pool, lower_quote_price,
upper_quote_price)
lower_tick, upper_tick = self._convert_pair(upper_tick, lower_tick)
(created_position, token0_used, token1_used, liquidity) = self._add_liquidity_by_tick(token0_amt,
token1_amt,
lower_tick,
upper_tick)
base_used, quote_used = self._convert_pair(token0_used, token1_used)
self.record_action(AddLiquidityAction(
market=self.market_info,
base_balance_after=self.broker.get_token_balance_with_unit(self.base_token),
quote_balance_after=self.broker.get_token_balance_with_unit(self.quote_token),
base_amount_max=UnitDecimal(base_max_amount, self.base_token.name),
quote_amount_max=UnitDecimal(quote_max_amount, self.quote_token.name),
lower_quote_price=UnitDecimal(lower_quote_price, self._pool_price_unit),
upper_quote_price=UnitDecimal(upper_quote_price, self._pool_price_unit),
base_amount_actual=UnitDecimal(base_used, self.base_token.name),
quote_amount_actual=UnitDecimal(quote_used, self.quote_token.name),
position=created_position,
liquidity=int(liquidity)))
return created_position, base_used, quote_used, liquidity
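    # A hedged usage sketch from a strategy (kept as a comment; the band width and the market
    # variable name are hypothetical): provide liquidity in a +-5% band around the current pool
    # price, spending all free balances.
    #
    # cur_price = market.market_status.price
    # position, base_used, quote_used, liquidity = market.add_liquidity(cur_price * Decimal("0.95"),
    #                                                                   cur_price * Decimal("1.05"))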
def add_liquidity_by_tick(self,
lower_tick: int,
upper_tick: int,
base_max_amount: Decimal | float = None,
quote_max_amount: Decimal | float = None,
sqrt_price_x96: int = -1,
tick: int = -1):
"""
add liquidity, you need to set tick instead of price.
:param lower_tick: lower tick
:type lower_tick: int
:param upper_tick: upper tick
:type upper_tick: int
:param base_max_amount: inputted base token amount, also the max amount to deposit, if is None, will use all the balance of base token
:type base_max_amount: Decimal | float
        :param quote_max_amount: inputted quote token amount, also the max amount to deposit, if is None, will use all the balance of quote token
:type quote_max_amount: Decimal | float
:param tick: tick price. if set to none, it will be calculated from current price.
:type tick: int
:param sqrt_price_x96: precise price. if set to none, it will be calculated from current price. this param will override tick
:type sqrt_price_x96: int
:return: added position, base token used, quote token used
:rtype: (PositionInfo, Decimal, Decimal)
"""
if sqrt_price_x96 == -1 and tick != -1:
sqrt_price_x96 = tick_to_sqrtPriceX96(tick)
base_max_amount = self.broker.get_token_balance(self.base_token) if base_max_amount is None else \
base_max_amount
quote_max_amount = self.broker.get_token_balance(self.quote_token) if quote_max_amount is None else \
quote_max_amount
token0_amt, token1_amt = self._convert_pair(base_max_amount, quote_max_amount)
(created_position, token0_used, token1_used, liquidity) = self._add_liquidity_by_tick(token0_amt,
token1_amt,
lower_tick,
upper_tick,
sqrt_price_x96)
base_used, quote_used = self._convert_pair(token0_used, token1_used)
self.record_action(AddLiquidityAction(
market=self.market_info,
base_balance_after=self.broker.get_token_balance_with_unit(self.base_token),
quote_balance_after=self.broker.get_token_balance_with_unit(self.quote_token),
base_amount_max=UnitDecimal(base_max_amount, self.base_token.name),
quote_amount_max=UnitDecimal(quote_max_amount, self.quote_token.name),
lower_quote_price=UnitDecimal(self.tick_to_price(lower_tick), self._pool_price_unit),
upper_quote_price=UnitDecimal(self.tick_to_price(upper_tick), self._pool_price_unit),
base_amount_actual=UnitDecimal(base_used, self.base_token.name),
quote_amount_actual=UnitDecimal(quote_used, self.quote_token.name),
position=created_position,
liquidity=int(liquidity)))
return created_position, base_used, quote_used, liquidity
@float_param_formatter
def remove_liquidity(self, position: PositionInfo, liquidity: int = None, collect: bool = True,
sqrt_price_x96: int = -1, remove_dry_pool: bool = True) -> (Decimal, Decimal):
"""
remove liquidity from pool, liquidity will be reduced to 0,
instead of send tokens to broker, tokens will be transferred to fee property in position.
position will be not deleted, until fees and tokens are collected.
:param position: position to remove.
:type position: PositionInfo
:param liquidity: liquidity amount to remove, if set to None, all the liquidity will be removed
:type liquidity: int
        :param collect: if True, collect_fee will be called and tokens will be sent to broker; otherwise tokens will be kept in the fee property of the position
:type collect: bool
:param sqrt_price_x96: precise price. if set to none, it will be calculated from current price.
:type sqrt_price_x96: int
:param remove_dry_pool: remove pool which liquidity==0, effect when collect==True
:type remove_dry_pool: bool
:return: (base_got,quote_get), base and quote token amounts collected from position
:rtype: (Decimal,Decimal)
"""
if liquidity and liquidity < 0:
raise DemeterError("liquidity should large than 0")
token0_get, token1_get, delta_liquidity = self.__remove_liquidity(position, liquidity, sqrt_price_x96)
base_get, quote_get = self._convert_pair(token0_get, token1_get)
self.record_action(
RemoveLiquidityAction(market=self.market_info,
base_balance_after=self.broker.get_token_balance_with_unit(self.base_token),
quote_balance_after=self.broker.get_token_balance_with_unit(self.quote_token),
position=position,
base_amount=UnitDecimal(base_get, self.base_token.name),
quote_amount=UnitDecimal(quote_get, self.quote_token.name),
removed_liquidity=delta_liquidity,
remain_liquidity=self.positions[position].liquidity
))
if collect:
return self.collect_fee(position, remove_dry_pool=remove_dry_pool)
else:
return base_get, quote_get
@float_param_formatter
def collect_fee(self,
position: PositionInfo,
max_collect_amount0: Decimal = None,
max_collect_amount1: Decimal = None,
remove_dry_pool: bool = True) -> (Decimal, Decimal):
"""
collect fee and token from positions,
if the amount and liquidity is zero, this position will be deleted.
:param position: position to collect
:type position: PositionInfo
:param max_collect_amount0: max token0 amount to collect, eg: 1.2345 usdc, if set to None, all the amount will be collect
:type max_collect_amount0: Decimal
        :param max_collect_amount1: max token1 amount to collect, if set to None, all the amount will be collected
:type max_collect_amount1: Decimal
:param remove_dry_pool: remove pool which liquidity==0, effect when collect==True
:type remove_dry_pool: bool
:return: (base_got,quote_get), base and quote token amounts collected from position
:rtype: (Decimal,Decimal)
"""
if (max_collect_amount0 and max_collect_amount0 < 0) or \
(max_collect_amount1 and max_collect_amount1 < 0):
raise DemeterError("collect amount should large than 0")
token0_get, token1_get = self.__collect_fee(self._positions[position], max_collect_amount0, max_collect_amount1)
base_get, quote_get = self._convert_pair(token0_get, token1_get)
if self._positions[position]:
self.record_action(CollectFeeAction(
market=self.market_info,
base_balance_after=self.broker.get_token_balance_with_unit(self.base_token),
quote_balance_after=self.broker.get_token_balance_with_unit(self.quote_token),
position=position,
base_amount=UnitDecimal(base_get, self.base_token.name),
quote_amount=UnitDecimal(quote_get, self.quote_token.name)
))
if self._positions[position].pending_amount0 == Decimal(0) \
and self._positions[position].pending_amount1 == Decimal(0) \
and self._positions[position].liquidity == 0 \
and remove_dry_pool:
del self.positions[position]
return base_get, quote_get
@float_param_formatter
def buy(self, amount: Decimal | float, price: Decimal | float = None) -> (Decimal, Decimal, Decimal):
"""
buy token, swap from base token to quote token.
:param amount: amount to buy(in quote token)
:type amount: Decimal | float
:param price: price
:type price: Decimal | float
:return: fee, base token amount spend, quote token amount got
:rtype: (Decimal, Decimal, Decimal)
"""
price = price if price else self.market_status.price
from_amount = price * amount
from_amount_with_fee = from_amount * (1 + self._pool.fee_rate)
fee = from_amount_with_fee - from_amount
from_token, to_token = self._convert_pair(self.token0, self.token1)
self.broker.subtract_from_balance(from_token, from_amount_with_fee)
self.broker.add_to_balance(to_token, amount)
base_amount, quote_amount = self._convert_pair(from_amount, amount)
self.record_action(BuyAction(
market=self.market_info,
base_balance_after=self.broker.get_token_balance_with_unit(self.base_token),
quote_balance_after=self.broker.get_token_balance_with_unit(self.quote_token),
amount=UnitDecimal(amount, self.quote_token.name),
price=UnitDecimal(price, self._pool_price_unit),
fee=UnitDecimal(fee, self.base_token.name),
base_change=UnitDecimal(base_amount, self.base_token.name),
quote_change=UnitDecimal(quote_amount, self.quote_token.name)))
return fee, base_amount, quote_amount
@float_param_formatter
def sell(self, amount: Decimal | float, price: Decimal | float = None) -> (Decimal, Decimal, Decimal):
"""
sell token, swap from quote token to base token.
:param amount: amount to sell(in quote token)
:type amount: Decimal | float
:param price: price
:type price: Decimal | float
:return: fee, base token amount got, quote token amount spend
:rtype: (Decimal, Decimal, Decimal)
"""
price = price if price else self.market_status.price
from_amount_with_fee = amount
from_amount = from_amount_with_fee * (1 - self._pool.fee_rate)
to_amount = from_amount * price
fee = from_amount_with_fee - from_amount
to_token, from_token = self._convert_pair(self.token0, self.token1)
self.broker.subtract_from_balance(from_token, from_amount_with_fee)
self.broker.add_to_balance(to_token, to_amount)
base_amount, quote_amount = self._convert_pair(to_amount, from_amount)
self.record_action(SellAction(
market=self.market_info,
base_balance_after=self.broker.get_token_balance_with_unit(self.base_token),
quote_balance_after=self.broker.get_token_balance_with_unit(self.quote_token),
amount=UnitDecimal(amount, self.base_token.name),
price=UnitDecimal(price, self._pool_price_unit),
fee=UnitDecimal(fee, self.quote_token.name),
base_change=UnitDecimal(base_amount, self.base_token.name),
quote_change=UnitDecimal(quote_amount, self.quote_token.name)))
return fee, base_amount, quote_amount
def even_rebalance(self, price: Decimal = None) -> (Decimal, Decimal, Decimal):
"""
Divide assets equally between two tokens.
:param price: price of quote token. eg: 1234 eth/usdc
:type price: Decimal
:return: fee, base token amount spend, quote token amount got
:rtype: (Decimal, Decimal, Decimal)
"""
if price is None:
price = self._market_status.price
total_capital = self.broker.get_token_balance(self.base_token) + self.broker.get_token_balance(
self.quote_token) * price
target_base_amount = total_capital / 2
quote_amount_diff = target_base_amount / price - self.broker.get_token_balance(self.quote_token)
if quote_amount_diff > 0:
return self.buy(quote_amount_diff)
elif quote_amount_diff < 0:
return self.sell(0 - quote_amount_diff)
def remove_all_liquidity(self):
"""
remove all the positions kept in broker.
"""
if len(self.positions) < 1:
return
keys = list(self.positions.keys())
for position_key in keys:
self.remove_liquidity(position_key)
def add_statistic_column(self, df: pd.DataFrame):
"""
add statistic column to data, new columns including:
* open: open price
* price: close price (current price)
* low: lowest price
        * high: highest price
* volume0: swap volume for token 0
* volume1: swap volume for token 1
:param df: original data
:type df: pd.DataFrame
"""
# add statistic column
df["open"] = df["openTick"].map(lambda x: self.tick_to_price(x))
df["price"] = df["closeTick"].map(lambda x: self.tick_to_price(x))
high_name, low_name = ("lowestTick", "highestTick") if self.pool_info.is_token0_base \
else ("highestTick", "lowestTick")
df["low"] = df[high_name].map(lambda x: self.tick_to_price(x))
df["high"] = df[low_name].map(lambda x: self.tick_to_price(x))
df["volume0"] = df["inAmount0"].map(lambda x: Decimal(x) / 10 ** self.pool_info.token0.decimal)
df["volume1"] = df["inAmount1"].map(lambda x: Decimal(x) / 10 ** self.pool_info.token1.decimal)
def load_data(self, chain: str, contract_addr: str, start_date: date, end_date: date):
"""
load data, and preprocess. preprocess actions including:
* fill empty data
* calculate statistic column
* set timestamp as index
:param chain: chain name
:type chain: str
:param contract_addr: pool contract address
:type contract_addr: str
:param start_date: start test date
:type start_date: date
:param end_date: end test date
:type end_date: date
"""
self.logger.info(f"start load files from {start_date} to {end_date}...")
df = pd.DataFrame()
day = start_date
while day <= end_date:
path = f"{self.data_path}/{chain}-{contract_addr}-{day.strftime('%Y-%m-%d')}.csv"
day_df = pd.read_csv(path, converters={'inAmount0': to_decimal,
'inAmount1': to_decimal,
'netAmount0': to_decimal,
'netAmount1': to_decimal,
"currentLiquidity": to_decimal})
df = pd.concat([df, day_df])
day = day + timedelta(days=1)
self.logger.info("load file complete, preparing...")
df["timestamp"] = pd.to_datetime(df["timestamp"])
df.set_index("timestamp", inplace=True)
# fill empty row (first minutes in a day, might be blank)
full_indexes = pd.date_range(start=df.index[0], end=df.index[df.index.size - 1], freq="1min")
df = df.reindex(full_indexes)
# df = Lines.from_dataframe(df)
# df = df.fillna()
df = fillna(df)
self.add_statistic_column(df)
self.data = df
self.logger.info("data has been prepared")
def check_before_test(self):
"""
prefix test
:return:
"""
super().check_before_test()
required_columns = ["closeTick",
"currentLiquidity",
"inAmount0",
"inAmount1",
"price"]
for col in required_columns:
assert col in self.data.columns
def formatted_str(self):
"""
return formatted str info
:return:
"""
value = get_formatted_predefined(f"{self.market_info.name}({type(self).__name__})", STYLE["header3"]) + "\n"
value += get_formatted_from_dict({
"token0": self.pool_info.token0.name,
"token1": self.pool_info.token1.name,
"fee": self.pool_info.fee_rate * 100,
"is 0 base": self.pool_info.is_token0_base
}) + "\n"
value += get_formatted_predefined("positions", STYLE["key"]) + "\n"
df = position_dict_to_dataframe(self.positions)
if len(df.index) > 0:
value += position_dict_to_dataframe(self.positions).to_string()
else:
value += "Empty DataFrame"
return value | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/market.py | market.py |
from decimal import Decimal
from ._typing import UniV3Pool, Position, UniV3PoolStatus, PositionInfo
from .helper import quote_price_to_tick, from_wei
from .liquitidy_math import get_amounts, get_liquidity
class V3CoreLib(object):
@staticmethod
def new_position(pool: UniV3Pool,
token0_amount: Decimal, token1_amount: Decimal,
lower_tick: int, upper_tick: int, sqrt_price_x96: int):
"""
create new position
:param pool: operation on which pool
:param token0_amount: token0 amount
:param token1_amount: token1 amount
:param lower_tick: lower tick
:param upper_tick: upper tick
:param sqrt_price_x96: sqrt(price) * 2^96
        :return: token0 amount in position, token1 amount in position, liquidity, position info instance
"""
position_liq = get_liquidity(sqrt_price_x96, lower_tick, upper_tick,
token0_amount, token1_amount,
pool.token0.decimal, pool.token1.decimal)
token0_in_position, token1_in_position = get_amounts(sqrt_price_x96, lower_tick, upper_tick,
position_liq, pool.token0.decimal, pool.token1.decimal)
new_position_entity = PositionInfo(lower_tick=lower_tick,
upper_tick=upper_tick)
return token0_in_position, token1_in_position, int(position_liq), new_position_entity
@staticmethod
def close_position(pool: UniV3Pool, position_info: PositionInfo, liquidity, sqrt_price_x96):
"""
close position
:param pool: operation on which pool
:param position_info: position info
:param liquidity: liquidity
:param sqrt_price_x96: sqrt(price) * 2^96
:return: token amount
"""
return V3CoreLib.get_token_amounts(pool, position_info, sqrt_price_x96, liquidity)
@staticmethod
def get_token_amounts(pool: UniV3Pool, pos: PositionInfo, sqrt_price_x96, liquidity) -> (Decimal, Decimal):
"""
get token amount in position
:param pool: operation on which pool
:param pos: position info
:param sqrt_price_x96: sqrt(price) * 2^96
:param liquidity: liquidity
:return: token0 amount, token1 amount
"""
if liquidity == 0: # performance improve
return 0, 0
amount0, amount1 = get_amounts(sqrt_price_x96,
pos.lower_tick,
pos.upper_tick,
liquidity,
pool.token0.decimal,
pool.token1.decimal)
return amount0, amount1
@staticmethod
def quote_price_pair_to_tick(pool: UniV3Pool, lower_quote_price: Decimal, upper_quote_price: Decimal):
"""
quote price pair to tick
:param pool: operation on which pool
:param lower_quote_price: lower quote price
:param upper_quote_price: upper quote price
:return: lower_tick, upper_tick
"""
lower_tick = quote_price_to_tick(lower_quote_price, pool.token0.decimal, pool.token1.decimal,
pool.is_token0_base)
upper_tick = quote_price_to_tick(upper_quote_price, pool.token0.decimal, pool.token1.decimal,
pool.is_token0_base)
return lower_tick, upper_tick
@staticmethod
def update_fee(pool: UniV3Pool, pos: PositionInfo, position: Position, state: UniV3PoolStatus):
"""
update fee
:param pool: operation on which pool
:param pos: position info
:param position: position
:param state: UniV3PoolStatus
:return: None
"""
        # in most cases, the tick will not cross out of the position range within one bar, which means L will not change.
if pos.upper_tick > state.current_tick > pos.lower_tick:
# if the simulating liquidity is above the actual liquidity, we will consider share=1
if position.liquidity >= state.current_liquidity:
share = Decimal(1)
else:
share = Decimal(position.liquidity) / Decimal(state.current_liquidity)
position.pending_amount0 += from_wei(state.in_amount0, pool.token0.decimal) * share * pool.fee_rate
position.pending_amount1 += from_wei(state.in_amount1, pool.token1.decimal) * share * pool.fee_rate | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/core.py | core.py |
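# A hedged worked example of the fee-share formula in update_fee (all numbers are hypothetical):
# a position holding 1% of the pool's current in-range liquidity, an in-bar swap volume of
# 1_000_000 * 10**6 raw token0 (i.e. 1,000,000 usdc with 6 decimals) and a 0.05% fee tier give
# pending_amount0 += 1_000_000 * 0.01 * 0.0005 = 5 usdc for that bar.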
from decimal import Decimal
# -*- coding: utf-8 -*-
"""
!!! IMPORTANT
this module is developed and enhanced from active-strategy-framework of GammaStrategies
source code: https://github.com/GammaStrategies/active-strategy-framework/blob/main/UNI_v3_funcs.py
Original author information:
=============================================
Created on Mon Jun 14 18:53:09 2021
@author: JNP
"""
'''liquiditymath'''
'''Python library to emulate the calculations done in liquiditymath.sol of the UNI_V3 periphery contract'''
# sqrtP: format X96 = int(1.0001**(tick/2)*(2**96))
# liquidity: int
# sqrtA = price for lower tick
# sqrtB = price for upper tick
'''get_amounts function'''
# Use 'get_amounts' function to calculate amounts as a function of liquitidy and price range
def get_amount0(sqrtA: int, sqrtB: int, liquidity: int, decimals: int) -> Decimal:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
amount0 = (Decimal(liquidity * 2 ** 96 * (sqrtB - sqrtA)) / sqrtB / sqrtA) / 10 ** decimals
return amount0
def get_amount1(sqrtA: int, sqrtB: int, liquidity: int, decimals: int) -> Decimal:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
amount1 = Decimal(liquidity * (sqrtB - sqrtA)) / 2 ** 96 / 10 ** decimals
return amount1
def get_sqrt(tick: int):
return Decimal(1.0001 ** (tick / 2) * (2 ** 96))
def get_amounts(sqrt_price_x96: int, tickA: int, tickB: int, liquidity: int, decimal0: int, decimal1: int) -> \
(Decimal, Decimal):
sqrt = sqrt_price_x96
sqrtA = get_sqrt_ratio_at_tick(tickA)
sqrtB = get_sqrt_ratio_at_tick(tickB)
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
if sqrt <= sqrtA:
amount0 = get_amount0(sqrtA, sqrtB, liquidity, decimal0)
return amount0, Decimal(0)
elif sqrtB > sqrt > sqrtA:
amount0 = get_amount0(sqrt, sqrtB, liquidity, decimal0)
amount1 = get_amount1(sqrtA, sqrt, liquidity, decimal1)
return amount0, amount1
else:
amount1 = get_amount1(sqrtA, sqrtB, liquidity, decimal1)
return Decimal(0), amount1
'''get token amounts relation'''
# Use this formula to calculate amount of t0 based on amount of t1 (required before calculate liquidity)
# relation = t1/t0
def amounts_relation(tick: int, tickA: int, tickB: int, decimals0: int, decimals1: int) -> Decimal:
sqrt = (1.0001 ** tick / 10 ** (decimals1 - decimals0)) ** (1 / 2)
sqrtA = (1.0001 ** tickA / 10 ** (decimals1 - decimals0)) ** (1 / 2)
sqrtB = (1.0001 ** tickB / 10 ** (decimals1 - decimals0)) ** (1 / 2)
if sqrt == sqrtA or sqrt == sqrtB:
relation = 0
relation = (sqrt - sqrtA) / ((1 / sqrt) - (1 / sqrtB))
return relation
'''get_liquidity function'''
def mul_div(a: int, b: int, denominator: int) -> int:
"""
this function is very long in contract. but It's because max length in solidity is limited.
But python has unlimit integer.
ensure all the parameter is int !
"""
return a * b // denominator
def get_liquidity_for_amount0(sqrtA: int, sqrtB: int, amount: int) -> int:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
intermediate = mul_div(sqrtA, sqrtB, 2 ** 96)
return mul_div(amount, intermediate, sqrtB - sqrtA)
def get_liquidity_for_amount1(sqrtA: int, sqrtB: int, amount: int) -> int:
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
return mul_div(amount, 2 ** 96, sqrtB - sqrtA)
def to_wei(amount, decimals) -> int:
return int(amount * 10 ** decimals)
def get_liquidity(sqrt_price_x96: int, tickA: int, tickB: int,
amount0: Decimal, amount1: Decimal,
decimal0: int, decimal1: int) -> int:
sqrt = sqrt_price_x96
sqrtA = get_sqrt_ratio_at_tick(tickA)
sqrtB = get_sqrt_ratio_at_tick(tickB)
if sqrtA > sqrtB:
(sqrtA, sqrtB) = (sqrtB, sqrtA)
amount0wei: int = to_wei(amount0, decimal0)
amount1wei: int = to_wei(amount1, decimal1)
if sqrt <= sqrtA:
liquidity0 = get_liquidity_for_amount0(sqrtA, sqrtB, amount0wei)
return liquidity0
elif sqrtB > sqrt > sqrtA:
liquidity0 = get_liquidity_for_amount0(sqrt, sqrtB, amount0wei)
liquidity1 = get_liquidity_for_amount1(sqrtA, sqrt, amount1wei)
liquidity = liquidity0 if liquidity0 < liquidity1 else liquidity1
return liquidity
else:
liquidity1 = get_liquidity_for_amount1(sqrtA, sqrtB, amount1wei)
return liquidity1
def get_sqrt_ratio_at_tick(tick: int) -> int:
tick = int(tick)
abs_tick = tick if tick >= 0 else -tick
assert abs_tick <= 887272
# 这些魔数分别表示 1/sqrt(1.0001)^1, 1/sqrt(1.0001)^2, 1/sqrt(1.0001)^4....
ratio: int = 0xfffcb933bd6fad37aa2d162d1a594001 if abs_tick & 0x1 != 0 else 0x100000000000000000000000000000000
if abs_tick & 0x2 != 0: ratio = (ratio * 0xfff97272373d413259a46990580e213a) >> 128
if abs_tick & 0x4 != 0: ratio = (ratio * 0xfff2e50f5f656932ef12357cf3c7fdcc) >> 128
if abs_tick & 0x8 != 0: ratio = (ratio * 0xffe5caca7e10e4e61c3624eaa0941cd0) >> 128
if abs_tick & 0x10 != 0: ratio = (ratio * 0xffcb9843d60f6159c9db58835c926644) >> 128
if abs_tick & 0x20 != 0: ratio = (ratio * 0xff973b41fa98c081472e6896dfb254c0) >> 128
if abs_tick & 0x40 != 0: ratio = (ratio * 0xff2ea16466c96a3843ec78b326b52861) >> 128
if abs_tick & 0x80 != 0: ratio = (ratio * 0xfe5dee046a99a2a811c461f1969c3053) >> 128
if abs_tick & 0x100 != 0: ratio = (ratio * 0xfcbe86c7900a88aedcffc83b479aa3a4) >> 128
if abs_tick & 0x200 != 0: ratio = (ratio * 0xf987a7253ac413176f2b074cf7815e54) >> 128
if abs_tick & 0x400 != 0: ratio = (ratio * 0xf3392b0822b70005940c7a398e4b70f3) >> 128
if abs_tick & 0x800 != 0: ratio = (ratio * 0xe7159475a2c29b7443b29c7fa6e889d9) >> 128
if abs_tick & 0x1000 != 0: ratio = (ratio * 0xd097f3bdfd2022b8845ad8f792aa5825) >> 128
if abs_tick & 0x2000 != 0: ratio = (ratio * 0xa9f746462d870fdf8a65dc1f90e061e5) >> 128
if abs_tick & 0x4000 != 0: ratio = (ratio * 0x70d869a156d2a1b890bb3df62baf32f7) >> 128
if abs_tick & 0x8000 != 0: ratio = (ratio * 0x31be135f97d08fd981231505542fcfa6) >> 128
if abs_tick & 0x10000 != 0: ratio = (ratio * 0x9aa508b5b7a84e1c677de54f3e99bc9) >> 128
if abs_tick & 0x20000 != 0: ratio = (ratio * 0x5d6af8dedb81196699c329225ee604) >> 128
if abs_tick & 0x40000 != 0: ratio = (ratio * 0x2216e584f5fa1ea926041bedfe98) >> 128
if abs_tick & 0x80000 != 0: ratio = (ratio * 0x48a170391f7dc42444e8fa2) >> 128
if tick > 0:
# type(uint256).max
ratio = int(115792089237316195423570985008687907853269984665640564039457584007913129639935 // ratio)
# this divides by 1<<32 rounding up to go from a Q128.128 to a Q128.96.
# we then downcast because we know the result always fits within 160 bits due to our tick input constraint
# we round up in the division so getTickAtSqrtRatio of the output price is always consistent
sqrt_price_x96 = (ratio >> 32) + (0 if ratio % (1 << 32) == 0 else 1)
return sqrt_price_x96 | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/liquitidy_math.py | liquitidy_math.py |
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from typing import Dict
import pandas as pd
from .._typing import TokenInfo, DemeterError, UnitDecimal, PositionInfo
from ..broker import BaseAction, ActionTypeEnum, MarketBalance, MarketStatus
from ..utils import get_formatted_from_dict
class UniV3Pool(object):
"""
pool information, corresponding with definition in pool contract.
:param token0: First token in pool contract.
:type token0: TokenInfo
:param token1: Second token in pool contract.
:type token1: TokenInfo
:param fee: fee rate of this pool, should be among [0.05, 0.3, 1]
:type fee: float, 0.05
:param base_token: which token will be considered as base token. eg: to a token pair of USDT/BTC, if you want price unit to be like 10000 usdt/btc, you should set usdt as base token, otherwise if price unit is 0.00001 btc/usdt, you should set btc as base token
:type base_token: TokenInfo
"""
def __init__(self, token0: TokenInfo, token1: TokenInfo, fee: float, base_token: TokenInfo):
fee = Decimal(str(fee))
self.token0 = token0
self.token1 = token1
self.is_token0_base = (base_token == token0)
self.base_token = base_token
self.tickSpacing = int(fee * 200)
self.fee: Decimal = fee * Decimal(10000)
self.fee_rate: Decimal = Decimal(fee) / Decimal(100)
def __str__(self):
"""
get string
:return:
:rtype:
"""
return "PoolBaseInfo(Token0: {},".format(self.token0) + \
"Token1: {},".format(self.token1) + \
"fee: {},".format(self.fee_rate * Decimal(100)) + \
"base token: {})".format(self.token0.name if self.is_token0_base else self.token1.name)
@dataclass
class UniLpBalance(MarketBalance):
"""
current status of broker
:param timestamp: timestamp
:type timestamp: datetime
:param base_uncollected: base token uncollect fee in all the positions.
:type base_uncollected: UnitDecimal
:param quote_uncollected: quote token uncollect fee in all the positions.
:type quote_uncollected: UnitDecimal
:param base_in_position: base token amount deposited in positions, calculated according to current price
:type base_in_position: UnitDecimal
:param quote_in_position: quote token amount deposited in positions, calculated according to current price
:type quote_in_position: UnitDecimal
:param pool_net_value: 按照池子base/quote关系的净值. 不是broker层面的(which 通常是对u的).
:type pool_net_value: UnitDecimal
:param price: current price
:type price: UnitDecimal
"""
base_uncollected: UnitDecimal
quote_uncollected: UnitDecimal
base_in_position: UnitDecimal
quote_in_position: UnitDecimal
position_count: int
def get_output_str(self) -> str:
"""
get colored and formatted string to output in console
:return: formatted string
:rtype: str
"""
return get_formatted_from_dict({
"total capital": self.pool_net_value.to_str(),
"uncollect fee": f"{self.base_uncollected.to_str()},{self.quote_uncollected.to_str()}",
"in position amount": f"{self.base_in_position.to_str()},{self.quote_in_position.to_str()}",
"position count": self.position_count.to_str()
})
def to_array(self):
return [
self.base_uncollected,
self.quote_uncollected,
self.base_in_position,
self.quote_in_position,
self.position_count
]
@DeprecationWarning
class BrokerAsset(object):
"""
Wallet of broker, manage balance of an asset.
It will prevent excess usage on asset.
"""
def __init__(self, token: TokenInfo, init_amount=Decimal(0)):
self.token_info = token
self.name = token.name
self.decimal = token.decimal
self.balance = init_amount
def __str__(self):
return f"{self.balance} {self.name}"
def add(self, amount=Decimal(0)):
"""
add amount to balance
:param amount: amount to add
:type amount: Decimal
:return: entity itself
:rtype: BrokerAsset
"""
self.balance += amount
return self
def sub(self, amount=Decimal(0), allow_negative_balance=False):
"""
subtract amount from balance. if balance is not enough, an error will be raised.
:param amount: amount to subtract
:type amount: Decimal
:param allow_negative_balance: allow balance is negative
:type allow_negative_balance: bool
:return:
:rtype:
"""
base = self.balance if self.balance != Decimal(0) else Decimal(amount)
if base == Decimal(0): # amount and balance is both 0
return self
if allow_negative_balance:
self.balance -= amount
else:
# if difference between amount and balance is below 0.01%, will deduct all the balance
# That's because, the amount calculated by v3_core, has some acceptable error.
if abs((self.balance - amount) / base) < 0.00001:
self.balance = Decimal(0)
elif self.balance - amount < Decimal(0):
raise DemeterError(f"Insufficient balance, balance is {self.balance}{self.name}, "
f"but sub amount is {amount}{self.name}")
else:
self.balance -= amount
return self
def amount_in_wei(self):
return self.balance * Decimal(10 ** self.decimal)
@dataclass
class Position(object):
"""
variables for position
"""
pending_amount0: Decimal
pending_amount1: Decimal
liquidity: int
def position_dict_to_dataframe(positions: Dict[PositionInfo, Position]) -> pd.DataFrame:
pos_dict = {
"lower_tick": [],
"upper_tick": [],
"pending0": [],
"pending1": [],
"liquidity": []
}
for k, v in positions.items():
pos_dict["lower_tick"].append(k.lower_tick)
pos_dict["upper_tick"].append(k.upper_tick)
pos_dict["pending0"].append(v.pending_amount0)
pos_dict["pending1"].append(v.pending_amount1)
pos_dict["liquidity"].append(v.liquidity)
return pd.DataFrame(pos_dict)
@dataclass
class UniV3PoolStatus(MarketStatus):
"""
current status of a pool, actuators can notify current status to broker by filling this entity
"""
current_tick: int
current_liquidity: int
in_amount0: int
in_amount1: int
price: Decimal
@dataclass
class UniLpBaseAction(BaseAction):
"""
Parent class of broker actions,
:param base_balance_after: after action balance of base token
:type base_balance_after: UnitDecimal
:param quote_balance_after: after action balance of quote token
:type quote_balance_after: UnitDecimal
"""
base_balance_after: UnitDecimal
quote_balance_after: UnitDecimal
def get_output_str(self):
return str(self)
@dataclass
class AddLiquidityAction(UniLpBaseAction):
"""
Add Liquidity
:param base_amount_max: inputted base token amount, also the max amount to deposit
:type base_amount_max: ActionTypeEnum
:param quote_amount_max: inputted base token amount, also the max amount to deposit
:type quote_amount_max: datetime
:param lower_quote_price: lower price base on quote token.
:type lower_quote_price: UnitDecimal
:param upper_quote_price: upper price base on quote token.
:type upper_quote_price: UnitDecimal
:param base_amount_actual: actual used base token
:type base_amount_actual: UnitDecimal
:param quote_amount_actual: actual used quote token
:type quote_amount_actual: UnitDecimal
:param position: generated position
:type position: PositionInfo
:param liquidity: liquidity added
:type liquidity: int
"""
base_amount_max: UnitDecimal
quote_amount_max: UnitDecimal
lower_quote_price: UnitDecimal
upper_quote_price: UnitDecimal
base_amount_actual: UnitDecimal
quote_amount_actual: UnitDecimal
position: PositionInfo
liquidity: int
def set_type(self):
self.action_type = ActionTypeEnum.uni_lp_add_liquidity
def get_output_str(self) -> str:
"""
get colored and formatted string to output in console
:return: formatted string
:rtype: str
"""
return f"""\033[1;31m{"Add liquidity":<20}\033[0m""" + \
get_formatted_from_dict({
"max amount": f"{self.base_amount_max.to_str()},{self.quote_amount_max.to_str()}",
"price": f"{self.lower_quote_price.to_str()},{self.upper_quote_price.to_str()}",
"position": self.position,
"liquidity": self.liquidity,
"balance": f"{self.base_balance_after.to_str()}(-{self.base_amount_actual.to_str()}), {self.quote_balance_after.to_str()}(-{self.quote_amount_actual.to_str()})"
})
@dataclass
class CollectFeeAction(UniLpBaseAction):
"""
collect fee
:param position: position to operate
:type position: PositionInfo
:param base_amount: fee collected in base token
:type base_amount: UnitDecimal
:param quote_amount: fee collected in quote token
:type quote_amount: UnitDecimal
"""
position: PositionInfo
base_amount: UnitDecimal
quote_amount: UnitDecimal
def set_type(self):
self.action_type = ActionTypeEnum.uni_lp_collect
def get_output_str(self) -> str:
"""
get colored and formatted string to output in console
:return: formatted string
:rtype: str
"""
return f"""\033[1;33m{"Collect fee":<20}\033[0m""" + \
get_formatted_from_dict({
"position": self.position,
"balance": f"{self.base_balance_after.to_str()}(+{self.base_amount.to_str()}), {self.quote_balance_after.to_str()}(+{self.quote_amount.to_str()})"
})
@dataclass
class RemoveLiquidityAction(UniLpBaseAction):
"""
remove position
:param position: position to operate
:type position: PositionInfo
:param base_amount: base token amount collected
:type base_amount: UnitDecimal
:param quote_amount: quote token amount collected
:type quote_amount: UnitDecimal
:param removed_liquidity: liquidity number has removed
:type removed_liquidity: int
:param remain_liquidity: liquidity number left in position
:type remain_liquidity: int
"""
position: PositionInfo
base_amount: UnitDecimal
quote_amount: UnitDecimal
removed_liquidity: int
remain_liquidity: int
def set_type(self):
self.action_type = ActionTypeEnum.uni_lp_remove_liquidity
def get_output_str(self) -> str:
"""
get colored and formatted string to output in console
:return: formatted string
:rtype: str
"""
return f"""\033[1;32m{"Remove liquidity":<20}\033[0m""" + \
get_formatted_from_dict({
"position": self.position,
"balance": f"{self.base_balance_after.to_str()}(+0), {self.quote_balance_after.to_str()}(+0)",
"token_got": f"{self.base_amount.to_str()},{self.quote_amount.to_str()}",
"removed liquidity": self.removed_liquidity,
"remain liquidity": self.remain_liquidity
})
@dataclass
class BuyAction(UniLpBaseAction):
"""
buy token, swap from base token to quote token.
:param amount: amount to buy(in quote token)
:type amount: UnitDecimal
:param price: price,
:type price: UnitDecimal
:param fee: fee paid (in base token)
:type fee: UnitDecimal
:param base_change: base token amount changed
:type base_change: PositionInfo
:param quote_change: quote token amount changed
:type quote_change: UnitDecimal
"""
amount: UnitDecimal
price: UnitDecimal
fee: UnitDecimal
base_change: UnitDecimal
quote_change: UnitDecimal
def set_type(self):
self.action_type = ActionTypeEnum.uni_lp_buy
def get_output_str(self) -> str:
"""
get colored and formatted string to output in console
:return: formatted string
:rtype: str
"""
return f"""\033[1;36m{"Buy":<20}\033[0m""" + \
get_formatted_from_dict({
"price": self.price.to_str(),
"fee": self.fee.to_str(),
"balance": f"{self.base_balance_after.to_str()}(-{self.base_change.to_str()}), {self.quote_balance_after.to_str()}(+{self.quote_change.to_str()})"
})
@dataclass
class SellAction(UniLpBaseAction):
"""
sell token, swap from quote token to base token.
:param amount: amount to sell(in quote token)
:type amount: UnitDecimal
:param price: price,
:type price: UnitDecimal
:param fee: fee paid (in quote token)
:type fee: UnitDecimal
:param base_change: base token amount changed
:type base_change: PositionInfo
:param quote_change: quote token amount changed
:type quote_change: UnitDecimal
"""
amount: UnitDecimal
price: UnitDecimal
fee: UnitDecimal
base_change: UnitDecimal
quote_change: UnitDecimal
def set_type(self):
self.action_type = ActionTypeEnum.uni_lp_sell
def get_output_str(self):
return f"""\033[1;37m{"Sell":<20}\033[0m""" + \
get_formatted_from_dict({
"price": self.price.to_str(),
"fee": self.fee.to_str(),
"balance": f"{self.base_balance_after.to_str()}(+{self.base_change.to_str()}), {self.quote_balance_after.to_str()}(-{self.quote_change.to_str()})"
}) | zelos-demeter | /zelos-demeter-0.2.1.tar.gz/zelos-demeter-0.2.1/demeter/uniswap/_typing.py | _typing.py |

[](https://dev.azure.com/kevin0853/zelos/_build/latest?definitionId=1&branchName=master)
[](https://codecov.io/gh/zeropointdynamics/zelos)
[](https://zelos.readthedocs.io/en/latest/?badge=latest)

[](https://www.gnu.org/licenses/agpl-3.0)
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
# Zelos
Zelos (**Z**eropoint **E**mulated **L**ightweight **O**perating **S**ystem) is a python-based binary emulation platform. One use of zelos is to quickly assess the dynamic behavior of binaries via command-line or python scripts. All syscalls are emulated to isolate the target binary. Linux x86_64 (32- and 64-bit), ARM and MIPS binaries are supported. [Unicorn](https://github.com/unicorn-engine/unicorn) provides CPU emulation.

[Full documentation](https://zelos.readthedocs.io/en/latest/index.html) is available [here](https://zelos.readthedocs.io/en/latest/index.html).
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install zelos.
```bash
pip install zelos
```
## Basic Usage
### Command-line
To emulate a binary with default options:
```console
$ zelos my_binary
```
To view the instructions that are being executed, add the `--inst` flag:
```console
$ zelos --inst my_binary
```
You can print only the first time each instruction is executed, rather than *every* execution, using `--fasttrace`:
```console
$ zelos --inst --fasttrace my_binary
```
By default, syscalls are emitted on stdout. To write syscalls to a file instead, use the `--trace_file` flag:
```console
$ zelos --trace_file path/to/file my_binary
```
Specify any command line arguments after the binary name:
```console
$ zelos my_binary arg1 arg2
```
### Programmatic
```python
import zelos
z = zelos.Zelos("my_binary")
z.start(timeout=3)
```
## Plugins
Zelos supports first- and third-party [plugins](https://zelos.readthedocs.io/en/latest/tutorials/04_writing_plugins.html). Some notable plugins thus far:
- [crashd](https://github.com/zeropointdynamics/zelos-crashd) crash analyzer combining execution trace, dataflow and memory sanitization.
- [overlay (ida plugin)](https://zelos.readthedocs.io/en/latest/tutorials/06_snapshot_overlay.html): highlights `zelos` execution trace in IDA with instruction-level comments added.
- [angr integration](https://github.com/zeropointdynamics/angr-zelos-target): enables symbolic execution in `zelos`.
- [zdbserver](https://github.com/zeropointdynamics/zelos/tree/master/src/zelos/tools/zdbserver): remote control and debugging of emulated binaries.
- [syscall limiter](https://zelos.readthedocs.io/en/latest/tutorials/05_syscall_limit_plugin.html): demonstrates event hooking and provides syscall-based execution and termination options.
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate.
### Local Development Environment
First, create a new python virtual environment. This will ensure no package version conflicts arise:
```console
$ python3 -m venv ~/.venv/zelos
$ source ~/.venv/zelos/bin/activate
```
Now clone the repository and change into the `zelos` directory:
```console
(zelos) $ git clone [email protected]:zeropointdynamics/zelos.git
(zelos) $ cd zelos
```
Install an *editable* version of zelos into the virtual environment. This makes `import zelos` available, and any local changes to zelos will be effective immediately:
```console
(zelos) $ pip install -e '.[dev]'
```
At this point, tests should pass and documentation should build:
```console
(zelos) $ pytest
(zelos) $ cd docs
(zelos) $ make html
```
Built documentation is found in ``docs/_build/html/``.
Install zelos pre-commit hooks to ensure code style compliance:
```console
(zelos) $ pre-commit install
```
In addition to automatically running every commit, you can run them anytime with:
```console
(zelos) $ pre-commit run --all-files
```
#### Windows Development:
Commands vary slightly on Windows:
```console
C:\> python3 -m venv zelos_venv
C:\> zelos_venv\Scripts\activate.bat
(zelos) C:\> pip install -e .[dev]
```
## License
[AGPL v3](https://www.gnu.org/licenses/agpl-3.0.en.html)
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/README.md | README.md |
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Version 0.2.0] - 2020-08-04
### Added
- Plugins: Yarascan
- Introduction of Zelos Manipulation Language (ZML), used for specifying events on the command line and in scripts. New zml_hook function in api
- Ability to redirect input to stdin
- Hooks for internal memory reads, writes, and maps
- Linked to crashd plugin, containing separate plugins for heap memory guards, static analysis via IDA Pro, and dataflow using QEMU TCG
### Changed
- Moved to different command line flags for specifying what degree of information (instructions or syscalls) is printed while running
- Better support for lists in command line arguments
- Flags can be passed to the emulated program via the command line
- Misc. bug fixes (thanks to seth1002)
- General improvements to syscalls
### Removed
- Verbosity command line flag (now handled via other flags)
## [Version 0.1.0] - 2020-05-29
### Added
- Plugins: IDA overlays, remote debug server
- Additional plugin APIs
### Changed
- Minor syscall emulation improvements
- Memory management overhaul
### Removed
- N/A
## [Version 0.0.1] - 2020-03-03
### Added
- N/A
### Changed
- Updated documentation
### Removed
- N/A
## [Version 0.0.0] - 2020-03-02
Initial public release.
### Added
- Initial open source commit.
### Changed
- N/A
### Removed
- N/A
[0.0.0]: https://github.com/zeropointdynamics/zelos/releases/tag/v0.0.0
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/CHANGELOG.md | CHANGELOG.md |
Zelos Documentation
=================================
.. toctree::
:maxdepth: 2
./README.md
.. toctree::
:caption: Tutorials
:maxdepth: 1
tutorials/01_cmdline
tutorials/02_scripting
tutorials/03_using_hooks
tutorials/04_writing_plugins
tutorials/05_syscall_limit_plugin
tutorials/06_snapshot_overlay
tutorials/07_zml_and_feeds
.. toctree::
:caption: Script API
:maxdepth: 1
api/zelos.api
args/args
.. toctree::
:caption: Internal Package Docs
:maxdepth: 1
api/zelos
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/index.rst | index.rst |
# 01 - Command Line Use
To emulate a binary with default options:
```console
$ zelos my_binary
```
To emulate a binary and view the instructions being executed, add the `-v` flag:
```console
$ zelos -v my_binary
```
To print only the *first* time an instruction is executed, rather than *every* instruction, using the `--fasttrace` flag:
```console
$ zelos -v --fasttrace my_binary
```
To write output to a file use the `--trace_file` flag:
```console
$ zelos --trace_file /path/to/file my_binary
```
To provide command line arguments to the emulated binary, specify them after the binary name:
```console
$ zelos my_binary arg1 arg2
```
To log various Zelos-related debug information, you can specify log level with flag `--log` and specify one of the options from 'info', 'verbose', 'debug', 'spam', 'notice', 'warning', 'success', 'error', or 'fatal'. The default options is 'info'.
```console
$ zelos --log debug my_binary
```
To specify a timeout in seconds, after which emulation will stop, use the flag `-t`:
```console
$ zelos -t 10 my_binary
```
To specify a memory limit in mb, after which an exception is thrown an emulation will stop, use the flag `m`:
```console
$ zelos -m 1024 my_binary
```
To specify a virtual filename, the name that will be used for the binary during emulation, use the `--virtual_filename` flag:
```console
$ zelos --virtual_filename virtualname my_binary
```
To specify a virtual file path, the path that will be used for the binary during emulation, use the `--virtual_path` flag:
```console
$ zelos --virtual_path /home/admin/ my_binary
```
To specify environment variables to use during emulation, use the `--env_vars` (`-ev`) flag. This can be specified multiple times to set multiple environment variables:
```console
$ zelos --env_vars FOO=bar --env_vars LOREM=ipsum my_binary
```
To specify the date in YYYY-MM-DD format, use the `--date` flag. This is primarily used when emulating date-related system calls such as __time__ and __gettimeofday__.
```console
$ zelos --date 2020-03-04 my_binary
```
To see an example of the above, you can use zelos to emulate the `date` GNU coreutil. This is included on most linux systems at `/bin/date`. The source code for `date` is available [here](https://github.com/coreutils/coreutils/blob/master/src/date.c).
```
$ zelos --date 2020-03-04 /bin/date
```
To mount a specified file or path into the emulated filesystem, use the `--mount` flag. The format is `--mount ARCH,DEST,SRC`. `ARCH` is one of `x86`, `x86-64`, `arm`, or `mips`. `DEST` is the emulated path to mount the specified `SRC`. `SRC` is the absolute host path to the file or path to mount.
```
$ zelos --mount x86,/path/to/dest,/path/to/src my_binary
```
To specify a directory to use as the rootfs directory during emulation of a linux system, use `--linux_rootfs` flag. The format is `--linux_rootfs ARCH,PATH`. `ARCH` is one of `x86`, `x86-64`, `arm`, or `mips`. `PATH` is the absolute host path to the directory to be used as rootfs. For example, if you were running Zelos on a linux host machine, and you wanted to use your own root filesystem as the emulated rootfs, you would do the following:
```console
$ zelos --linux_rootfs x86,/ my_binary
```
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/01_cmdline.md | 01_cmdline.md |
# 05 - Syscall Limiter Plugin
This tutorial explains how the syscall-limiter plugin was written and how it works.
The source code for this plugin can be fond in [src/zelos/ext/plugins/syscall_limiter.py](https://github.com/zeropointdynamics/zelos/blob/master/src/zelos/ext/plugins/syscall_limiter.py).
## Overview
The Syscall Limiter Plugin provides the following additional functionalities for Zelos:
* Stop Zelos emulation after a specified number of syscalls have been executed across all threads.
* Stop a thread after specified number of syscalls have been executed on a thread.
* Swap threads after a specified number of syscalls have been executed on a thread.
## Create the Command Line Options
```eval_rst
As mentioned in the previous tutorial, we create three :py:class:`zelos.CommandLineOption` to be able to specify the number of syscalls we want to limit overall, the number of syscalls per thread we want to limit, and the number of syscalls before swapping at run time.
```
```python
from zelos import CommandLineOption
CommandLineOption(
"syscall_limit",
type=int,
default=0,
)
CommandLineOption(
"syscall_thread_limit",
type=int,
default=0,
)
CommandLineOption(
"syscall_thread_swap",
type=int,
default=100,
)
```
## Initializing the Plugin
```eval_rst
We create the plugin by creating a class that subclasses :py:class:`zelos.IPlugin`. We initialize by invoking the superclass init function through :code:`super().__init__(z)` in the SyscallLimiter's :code:`__init__` function.
```
```python
from zelos import CommandLineOption, IPlugin
CommandLineOption(
"syscall_limit",
type=int,
default=0,
)
CommandLineOption(
"syscall_thread_limit",
type=int,
default=0,
)
CommandLineOption(
"syscall_thread_swap",
type=int,
default=100,
)
class SyscallLimiter(IPlugin):
def __init__(self, z):
super().__init__(z)
pass
```
## Implementing the Syscall Hook
```eval_rst
In order to implement the desired behavior of SyscallLimiter, we create a syscall hook using the :py:meth:`~zelos.Zelos.hook_syscalls` function. As noted in the previous tutorial, we can access our command line options through :py:class:`zelos.Zelos`'s :code:`config` field. Additionally, we create a callback function that keeps track of the number of syscalls executed overall and per thread.
```
```python
from collections import defaultdict
from zelos import CommandLineOption, IPlugin, HookType
CommandLineOption(
"syscall_limit",
type=int,
default=0,
)
CommandLineOption(
"syscall_thread_limit",
type=int,
default=0,
)
CommandLineOption(
"syscall_thread_swap",
type=int,
default=100,
)
class SyscallLimiter(IPlugin):
def __init__(self, z):
super().__init__(z)
# If we specify any of the above commandline options,
# then create a syscall hook
if (
z.config.syscall_limit > 0
or z.config.syscall_thread_limit > 0
or z.config.syscall_thread_swap > 0
):
self.zelos.hook_syscalls(
HookType.SYSCALL.AFTER, self._syscall_callback
)
# Fields to keep track of syscalls executed
self.syscall_cnt = 0
self.syscall_thread_cnt = defaultdict(int)
def _syscall_callback(self, p, sysname, args, retval):
if self.zelos.thread is None:
return
# Get the name of the current thread
thread_name = self.zelos.thread.name
self.syscall_cnt += 1
self.syscall_thread_cnt[thread_name] += 1
```
## Limiting Syscalls Overall
```eval_rst
To stop after a specified number of syscalls have been executed, we use the :py:meth:`~zelos.Zelos.hook_syscalls` function.
```
```python
def _syscall_callback(self, p, sysname, args, retval):
if self.zelos.thread is None:
return
# Get the name of the current thread
thread_name = self.zelos.thread.name
self.syscall_cnt += 1
self.syscall_thread_cnt[thread_name] += 1
# End execution if syscall limit reached
if (
self.zelos.config.syscall_limit > 0
and self.syscall_cnt >= self.zelos.config.syscall_limit
):
self.zelos.stop("syscall limit")
return
```
## Limiting Syscalls Per Thread
```eval_rst
To stop & complete a thread after specified number of syscalls have been executed on it, we use the :py:meth:`~zelos.Zelos.end_thread` function.
```
```python
def _syscall_callback(self, p, sysname, args, retval):
if self.zelos.thread is None:
return
# Get the name of the current thread
thread_name = self.zelos.thread.name
self.syscall_cnt += 1
self.syscall_thread_cnt[thread_name] += 1
# End execution if syscall limit reached
if (
self.zelos.config.syscall_limit > 0
and self.syscall_cnt >= self.zelos.config.syscall_limit
):
self.zelos.stop("syscall limit")
return
# End thread if syscall thread limit reached
if (
self.zelos.config.syscall_thread_limit != 0
and self.syscall_thread_cnt[thread_name]
% self.zelos.config.syscall_thread_limit
== 0
):
self.zelos.end_thread()
return
```
## Swapping Threads
```eval_rst
To force a thread swap to occur after specified number of syscalls have been executed on it, we use the :py:meth:`~zelos.Zelos.swap_thread` function.
```
```python
def _syscall_callback(self, p, sysname, args, retval):
if self.zelos.thread is None:
return
# Get the name of the current thread
thread_name = self.zelos.thread.name
self.syscall_cnt += 1
self.syscall_thread_cnt[thread_name] += 1
# End execution if syscall limit reached
if (
self.zelos.config.syscall_limit > 0
and self.syscall_cnt >= self.zelos.config.syscall_limit
):
self.zelos.stop("syscall limit")
return
# End thread if syscall thread limit reached
if (
self.zelos.config.syscall_thread_limit != 0
and self.syscall_thread_cnt[thread_name]
% self.zelos.config.syscall_thread_limit
== 0
):
self.zelos.end_thread()
return
# Swap threads if syscall thread swap limit reached
if (
self.zelos.config.syscall_thread_swap > 0
and self.syscall_cnt % self.zelos.config.syscall_thread_swap == 0
):
self.zelos.swap_thread("syscall limit thread swap")
return
```
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/05_syscall_limit_plugin.md | 05_syscall_limit_plugin.md |
# 02 - Scripting with Zelos
This tutorial demonstrates how Zelos can be used as a library in scripts to
dynamically change behavior at runtime.
## Hello Zelos
Files and scripts from this example are available in the [examples/hello](https://github.com/zeropointdynamics/zelos/tree/master/examples/hello) directory.
Consider the following example binary:
```
$ ./hello.bin
Hello, Zelos!
```
To emulate this binary with Zelos:
```python
from zelos import Zelos
z = Zelos("hello.bin")
z.start()
```
Which will produce the following output:
```
[main] [SYSCALL] brk ( addr=0x0 ) -> 90000038
[main] [SYSCALL] brk ( addr=0x90001238 ) -> 90001238
[main] [SYSCALL] arch_prctl ( option=0x1002 (ARCH_SET_FS), addr=0x90000900 ) -> 0
[main] [SYSCALL] uname ( buf=0xff08eae0 ) -> 0
[main] [SYSCALL] readlink ( pathname=0x57ee83 ("/proc/self/exe"), buf=0xff08dc10, bufsiz=0x1000 ) -> 31
[main] [SYSCALL] brk ( addr=0x90022238 ) -> 90022238
[main] [SYSCALL] brk ( addr=0x90023000 ) -> 90023000
[main] [SYSCALL] access ( pathname=0x57ea5a ("/etc/ld.so.nohwcap"), mode=0x0 ) -> -1
[main] [SYSCALL] fstat ( fd=0x1 (stdout), statbuf=0xff08ea50 ) -> 0
IOCTL: 0
[main] [SYSCALL] ioctl ( fd=0x1 (stdout), request=0x5401, data=0xff08e9b0 ) -> -1
[StdOut]: 'bytearray(b'Hello, Zelos!\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x900132d0 ("Hello, Zelos!\n"), count=0xe ) -> e
16:36:17:threads___:SUCCES:Done executing thread main
[main] [SYSCALL] exit_group ( status=0x0 ) -> void
```
## Scripting Tutorial - Bypass
The source code and test program for this tutorial can be found in the [examples/script_bypass](https://github.com/zeropointdynamics/zelos/tree/master/examples/script_bypass) directory.
Consider the following example binary:
```sh
$ ./password_check.bin
What's the password?
password
Incorrect
$ ./password_check.bin
What's the password
0point
Correct!
```
The above binary prompts the user for a password from stdin. Upon
entry of the correct password, the program will output "Correct!" to
stdout and exit. Upon entry of an incorrect password, however, the
program will output "Incorrect" to stdout.
Our objective is to bypass the password check, such that
any password can be entered and the program will always print "Correct!"
to stdout. For this tutorial we will accomplish this in three different ways,
by dynamically writing directly to memory, setting registers, and patching code.
For each of these, we start with a boilerplate script that loads the binary
and emulates normal behavior:
```python
from zelos import Zelos
def main():
z = Zelos("password_check.bin", inst=True)
z.start()
if __name__ == "__main__":
main()
```
We can examine the output of the above script to locate where the string
comparison and subsequent check for equality actually occurs:
```
...
[main] [INS] [004017c0] <_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_S_compareEmm>
[main] [INS] [004017c0] push rbp ; push(0xff08ebf0) -> ff08ec70
[main] [INS] [004017c1] mov rbp, rsp ; rbp = 0xff08eb80 -> ff08ebf0
[main] [INS] [004017c4] mov qword ptr [rbp - 0x10], rdi ; store(0xff08eb70,0x0)
[main] [INS] [004017c8] mov qword ptr [rbp - 0x18], rsi ; store(0xff08eb68,0x6)
[main] [INS] [004017cc] mov rsi, qword ptr [rbp - 0x10] ; rsi = 0x0
[main] [INS] [004017d0] sub rsi, qword ptr [rbp - 0x18] ; rsi = 0xfffffffffffffffa
[main] [INS] [004017d4] mov qword ptr [rbp - 0x20], rsi ; store(0xff08eb60,0xfffffffffffffffa)
[main] [INS] [004017d8] cmp qword ptr [rbp - 0x20], 0x7fffffff ; 0xfffffffffffffffa vs 0x7fffffff
[main] [INS] [004017e0] jle 0x4017f2
[main] [INS] [004017f2] cmp qword ptr [rbp - 0x20], -0x80000000 ; 0xfffffffffffffffa vs 0x-80000000
[main] [INS] [004017fa] jge 0x40180c
[main] [INS] [0040180c] mov rax, qword ptr [rbp - 0x20] ; rax = 0xfffffffffffffffa
[main] [INS] [00401810] mov ecx, eax ; ecx = 0xfffffffa
[main] [INS] [00401812] mov dword ptr [rbp - 4], ecx ; store(0xff08eb7c,0xfffffffa)
[main] [INS] [00401815] mov eax, dword ptr [rbp - 4] ; eax = 0xfffffffa
[main] [INS] [00401818] pop rbp ; rbp = 0xff08ebf0 -> ff08ec70
[main] [INS] [00401819] ret
[main] [INS] [004012a7] mov dword ptr [rbp - 0x2c], eax ; store(0xff08ebc4,0xfffffffa)
[main] [INS] [004012aa] mov eax, dword ptr [rbp - 0x2c] ; eax = 0xfffffffa
[main] [INS] [004012ad] add rsp, 0x60 ; rsp = 0xff08ebf0 -> ff08ec70
[main] [INS] [004012b1] pop rbp ; rbp = 0xff08ec70 -> 49cfa0
[main] [INS] [004012b2] ret
[main] [INS] [00401079] mov dword ptr [rbp - 0x38], eax ; store(0xff08ec38,0xfffffffa)
[main] [INS] [0040107c] cmp dword ptr [rbp - 0x38], 0
[main] [INS] [00401080] jne 0x4010d7
...
```
### Method 1 - Writing Memory
We can see from the above output that the result of comparison is
initially contained in `eax` before being moved to the memory location
at `[rbp - 0x38]` after the last `ret`. This value in memory is then
used in the subequent `cmp` instruction to determine equality. In the
above output, the `jne` instruction is what determines whether the
program will execute code that prints "Correct!" vs "Incorrect". If the
jump is taken, the program will print "Incorrect".
To bypass this, we can ensure that this jump is never taken by writing
`0x0` to the memory location that is used as the first operand of the
`cmp` instruction.
```python
def patch_mem():
z = Zelos("password_check.bin", inst=True)
# The address cmp instr observed above
target_address = 0x0040107C
# run to the address of cmp and break
z.set_breakpoint(target_address, True)
z.start()
# Execution is now STOPPED at address 0x0040107C
# Write 0x0 to address [rbp - 0x38]
z.memory.write_int(z.regs.rbp - 0x38, 0x0)
# resume execution
z.start()
if __name__ == "__main__":
patch_mem()
```
To check our script, we can see that the last four lines of the output are:
```
...
[StdOut]: 'bytearray(b'Correct!\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x900132d0 ("Correct!\n"), count=0x9 ) -> 9
11:32:11:threads___:SUCCES:Done executing thread main
[main] [SYSCALL] exit_group ( status=0x0 ) -> void
```
### Method 2 - Setting Registers
We noted in method 1 that the result of comparison is initially contained in `eax` before being moved to the memory location at `[rbp - 0x38]` after the last `ret`. Therefore,
we can accomplish the same behavior as method 1 by setting `eax` to `0x0` before
it is used.
```python
def patch_reg():
z = Zelos("password_check.bin", inst=True)
# The address of the first time eax is used above
target_address = 0x00401810
# run to the address of cmp and break
z.set_breakpoint(target_address, True)
z.start()
# Execution is now STOPPED at address 0x00401810
# Set eax to 0x0
z.eax = 0x0
# Resume execution
z.start()
if __name__ == "__main__":
patch_reg()
```
Again, to check our script, we can see that the last four lines of the output are:
```
...
[StdOut]: 'bytearray(b'Correct!\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x900132d0 ("Correct!\n"), count=0x9 ) -> 9
12:08:38:threads___:SUCCES:Done executing thread main
[main] [SYSCALL] exit_group ( status=0x0 ) -> void
```
### Method 3 - Patching Code
An alternative approach to methods 1 & 2 is to ensure that the final
jump is never taken by replacing the `cmp` that immediately precedes the
final `jne`. In the following script, this is accomplished by replacing
`cmp dword ptr [rbp - 0x38], 0` with `cmp eax, eax`, which ensures that
the compared values never differ and the jump is never taken.
We make use of the keystone assembler to encode our replacement code, which
also includes two NOP instructions since we are replacing a 4 byte instruction.
```python
def patch_code():
z = Zelos("password_check.bin", inst=True)
# The address of the cmp instr
target_address = 0x0040107C
# run to the address of cmp and break
z.set_breakpoint(target_address, True)
z.start()
# Execution is now STOPPED at address 0x0040107C
# Code we want to insert
code = b"NOP; NOP; CMP eax, eax"
# Assemble with keystone
ks = Ks(KS_ARCH_X86, KS_MODE_64)
encoding, count = ks.asm(code)
# replace the four bytes at this location with our code
for i in range(len(encoding)):
z.memory.write_uint8(target_address + i, encoding[i])
# resume execution
z.start()
if __name__ == "__main__":
patch_code()
```
Yet again, to check our script, we can see that the last four lines of the output are:
```
...
[StdOut]: 'bytearray(b'Correct!\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x900132d0 ("Correct!\n"), count=0x9 ) -> 9
12:12:26:threads___:SUCCES:Done executing thread main
[main] [SYSCALL] exit_group ( status=0x0 ) -> void
```
## Scripting Tutorial - Brute
The source code and test program for this tutorial can be found at
[examples/script_brute](https://github.com/zeropointdynamics/zelos/tree/master/examples/script_brute)
This example demonstrates some more of the dynamic capabilities of zelos. Consider the following example binary:
```sh
$ ./password.bin
What's the password?
password
Incorrect
$ ./password.bin
What's the password
0point
Correct!
```
The above binary prompts the user for a password from stdin. Upon
entry of the correct password, the program will output "Correct!" to
stdout and exit. Upon entry of an incorrect password, however, the
program will output "Incorrect" to stdout and then sleep for 10 seconds
before exiting.
Let's say that our objective is to dynamically brute force this password,
but we don't have time to wait 10 seconds between every failure. Our
goal is to focus only on the part of the program that checks user input,
namely the `strcmp` function.
We start with a script that loads the binary and emulates normal behavior:
```python
from zelos import Zelos
def brute():
z = Zelos("password.bin", inst=True)
# Start execution
z.start()
if __name__ == "__main__":
brute()
```
We can examine the output of the above to locate where the `strcmp`
function is invoked. Here we can see the `call` to `strcmp` is invoked at
address `0x00400bb6`. Additionally, the `rsi` and `rdi` registers appear to point to the strings being compared.
```
...
[main] [INS] [00400bac] lea rsi, [rip + 0xab349] ; rsi = 0x4abefc -> "0point"
[main] [INS] [00400bb3] mov rdi, rax ; rdi = 0xff08ec00 -> 0
[main] [INS] [00400bb6] call 0x4004b0 ; call(0x4004b0)
[main] [INS] [004004b0] jmp qword ptr [rip + 0x2d3be2] ; jmp(0x425df0)
[main] [INS] [00425df0] <__strcmp_ssse3>
[main] [INS] [00425df0] mov ecx, esi ; ecx = 0x4abefc -> "0point"
[main] [INS] [00425df2] mov eax, edi ; eax = 0xff08ec00 -> 0
...
```
Ignoring for a moment the fact that Zelos annotates pointers with the data at their location, let's modify our script to stop at the address of the call to `strcmp` and save the contents of the `rsi` & `rdi` registers. Let's also take the opportunity
to guess the password by writing a string to the address in `rdi`.
```python
from zelos import Zelos
def brute():
z = Zelos("password.bin", inst=True)
# The address of strcmp observed above
strcmp_address = 0x00400BB6
# run to the address of cmp and break
z.set_breakpoint(strcmp_address, True)
z.start()
# Execution is now STOPPED at address 0x00400BB6
# get initial reg values of rdi & rsi before strcmp is called
rdi = z.regs.rdi # user input
rsi = z.regs.rsi # 'real' password
# Write the string "our best guess" to address in rdi
z.memory.write_string(rdi, "our best guess")
# Resume execution
z.start()
if __name__ == "__main__":
brute()
```
At this point, we can inspect the output of the above modified script to
see that we successfully wrote the string "_our best guess_" to memory,
but unfortunately (and unsurprisingly) it was not correct.
We can see that zelos has annotated register `edi` with the first 8
characters ("our best") of the string at the address pointed to. We can
also see the stdout output indicating that our guess was incorrect.
```
...
[main] [INS] [00425df0] <__strcmp_ssse3>
[main] [INS] [00425df0] mov ecx, esi ; ecx = 0x4abefc -> "0point"
[main] [INS] [00425df2] mov eax, edi ; eax = 0xff08ec00 -> "our best"
...
[StdOut]: 'bytearray(b"What\'s the password?\nIncorrect\n")'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x90001690 ("What's the password?\nIncorrect\n"), count=0x1f ) -> 1f
...
```
Now we are prepared to add the actual 'brute-force' to this script.
For this, we will need to know where the check occurs that causes
behavior to diverge when inputting a correct vs incorrect password.
This appears to occur in a `test` instruction immediately after the
`strcmp` function returns, at address `0x400bbb`.
```
...
[main] [INS] [0042702c] sub eax, ecx
[main] [INS] [0042702e] ret
[main] [INS] [00400bbb] test eax, eax
[main] [INS] [00400bbd] jne 0x400bcd
...
```
We will 'brute-force' by repeatedly writing our guess to memory, letting execution
run until we reach the above `test` instruction, inspect the flag `zf` set as a result
of the `test`, and reset `IP` & `rsi` & `rdi` back to the call to `strcmp` if `zf` indicates that strings differ.
```python
from zelos import Zelos
def brute():
z = Zelos("password.bin", inst=True)
# The address of strcmp observed above
strcmp_address = 0x00400BB6
# run to the address of cmp and break
z.set_breakpoint(strcmp_address, True)
z.start()
# Execution is now STOPPED at address 0x00400BB6
# get initial reg values of rdi & rsi before strcmp is called
rdi = z.regs.rdi # user input
rsi = z.regs.rsi # 'real' password
# 'brute force' the correct string
for i in range(9, -1, -1):
# write our bruteforced guess to memory
z.memory.write_string(rdi, str(i) + "point")
# Address of the test instr
test_address = 0x00400BBB
# run to the address of cmp and break
z.set_breakpoint(test_address, True)
z.start()
# execute one step, in this case the test instr
z.step()
# check the zf bit for result of test
flags = z.regs.flags
zf = (flags & 0x40) >> 6
if zf == 1:
# if correct, run to completion
z.start()
return
# otherwise, reset ip to strcmp func & set regs
z.regs.setIP(strcmp_address)
z.regs.rdi = rdi
z.regs.rsi = rsi
if __name__ == "__main__":
brute()
```
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/02_scripting.md | 02_scripting.md |
# 06 - Exporting An Overlay & IDA Pro Plugin
This tutorial explains how to use zelos to export instruction overlays and import them into IDA Pro using the zelos IDA plugin.
## Overview
The Overlay Plugin provides the ability to export runtime information from zelos, which can include:
* Contents of all memory regions
* Instruction-level comments & runtime values for all executed instructions
* Function information for all executed functions
These overlays are intended to be used for integrating the results of zelos emulation into other tools. To demonstrate this using the zelos IDA plugin, described below, overlays can be imported into IDA Pro to improve static analysis by highlighting the runtime behavior of the emulated binary. The IDA plugin will recolor the IDA graph view to show the execution paths observed during zelos dynamic analysis, as well as annotate traced instructions with their corresponding runtime values. This is helpful for honing in on the most important areas of an executable during static analysis.

### Using the IDA Plugin
First, we are going to use zelos to emulate an executable and generate an overlay with comments. The executable that we are emulating is a basic "hello world", statically-compiled, ELF binary which can be found in the repo [here](https://github.com/zeropointdynamics/zelos/blob/master/tests/data/static_elf_helloworld).
```console
python -m zelos static_elf_helloworld --export_trace --inst --fasttrace
```
After generating an overlay (with comments), go ahead and open `static_elf_helloworld` in IDA Pro for disassembly. Wait for IDA to load the executable and the finish the initial autoanalysis. Once this completes, assuming you installed the zelos IDA plugin correctly, you should be able to see a View menu option that says "Load Zelos Overlay...". Click on this, and when prompted, navigate to and select the `static_elf_helloworld.zmu` overlay file that we generated above.

After a moment, you will notice that the IDA View has been updated. The yellow highlighting indicates an operation that has been emulated in zelos, and the updated comment string at each address describes the value of the emulated operands.
### Getting the IDA Plugin
After generating an overlay with instruction-level comments, it can be imported into IDA Pro using the zelos IDA plugin (which you can get [here](https://raw.githubusercontent.com/zeropointdynamics/zelos/master/src/zelos/ext/plugins/overlay/zelos_ida.py)).
#### Installing the IDA Plugin
The plugin source can be viewed and manually downloaded from [here](https://raw.githubusercontent.com/zeropointdynamics/zelos/master/src/zelos/ext/plugins/overlay/zelos_ida.py). To install, save this file into your IDA Pro install location's `plugins/` directory. On windows, this will typically be something like `C:\Program Files\IDA 7.0\plugins\`. On linux, this will typically be `<ida_install_dir>/plugins/`.
If you would instead prefer a script, if you are using windows, __modify the following powershell command for your IDA install location__:
```console
wget "https://raw.githubusercontent.com/zeropointdynamics/zelos/master/src/zelos/ext/plugins/overlay/zelos_ida.py" -outfile "C:\Program Files\IDA 7.0\plugins\zelos_ida.py"
```
If you are using linux, __modify the following bash command for your IDA install location__:
```console
wget https://raw.githubusercontent.com/zeropointdynamics/zelos/master/src/zelos/ext/plugins/overlay/zelos_ida.py -O <ida_install_dir>/plugins/zelos_ida.py
```
## Creating Instruction Overlays
Instruction overlays can be created using zelos from either the command line or in a script.
### Command Line Use
An instruction overlay can be generated by using the `--export_trace` flag on the command line in addition to the flag `--inst` to enable _verbose_ mode. The following command will emulate the executable `my_binary` normally, and upon completion will write an overlay to the file `my_binary.zmu`.
```console
$ zelos my_binary --export_trace --inst
```
For an additional speedup, the `--fasttrace` flag can be included in addition to `--inst`, which restricts verbose comment generation to only the first time an address is executed.
```console
$ zelos my_binary --export_trace --inst --fasttrace
```
### Script Use
An overlay can also be generated when using zelos as a library in scripts by interacting with the `overlay` plugin directly. The following shows an example of creating an overlay dynamically in a script.
```python
# Generating an overlay by invoking the overlay
# plugin directly.
from zelos import Zelos
z = Zelos(
"/path/to/my_binary",
inst=True, # include instruction-level comments
fasttrace=True,
export_trace=True,
)
z.start()
# After emulation finishes
# Open a new file for creating an overlay
with open("overlay.zmu", "w") as f:
# Export an overlay
z.plugins.overlay.export(f)
```
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/06_snapshot_overlay.md | 06_snapshot_overlay.md |
# 04 - Creating Plugins
This tutorial demonstrates how the in-memory string finding script from the previous tutorial can be adapted to a plugin.
Files and scripts from this tutorial are available in the [examples/inmemory_strings](https://github.com/zeropointdynamics/zelos/blob/master/examples/inmemory_strings) directory.
## Plugin Overview
Plugins are ways that make sharing additional functionalities for Zelos even easier. Plugins can be used to
* Modify how Zelos executes
* Provide additional output from zelos
* Extend Zelos's capabilities
In order for Zelos to find plugins, the python module containing the plugin must be located in a path specified by the `ZELOS_PLUGIN_DIR` environment variable.
## Building a Minimal Plugin
```eval_rst
Zelos identifies plugins as objects that subclass the :py:class:`zelos.IPlugin` class.
```
```python
from zelos import IPlugin
class MinimalPlugin(IPlugin):
pass
```
If we include this in a file `/home/kevin/zelos_plugins/MinimalPlugin.py`, let's just set our environment up appropriately before running zelos with our plugin!
```
$ ZELOS_PLUGIN_DIR=$ZELOS_PLUGIN_DIR,`/home/kevin/zelos_plugins`
$ zelos target_binary
Plugins: runner, minimalplugin
...
```
```eval_rst
Unfortunately, our plugin doesn't do much at the moment. We can add some functionality, but first we should have a way to turn our plugin on and off from the command line. This prevents plugins from running costly operations or printing extraneous output when they aren't being used. The easiest way to do this is by specifying a :py:class:`zelos.CommandLineOption` to add flags to the zelos command line tool. The arguments for creating a :py:class:`zelos.CommandLineOption` are identical to the python :code:`argparse` library's `add_argument() <https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument>`_ function.
The ideal time to activate the plugin is when the plugin is initialized by Zelos through the :code:`__init__` function. You can add your own initialization code by creating an :code:`__init__` which takes :py:class:`zelos.Zelos` as an input. Remember to begin with a call to the parent :code:`__init__` function.
```
```python
from zelos import IPlugin, Zelos
class MinimalPlugin(IPlugin):
def __init__(self, z:Zelos):
super.__init__(z)
print("Minimal plugin is created.")
```
```eval_rst
Now, we add the :py:class:`zelos.CommandLineOption` to change behavior at run time. The option can then be accessed using :py:class:`zelos.Zelos`'s :code:`config` field.
```
```python
from zelos import IPlugin, CommandLineOption
CommandLineOption('activate_minimal_plugin', action='store_true')
class MinimalPlugin(IPlugin):
def __init__(self, z):
super.__init__(z)
print("Minimal plugin is created.")
if z.config.activate_minimal_plugin:
print("Minimal plugin has been activated!")
```
Now we can change the behavior of zelos using our `MinimalPlugin`!
```
$ zelos target_binary
Minimal plugin is created.
...
$ zelos --activate_minimal_plugin target_binary
Minimal plugin is created.
Minimal plugin has been activated!
...
```
Now to do something a bit more complicated.
## Creating the In-Memory Strings Plugin.
The script from [the previous tutorial](03_using_hooks.md) can be converted into a plugin so that we can easily use it in the future.
The following plugin showing how to collect in-memory strings can be found at [examples/inmemory_strings/strings_plugin.py](https://github.com/zeropointdynamics/zelos/blob/master/examples/inmemory_strings/strings_plugin.py). To invoke the plugin, run `zelos --print_strings 4 target_binary`.
```python
from zelos import CommandLineOption, Zelos, HookType, IPlugin
CommandLineOption(
"print_strings",
type=int,
default=None,
help="The minimum size of string to identify",
)
class StringCollectorPlugin(IPlugin):
def __init__(self, z: Zelos):
super().__init__(z)
if z.config.print_strings:
z.hook_memory(
HookType.MEMORY.WRITE,
self.collect_writes,
name="strings_syscall_hook",
)
self._min_len = z.config.print_strings
self._current_string = ""
self._next_addr = 0
def collect_writes(self, zelos, access, address, size, value):
"""
Collects strings that are written to memory. Intended to be used
as a callback in a Zelos HookType.MEMORY hook.
"""
data = zelos.memory.pack(value)
try:
decoded_data = data.decode()
except UnicodeDecodeError:
self._next_addr = 0
self._end_current_string()
return
decoded_data = decoded_data[:size]
first_null_byte = decoded_data.find("\x00")
if first_null_byte != -1:
decoded_data = decoded_data[:first_null_byte]
self._current_string += decoded_data
self._next_addr = 0
self._end_current_string()
return
if address != self._next_addr:
self._end_current_string()
self._next_addr = address + size
self._current_string += decoded_data
return
def _end_current_string(self) -> None:
"""
Ends the currently identified string. May save the string if it
looks legit enough.
"""
if len(self._current_string) >= self._min_len:
print(f'Found string: "{self._current_string}"')
self._current_string = ""
```
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/04_writing_plugins.md | 04_writing_plugins.md |
# 07 - ZML and Feeds
## Feeds Overview
Feeds are a way to subscribe to information that is retrieved from a
dynamic execution of a binary, while respecting the performance
optimizations that have been requested by the user. This allows users to
specify what kinds of information should be collected, and therefore the
performance cost, in a global way, without having to configure
multiple plugins separately.
To start with, there are different levels of feeds, each increasing in
amount of verbosity as well as performance cost:
```
           -> Verbosity ->
None -> Syscalls -> Functions -> Instructions
          <- Performance <-
```
Which feeds are enabled is determined through a global "feed level".
Only feeds of the specified verbosity or less are provided data through
their subscribed callbacks.

For example, when the feed level is FeedLevel.FUNC, subscribers to the
syscall and func feeds will be run, but no calls to inst feed
subscribers will be made.
Feeds are what powers the tracing capabilities of Zelos. In fact, the
--inst / --func / --syscall options set the starting feed level to
ensure that tracing is provided the appropriate information.
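For instance (the binary name here is only a placeholder), a run that
starts at the function feed level can be launched as:
```
$ zelos --func target_binary
```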
## Hooks Vs. Feeds
Both Hooks and Feeds provide a way of running code when certain events
occur. However, Hooks cannot be modified through the command line
(unless a plugin adds those capabilities). Feeds offer a unified way for
plugins to be controlled through the global "feed level".
### You should use Feeds if
* You don't require complete information. You can handle being provided information only within certain regions.
* You want to use ZML to specify when to collect information without requiring a custom cmdline flag.
### You should use Hooks if
* You require complete information. You cannot support partial information as a result of the changing feed level.
## Conditional Feed Levels
It became increasingly important to have control over when to enable
information flow during an execution. Packed binaries may have behavior
that a user would want to view an instruction trace for, but the
unpacking stage may make such a trace prohibitively long/large. Our
desire would be to only trace the desired regions to improve
performance.
To this end we developed flags that take conditions which change the
feed level during execution. In addition, we specified a language that
can be used to control these dynamic feed level changes.
## ZML (Zelos Manipulation Language)
ZML is how we specify conditional events in Zelos. While the specific
grammar can be seen in [zml.py](https://github.com/zeropointdynamics/zelos/blob/master/src/zelos/zml.py),
overall ZML can be described as a set of conditions centered around an
event.
Example Events:
* syscall=(name of syscall)
* func=(name of function)
* addr=(virtual address of instruction)
* thread=(name of thread)
Example Conditions:
* n=(number of times executed before triggering)
* retval=(return value of syscall/func)
* arg_(arg name)=(value of argument for syscall/func)
* thread=(thread the event must occur on)
For each specified ZML string, there must be one event, combined with
any of the conditions (delimited by commas).
For example, "`syscall=read,arg_fd=7,n=2,thread=main`" is a valid ZML
string that will trigger the second time the read syscall is called
on the thread "main" with the "fd" argument equal to 7.
## Using ZML with Feeds
ZML strings are passed to the --(syscall|func|inst|no)_feed command line
flags.
For example, to trigger instructions to be printed only after the 'recv'
syscall has been called, specify "`--inst_feed=syscall=recv`" on the
command line. For a script, add `inst_feed="syscall=recv"` as a keyword
argument in the Zelos constructor.
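As a minimal sketch (the binary name is a placeholder), the scripted form
could look like:
```python
from zelos import Zelos

# Instruction-level information only starts flowing once the 'recv'
# syscall is observed, per the ZML condition below.
z = Zelos("target_binary", inst_feed="syscall=recv")
z.start()
```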
These command line flags can be specified multiple times, adjusting
the feed level every time any ZML string is satisfied.
For more information on what options are available for configuring feeds,
look at the zelos.zml module.
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/07_zml_and_feeds.md | 07_zml_and_feeds.md |
# 03 - Using Hooks
This tutorial demonstrates how hooks in the Zelos API can be used to identify strings that are copied during runtime without symbol information.
Files and scripts from this tutorial are available in the [examples/inmemory_strings](https://github.com/zeropointdynamics/zelos/blob/master/examples/inmemory_strings) directory.
## Hook Overview
Hooks are a way to invoke your code whenever certain events occur during execution. To hook on:
```eval_rst
* Memory reads and writes use :py:meth:`~zelos.Zelos.hook_memory`
* Invocations of syscalls use :py:meth:`~zelos.Zelos.hook_syscalls`
* Execution of instructions use :py:meth:`~zelos.Zelos.hook_execution`
Each hook offers different configuration options and requires a different type of callback. For more details, as well as examples, for each type of hook, take a look at :py:class:`~zelos.Zelos`.
```
## Pwnable.kr Challenge
The [flag challenge on pwnable.kr](http://pwnable.kr/play.php) provides a binary that we need to extract a flag from. We can start off by just running the binary in zelos using `zelos pwnablekr_flag_binary`.
This will produce the output
```
[main] [SYSCALL] mmap ( addr=0x800000, length=0x2d295e, prot=0x7, flags=0x32, fd=0x0 (stdin), offset=0x0 ) -> 800000
[main] [SYSCALL] readlink ( pathname=0x84a78d ("/proc/self/exe"), buf=0xff08deb4, bufsiz=0x1000 ) -> 12
[main] [SYSCALL] mmap ( addr=0x400000, length=0x2c7000, prot=0x0, flags=0x32, fd=0xffffffff (unknown), offset=0x0 ) -> 400000
[main] [SYSCALL] mmap ( addr=0x400000, length=0xc115e, prot=0x7, flags=0x32, fd=0xffffffff (unknown), offset=0x0 ) -> 400000
[main] [SYSCALL] mprotect ( addr=0x400000, len=0xc115e, prot=0x5 ) -> 0
[main] [SYSCALL] mmap ( addr=0x6c1000, length=0x26f0, prot=0x3, flags=0x32, fd=0xffffffff (unknown), offset=0xc1000 ) -> 6c1000
[main] [SYSCALL] mprotect ( addr=0x6c1000, len=0x26f0, prot=0x3 ) -> 0
[main] [SYSCALL] mmap ( addr=0x6c4000, length=0x22d8, prot=0x3, flags=0x32, fd=0xffffffff (unknown), offset=0x0 ) -> 6c4000
[main] [SYSCALL] munmap ( addr=0x801000, length=0x2d195e ) -> 0
[main] [SYSCALL] uname ( buf=0xff08dab0 ) -> 0
[main] [SYSCALL] brk ( addr=0x0 ) -> 90000048
[main] [SYSCALL] brk ( addr=0x90001208 ) -> 90001208
[main] [SYSCALL] arch_prctl ( option=0x1002 (ARCH_SET_FS), addr=0x90000900 ) -> 0
[main] [SYSCALL] brk ( addr=0x90022208 ) -> 90022208
[main] [SYSCALL] brk ( addr=0x90023000 ) -> 90023000
[main] [SYSCALL] fstat ( fd=0x1 (stdout), statbuf=0xff08db40 ) -> 0
[main] [SYSCALL] ioctl ( fd=0x1 (stdout), request=0x5401, data=0xff08dab8 ) -> -1
[main] [SYSCALL] mmap ( addr=0x0, length=0x1000, prot=0x3, flags=0x22, fd=0xffffffff (unknown), offset=0x0 ) -> 10000
[StdOut]: 'bytearray(b'I will malloc() and strcpy the flag there. take it.\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x10000 ("I will malloc() and strcpy the flag there. take it.\n"), count=0x34 ) -> 34
00:45:32:threads___:SUCCES:Done executing thread main
[main] [SYSCALL] exit_group ( status=0x0 ) -> void
```
Immediately, we see the line:
```
[StdOut]: 'bytearray(b'I will malloc() and strcpy the flag there. take it.\n')'
```
An initial approach may be to dump all of the strings present in the binary using the `strings` utility; unfortunately, the binary is packed with [UPX](https://en.wikipedia.org/wiki/UPX). It seems we'll have to run the binary and find strings while it is running...
## Script to Print In-Memory String Writes
To identify the flag, we will create a script that will print all the times strings are written.
To begin with, let's create a script that will run the target binary similar to how we ran it using the Zelos command line tool.
```python
from zelos import Zelos
z = Zelos("pwnablekr_flag_binary")
z.start()
```
```eval_rst
Next, let's print out every write to memory that occurs. Use the :py:meth:`~zelos.Zelos.hook_memory` to register the hook and specify the :py:const:`zelos.HookType.MEMORY.WRITE` hook type.
```
``` python
from zelos import Zelos, HookType
z = Zelos("pwnablekr_flag_binary")
def mem_hook_callback(zelos: Zelos, access: int, address: int, size: int, value: int):
"Prints the destination and contents of every memory write."
print(f"Address: {address:x}, Value: {value:x}")
z.hook_memory(HookType.MEMORY.WRITE, mem_hook_callback)
z.start()
```
```eval_rst
The function signature used by :code:`mem_hook_callback` is required by :py:meth:`~zelos.Zelos.hook_memory`. You can find the required callback function signature in the documentation for the hook registration functions in :py:class:`~zelos.Zelos`.
Unfortunately this script will print out a lot of garbage. What we want is a very specific subset of these writes, and to print them in a way that we can easily understand. We'll make some basic assumptions on how strings are written to memory via strcpy.
```
1. A single string is written from beginning to end with no memory writes to other locations in between.
2. The bytes that are written make up a valid utf-8 string.
Let's write a class that keeps track of subsequent writes and decodes strings as they are written.
```python
class StringCollector:
def __init__(self):
self._current_string = ""
self._next_addr = 0
    def collect_writes(self, zelos: Zelos, access: int, address: int, size: int, value: int):
# Pack converts the value into its representation in bytes.
data = zelos.memory.pack(value)
try:
decoded_data = data.decode("utf-8")
except UnicodeDecodeError:
self._next_addr = 0
self._end_current_string()
return
decoded_data = decoded_data[:size]
if address != self._next_addr:
self._end_current_string()
self._next_addr = address + size
self._current_string += decoded_data
return
def _end_current_string(self):
print(f'Found string: "{self._current_string}"')
self._current_string = ""
```
```eval_rst
Let's put this class to use. Note that we kept the method signature for :code:`collect_writes` similar to :code:`mem_hook_callback` from before. This allows us to use it as the callback for :py:meth:`~zelos.Zelos.hook_memory`
```
```python
from zelos import Zelos, HookType
class StringCollector:
...
z = Zelos("example_binary")
sc = StringCollector()
z.hook_memory(HookType.MEMORY.WRITE, sc.collect_writes)
z.start()
```
Running this script, we see the following output
```
Found string: "4"
Found string: ""
Found string: ""
Found string: "4"
Found string: ""
Found string: ""
Found string: ""
[StdOut]: 'bytearray(b'I will malloc() and strcpy the flag there. take it.\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x10000 ("I will malloc() and strcpy the flag there. take it.\n"), count=0x34 ) -> 34
Found string: ""
Found string: ""
Found string: ""
Found string: ""
Found string: ""
Found string: ""
```
There is still a lot of random-looking data being printed. Let's clean up the results a bit by making two more assumptions.
1. A string can only contain a null byte at the end.
2. We're only interested in strings with 4 or more characters (similar to the `strings` utility).
Our new and improved `StringCollector` now looks like this:
```python
class StringCollector:
def __init__(self):
self._min_len = 4
self._current_string = ""
self._next_addr = 0
def collect_writes(self, zelos, access, address, size, value):
data = zelos.memory.pack(value)
try:
decoded_data = data.decode("utf-8")
except UnicodeDecodeError:
self._next_addr = 0
self._end_current_string()
return
decoded_data = decoded_data[:size]
first_null_byte = decoded_data.find("\x00")
if first_null_byte != -1:
decoded_data = decoded_data[:first_null_byte]
self._current_string += decoded_data
self._next_addr = 0
self._end_current_string()
return
if address != self._next_addr:
self._end_current_string()
self._next_addr = address + size
self._current_string += decoded_data
return
def _end_current_string(self) -> None:
if len(self._current_string) >= self._min_len:
print(f'Found string: "{self._current_string}"')
self._current_string = ""
```
Running this script still prints out quite a bit due to the aforementioned obfuscation; however, near the end you should see the target string printed out!
```
[main] [SYSCALL] ioctl ( fd=0x1 (stdout), request=0x5401, data=0xff08dab8 ) -> -1
[main] [SYSCALL] mmap ( addr=0x0, length=0x1000, prot=0x3, flags=0x22, fd=0xffffffff (unknown), offset=0x0 ) -> 10000
Found string: "I will malloc() and strcpy the flag there. take it.3"
Found string: "UPX...? sounds like a delivery service :)"
[StdOut]: 'bytearray(b'I will malloc() and strcpy the flag there. take it.\n')'
[main] [SYSCALL] write ( fd=0x1 (stdout), buf=0x10000 ("I will malloc() and strcpy the flag there. take it.\n"), count=0x34 ) -> 34
01:36:32:threads___:SUCCES:Done executing thread main
[main] [SYSCALL] exit_group ( status=0x0 ) -> void
```
Our script still needs some work, since there are many nonsensical characters printed out and we accidentally added a byte onto the string that got printed to stdout. However, we didn't have to worry about UPX! (We'll deal with it in a later tutorial.)
The following example script showing how to collect in-memory strings can be found at [examples/inmemory_strings/strings_script.py](https://github.com/zeropointdynamics/zelos/blob/master/examples/inmemory_strings/strings_script.py).
```python
import argparse
from zelos import Zelos, HookType
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--len",
type=int,
default=4,
help="The minimum size of string to identify",
)
parser.add_argument("filename", type=str, help="The file to analyze")
args = parser.parse_args()
z = Zelos(args.filename)
sc = StringCollector(args.len)
z.hook_memory(
HookType.MEMORY.WRITE, sc.collect_writes, name="strings_syscall_hook"
)
z.start()
class StringCollector:
"""
Identifies strings that are written in-memory. We identify strings by the
observation that when they are written to memory
* The string is written in sequential chunks.
* They are comprised of valid utf-8 bytes
This runs into some false positives with data that happens to be
valid utf-8. To reduce false positives we observe that
* Strings often end at the first null byte.
* False positives are often short strings. There is a higher
chance that 2 consecutive characters are valid utf-8 than
4 consecutive characters.
"""
def __init__(self, min_len):
self._min_len = min_len
self._current_string = ""
self._next_addr = 0
def collect_writes(self, zelos, access, address, size, value):
"""
Collects strings that are written to memory. Intended to be used
as a callback in a Zelos HookType.MEMORY hook.
"""
data = zelos.memory.pack(value)
try:
decoded_data = data.decode()
except UnicodeDecodeError:
self._next_addr = 0
self._end_current_string()
return
decoded_data = decoded_data[:size]
first_null_byte = decoded_data.find("\x00")
if first_null_byte != -1:
decoded_data = decoded_data[:first_null_byte]
self._current_string += decoded_data
self._next_addr = 0
self._end_current_string()
return
if address != self._next_addr:
self._end_current_string()
self._next_addr = address + size
self._current_string += decoded_data
return
def _end_current_string(self) -> None:
"""
Ends the currently identified string. May save the string if it
looks legit enough.
"""
if len(self._current_string) >= self._min_len:
print(f'Found string: "{self._current_string}"')
self._current_string = ""
if __name__ == "__main__":
main()
```
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/docs/tutorials/03_using_hooks.md | 03_using_hooks.md |
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/.github/ISSUE_TEMPLATE/bug_report.md | bug_report.md |
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| zelos | /zelos-0.2.0.tar.gz/zelos-0.2.0/.github/ISSUE_TEMPLATE/feature_request.md | feature_request.md |
.. image:: docs/_static/zelt.png
:alt: Zelt logo
Zalando end-to-end load tester
******************************
.. image:: https://travis-ci.org/zalando-incubator/zelt.svg?branch=master
:alt: travis-ci status badge
:target: https://travis-ci.org/zalando-incubator/zelt
.. image:: https://badgen.net/pypi/v/zelt
:alt: pypi version badge
:target: https://pypi.org/project/zelt
.. image:: https://api.codacy.com/project/badge/Grade/a74dee2bbbd64da8951a3cec5059dda3
:alt: code quality badge
:target: https://www.codacy.com/app/bmaher/zelt
.. image:: https://api.codacy.com/project/badge/Coverage/a74dee2bbbd64da8951a3cec5059dda3
:alt: test coverage badge
:target: https://www.codacy.com/app/bmaher/zelt
.. image:: https://badgen.net/badge/code%20style/black/000
:alt: Code style: Black
:target: https://github.com/ambv/black
|
A **command-line tool** for orchestrating the deployment of
Locust_ in Kubernetes_.
Use it in conjunction with Transformer_ to run large-scale end-to-end
load testing of your website.
Prerequisites
=============
- `Python 3.6+`_
Installation
============
Install using pip:
.. code:: bash
pip install zelt
Usage
=====
Example HAR files, locustfile, and manifests are included in the
``examples/`` directory; try them out.
**N.B.** The cluster to deploy to is determined by your currently
configured context. Ensure you are `using the correct cluster`_
before using Zelt.
Locustfile as input
-------------------
Zelt can deploy Locust with a locustfile to a cluster:
.. code:: bash
zelt from-locustfile PATH_TO_LOCUSTFILE --manifests PATH_TO_MANIFESTS
HAR file(s) as input
---------------------
Zelt can transform HAR file(s) into a locustfile and deploy it along
with Locust to a cluster:
.. code:: bash
zelt from-har PATH_TO_HAR_FILES --manifests PATH_TO_MANIFESTS
**N.B.** This requires
Transformer_ to be installed. For more information about Transformer,
please refer to `Transformer's documentation`_.
Rescale a deployment
--------------------
Zelt can rescale the number of workers_ in a deployment it has made
to a cluster:
.. code:: bash
zelt rescale NUMBER_OF_WORKERS --manifests PATH_TO_MANIFESTS
Delete a deployment
-------------------
Zelt can delete deployments it has made from a cluster:
.. code:: bash
zelt delete --manifests PATH_TO_MANIFESTS
Run Locust locally
------------------
Zelt can also run Locust locally by providing the ``--local/-l`` flag to
either the ``from-har`` or ``from-locustfile`` command e.g.:
.. code:: bash
zelt from-locustfile PATH_TO_LOCUSTFILE --local
Use S3 for locustfile storage
-----------------------------
By default, Zelt uses a ConfigMap for storing the locustfile. ConfigMaps
have a file-size limitation of ~2MB. If your locustfile is larger than
this then you can use an S3 bucket for locustfile storage.
To do so, add the following parameters to your Zelt command:
- ``--storage s3``: Switch to S3 storage
- ``--s3-bucket``: The name of your S3 bucket
- ``--s3-key``: The name of the file as stored in S3
**N.B.** Zelt will *not* create the S3 bucket for you.
**N.B.** Make sure to update your deployment manifest(s) to download the
locustfile file from S3 instead of loading from the ConfigMap volume
mount.
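For illustration, a hypothetical invocation using S3 storage (the bucket and
key names below are placeholders) could look like:
.. code:: bash
   zelt from-har PATH_TO_HAR_FILES --manifests PATH_TO_MANIFESTS --storage s3 --s3-bucket my-bucket --s3-key locustfile.py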
Use a configuration file for Zelt options
-----------------------------------------
An alternative to specifying Zelt’s options on the command-line is to
use a configuration file, for example:
.. code:: bash
zelt from-har --config examples/config/config.yaml
**N.B.** The configuration file’s keys are the same as the command-line
option names but without the double dash (``--``).
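For illustration only (the keys below are assumptions derived from the
option names; see ``examples/config/config.yaml`` for the authoritative
format), such a file could look like:
.. code:: yaml
   har-files:
     - examples/har/recording.har
   manifests: examples/manifests
   worker-pods: 2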
Documentation
=============
Take a look at our documentation_ for more details.
Contributing
============
Please read `CONTRIBUTING.md <CONTRIBUTING.md>`__ for details on our
process for submitting pull requests to us, and please ensure you follow
the `CODE_OF_CONDUCT.md <CODE_OF_CONDUCT.md>`__.
Versioning
==========
We use SemVer_ for versioning.
Authors
=======
- **Brian Maher** - `@bmaher`_
- **Oliwia Zaremba** - `@tortila`_
- **Thibaut Le Page** - `@thilp`_
See also the list of `contributors <CONTRIBUTORS.md>`__ who participated
in this project.
License
=======
This project is licensed under the MIT License - see the
`LICENSE <LICENSE>`__ file for details
.. _Locust: https://locust.io/
.. _Kubernetes: https://kubernetes.io/
.. _Transformer: https://github.com/zalando-incubator/transformer
.. _`Python 3.6+`: https://www.python.org/downloads/
.. _`using the correct cluster`: https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration
.. _`Transformer's documentation`: https://transformer.readthedocs.io/
.. _workers: https://docs.locust.io/en/stable/running-locust-distributed.html
.. _documentation: https://zelt.readthedocs.io/
.. _`@bmaher`: https://github.com/bmaher
.. _`@tortila`: https://github.com/tortila
.. _`@thilp`: https://github.com/thilp
.. _SemVer: http://semver.org/
| zelt | /zelt-1.2.15.tar.gz/zelt-1.2.15/README.rst | README.rst |
import logging
import os
import pkg_resources
import sys
import yaml
from docopt import docopt
from pathlib import Path
from typing import NamedTuple, Sequence
import zelt
from zelt.zelt import StorageMethod
class Config(NamedTuple):
from_har: bool
from_locustfile: bool
rescale: bool
delete: bool
har_files: Sequence[os.PathLike]
locustfile: os.PathLike
transformer_plugins: Sequence[str]
manifests: os.PathLike
worker_pods: int
required_pods: int
storage: str
s3_bucket: str
s3_key: str
clean: bool
local: bool
logging: str
def cli():
"""
Entrypoint for Zelt.
"""
# Disable deprecation warning coming from Kubernetes client's YAML loading.
# See https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
yaml.warnings({"YAMLLoadWarning": False})
config = _load_config(docopt(__doc__, version=_version()))
logging.basicConfig(level=config.logging)
if config.from_har:
config = config._replace(
locustfile=zelt.invoke_transformer(
paths=config.har_files, plugin_names=config.transformer_plugins
)
)
_deploy(config)
if config.from_locustfile:
_deploy(config)
if config.rescale:
_rescale(config)
if config.delete:
_delete(config)
def _version() -> str:
return pkg_resources.get_distribution("zelt").version
def _deploy(config: Config) -> None:
"""
Deploys Locust.
"""
try:
zelt.deploy(
config.locustfile,
int(config.worker_pods),
config.manifests,
config.clean,
StorageMethod.from_storage_arg(config.storage),
config.local,
config.s3_bucket,
config.s3_key,
)
except Exception as e:
logging.fatal("Error: %s", e)
exit(1)
def _rescale(config: Config) -> None:
"""
Rescales a worker deployment.
"""
try:
zelt.rescale(config.manifests, int(config.required_pods))
except Exception as e:
logging.fatal("Error: %s", e)
exit(1)
def _delete(config: Config) -> None:
"""
Deletes a deployment.
"""
try:
zelt.delete(
config.manifests,
StorageMethod.from_storage_arg(config.storage),
config.s3_bucket,
config.s3_key,
)
except Exception as e:
logging.fatal("Error: %s", e)
exit(1)
def _load_config(config: dict) -> Config:
"""
Loads config from command-line or file.
"""
config = _normalise_config(config)
if config["config"]:
config = {**config, **yaml.safe_load(Path(config["config"]).read_text())}
return Config(
from_har=config["from-har"],
from_locustfile=config["from-locustfile"],
rescale=config["rescale"],
delete=config["delete"],
har_files=config.get("har-files", []),
locustfile=config["locustfile"],
transformer_plugins=config.get("transformer-plugins", []),
manifests=config["manifests"],
worker_pods=config["worker-pods"],
required_pods=config["required-pods"],
storage=config["storage"],
s3_bucket=config["s3-bucket"],
s3_key=config["s3-key"],
clean=config["clean"],
local=config["local"],
logging=config["logging"],
)
def _normalise_config(config: dict) -> dict:
"""
Removes special characters from config keys.
"""
normalised_config = {}
for k in config:
normalised_config[
k.replace("--", "").replace("<", "").replace(">", "")
] = config[k]
return normalised_config | zelt | /zelt-1.2.15.tar.gz/zelt-1.2.15/main.py | main.py |
import os
from enum import Enum, IntEnum
import logging
import re
import threading
import select
import yaml
from jinja2 import Environment, FileSystemLoader
import jinja2
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from pyroute2 import IPRoute
logger = logging.getLogger('zelus')
class RouteType(IntEnum):
# https://github.com/svinota/pyroute2/blob/6bf02c1f461b471a012658f043c7c47d66fd20f5/pyroute2/netlink/rtnl/__init__.py#L179
unspec = 0
unicast = 1
local = 2
broadcast = 3
anycast = 4
multicast = 5
blackhole = 6
unreachable = 7
prohibit = 8
throw = 9
nat = 10
xresolve = 11
def __str__(self):
return f'{self.name}'
class RouteProto(IntEnum):
# https://github.com/svinota/pyroute2/blob/6bf02c1f461b471a012658f043c7c47d66fd20f5/pyroute2/netlink/rtnl/__init__.py#L201
unspec = 0
redirect = 1
kernel = 2
boot = 3
static = 4
gated = 8
ra = 9
mrt = 10
zebra = 11
bird = 12
dnrouted = 13
xorp = 14
ntk = 15
dhcp = 16
def __str__(self):
return f'{self.name}'
class RouteScope(IntEnum):
universe = 0
site = 200
link = 253
host = 254
nowhere = 255
def __str__(self):
return f'{self.name}'
class InterfaceMap():
def __init__(self):
# This maps interface names to interface ids
self.interface_map = {}
# This maps interface ids to interface names
self._interface_id_map = {}
with IPRoute() as ipr:
interfaces = ipr.get_links()
for i in interfaces:
interface_name = i.get_attr('IFLA_IFNAME')
interface_id = i['index']
interface_addrs = ipr.get_addr(label=interface_name)
self._interface_id_map[interface_id] = interface_name
self.interface_map[interface_name] = {
'id': interface_id,
'addresses': [i.get_attr('IFA_ADDRESS') for i in interface_addrs]
}
def getInterfaceId(self, interface_name):
try:
return self.interface_map[interface_name]['id']
except KeyError:
try:
# Maybe we passed a interface id?
interface_id = int(interface_name)
return interface_id
except ValueError:
logger.error(
f'Could not find interface id for {interface_name}'
)
return None
def getInterfaceName(self, interface_id):
'''Translate an interface id into an interface name'''
try:
return self._interface_id_map[interface_id]
except KeyError:
logger.error(
f"Unable to get interface name for interface {interface_id}"
)
return interface_id
class TableMap():
def __init__(self):
with open('/etc/iproute2/rt_tables', 'r') as route_tables:
self.table_map = {} # This maps table names to table ids
self._table_id_map = {} # This maps table ids to table names
for line in route_tables.readlines():
m = re.match(r'^(\d+)\s+(\w+)$', line)
if m is not None:
self.table_map[m.group(2)] = int(m.group(1))
self._table_id_map[int(m.group(1))] = m.group(2)
logger.debug(
f'Found table {m.group(1)}={m.group(2)} in rt_tables'
)
def getTableId(self, table_name):
try:
return self.table_map[table_name]
except KeyError:
try:
# Maybe we passed a table id?
return int(table_name)
except ValueError:
logger.error(
f'Could not find table id for {table_name}. '
)
return None
def getTableName(self, table_id):
'''Translate a table_id into a table name'''
try:
return self._table_id_map[table_id]
except KeyError:
logger.error(f"Unable to get table name for table {table_id}")
return table_id
class RouteBuilder():
'''
This class builds route objects
'''
def __init__(self):
# Generate table mapping information
self.table_map = TableMap()
self.interface_map = InterfaceMap()
def build(
self,
dst_len=32,
src_len=0,
tos=0,
table='main',
proto='static',
scope='universe',
dst=None,
type='unicast',
gateway=None,
prefsrc=None,
iinterface=None,
ointerface=None
):
'''Build a route from kwargs'''
try:
# Parse proto
proto = RouteProto[proto.lower()]
except Exception as ex:
logger.critical(
f'Unable to parse proto={proto}. Options are {[str(p) for p in RouteProto]}. Exception: {ex}'
)
proto = RouteProto.static
try:
# Parse type
type = RouteType[type.lower()]
except Exception as ex:
logger.critical(
f'Unable to parse type={type}. Options are {[str(t) for t in RouteType]}. Exception: {ex}'
)
type = RouteType.unicast
try:
# Parse scope
scope = RouteScope[scope.lower()]
except Exception as ex:
logger.critical(
f'Unable to parse scope={scope}. Options are {[str(s) for s in RouteScope]}. Exception: {ex}'
)
scope = RouteScope.universe
logger.debug(
f'Building route: '
f'dst_len={dst_len} '
f'src_len={src_len} '
f'tos={tos} '
f'table_id={table} '
f'proto={proto} '
f'scope={scope} '
f'type={type} '
f'gateway={gateway} '
f'prefsrc={prefsrc} '
f'dst={dst} '
f'iif={iinterface} '
f'oif={ointerface} '
)
# Try to parse the iinterface as an interface name
if iinterface is not None:
iif = self.interface_map.getInterfaceId(iinterface)
else:
iif = None
# Try to parse the ointerface as an interface name
if ointerface is not None:
oif = self.interface_map.getInterfaceId(ointerface)
else:
oif = None
# Try to parse table an a table name
try:
table_id = self.table_map.getTableId(table)
except KeyError:
try:
# Maybe we passed a table id?
table_id = int(table)
except ValueError:
logger.error(
f'Could not find table id for {table}. '
)
# TODO check that dst_len is valid
# TODO check that src_len is valid
# TODO check that tos is valid
# TODO check that proto is valid
# TODO check that scope is valid
# TODO check that dst is valid ip address
# TODO check that type is valid
# TODO check that gateway is valid ip address
# TODO check that prefsrc is valid ip address
return Route(
dst_len=dst_len,
src_len=src_len,
tos=tos,
table_id=table_id,
proto=proto,
scope=scope,
type=type,
gateway=gateway,
prefsrc=prefsrc,
dst=dst,
iif=iif,
oif=oif,
route_builder=self
)
def fromNetlinkMessage(self, message):
'''Build a route from a netlink message'''
try:
dst_len = message['dst_len']
except KeyError:
dst_len = None
try:
src_len = message['src_len']
except KeyError:
src_len = None
try:
tos = message['tos']
except KeyError:
tos = None
try:
table_id = message['table']
except KeyError:
table_id = None
try:
proto = message['proto']
except KeyError:
proto = None
try:
scope = message['scope']
except KeyError:
scope = None
try:
type = message['type']
except KeyError:
type = None
try:
gateway = message.get_attr('RTA_GATEWAY')
except AttributeError:
gateway = None
try:
prefsrc = message.get_attr('RTA_PREFSRC')
except AttributeError:
prefsrc = None
try:
dst = message.get_attr('RTA_DST')
except AttributeError:
dst = None
try:
iif = message.get_attr('RTA_IIF')
except AttributeError:
iif = None
try:
oif = message.get_attr('RTA_OIF')
except AttributeError:
oif = None
logger.debug(
f'Building route: '
f'dst_len={dst_len} '
f'src_len={src_len} '
f'tos={tos} '
f'table_id={table_id} '
f'proto={proto} '
f'scope={scope} '
f'type={type} '
f'gateway={gateway} '
f'prefsrc={prefsrc} '
f'dst={dst} '
f'iif={iif} '
f'oif={oif} '
)
return Route(
dst_len=dst_len,
src_len=src_len,
tos=tos,
table_id=table_id,
proto=proto,
scope=scope,
type=type,
gateway=gateway,
prefsrc=prefsrc,
dst=dst,
iif=iif,
oif=oif,
route_builder=self
)
class Route():
def __init__(
self,
dst_len=32,
src_len=0,
tos=0,
table_id=254, # main
proto=RouteProto.static,
scope=RouteScope.universe,
dst=None,
type=RouteType.unicast,
gateway=None,
prefsrc=None,
iif=None,
oif=None,
route_builder=None
):
self.dst_len = dst_len
self.src_len = src_len
self.tos = tos
self.table_id = table_id
self.proto = proto
self.scope = scope
self.dst = dst
self.type = type
self.gateway = gateway
self.prefsrc = prefsrc
self.iif = iif
self.oif = oif
self._route_builder = route_builder
def getTableName(self):
if self._route_builder is not None:
return self._route_builder.table_map.getTableName(self.table_id)
else:
return self.table_id
def getInterfaceName(self, interface_id):
if self._route_builder is not None:
return (
self._route_builder.interface_map.getInterfaceName(interface_id)
)
else:
return interface_id
def __repr__(self):
return f'Route({self.__format__(None)})'
def __format__(self, format_spec):
f = ''
route_type = RouteType(self.type)
if route_type is not RouteType.unicast:
f = f + f'{route_type} '
if self.dst is not None:
f = f + f'{self.dst} '
else:
f = f + 'default '
if self.gateway is not None:
f = f + f'via {self.gateway} '
f = f + f'dev {self.getInterfaceName(self.oif)} '
if self.getTableName() != 'main':
f = f + f'table {self.getTableName()} '
route_proto = RouteProto(self.proto)
if route_proto != RouteProto.static:
f = f + f'proto {route_proto} '
route_scope = RouteScope(self.scope)
if route_scope != RouteScope.universe:
f = f + f'scope {route_scope} '
if self.prefsrc is not None:
f = f + f'src {self.prefsrc} '
return f.strip()
def __eq__(self, other):
return (
self.dst_len == other.dst_len and
self.src_len == other.src_len and
self.tos == other.tos and
self.table_id == other.table_id and
self.proto == other.proto and
self.scope == other.scope and
self.dst == other.dst and
self.type == other.type and
self.gateway == other.gateway and
self.prefsrc == other.prefsrc and
self.iif == other.iif and
self.oif == other.oif
)
def __ne__(self, other):
return (
self.dst_len != other.dst_len or
self.src_len != other.src_len or
self.tos != other.tos or
self.table_id != other.table_id or
self.proto != other.proto or
self.scope != other.scope or
self.dst != other.dst or
self.type != other.type or
self.gateway != other.gateway or
self.prefsrc != other.prefsrc or
self.iif != other.iif or
self.oif != other.oif
)
def add(self, ipr):
'''Add the route to the routing table'''
ipr.route(
'add',
dst_len=self.dst_len,
src_len=self.src_len,
tos=self.tos,
table_id=self.table_id,
proto=self.proto,
scope=self.scope,
type=self.type,
gateway=self.gateway,
prefsrc=self.prefsrc,
dst=self.dst,
iif=self.iif,
oif=self.oif
)
def delete(self, ipr):
'''Delete the route to the routing table'''
ipr.route(
'del',
dst_len=self.dst_len,
src_len=self.src_len,
tos=self.tos,
table_id=self.table_id,
proto=self.proto,
scope=self.scope,
type=self.type,
gateway=self.gateway,
prefsrc=self.prefsrc,
dst=self.dst,
iif=self.iif,
oif=self.oif
)
class Mode(Enum):
MONITOR = 1
ENFORCE = 2
STRICT = 3
class ConfigurationHandler(FileSystemEventHandler):
def __init__(self, zelus):
self._zelus = zelus
def on_modified(self, event):
if not event.is_directory:
logger.info(f"Configuration changed detected. Reloading. {event}")
self._zelus._loadConfiguration()
class Zelus():
def __init__(
self, mode,
monitored_interfaces, monitored_tables=['main'],
configuration_path="zelus.yml"):
self.mode = mode
logger.info(f"Zelus in {mode} mode!")
self._route_builder = RouteBuilder()
self._ipr = IPRoute()
self.stop = threading.Event()
# This is used to ensure that if config is reloaded while a new netlink message
# arrives we do not end up with an empty list of protected routes while building
# the new protected route list
self._protected_routes_lock = threading.Lock()
self.table_map = TableMap()
self.interface_map = InterfaceMap()
self._monitored_tables = [] # This is a list of table ids to monitor
for t in monitored_tables:
try:
table_id = self.table_map.getTableId(t)
self._monitored_tables.append(table_id)
table_name = self.table_map.getTableName(table_id)
logger.debug(f'Monitoring table {table_name}({table_id})')
except KeyError:
try:
# Maybe we passed a table id?
table_id = int(t)
self._monitored_tables.append(table_id)
logger.debug(f'Monitoring table UNKNOWN({table_id})')
except ValueError:
logger.error(
f'Could not find table id for {t}. '
f'Not monitoring this table')
# This is a list of interface ids to monitor
self._monitored_interfaces = []
for interface_name in monitored_interfaces:
interface_id = self.interface_map.getInterfaceId(interface_name)
self._monitored_interfaces.append(interface_id)
i_name = self.interface_map.getInterfaceName(interface_id)
logger.debug(
f"Monitoring interface {i_name}({interface_id})"
)
self._configuration_path = configuration_path
self._protected_routes = []
self.loadConfiguration()
def __del__(self):
self._ipr.close()
def loadConfiguration(self):
        # 1. Load initial configuration
self._loadConfiguration()
# 2. Watch for configuration file changes and reload
config_observer = Observer()
config_handler = ConfigurationHandler(self)
config_observer.schedule(
config_handler,
os.path.expanduser(self._configuration_path)
)
config_observer.start()
def _loadConfiguration(self):
'''
        Load configuration from config_path and construct routes to be enforced
and interfaces to be monitored
'''
protected_routes = []
try:
# 1. Use config file as jinja2 template
template_loader = FileSystemLoader(
searchpath=os.path.dirname(
os.path.expanduser(self._configuration_path)
)
)
environment = Environment(loader=template_loader)
template = environment.get_template(
os.path.basename(
os.path.expanduser(self._configuration_path)
)
)
configuration_content = template.render(
interfaces=self.interface_map.interface_map
)
logger.debug(f"Configuation file content: {configuration_content}")
# 2. Load templated content as yaml
contents = yaml.safe_load(configuration_content)
logger.debug(f"Configuation file content: {contents}")
protected_routes = contents['protected_routes']
except OSError as ex:
logger.critical(f'Unable to load configuration file @ {self._configuration_path}. Exception: {ex}')
except KeyError:
logger.critical(f'Configuration file @ {self._configuration_path} must contain protected_routes key.')
except yaml.YAMLError as ex:
logger.critical(f'Error in configuration file @ {self._configuration_path}: {ex}')
except jinja2.exceptions.UndefinedError as ex:
logger.critical(f'Error in configuration file @ {self._configuration_path}: {ex}')
except Exception as ex:
logger.critical(f"Could not load configuration @ {self._configuration_path}. Exception: {ex}")
# Clear old protected routes
new_protected_routes = []
for route in protected_routes:
try:
ointerface = route['ointerface']
oif = self._route_builder.interface_map.getInterfaceId(ointerface)
except KeyError:
logger.critical(f'Each route must have a ointerface. Not protecting. {route}')
continue
if oif is None:
logger.critical(f'Unable to find interface id for ointerface {ointerface}. Not protecting. {route}')
continue
if oif not in self._monitored_interfaces:
logger.critical(
f'Protected route ointerface {ointerface} '
f'must be in the monitored interface list '
f'{[ self._route_builder.interface_map.getInterfaceName(i) for i in self._monitored_interfaces]}. '
f'Not protecting. route: {route}'
)
continue
try:
table_name = route['table']
table_id = self._route_builder.table_map.getTableId(table_name)
except KeyError:
table_id = 254 # main
if table_id not in self._monitored_tables:
logger.critical(
f'Protected route table {self.table_map.getTableName(table_id)} '
f'must be in the monitored table list '
f'{[ self._route_builder.table_map.getTableName(t) for t in self._monitored_tables]}. '
f'Not protecting. route: {route}'
)
continue
try:
new_protected_route = self._route_builder.build(**route) # Pass in dictionary as kwargs
logger.info(f"Protecting route {new_protected_route}")
new_protected_routes.append(new_protected_route)
except Exception as ex:
logger.critical(f"Could not build route for {route}. {ex}")
# LOCK PROTECTED ROUTES while we update
self._protected_routes_lock.acquire()
self._protected_routes = new_protected_routes
self._protected_routes_lock.release()
self.initialSync()
def formatRoute(self, action, route):
return (
f'ip route {action} {route}'
)
def initialSync(self):
# Process initial routes
logger.info("Performing initial sync.")
initial_routes = []
for message in self._ipr.get_routes():
# Process all the intial routes construct the inital route list
initial_route = self._processMessage(message)
if initial_route is not None:
initial_routes.append(initial_route)
logger.debug(
f'initial routes: {initial_routes}')
logger.info("Adding missing protected routes")
self._protected_routes_lock.acquire()
for route in self._protected_routes:
if route not in initial_routes:
if self.mode in [Mode.ENFORCE, Mode.STRICT]:
logger.info(f'Missing initial route. Enforcing. {self.formatRoute("add", route)}')
try:
route.add(self._ipr)
except Exception as ex:
logger.critical(f'Unable to add route. {self.formatRoute("add", route)} Exception: {ex}')
else:
logger.info(f'Missing initial route: {route}')
self._protected_routes_lock.release()
if self.mode == Mode.STRICT:
logger.info("Removing unprotected routes")
for route in initial_routes:
if route not in self._protected_routes:
logger.info(f'Extra initial route. Enforcing. {self.formatRoute("del", route)}')
try:
route.delete(self._ipr)
except Exception as ex:
logger.critical(f'Unable to delete route. {self.formatRoute("del", route)} Exception: {ex}')
logger.info("Initial sync completed.")
def monitor(self):
thread = threading.Thread(target=self._monitor)
thread.start()
return thread
def _processMessage(self, message):
if message['event'] in ['RTM_NEWROUTE', 'RTM_DELROUTE']:
if (
message.get_attr('RTA_TABLE') in self._monitored_tables and
message.get_attr('RTA_OIF') in self._monitored_interfaces
):
logger.debug(
f'NETLINK MESSAGE: {message}'
)
route = self._route_builder.fromNetlinkMessage(message)
logger.debug(f'{route}')
if message['event'] == 'RTM_DELROUTE':
logger.info(
f'Detected change: {self.formatRoute("del", route)}'
)
if self.mode in [Mode.ENFORCE, Mode.STRICT]:
self._enforceDeletedRoute(route)
if message['event'] == 'RTM_NEWROUTE':
logger.info(
f'Detected change: {self.formatRoute("add", route)}'
)
if self.mode == Mode.STRICT:
self._enforceAddedRoute(route)
return route
return None
def routeProtected(self, route):
'''Check if the route is in the protected routes list'''
self._protected_routes_lock.acquire()
protected = route in self._protected_routes
self._protected_routes_lock.release()
return protected
def _enforceDeletedRoute(self, route):
'''
        Check if the deleted route is in the protected route list and re-add
        it if it is.
        '''
        if self.routeProtected(route):
            # Route is protected; re-add it
route.add(self._ipr)
logger.info(f'Enforcing. Reverting {self.formatRoute("add", route)}')
def _enforceAddedRoute(self, route):
'''
Check if the added route is in the protected route list and delete it
if it is not
'''
if self.routeProtected(route) is False:
# Route is not protected. Remove it
route.delete(self._ipr)
logger.info(f'Enforcing. Reverting {self.formatRoute("del", route)}')
def _monitor(self):
'''
Monitor interfaces for changes in routing
'''
poll = select.poll()
poll.register(self._ipr)
self._ipr.bind() # receive broadcasts on IPRoute
while True:
if self.stop.is_set():
break
events = poll.poll()
for fd, flags in events:
if fd == self._ipr.fileno():
for message in self._ipr.get():
self._processMessage(message) | zelus-route-manager | /zelus_route_manager-0.1.0-py3-none-any.whl/zelus/core.py | core.py |
import os
import argparse
import logging
import pkgutil
from .core import Zelus, Mode
logger = logging.getLogger('zelus')
install_files = [
{
"source": "data/zelus.service",
"dest": "etc/systemd/system/zelus.service",
"replace": True
},
{
"source": "data/zelus.yml",
"dest": "etc/zelus/zelus.yml",
"replace": False
}
]
def setLoggingLevel(verbosity):
if verbosity == 0:
logging.basicConfig(level=logging.INFO)
elif verbosity >= 1:
logging.basicConfig(level=logging.DEBUG)
def parseMode(mode):
if mode == 'monitor':
return Mode.MONITOR
elif mode == 'enforce':
return Mode.ENFORCE
elif mode == 'strict':
return Mode.STRICT
def install(install_root='/'):
# Ensure directories exist
os.makedirs(
os.path.join(
install_root,
'etc/zelus'
),
mode=0o700,
exist_ok=True
)
for f in install_files:
if (
not os.path.isfile(os.path.join(install_root, f['dest'])) or
f['replace']
):
with open(os.path.join(install_root, f['dest']), 'w') as out_file:
out_file.write(
pkgutil.get_data(
"zelus",
f["source"]
).decode()
)
def main():
parser = argparse.ArgumentParser(
prog='zelus',
description='Monitor and enforce routes using netlink')
parser.add_argument('-c', '--config', default='config.yml')
parser.add_argument(
'-i', '--interface',
nargs='+', required=True,
default=os.environ.get('ZELUS_MONITORED_INTERFACES', 'eth0')
)
parser.add_argument(
'-t', '--table',
nargs='+',
default=os.environ.get('ZELUS_MONITORED_TABLES', 'main').split()
)
parser.add_argument(
'--verbose', '-v',
action='count',
default=int(os.environ.get('ZELUS_LOGLEVEL', '0'))
)
parser.add_argument(
'--mode', '-m',
choices=[
'monitor',
'enforce',
'strict'
],
default='enforce')
args = parser.parse_args()
setLoggingLevel(args.verbose)
log = 'cli arguments: '
for (k, v) in args._get_kwargs():
log = log + f'{k}: {v} '
logger.debug(log)
z = Zelus(
mode=parseMode(args.mode),
monitored_interfaces=args.interface,
monitored_tables=args.table,
configuration_path=args.config
)
h = z.monitor()
try:
h.join()
except KeyboardInterrupt:
print("Exiting!")
exit(0) | zelus-route-manager | /zelus_route_manager-0.1.0-py3-none-any.whl/zelus/cli.py | cli.py |
from typing import List
import random
from time import sleep
from tweepy import Client
from TwitterAPI import TwitterAPI
class Twitter:
creds : dict
hash_tags: list
nfts_to_tweet: dict
nfts_to_reply: dict
def __init__(self, creds: dict, nfts_to_tweet: dict, nfts_to_reply: dict) -> None:
self.creds = creds
self.nfts_to_tweet = nfts_to_tweet
self.nfts_to_reply = nfts_to_reply
self.influencers = ["OpenSea", "ZssBecker", "rarible", "beeple", "BoredApeYC", "elliotrades", "MetaMask", "TheSandboxGame", "TheBinanceNFT", "DCLBlogger",
"thebrettway", "decentraland", "niftygateway", "MrsunNFT", "BinanceChain"]
self.hash_tags = ["#NFTArt", "#NFTCommunity", "#NFTCollection", "#NFTArtist", "#NFTs"]
self.my_user_id = "1474097571408883730"
def _get_bearer_client(self) -> Client:
client = Client(bearer_token = self.creds["bearer_token"], wait_on_rate_limit = True)
return client
def _get_access_client(self) -> Client:
client = Client(consumer_key = self.creds["consumer_key"],
consumer_secret = self.creds["consumer_secret"],
access_token = self.creds["access_token"],
access_token_secret = self.creds["access_secret"],
wait_on_rate_limit = True)
return client
def _search_followables(self) -> List[str]:
client = self._get_bearer_client()
influencer = client.get_user(username = random.choice(self.influencers))
choices = ["follow from tweets", "follow from followers"]
if random.choice(choices) == choices[0]:
print("searching from tweets")
tweets = self._get_user_timeline_tweets(user_id = influencer.data["id"])
tweets = [t for t in tweets.data]
random.shuffle(tweets)
likers = []
for i in range(5):
chosen_tweet = tweets.pop(0)
temp = client.get_liking_users(id = chosen_tweet.id)
new = [l.id for l in temp.data]
likers += new
return likers
else:
temp = client.get_users_followers(id = influencer.data["id"], max_results = 1000)
followers = [f.id for f in temp.data]
return followers
def _get_user_timeline_tweets(self, user_id: str) -> list:
client = self._get_bearer_client()
tweets = client.get_users_tweets(id = user_id, exclude = ["retweets"])
return tweets
def _like_tweet(self, tweet_id: str) -> None:
client = self._get_access_client()
response = client.like(tweet_id = tweet_id)
def _follow(self, user_id: str) -> None:
client = self._get_access_client()
response = client.follow_user(target_user_id = user_id)
def _get_my_timeline(self) -> list:
client = self._get_bearer_client()
ts = client.get_users_tweets(id = self.my_user_id, tweet_fields = ["context_annotations"])
tweets = []
retweets = []
for tweet in ts.data:
if tweet.data["text"].startswith("@"):
retweets.append(tweet)
else:
tweets.append(tweet)
return tweets, retweets
def _search_tweets_to_reply(self) -> List[str]:
client = self._get_bearer_client()
query = ["drop your nft -is:retweet"]
ts = client.search_recent_tweets(query = query, tweet_fields = ["context_annotations"], max_results = 100)
tweets = []
for tweet in ts.data:
if tweet["text"].startswith("@") == False:
tweets.append(tweet)
return tweets
def _reply(self, tweet_id: str) -> None:
chosen_nft = random.choice(list(self.nfts_to_reply.keys()))
nft_info = self.nfts_to_reply[chosen_nft]
collection = nft_info["collection"]
link = nft_info["link"]
random.shuffle(self.hash_tags)
hashtags = f"{self.hash_tags[0]} {self.hash_tags[1]} {self.hash_tags[2]} {self.hash_tags[3]} {self.hash_tags[4]}"
text1 = random.choice(["Get this #NFT", "Check this #NFT", "How about this #NFT"])
text2 = random.choice([":", " to display in the #Metaverse:", " for you #Metaverse collection:"])
text3 = random.choice(["by me", "by myself", "by yours truly"])
text4 = random.choice(["From the", "Part of the", "Out of the"])
text5 = random.choice(["Luxury", ""])
text6 = random.choice(["available at", "only at", "at"])
text = f'{text1}{text2}"{chosen_nft}" {text3} #JacquesDeVoid | {text4} {collection} {text5} Collection {text6} @opensea\n\n {hashtags} \n {link}'
client = self._get_access_client()
response = client.create_tweet(text = text, in_reply_to_tweet_id = tweet_id)
self._like_tweet(tweet_id = response.data["id"])
def _get_my_retweets(self) -> List[str]:
tweets = self._get_my_timeline()[1]
return tweets
def _get_my_tweets(self) -> List[str]:
tweets = self._get_my_timeline()[0]
return tweets
def _delete_tweet(self, tweet_id: str) -> None:
client = self._get_access_client()
response = client.delete_tweet(id = tweet_id)
def _get_my_num_followers(self) -> int:
api = TwitterAPI(self.creds["consumer_key"],
self.creds["consumer_secret"],
self.creds["access_token"],
self.creds["access_secret"],
api_version = "2")
followers = api.request(f"users/:{self.my_user_id}/followers")
count = len([f for f in followers])
return count
def reply_something(self) -> None:
tweets = self._search_tweets_to_reply()
for tweet in tweets:
self._like_tweet(tweet_id = tweet.data["id"])
tweet = random.choice(tweets)
self._reply(tweet_id = tweet.data["id"])
def delete_my_timeline(self) -> None:
tweets = self._get_my_tweets()
for tweet in tweets:
self._delete_tweet(tweet_id = tweet.data["id"])
def follow_people(self) -> None:
        # Follow roughly 8% of the current follower count per run,
        # treating anything under 1000 followers as 1000 and capping at 500.
        num_followers = self._get_my_num_followers()
        if num_followers < 1000:
            num_followers = 1000
        coef = 0.08
        to_follow = coef * num_followers
        count = 0
        if to_follow > 500:
            to_follow = 500
while count < to_follow:
people = self._search_followables()
if people != None:
if len(people) > to_follow - count:
index = to_follow - count
else:
index = len(people)
for i in range(int(index)):
sleep(random.randint(60, 180))
self._follow(user_id = people[i])
count += 1
def tweet(self) -> None:
chosen_nft = random.choice(list(self.nfts_to_tweet.keys()))
nft_info = self.nfts_to_tweet.pop(chosen_nft)
collection = nft_info["collection"]
link = nft_info["link"]
random.shuffle(self.hash_tags)
hashtags = f"{self.hash_tags[0]} {self.hash_tags[1]} {self.hash_tags[2]} {self.hash_tags[3]} {self.hash_tags[4]} #Metaverse"
text1 = random.choice(["Behold", "How about", "Check", "How would you like"])
text2 = random.choice(["this", "my"])
text3 = random.choice(["amazing", "awesome"])
text4 = random.choice(["by me", "by myself", "by yours truly"])
text5 = random.choice(["From the", "Part of the", "Out of the"])
text6 = random.choice(["Luxury", ""])
text7 = random.choice(["available at", "only at", "at"])
text = f'{text1} {text2} {text3} "{chosen_nft}" #NFT {text4} #JacquesDeVoid | {text5} {collection} {text6} Collection {text7} @opensea\n\n {hashtags} \n {link}'
client = self._get_access_client()
response = client.create_tweet(text = text)
self._like_tweet(tweet_id = response.data["id"]) | zelus | /src/services/twitter/service.py | service.py |
# pysolarmanv5
This is a Python module to interact with Solarman (IGEN-Tech) v5 based solar
inverter data loggers. Modbus RTU frames can be encapsulated in the proprietary
Solarman v5 protocol and requests sent to the data logger on port tcp/8899.
This module aims to simplify the Solarman v5 protocol, exposing interfaces
similar to that of the [uModbus](https://github.com/AdvancedClimateSystems/uModbus) library.
Details of the Solarman v5 protocol have been based on the excellent work of
[Inverter-Data-Logger by XtheOne](https://github.com/XtheOne/Inverter-Data-Logger/)
and others.
## Documentation
pysolarmanv5 documentation is available on [Read the Docs](https://pysolarmanv5.readthedocs.io/).
The Solarman V5 protocol is documented [here](https://pysolarmanv5.readthedocs.io/en/latest/solarmanv5_protocol.html).
## Supported Devices
A user contributed list of supported devices is available [here](https://github.com/jmccrohan/pysolarmanv5/issues/11).
If you are unsure if your device is supported, please use the [solarman_scan](https://github.com/jmccrohan/pysolarmanv5/blob/main/utils/solarman_scan.py)
utility to find compatible data logging sticks on your local network.
Please note that the **Solis S3-WIFI-ST** data logging stick is **NOT supported**.
See [GH issue #8](https://github.com/jmccrohan/pysolarmanv5/issues/8) for further information.
Some Ethernet data logging sticks natively support Modbus TCP and therefore **do not require pysolarmanv5**.
See [GH issue #5](https://github.com/jmccrohan/pysolarmanv5/issues/5) for further information.
## Dependencies
- pysolarmanv5 requires Python 3.8 or greater.
- pysolarmanv5 depends on [uModbus](https://github.com/AdvancedClimateSystems/uModbus).
## Installation
To install the latest stable version of pysolarmanv5 from PyPi, run:
`pip install pysolarmanv5`
To install the latest development version from git, run:
`pip install git+https://github.com/jmccrohan/pysolarmanv5.git`
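## Basic Usage
A minimal, illustrative sketch (the IP address, logger serial number and register addresses below are placeholders; see the documentation for complete examples):
```python
from pysolarmanv5 import PySolarmanV5

# Connect to the data logging stick by IP and logger serial number,
# then read six input registers from the attached inverter.
modbus = PySolarmanV5("192.168.1.10", 123456789)
print(modbus.read_input_registers(register_addr=33022, quantity=6))
```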
## Projects using pysolarmanv5
- [NosIreland/solismon3](https://github.com/NosIreland/solismon3)
- [NosIreland/solismod](https://github.com/NosIreland/solismod)
- [jmccrohan/ha_pyscript_pysolarmanv5](https://github.com/jmccrohan/ha_pyscript_pysolarmanv5)
- [YodaDaCoda/hass-solarman-modbus](https://github.com/YodaDaCoda/hass-solarman-modbus)
- [schwatter/solarman_mqtt](https://github.com/schwatter/solarman_mqtt)
- [RonnyKempe/solismon](https://github.com/RonnyKempe/solismon)
- [toledobastos/solarman_battery_autocharge](https://github.com/toledobastos/solarman_battery_autocharge)
- [AndyTaylorTweet/solis2mqtt](https://github.com/AndyTaylorTweet/solis2mqtt)
- [pixellos/codereinvented.automation.py](https://github.com/pixellos/codereinvented.automation.py)
- [cjgwhite/hass-solar](https://github.com/cjgwhite/hass-solar)
- [imcfarla2003/solarconfig](https://github.com/imcfarla2003/solarconfig)
- [githubDante/deye-controller](https://github.com/githubDante/deye-controller)
## Contributions
Contributions welcome. Please raise any Issues / Pull Requests via [Github](https://github.com/jmccrohan/pysolarmanv5).
## License
pysolarmanv5 is licensed under the [MIT License](https://github.com/jmccrohan/pysolarmanv5/blob/master/LICENSE). Copyright (c) 2022 Jonathan McCrohan
| zem-pysolarmanv5 | /zem-pysolarmanv5-2.5.0rc1.tar.gz/zem-pysolarmanv5-2.5.0rc1/README.md | README.md |
import asyncio
from umodbus.client.serial import rtu
from multiprocessing import Event
from .pysolarmanv5 import NoSocketAvailableError, PySolarmanV5
class PySolarmanV5Async(PySolarmanV5):
"""
The PySolarmanV5Async class establishes a TCP connection to a Solarman V5 data
logging stick on a call to connect() and exposes methods to send/receive
Modbus RTU requests and responses asynchronously.
For more detailed information on the Solarman V5 Protocol, see
:doc:`solarmanv5_protocol`
:param address: IP address or hostname of data logging stick
:type address: str
:param serial: Serial number of the data logging stick (not inverter!)
:type serial: int
:param port: TCP port to connect to data logging stick, defaults to 8899
:type port: int, optional
:param mb_slave_id: Inverter Modbus slave ID, defaults to 1
:type mb_slave_id: int, optional
:param v5_error_correction: Enable naive error correction for V5 frames,
defaults to False
:type v5_error_correction: bool, optional
:param auto_reconnect: Auto reconnect to the data logging stick
:type auto_reconnect: bool, optional
Basic example:
>>> import asyncio
>>> from pysolarmanv5 import PySolarmanV5Async
>>> modbus = PySolarmanV5Async("192.168.1.10", 123456789)
>>> modbus2 = PySolarmanV5Async("192.168.1.11", 123456790)
>>> loop = asyncio.get_event_loop()
    >>> loop.run_until_complete(asyncio.gather(*[modbus.connect(), modbus2.connect()], return_exceptions=True))
>>>
>>> print(loop.run_until_complete(modbus.read_input_registers(register_addr=33022, quantity=6)))
>>> print(loop.run_until_complete(modbus2.read_input_registers(register_addr=33022, quantity=6)))
See :doc:`examples` directory for further examples.
"""
def __init__(self, address, serial, **kwargs):
"""Constructor"""
kwargs.update({'socket': ''})
super(PySolarmanV5Async, self).__init__(address, serial, **kwargs)
self._needs_reconnect = kwargs.get("auto_reconnect", False)
""" Auto-reconnect feature """
self.reader: asyncio.StreamReader = None # noqa
self.writer: asyncio.StreamWriter = None # noqa
self.data_queue = asyncio.Queue(maxsize=1)
self.data_wanted_ev = Event()
self.reader_task: asyncio.Task = None # noqa
async def connect(self) -> None:
"""
Connect to the data logging stick and start the socket reader loop
:return: None
:raises NoSocketAvailableError: When connection cannot be established
"""
loop = asyncio.get_running_loop()
try:
self.reader, self.writer = await asyncio.open_connection(self.address, self.port)
self.reader_task = loop.create_task(self._conn_keeper(), name='ConnKeeper')
        except Exception as exc:
            raise NoSocketAvailableError(f'Cannot open connection to {self.address}') from exc
async def reconnect(self) -> None:
"""
Reconnect to the data logging stick. It's called automatically if the auto-reconnect option is enabled
:return: None
:raises NoSocketAvailableError: When connection cannot be re-established
"""
try:
if self.reader_task:
self.reader_task.cancel()
self.reader, self.writer = await asyncio.open_connection(self.address, self.port)
loop = asyncio.get_running_loop()
self.reader_task = loop.create_task(self._conn_keeper(), name='ConnKeeper')
self.log.debug(f'[{self.serial}] Successful reconnect')
        except Exception as exc:
            raise NoSocketAvailableError(f'Cannot open connection to {self.address}') from exc
def _send_data(self, data: bytes):
"""
Sends the data received from the socket to the receiver.
:param data:
:return:
"""
if self.data_wanted_ev.is_set():
if not self.data_queue.empty():
_ = self.data_queue.get_nowait()
self.data_queue.put_nowait(data)
self.data_wanted_ev.clear()
async def _conn_keeper(self) -> None:
"""
Socket reader loop with extra logic when auto-reconnect is enabled
:return: None
"""
while True:
try:
data = await self.reader.read(1024)
except ConnectionResetError:
self.log.debug(f'[{self.serial}] Connection reset. Closing the socket reader.')
break
if data == b'':
self.log.debug(f'[{self.serial}] Connection closed by the remote. Closing the socket reader.')
break
elif data.startswith(b'\xa5\x01\x00\x10G'):
# Frame with control code 0x4710 - Counter frame
self.log.debug(f'[{self.serial}] COUNTER: {data.hex(" ")}')
continue
elif self.data_wanted_ev.is_set():
self._send_data(data)
else:
                self.log.debug('Data received but nobody is waiting for it... Discarded')
self.reader = None
self.writer = None
self._send_data(b'')
if self._needs_reconnect:
self.log.debug(f'[{self.serial}] Auto reconnect enabled. Will try to restart the socket reader')
loop = asyncio.get_running_loop()
loop.create_task(self.reconnect())
async def _send_receive_v5_frame(self, data_logging_stick_frame):
"""Send v5 frame to the data logger and receive response
:param data_logging_stick_frame: V5 frame to transmit
:type data_logging_stick_frame: bytes
:return: V5 frame received
:rtype: bytes
        :raises NoSocketAvailableError: When the connection to the data logging stick is closed.
Can occur even when auto-reconnect is enabled.
"""
self.log.debug("SENT: " + data_logging_stick_frame.hex(" "))
self.data_wanted_ev.set()
try:
self.writer.write(data_logging_stick_frame)
await self.writer.drain()
v5_response = await self.data_queue.get()
if v5_response == b'':
raise NoSocketAvailableError('Connection closed on read. Retry if auto-reconnect is enabled')
except AttributeError:
raise NoSocketAvailableError('Connection already closed')
except NoSocketAvailableError:
raise
except Exception as exc:
self.log.exception(f'[{self.serial}] Send/Receive error: {exc}')
raise
finally:
self.data_wanted_ev.clear()
self.log.debug("RECD: " + v5_response.hex(" "))
return v5_response
async def _send_receive_modbus_frame(self, mb_request_frame):
"""Encodes mb_frame, sends/receives v5_frame, decodes response
:param mb_request_frame: Modbus RTU frame to transmit
:type mb_request_frame: bytes
:return: Modbus RTU frame received
:rtype: bytes
"""
v5_request_frame = self._v5_frame_encoder(mb_request_frame)
v5_response_frame = await self._send_receive_v5_frame(v5_request_frame)
mb_response_frame = self._v5_frame_decoder(v5_response_frame)
return mb_response_frame
async def _get_modbus_response(self, mb_request_frame):
"""Returns mb response values for a given mb_request_frame
:param mb_request_frame: Modbus RTU frame to parse
:type mb_request_frame: bytes
:return: Modbus RTU decoded values
:rtype: list[int]
"""
mb_response_frame = await self._send_receive_modbus_frame(mb_request_frame)
modbus_values = rtu.parse_response_adu(mb_response_frame, mb_request_frame)
return modbus_values
async def read_input_registers(self, register_addr, quantity):
"""Read input registers from modbus slave (Modbus function code 4)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: List containing register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_input_registers(
self.mb_slave_id, register_addr, quantity
)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def read_holding_registers(self, register_addr, quantity):
"""Read holding registers from modbus slave (Modbus function code 3)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: List containing register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_holding_registers(
self.mb_slave_id, register_addr, quantity
)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def read_input_register_formatted(self, register_addr, quantity, **kwargs):
"""Read input registers from modbus slave and format as single value (Modbus function code 4)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:param scale: Scaling factor
:type scale: int
:param signed: Signed value (2s complement)
:type signed: bool
:param bitmask: Bitmask value
:type bitmask: int
:param bitshift: Bitshift value
:type bitshift: int
:return: Formatted register value
:rtype: int
"""
modbus_values = await self.read_input_registers(register_addr, quantity)
value = self._format_response(modbus_values, **kwargs)
return value
async def read_holding_register_formatted(self, register_addr, quantity, **kwargs):
"""Read holding registers from modbus slave and format as single value (Modbus function code 3)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:param scale: Scaling factor
:type scale: int
:param signed: Signed value (2s complement)
:type signed: bool
:param bitmask: Bitmask value
:type bitmask: int
:param bitshift: Bitshift value
:type bitshift: int
:return: Formatted register value
:rtype: int
"""
modbus_values = await self.read_holding_registers(register_addr, quantity)
value = self._format_response(modbus_values, **kwargs)
return value
async def write_holding_register(self, register_addr, value):
"""Write a single holding register to modbus slave (Modbus function code 6)
:param register_addr: Modbus register address
:type register_addr: int
:param value: value to write
:type value: int
:return: value written
:rtype: int
"""
mb_request_frame = rtu.write_single_register(
self.mb_slave_id, register_addr, value
)
value = await self._get_modbus_response(mb_request_frame)
return value
async def write_multiple_holding_registers(self, register_addr, values):
"""Write list of multiple values to series of holding registers on modbus slave (Modbus function code 16)
:param register_addr: Modbus register start address
:type register_addr: int
:param values: values to write
:type values: list[int]
:return: values written
:rtype: list[int]
"""
mb_request_frame = rtu.write_multiple_registers(
self.mb_slave_id, register_addr, values
)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def read_coils(self, register_addr, quantity):
"""Read coils from modbus slave and return list of coil values (Modbus function code 1)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_coils(self.mb_slave_id, register_addr, quantity)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def read_discrete_inputs(self, register_addr, quantity):
"""Read discrete inputs from modbus slave and return list of input values (Modbus function code 2)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_discrete_inputs(
self.mb_slave_id, register_addr, quantity
)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def write_single_coil(self, register_addr, value):
"""Write single coil value to modbus slave (Modbus function code 5)
:param register_addr: Modbus register start address
:type register_addr: int
:param value: value to write; ``0xFF00`` (On) or ``0x0000`` (Off)
:type value: int
:return: value written
:rtype: int
"""
mb_request_frame = rtu.write_single_coil(self.mb_slave_id, register_addr, value)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def write_multiple_coils(self, register_addr, values):
"""Write multiple coil values to modbus slave (Modbus function code 15)
:param register_addr: Modbus register start address
:type register_addr: int
:param values: values to write; ``1`` (On) or ``0`` (Off)
:type values: list[int]
:return: values written
:rtype: list[int]
"""
mb_request_frame = rtu.write_multiple_coils(
self.mb_slave_id, register_addr, values
)
modbus_values = await self._get_modbus_response(mb_request_frame)
return modbus_values
async def masked_write_holding_register(self, register_addr, **kwargs):
"""Mask write a single holding register to modbus slave (Modbus function code 22)
Used to set or clear individual bits within a holding register
If default values are provided for both ``or_mask`` and ``and_mask``,
the write element of this function is a NOP.
.. warning::
This is not implemented as a native Modbus function. It is a software
            implementation using a combination of :func:`read_holding_registers()`
            and :func:`write_holding_register()`.
It is therefore **not atomic**.
:param register_addr: Modbus register address
:type register_addr: int
:param or_mask: OR mask (set bits), defaults to ``0x0000`` (no change)
:type or_mask: int
:param and_mask: AND mask (clear bits), defaults to ``0xFFFF`` (no change)
:type and_mask: int
:return: value written
:rtype: int
"""
or_mask = kwargs.get("or_mask", 0x0000)
and_mask = kwargs.get("and_mask", 0xFFFF)
        current_value = (await self.read_holding_registers(register_addr, 1))[0]
if (or_mask != 0x0000) or (and_mask != 0xFFFF):
masked_value = current_value
masked_value |= or_mask
masked_value &= and_mask
updated_value = await self.write_holding_register(register_addr, masked_value)
return updated_value
return current_value
async def send_raw_modbus_frame(self, mb_request_frame):
"""Send raw modbus frame and return modbus response frame
        Wrapper around internal method :func:`_send_receive_modbus_frame()`
:param mb_request_frame: Modbus frame
:type mb_request_frame: bytearray
:return: Modbus frame
:rtype: bytearray
"""
return await self._send_receive_modbus_frame(mb_request_frame)
async def send_raw_modbus_frame_parsed(self, mb_request_frame):
"""Send raw modbus frame and return parsed modbus response list
        Wrapper around internal method :func:`_get_modbus_response()`
:param mb_request_frame: Modbus frame
:type mb_request_frame: bytearray
:return: Modbus RTU decoded values
:rtype: list[int]
"""
return await self._get_modbus_response(mb_request_frame) | zem-pysolarmanv5 | /zem-pysolarmanv5-2.5.0rc1.tar.gz/zem-pysolarmanv5-2.5.0rc1/pysolarmanv5/pysolarmanv5_async.py | pysolarmanv5_async.py |
import struct
import socket
import logging
import select
from threading import Thread, Event
from multiprocessing import Queue
from umodbus.client.serial import rtu
from random import randrange
class V5FrameError(Exception):
"""V5 Frame Validation Error"""
pass
class NoSocketAvailableError(Exception):
"""No Socket Available Error"""
pass
class PySolarmanV5:
"""
The PySolarmanV5 class establishes a TCP connection to a Solarman V5 data
logging stick and exposes methods to send/receive Modbus RTU requests and
responses.
For more detailed information on the Solarman V5 Protocol, see
:doc:`solarmanv5_protocol`
:param address: IP address or hostname of data logging stick
:type address: str
:param serial: Serial number of the data logging stick (not inverter!)
:type serial: int
:param port: TCP port to connect to data logging stick, defaults to 8899
:type port: int, optional
:param mb_slave_id: Inverter Modbus slave ID, defaults to 1
:type mb_slave_id: int, optional
:param socket_timeout: Socket timeout duration in seconds, defaults to 60
:type socket_timeout: int, optional
:param v5_error_correction: Enable naive error correction for V5 frames,
defaults to False
:type v5_error_correction: bool, optional
.. versionadded:: v2.4.0
:param logger: Python logging facility
:type logger: Logger, optional
:param socket: TCP Socket connection to data logging stick. If **socket**
argument is provided, **address** argument is unused (however, it is
still required as a positional argument)
:type socket: Socket, optional
:raises NoSocketAvailableError: If no network socket is available
.. versionadded:: v2.5.0
:param auto_reconnect: Activates the auto-reconnect functionality. PySolarman will try to
keep the connection open. The default is False. Not compatible with custom sockets.
:type auto_reconnect: Boolean, optional
.. deprecated:: v2.4.0
:param verbose: Enable verbose logging, defaults to False. Use **logger**
instead. For compatibility purposes, **verbose**, if enabled, will
create a logger, and set the logging level to DEBUG.
:type verbose: bool, optional
Basic example:
>>> from pysolarmanv5 import PySolarmanV5
>>> modbus = PySolarmanV5("192.168.1.10", 123456789)
>>> print(modbus.read_input_registers(register_addr=33022, quantity=6))
See :doc:`examples` directory for further examples.
"""
def __init__(self, address, serial, **kwargs):
"""Constructor"""
self.log = kwargs.get("logger", None)
if self.log is None:
logging.basicConfig()
self.log = logging.getLogger(__name__)
self.address = address
self.serial = serial
self.port = kwargs.get("port", 8899)
self.mb_slave_id = kwargs.get("mb_slave_id", 1)
self.verbose = kwargs.get("verbose", False)
self.socket_timeout = kwargs.get("socket_timeout", 60)
self.v5_error_correction = kwargs.get("error_correction", False)
self.sequence_number = None
if self.verbose:
self.log.setLevel("DEBUG")
self._v5_frame_def()
self.sock: socket.socket = kwargs.get("socket", self._create_socket())
if self.sock is None:
raise NoSocketAvailableError("No socket available")
self._poll = select.poll()
self._sock_fd = self.sock.fileno()
self._auto_reconnect = False if kwargs.get('socket') else kwargs.get('auto_reconnect', False)
self._data_queue = Queue(maxsize=1)
self._data_wanted = Event()
self._reader_exit = Event()
self._reader_thr = Thread(target=self._data_receiver, daemon=True)
self._reader_thr.start()
def _v5_frame_def(self):
"""Define and construct V5 request frame structure."""
self.v5_start = bytes.fromhex("A5")
self.v5_length = bytes.fromhex("0000") # placeholder value
self.v5_controlcode = struct.pack("<H", 0x4510)
self.v5_serial = bytes.fromhex("0000") # placeholder value
self.v5_loggerserial = struct.pack("<I", self.serial)
self.v5_frametype = bytes.fromhex("02")
self.v5_sensortype = bytes.fromhex("0000")
self.v5_deliverytime = bytes.fromhex("00000000")
self.v5_powerontime = bytes.fromhex("00000000")
self.v5_offsettime = bytes.fromhex("00000000")
self.v5_checksum = bytes.fromhex("00") # placeholder value
self.v5_end = bytes.fromhex("15")
@staticmethod
def _calculate_v5_frame_checksum(frame):
"""Calculate checksum on all frame bytes except head, end and checksum
:param frame: V5 frame
:type frame: bytes
:return: Checksum value of V5 frame
:rtype: int
"""
checksum = 0
for i in range(1, len(frame) - 2, 1):
checksum += frame[i] & 0xFF
return int(checksum & 0xFF)
def _get_next_sequence_number(self):
"""Get the next sequence number for use in outgoing packets
If ``sequence_number`` is None, generate a random int as initial value.
:return: Sequence number
:rtype: int
"""
if self.sequence_number is None:
self.sequence_number = randrange(0x01, 0xFF)
else:
self.sequence_number = (self.sequence_number + 1) & 0xFF
return self.sequence_number
def _v5_frame_encoder(self, modbus_frame):
"""Take a modbus RTU frame and encode it in a V5 data logging stick frame
:param modbus_frame: Modbus RTU frame
:type modbus_frame: bytes
:return: V5 frame
:rtype: bytearray
"""
self.v5_length = struct.pack("<H", 15 + len(modbus_frame))
self.v5_serial = struct.pack("<H", self._get_next_sequence_number())
v5_header = bytearray(
self.v5_start
+ self.v5_length
+ self.v5_controlcode
+ self.v5_serial
+ self.v5_loggerserial
)
v5_payload = bytearray(
self.v5_frametype
+ self.v5_sensortype
+ self.v5_deliverytime
+ self.v5_powerontime
+ self.v5_offsettime
+ modbus_frame
)
v5_trailer = bytearray(self.v5_checksum + self.v5_end)
v5_frame = v5_header + v5_payload + v5_trailer
v5_frame[len(v5_frame) - 2] = self._calculate_v5_frame_checksum(v5_frame)
return v5_frame
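    # Layout of the encoded V5 request frame built above (sizes in bytes):
    #
    #   header  (11):     start(1) + length(2) + control code(2) + sequence "serial"(2) + logger serial(4)
    #   payload (15 + n): frame type(1) + sensor type(2) + delivery time(4) + power-on time(4) + offset time(4) + Modbus RTU frame(n)
    #   trailer (2):      checksum(1) + end(1)
    #
    # which is why v5_length is packed as 15 + len(modbus_frame), and why the
    # decoder below expects a total frame length of payload length + 13.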
def _v5_frame_decoder(self, v5_frame):
"""Decodes a V5 data logging stick frame and returns a modbus RTU frame
Modbus RTU frame will start at position 25 through ``len(v5_frame)-2``.
Occasionally logger can send a spurious 'keep-alive' reply with a
control code of ``0x4710``. These messages can either take the place of,
or be appended to valid ``0x1510`` responses. In this case, the v5_frame
will contain an invalid checksum.
Validate the following:
1) V5 start and end are correct (``0xA5`` and ``0x15`` respectively)
2) V5 checksum is correct
3) V5 outgoing sequence number has been echoed back to us (byte 5)
4) V5 data logger serial number is correct (in most (all?) instances the
reply is correct, but request can obviously be incorrect)
5) V5 control code is correct (``0x1510``)
6) v5_frametype contains the correct value (``0x02`` in byte 11)
7) Modbus RTU frame length is at least 5 bytes (vast majority of RTU
frames will be >=6 bytes, but valid 5 byte error/exception RTU frames
are possible)
:param v5_frame: V5 frame
:type v5_frame: bytes
:return: Modbus RTU Frame
:rtype: bytes
:raises V5FrameError: If parsing fails due to invalid V5 frame
"""
frame_len = len(v5_frame)
(payload_len,) = struct.unpack("<H", v5_frame[1:3])
frame_len_without_payload_len = 13
if frame_len != (frame_len_without_payload_len + payload_len):
self.log.debug("frame_len does not match payload_len.")
if self.v5_error_correction:
frame_len = frame_len_without_payload_len + payload_len
if (v5_frame[0] != int.from_bytes(self.v5_start, byteorder="big")) or (
v5_frame[frame_len - 1] != int.from_bytes(self.v5_end, byteorder="big")
):
raise V5FrameError("V5 frame contains invalid start or end values")
if v5_frame[frame_len - 2] != self._calculate_v5_frame_checksum(v5_frame):
raise V5FrameError("V5 frame contains invalid V5 checksum")
if v5_frame[5] != self.sequence_number:
raise V5FrameError("V5 frame contains invalid sequence number")
if v5_frame[7:11] != self.v5_loggerserial:
raise V5FrameError("V5 frame contains incorrect data logger serial number")
if v5_frame[3:5] != struct.pack("<H", 0x1510):
raise V5FrameError("V5 frame contains incorrect control code")
if v5_frame[11] != int("02", 16):
raise V5FrameError("V5 frame contains invalid frametype")
modbus_frame = v5_frame[25 : frame_len - 2]
if len(modbus_frame) < 5:
raise V5FrameError("V5 frame does not contain a valid Modbus RTU frame")
return modbus_frame
def _send_receive_v5_frame(self, data_logging_stick_frame):
"""Send v5 frame to the data logger and receive response
:param data_logging_stick_frame: V5 frame to transmit
:type data_logging_stick_frame: bytes
:return: V5 frame received
:rtype: bytes
"""
self.log.debug("SENT: " + data_logging_stick_frame.hex(" "))
if not self._reader_thr.is_alive():
raise NoSocketAvailableError('Connection already closed.')
self.sock.sendall(data_logging_stick_frame)
self._data_wanted.set()
#v5_response = self.sock.recv(1024)
try:
v5_response = self._data_queue.get(timeout=self.socket_timeout)
self._data_wanted.clear()
except TimeoutError:
raise
self.log.debug("RECD: " + v5_response.hex(" "))
return v5_response
def _data_receiver(self):
self._poll.register(self.sock.fileno(), select.POLLIN)
while True:
events = self._poll.poll(500)
if self._reader_exit.is_set():
return
for event in events:
# We are registered only for inbound data on a single socket,
# so there is no need to check the (fileno, mask) tuples
data = self.sock.recv(1024)
if data == b'':
self.log.debug(f'[POLL] Socket closed. Reader thread exiting.')
if self._data_wanted.is_set():
self._data_queue.put_nowait(data)
self._reconnect()
return
elif data.startswith(b'\xa5\x01\x00\x10G'):
# Frame with control code 0x4710 - Counter frame
self.log.debug(f'[{self.serial}] COUNTER: {data.hex(" ")}')
continue
if self._data_wanted.is_set():
self._data_queue.put(data, timeout=self.socket_timeout)
else:
self.log.debug("[POLL-DISCARDED] RECD: " + data.hex(' '))
def _reconnect(self):
"""
Reconnect to the data logger if needed
"""
if self._reader_thr.is_alive():
self.sock.send(b'')
self.sock.close()
self._reader_exit.set()
self._reader_thr.join(.5)
if self._reader_thr.is_alive():
raise RuntimeError('Reader thread is still alive!')
self._reader_exit.clear()
if self._auto_reconnect:
self.log.debug(f'Auto-Reconnect enabled. Trying to establish a new connection')
self._poll.unregister(self._sock_fd)
self.sock = self._create_socket()
if self.sock:
self._sock_fd = self.sock.fileno()
self._reader_thr = Thread(target=self._data_receiver, daemon=True)
self._reader_thr.start()
self.log.debug(f'Auto-Reconnect successful.')
else:
self.log.debug(f'No socket available! Reconnect failed.')
else:
self.log.debug('Auto-Reconnect inactive.')
def disconnect(self) -> None:
"""
Disconnect the socket and set a signal for the reader thread to exit
"""
self.sock.send(b'')
self.sock.close()
self._reader_exit.set()
self._reader_thr.join(.5)
self._poll.unregister(self._sock_fd)
def _send_receive_modbus_frame(self, mb_request_frame):
"""Encodes mb_frame, sends/receives v5_frame, decodes response
:param mb_request_frame: Modbus RTU frame to transmit
:type mb_request_frame: bytes
:return: Modbus RTU frame received
:rtype: bytes
"""
v5_request_frame = self._v5_frame_encoder(mb_request_frame)
v5_response_frame = self._send_receive_v5_frame(v5_request_frame)
mb_response_frame = self._v5_frame_decoder(v5_response_frame)
return mb_response_frame
def _get_modbus_response(self, mb_request_frame):
"""Returns mb response values for a given mb_request_frame
:param mb_request_frame: Modbus RTU frame to parse
:type mb_request_frame: bytes
:return: Modbus RTU decoded values
:rtype: list[int]
"""
mb_response_frame = self._send_receive_modbus_frame(mb_request_frame)
modbus_values = rtu.parse_response_adu(mb_response_frame, mb_request_frame)
return modbus_values
def _create_socket(self):
"""Creates and returns a socket"""
try:
sock = socket.create_connection(
(self.address, self.port), self.socket_timeout
)
except OSError:
return None
return sock
@staticmethod
def twos_complement(val, num_bits):
"""Calculate 2s Complement
:param val: Value to calculate
:type val: int
:param num_bits: Number of bits
:type num_bits: int
:return: 2s Complement value
:rtype: int
"""
if val < 0:
val = (1 << num_bits) + val
else:
if val & (1 << (num_bits - 1)):
val = val - (1 << num_bits)
return val
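    # For example, twos_complement(0xFFFF, 16) returns -1 (sign bit set), while
    # twos_complement(0x7FFF, 16) returns 32767 unchanged (sign bit clear).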
def _format_response(self, modbus_values, **kwargs):
"""Formats a list of modbus register values (16 bits each) as a single value
:param modbus_values: Modbus register values
:type modbus_values: list[int]
:param scale: Scaling factor
:type scale: int
:param signed: Signed value (2s complement)
:type signed: bool
:param bitmask: Bitmask value
:type bitmask: int
:param bitshift: Bitshift value
:type bitshift: int
:return: Formatted register value
:rtype: int
"""
scale = kwargs.get("scale", 1)
signed = kwargs.get("signed", False)
bitmask = kwargs.get("bitmask", None)
bitshift = kwargs.get("bitshift", None)
response = 0
num_registers = len(modbus_values)
for i, j in zip(range(num_registers), range(num_registers - 1, -1, -1)):
response += modbus_values[i] << (j * 16)
if signed:
response = self.twos_complement(response, num_registers * 16)
if scale != 1:
response *= scale
if bitmask is not None:
response &= bitmask
if bitshift is not None:
response >>= bitshift
return response
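    # Worked example (hypothetical register contents): two registers
    # [0x0001, 0x86A0] combine to 0x000186A0 = 100000; with scale=0.1 the
    # formatted value is 10000.0. With signed=True, a register pair whose top
    # bit is set is interpreted as a negative 32-bit value instead.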
def read_input_registers(self, register_addr, quantity):
"""Read input registers from modbus slave (Modbus function code 4)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: List containing register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_input_registers(
self.mb_slave_id, register_addr, quantity
)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def read_holding_registers(self, register_addr, quantity):
"""Read holding registers from modbus slave (Modbus function code 3)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: List containing register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_holding_registers(
self.mb_slave_id, register_addr, quantity
)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def read_input_register_formatted(self, register_addr, quantity, **kwargs):
"""Read input registers from modbus slave and format as single value (Modbus function code 4)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:param scale: Scaling factor
:type scale: int
:param signed: Signed value (2s complement)
:type signed: bool
:param bitmask: Bitmask value
:type bitmask: int
:param bitshift: Bitshift value
:type bitshift: int
:return: Formatted register value
:rtype: int
"""
modbus_values = self.read_input_registers(register_addr, quantity)
value = self._format_response(modbus_values, **kwargs)
return value
def read_holding_register_formatted(self, register_addr, quantity, **kwargs):
"""Read holding registers from modbus slave and format as single value (Modbus function code 3)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:param scale: Scaling factor
:type scale: int
:param signed: Signed value (2s complement)
:type signed: bool
:param bitmask: Bitmask value
:type bitmask: int
:param bitshift: Bitshift value
:type bitshift: int
:return: Formatted register value
:rtype: int
"""
modbus_values = self.read_holding_registers(register_addr, quantity)
value = self._format_response(modbus_values, **kwargs)
return value
def write_holding_register(self, register_addr, value):
"""Write a single holding register to modbus slave (Modbus function code 6)
:param register_addr: Modbus register address
:type register_addr: int
:param value: value to write
:type value: int
:return: value written
:rtype: int
"""
mb_request_frame = rtu.write_single_register(
self.mb_slave_id, register_addr, value
)
value = self._get_modbus_response(mb_request_frame)
return value
def write_multiple_holding_registers(self, register_addr, values):
"""Write list of multiple values to series of holding registers on modbus slave (Modbus function code 16)
:param register_addr: Modbus register start address
:type register_addr: int
:param values: values to write
:type values: list[int]
:return: values written
:rtype: list[int]
"""
mb_request_frame = rtu.write_multiple_registers(
self.mb_slave_id, register_addr, values
)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def read_coils(self, register_addr, quantity):
"""Read coils from modbus slave and return list of coil values (Modbus function code 1)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_coils(self.mb_slave_id, register_addr, quantity)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def read_discrete_inputs(self, register_addr, quantity):
"""Read discrete inputs from modbus slave and return list of input values (Modbus function code 2)
:param register_addr: Modbus register start address
:type register_addr: int
:param quantity: Number of registers to query
:type quantity: int
:return: register values
:rtype: list[int]
"""
mb_request_frame = rtu.read_discrete_inputs(
self.mb_slave_id, register_addr, quantity
)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def write_single_coil(self, register_addr, value):
"""Write single coil value to modbus slave (Modbus function code 5)
:param register_addr: Modbus register start address
:type register_addr: int
:param value: value to write; ``0xFF00`` (On) or ``0x0000`` (Off)
:type value: int
:return: value written
:rtype: int
"""
mb_request_frame = rtu.write_single_coil(self.mb_slave_id, register_addr, value)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def write_multiple_coils(self, register_addr, values):
"""Write multiple coil values to modbus slave (Modbus function code 15)
:param register_addr: Modbus register start address
:type register_addr: int
:param values: values to write; ``1`` (On) or ``0`` (Off)
:type values: list[int]
:return: values written
:rtype: list[int]
"""
mb_request_frame = rtu.write_multiple_coils(
self.mb_slave_id, register_addr, values
)
modbus_values = self._get_modbus_response(mb_request_frame)
return modbus_values
def masked_write_holding_register(self, register_addr, **kwargs):
"""Mask write a single holding register to modbus slave (Modbus function code 22)
Used to set or clear individual bits within a holding register
If default values are provided for both ``or_mask`` and ``and_mask``,
the write element of this function is a NOP.
.. warning::
This is not implemented as a native Modbus function. It is a software
implementation using a combination of :func:`read_holding_registers() <pysolarmanv5.PySolarmanV5.read_holding_registers>`
and :func:`write_holding_register() <pysolarmanv5.PySolarmanV5.write_holding_register>`.
It is therefore **not atomic**.
:param register_addr: Modbus register address
:type register_addr: int
:param or_mask: OR mask (set bits), defaults to ``0x0000`` (no change)
:type or_mask: int
:param and_mask: AND mask (clear bits), defaults to ``0xFFFF`` (no change)
:type and_mask: int
:return: value written
:rtype: int
"""
or_mask = kwargs.get("or_mask", 0x0000)
and_mask = kwargs.get("and_mask", 0xFFFF)
current_value = self.read_holding_registers(register_addr, 1)[0]
if (or_mask != 0x0000) or (and_mask != 0xFFFF):
masked_value = current_value
masked_value |= or_mask
masked_value &= and_mask
updated_value = self.write_holding_register(register_addr, masked_value)
return updated_value
return current_value
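    # Example (hypothetical register address): set bit 0 and clear bit 4 of
    # holding register 43000 in one call:
    #
    #     modbus.masked_write_holding_register(43000, or_mask=0x0001, and_mask=0xFFEF)
    #
    # The OR mask is applied before the AND mask, so a bit set by the OR mask
    # but cleared by the AND mask ends up cleared.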
def send_raw_modbus_frame(self, mb_request_frame):
"""Send raw modbus frame and return modbus response frame
Wrapper around internal method :func:`_send_receive_modbus_frame() <pysolarmanv5.PySolarmanV5._send_receive_modbus_frame>`
:param mb_request_frame: Modbus frame
:type mb_request_frame: bytearray
:return: Modbus frame
:rtype: bytearray
"""
return self._send_receive_modbus_frame(mb_request_frame)
def send_raw_modbus_frame_parsed(self, mb_request_frame):
"""Send raw modbus frame and return parsed modbusresponse list
Wrapper around internal method :func:`_get_modbus_response() <pysolarmanv5.PySolarmanV5._get_modbus_response>`
:param mb_request_frame: Modbus frame
:type mb_request_frame: bytearray
:return: Modbus RTU decoded values
:rtype: list[int]
"""
return self._get_modbus_response(mb_request_frame) | zem-pysolarmanv5 | /zem-pysolarmanv5-2.5.0rc1.tar.gz/zem-pysolarmanv5-2.5.0rc1/pysolarmanv5/pysolarmanv5.py | pysolarmanv5.py |
__all__ = [
"ExtractionDataType",
"ZeMASamples",
"ZEMA_DATASET_HASH",
"ZEMA_DATASET_URL",
"ZEMA_QUANTITIES",
]
import operator
import os
import pickle
from enum import Enum
from functools import reduce
from os.path import exists
from pathlib import Path
from typing import cast
import h5py
import numpy as np
from h5py import Dataset
from numpy._typing import NDArray
from pooch import os_cache, retrieve
from zema_emc_annotated.data_types import (
RealMatrix,
RealVector,
SampleSize,
UncertainArray,
)
ZEMA_DATASET_HASH = (
"sha256:fb0e80de4e8928ae8b859ad9668a1b6ea6310028a6690bb8d4c1abee31cb8833"
)
ZEMA_DATASET_URL = "https://zenodo.org/record/5185953/files/axis11_2kHz_ZeMA_PTB_SI.h5"
ZEMA_QUANTITIES = (
"Acceleration",
"Active_Current",
"Force",
"Motor_Current",
"Pressure",
"Sound_Pressure",
"Velocity",
)
class ExtractionDataType(Enum):
"""Identifiers of data types in ZeMA dataset
Attributes
----------
VALUES : str
with value ``qudt:value``
UNCERTAINTIES : str
with value ``qudt:standardUncertainty``
"""
VALUES = "qudt:value"
UNCERTAINTIES = "qudt:standardUncertainty"
class ZeMASamples:
"""Extracts requested number of samples of values with associated uncertainties
The underlying dataset is the annotated "Sensor data set of one electromechanical
cylinder at ZeMA testbed (ZeMA DAQ and Smart-Up Unit)" by Dorst et al. [Dorst2021]_.
Each extracted sample will be cached in the download directory of the file,
which is handled by :func:`pooch.os_cache`, where ``<AppName>`` evaluates to
``pooch``. That way the concurrent retrieval of the same data is as performant as
possible and can simply be left to ``zema_emc_annotated``. Where ever the result
of ``ZeMASamples`` is needed in an external code base, it should be safe to call
it over and over without causing unnecessary extractions or even downloads. The
underlying mechanism is Python's built-in ``pickle``.
Parameters
----------
sample_size : SampleSize, optional
tuple containing information about which samples to extract, defaults to
default of :class:`~zema_emc_annotated.data_types.SampleSize`
normalize : bool, optional
if ``True``, then values are centered around zero and values and
uncertainties are scaled to values' unit std, defaults to ``False``
skip_hash_check : bool, optional
allow to circumvent strict hash checking during the retrieve of dataset file,
to speed up concurrent calls as each check for the large file might take
several seconds, defaults to ``False``
Attributes
----------
uncertain_values : UncertainArray
The collection of samples of values with associated uncertainties,
will be of shape (``sample_size.n_cycles``, 11 x
``sample_size.datapoints_per_cycle``)
"""
uncertain_values: UncertainArray
def __init__(
self,
sample_size: SampleSize = SampleSize(),
normalize: bool = False,
skip_hash_check: bool = False,
):
self.samples_slice: slice = np.s_[
sample_size.idx_first_cycle : sample_size.idx_first_cycle
+ sample_size.n_cycles
]
self.size_scaler = sample_size.datapoints_per_cycle
if cached_data := self._check_and_load_cache(normalize):
self.uncertain_values = cached_data
else:
self._uncertainties = np.empty((sample_size.n_cycles, 0))
self._values = np.empty((sample_size.n_cycles, 0))
self.uncertain_values = self._extract_data(normalize, skip_hash_check)
self._store_cache(normalize)
del self._uncertainties
del self._values
def _extract_data(
self, normalize: bool, skip_hash_check: bool = True
) -> UncertainArray:
"""Extract the data as specified"""
dataset_full_path = retrieve(
url=ZEMA_DATASET_URL,
known_hash=None if skip_hash_check else ZEMA_DATASET_HASH,
progressbar=True,
)
assert exists(dataset_full_path)
relevant_datasets = (
["ZeMA_DAQ", quantity, datatype.value]
for quantity in ZEMA_QUANTITIES
for datatype in ExtractionDataType
)
self._normalization_divisors: dict[str, NDArray[np.double] | float] = {}
with h5py.File(dataset_full_path, "r") as h5f:
for dataset_descriptor in relevant_datasets:
self._current_dataset: Dataset = cast(
Dataset, reduce(operator.getitem, dataset_descriptor, h5f)
)
if ExtractionDataType.VALUES.value in self._current_dataset.name:
treating_values = True
print(f" Extract values from {self._current_dataset.name}")
else:
treating_values = False
print(
f" Extract uncertainties from "
f"{self._current_dataset.name}"
)
if self._current_dataset.shape[0] == 3:
for idx, sensor in enumerate(self._current_dataset):
if treating_values:
self._normalize_values_if_requested_and_append(
sensor,
self._extract_sub_dataset_name(idx),
normalize,
)
else:
self._normalize_uncertainties_if_requested_and_append(
sensor,
self._extract_sub_dataset_name(idx),
normalize,
)
else:
if treating_values:
self._normalize_values_if_requested_and_append(
self._current_dataset,
self._strip_data_type_from_dataset_descriptor(),
normalize,
)
else:
self._normalize_uncertainties_if_requested_and_append(
self._current_dataset,
self._strip_data_type_from_dataset_descriptor(),
normalize,
)
if treating_values:
print(" Values extracted")
else:
print(" Uncertainties extracted")
return UncertainArray(self._values, self._uncertainties)
def _normalize_values_if_requested_and_append(
self, values: Dataset, dataset_descriptor: str, normalize: bool
) -> None:
"""Normalize the provided values and append according to current state"""
_potentially_normalized_values = values[
np.s_[: self.size_scaler, self.samples_slice]
]
if normalize:
_potentially_normalized_values -= np.mean(
values[:, self.samples_slice], axis=0
)
data_std = np.std(values[:, self.samples_slice], axis=0)
data_std[data_std == 0] = 1.0
self._normalization_divisors[dataset_descriptor] = data_std
_potentially_normalized_values /= self._normalization_divisors[
dataset_descriptor
]
self._values = np.append(
self._values, _potentially_normalized_values.transpose(), axis=1
)
def _normalize_uncertainties_if_requested_and_append(
self, uncertainties: Dataset, dataset_descriptor: str, normalize: bool
) -> None:
"""Normalize the provided uncertainties and append according to current state"""
_potentially_normalized_uncertainties = uncertainties[
np.s_[: self.size_scaler, self.samples_slice]
]
if normalize:
_potentially_normalized_uncertainties /= self._normalization_divisors[
dataset_descriptor
]
self._uncertainties = np.append(
self._uncertainties,
_potentially_normalized_uncertainties.transpose(),
axis=1,
)
def _extract_sub_dataset_name(self, idx: int) -> str:
return str(
self._strip_data_type_from_dataset_descriptor()
+ self._current_dataset.attrs["si:label"]
.split(",")[idx]
.strip("[")
.strip("]")
.replace(" ", "")
.replace('"', "")
.replace("uncertainty", "")
).replace("\n", "")
def _strip_data_type_from_dataset_descriptor(self) -> str:
return str(
self._current_dataset.name.replace(
ExtractionDataType.UNCERTAINTIES.value, ""
).replace(ExtractionDataType.VALUES.value, "")
)
@property
def values(self) -> RealVector:
"""The values of the stored :class:`UncertainArray` object"""
return self.uncertain_values.values
@property
def uncertainties(self) -> RealMatrix | RealVector:
"""The uncertainties of the stored :class:`UncertainArray` object"""
return self.uncertain_values.uncertainties
def _check_and_load_cache(self, normalize: bool) -> UncertainArray | None:
"""Checks if corresponding file for n_cycles exists and loads it with pickle"""
if os.path.exists(cache_path := self._cache_path(normalize)):
with open(cache_path, "rb") as cache_file:
return cast(UncertainArray, pickle.load(cache_file))
return None
def _cache_path(self, normalize: bool) -> Path:
"""Local file system path for a cache file containing n ZeMA samples
        The result does not guarantee that the file at the specified location exists,
but can be used to check for existence or creation.
"""
assert self.samples_slice.stop is not None # pylint: disable=no-member
idx_start = self.samples_slice.start # pylint: disable=no-member
n_samples = (
self.samples_slice.stop - idx_start # pylint: disable=no-member
if self.samples_slice.start is not None # pylint: disable=no-member
else self.samples_slice.stop # pylint: disable=no-member
)
return Path(
os_cache("pooch").joinpath(
f"{str(n_samples)}_samples"
f"{'_starting_from_' + str(idx_start) if idx_start else ''}_with_"
f"{str(self.size_scaler)}_values_per_sensor"
f"{'_normalized' if normalize else ''}.pickle"
)
)
def _store_cache(self, normalize: bool) -> None:
"""Dumps provided uncertain tensor to corresponding pickle file"""
with open(self._cache_path(normalize), "wb") as cache_file:
pickle.dump(self.uncertain_values, cache_file) | zema-emc-annotated | /zema_emc_annotated-0.7.1-py3-none-any.whl/zema_emc_annotated/dataset.py | dataset.py |
# Read ZeMA dataset and preprocess data
```
import json
import h5py
from pooch import retrieve
def local_path_to_dataset_after_download_if_required():
ZEMA_DATASET_URL = (
"https://zenodo.org/record/5185953/files/axis11_2kHz_ZeMA_PTB_SI.h5"
)
return retrieve(
url=ZEMA_DATASET_URL,
known_hash=None,
progressbar=True,
)
def print_attrs(h5py_dataset_or_group):
for key in h5py_dataset_or_group.attrs:
print(key)
val = json.loads(h5py_dataset_or_group.attrs[key])
if isinstance(val, dict):
for subkey, subval in val.items():
print(f" {subkey} : {subval}")
else:
print(f" {val}")
with h5py.File(local_path_to_dataset_after_download_if_required(), "r") as h5f:
print_attrs(h5f)
with h5py.File(local_path_to_dataset_after_download_if_required(), "r") as h5f:
my_uncertainty = h5f["PTB_SUU"]["MPU_9250"]["Acceleration"][
"qudt:standardUncertainty"
]
print("qudt:standardUncertainty" in my_uncertainty.name)
print_attrs(my_uncertainty)
print(my_uncertainty)
print(list(h5f["PTB_SUU"]))
```
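
The same extraction (with optional normalization and on-disk caching) is also available through the package's `ZeMASamples` class instead of reading the HDF5 file by hand. A minimal sketch; the sample size is arbitrary, and the keyword argument assumes `SampleSize` exposes the `n_cycles` field referenced in `zema_emc_annotated.dataset`:

```
from zema_emc_annotated.data_types import SampleSize
from zema_emc_annotated.dataset import ZeMASamples

samples = ZeMASamples(SampleSize(n_cycles=10), normalize=True)
# one row per cycle, 11 sensors x datapoints_per_cycle columns each
print(samples.values.shape)
print(samples.uncertainties.shape)
```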
| zema-emc-annotated | /zema_emc_annotated-0.7.1-py3-none-any.whl/zema_emc_annotated/examples/read_dataset.ipynb | read_dataset.ipynb |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='language_id.proto',
package='zemberek.langid',
syntax='proto3',
serialized_options=_b('\n\016zemberek.protoP\001'),
serialized_pb=_b('\n\x11language_id.proto\x12\x0fzemberek.langid\":\n\x11LanguageIdRequest\x12\r\n\x05input\x18\x01 \x01(\t\x12\x16\n\x0emaxSampleCount\x18\x02 \x01(\x05\"$\n\x12LanguageIdResponse\x12\x0e\n\x06langId\x18\x01 \x01(\t2\xbd\x01\n\x11LanguageIdService\x12Q\n\x06\x44\x65tect\x12\".zemberek.langid.LanguageIdRequest\x1a#.zemberek.langid.LanguageIdResponse\x12U\n\nDetectFast\x12\".zemberek.langid.LanguageIdRequest\x1a#.zemberek.langid.LanguageIdResponseB\x12\n\x0ezemberek.protoP\x01\x62\x06proto3')
)
_LANGUAGEIDREQUEST = _descriptor.Descriptor(
name='LanguageIdRequest',
full_name='zemberek.langid.LanguageIdRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.langid.LanguageIdRequest.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='maxSampleCount', full_name='zemberek.langid.LanguageIdRequest.maxSampleCount', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=96,
)
_LANGUAGEIDRESPONSE = _descriptor.Descriptor(
name='LanguageIdResponse',
full_name='zemberek.langid.LanguageIdResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='langId', full_name='zemberek.langid.LanguageIdResponse.langId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=134,
)
DESCRIPTOR.message_types_by_name['LanguageIdRequest'] = _LANGUAGEIDREQUEST
DESCRIPTOR.message_types_by_name['LanguageIdResponse'] = _LANGUAGEIDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LanguageIdRequest = _reflection.GeneratedProtocolMessageType('LanguageIdRequest', (_message.Message,), dict(
DESCRIPTOR = _LANGUAGEIDREQUEST,
__module__ = 'language_id_pb2'
# @@protoc_insertion_point(class_scope:zemberek.langid.LanguageIdRequest)
))
_sym_db.RegisterMessage(LanguageIdRequest)
LanguageIdResponse = _reflection.GeneratedProtocolMessageType('LanguageIdResponse', (_message.Message,), dict(
DESCRIPTOR = _LANGUAGEIDRESPONSE,
__module__ = 'language_id_pb2'
# @@protoc_insertion_point(class_scope:zemberek.langid.LanguageIdResponse)
))
_sym_db.RegisterMessage(LanguageIdResponse)
DESCRIPTOR._options = None
_LANGUAGEIDSERVICE = _descriptor.ServiceDescriptor(
name='LanguageIdService',
full_name='zemberek.langid.LanguageIdService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=137,
serialized_end=326,
methods=[
_descriptor.MethodDescriptor(
name='Detect',
full_name='zemberek.langid.LanguageIdService.Detect',
index=0,
containing_service=None,
input_type=_LANGUAGEIDREQUEST,
output_type=_LANGUAGEIDRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='DetectFast',
full_name='zemberek.langid.LanguageIdService.DetectFast',
index=1,
containing_service=None,
input_type=_LANGUAGEIDREQUEST,
output_type=_LANGUAGEIDRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_LANGUAGEIDSERVICE)
DESCRIPTOR.services_by_name['LanguageIdService'] = _LANGUAGEIDSERVICE
# @@protoc_insertion_point(module_scope) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/language_id_pb2.py | language_id_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='morphology.proto',
package='zemberek.morphology',
syntax='proto3',
serialized_options=_b('\n\031zemberek.proto.morphologyP\001'),
serialized_pb=_b('\n\x10morphology.proto\x12\x13zemberek.morphology\"$\n\x13WordAnalysisRequest\x12\r\n\x05input\x18\x01 \x01(\t\"N\n\x13\x44ictionaryItemProto\x12\r\n\x05lemma\x18\x01 \x01(\t\x12\x12\n\nprimaryPos\x18\x02 \x01(\t\x12\x14\n\x0csecondaryPos\x18\x03 \x01(\t\"\xe4\x01\n\x13SingleAnalysisProto\x12\x0b\n\x03pos\x18\x01 \x01(\t\x12@\n\x0e\x64ictionaryItem\x18\x02 \x01(\x0b\x32(.zemberek.morphology.DictionaryItemProto\x12\x10\n\x08\x61nalysis\x18\x03 \x01(\t\x12\x10\n\x08informal\x18\x04 \x01(\x08\x12\x0f\n\x07runtime\x18\x05 \x01(\x08\x12\x0e\n\x06lemmas\x18\x06 \x03(\t\x12\x39\n\tmorphemes\x18\x07 \x03(\x0b\x32&.zemberek.morphology.MorphemeDataProto\"6\n\x11MorphemeDataProto\x12\x0f\n\x07surface\x18\x01 \x01(\t\x12\x10\n\x08morpheme\x18\x02 \x01(\t\"^\n\x11WordAnalysisProto\x12\r\n\x05input\x18\x01 \x01(\t\x12:\n\x08\x61nalyses\x18\x02 \x03(\x0b\x32(.zemberek.morphology.SingleAnalysisProto\"D\n\x17SentenceAnalysisRequest\x12\r\n\x05input\x18\x01 \x01(\t\x12\x1a\n\x12\x63ontainAllAnalyses\x18\x02 \x01(\x08\"g\n\x15SentenceAnalysisProto\x12\r\n\x05input\x18\x01 \x01(\t\x12?\n\x07results\x18\x02 \x03(\x0b\x32..zemberek.morphology.SentenceWordAnalysisProto\"\x97\x01\n\x19SentenceWordAnalysisProto\x12\r\n\x05token\x18\x01 \x01(\t\x12\x36\n\x04\x62\x65st\x18\x02 \x01(\x0b\x32(.zemberek.morphology.SingleAnalysisProto\x12\x33\n\x03\x61ll\x18\x03 \x01(\x0b\x32&.zemberek.morphology.WordAnalysisProto2\xe1\x01\n\x11MorphologyService\x12k\n\x0f\x41nalyzeSentence\x12,.zemberek.morphology.SentenceAnalysisRequest\x1a*.zemberek.morphology.SentenceAnalysisProto\x12_\n\x0b\x41nalyzeWord\x12(.zemberek.morphology.WordAnalysisRequest\x1a&.zemberek.morphology.WordAnalysisProtoB\x1d\n\x19zemberek.proto.morphologyP\x01\x62\x06proto3')
)
_WORDANALYSISREQUEST = _descriptor.Descriptor(
name='WordAnalysisRequest',
full_name='zemberek.morphology.WordAnalysisRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.morphology.WordAnalysisRequest.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=41,
serialized_end=77,
)
_DICTIONARYITEMPROTO = _descriptor.Descriptor(
name='DictionaryItemProto',
full_name='zemberek.morphology.DictionaryItemProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lemma', full_name='zemberek.morphology.DictionaryItemProto.lemma', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='primaryPos', full_name='zemberek.morphology.DictionaryItemProto.primaryPos', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secondaryPos', full_name='zemberek.morphology.DictionaryItemProto.secondaryPos', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=79,
serialized_end=157,
)
_SINGLEANALYSISPROTO = _descriptor.Descriptor(
name='SingleAnalysisProto',
full_name='zemberek.morphology.SingleAnalysisProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pos', full_name='zemberek.morphology.SingleAnalysisProto.pos', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dictionaryItem', full_name='zemberek.morphology.SingleAnalysisProto.dictionaryItem', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='analysis', full_name='zemberek.morphology.SingleAnalysisProto.analysis', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='informal', full_name='zemberek.morphology.SingleAnalysisProto.informal', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='runtime', full_name='zemberek.morphology.SingleAnalysisProto.runtime', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lemmas', full_name='zemberek.morphology.SingleAnalysisProto.lemmas', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='morphemes', full_name='zemberek.morphology.SingleAnalysisProto.morphemes', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=388,
)
_MORPHEMEDATAPROTO = _descriptor.Descriptor(
name='MorphemeDataProto',
full_name='zemberek.morphology.MorphemeDataProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='surface', full_name='zemberek.morphology.MorphemeDataProto.surface', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='morpheme', full_name='zemberek.morphology.MorphemeDataProto.morpheme', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=390,
serialized_end=444,
)
_WORDANALYSISPROTO = _descriptor.Descriptor(
name='WordAnalysisProto',
full_name='zemberek.morphology.WordAnalysisProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.morphology.WordAnalysisProto.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='analyses', full_name='zemberek.morphology.WordAnalysisProto.analyses', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=446,
serialized_end=540,
)
_SENTENCEANALYSISREQUEST = _descriptor.Descriptor(
name='SentenceAnalysisRequest',
full_name='zemberek.morphology.SentenceAnalysisRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.morphology.SentenceAnalysisRequest.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containAllAnalyses', full_name='zemberek.morphology.SentenceAnalysisRequest.containAllAnalyses', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=542,
serialized_end=610,
)
_SENTENCEANALYSISPROTO = _descriptor.Descriptor(
name='SentenceAnalysisProto',
full_name='zemberek.morphology.SentenceAnalysisProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.morphology.SentenceAnalysisProto.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='results', full_name='zemberek.morphology.SentenceAnalysisProto.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=612,
serialized_end=715,
)
_SENTENCEWORDANALYSISPROTO = _descriptor.Descriptor(
name='SentenceWordAnalysisProto',
full_name='zemberek.morphology.SentenceWordAnalysisProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='zemberek.morphology.SentenceWordAnalysisProto.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='best', full_name='zemberek.morphology.SentenceWordAnalysisProto.best', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all', full_name='zemberek.morphology.SentenceWordAnalysisProto.all', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=718,
serialized_end=869,
)
_SINGLEANALYSISPROTO.fields_by_name['dictionaryItem'].message_type = _DICTIONARYITEMPROTO
_SINGLEANALYSISPROTO.fields_by_name['morphemes'].message_type = _MORPHEMEDATAPROTO
_WORDANALYSISPROTO.fields_by_name['analyses'].message_type = _SINGLEANALYSISPROTO
_SENTENCEANALYSISPROTO.fields_by_name['results'].message_type = _SENTENCEWORDANALYSISPROTO
_SENTENCEWORDANALYSISPROTO.fields_by_name['best'].message_type = _SINGLEANALYSISPROTO
_SENTENCEWORDANALYSISPROTO.fields_by_name['all'].message_type = _WORDANALYSISPROTO
DESCRIPTOR.message_types_by_name['WordAnalysisRequest'] = _WORDANALYSISREQUEST
DESCRIPTOR.message_types_by_name['DictionaryItemProto'] = _DICTIONARYITEMPROTO
DESCRIPTOR.message_types_by_name['SingleAnalysisProto'] = _SINGLEANALYSISPROTO
DESCRIPTOR.message_types_by_name['MorphemeDataProto'] = _MORPHEMEDATAPROTO
DESCRIPTOR.message_types_by_name['WordAnalysisProto'] = _WORDANALYSISPROTO
DESCRIPTOR.message_types_by_name['SentenceAnalysisRequest'] = _SENTENCEANALYSISREQUEST
DESCRIPTOR.message_types_by_name['SentenceAnalysisProto'] = _SENTENCEANALYSISPROTO
DESCRIPTOR.message_types_by_name['SentenceWordAnalysisProto'] = _SENTENCEWORDANALYSISPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WordAnalysisRequest = _reflection.GeneratedProtocolMessageType('WordAnalysisRequest', (_message.Message,), dict(
DESCRIPTOR = _WORDANALYSISREQUEST,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.WordAnalysisRequest)
))
_sym_db.RegisterMessage(WordAnalysisRequest)
DictionaryItemProto = _reflection.GeneratedProtocolMessageType('DictionaryItemProto', (_message.Message,), dict(
DESCRIPTOR = _DICTIONARYITEMPROTO,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.DictionaryItemProto)
))
_sym_db.RegisterMessage(DictionaryItemProto)
SingleAnalysisProto = _reflection.GeneratedProtocolMessageType('SingleAnalysisProto', (_message.Message,), dict(
DESCRIPTOR = _SINGLEANALYSISPROTO,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.SingleAnalysisProto)
))
_sym_db.RegisterMessage(SingleAnalysisProto)
MorphemeDataProto = _reflection.GeneratedProtocolMessageType('MorphemeDataProto', (_message.Message,), dict(
DESCRIPTOR = _MORPHEMEDATAPROTO,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.MorphemeDataProto)
))
_sym_db.RegisterMessage(MorphemeDataProto)
WordAnalysisProto = _reflection.GeneratedProtocolMessageType('WordAnalysisProto', (_message.Message,), dict(
DESCRIPTOR = _WORDANALYSISPROTO,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.WordAnalysisProto)
))
_sym_db.RegisterMessage(WordAnalysisProto)
SentenceAnalysisRequest = _reflection.GeneratedProtocolMessageType('SentenceAnalysisRequest', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEANALYSISREQUEST,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.SentenceAnalysisRequest)
))
_sym_db.RegisterMessage(SentenceAnalysisRequest)
SentenceAnalysisProto = _reflection.GeneratedProtocolMessageType('SentenceAnalysisProto', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEANALYSISPROTO,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.SentenceAnalysisProto)
))
_sym_db.RegisterMessage(SentenceAnalysisProto)
SentenceWordAnalysisProto = _reflection.GeneratedProtocolMessageType('SentenceWordAnalysisProto', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEWORDANALYSISPROTO,
__module__ = 'morphology_pb2'
# @@protoc_insertion_point(class_scope:zemberek.morphology.SentenceWordAnalysisProto)
))
_sym_db.RegisterMessage(SentenceWordAnalysisProto)
DESCRIPTOR._options = None
_MORPHOLOGYSERVICE = _descriptor.ServiceDescriptor(
name='MorphologyService',
full_name='zemberek.morphology.MorphologyService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=872,
serialized_end=1097,
methods=[
_descriptor.MethodDescriptor(
name='AnalyzeSentence',
full_name='zemberek.morphology.MorphologyService.AnalyzeSentence',
index=0,
containing_service=None,
input_type=_SENTENCEANALYSISREQUEST,
output_type=_SENTENCEANALYSISPROTO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='AnalyzeWord',
full_name='zemberek.morphology.MorphologyService.AnalyzeWord',
index=1,
containing_service=None,
input_type=_WORDANALYSISREQUEST,
output_type=_WORDANALYSISPROTO,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_MORPHOLOGYSERVICE)
DESCRIPTOR.services_by_name['MorphologyService'] = _MORPHOLOGYSERVICE
# @@protoc_insertion_point(module_scope) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/morphology_pb2.py | morphology_pb2.py |
import grpc
import zemberek_grpc.morphology_pb2 as morphology__pb2
class MorphologyServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AnalyzeSentence = channel.unary_unary(
'/zemberek.morphology.MorphologyService/AnalyzeSentence',
request_serializer=morphology__pb2.SentenceAnalysisRequest.SerializeToString,
response_deserializer=morphology__pb2.SentenceAnalysisProto.FromString,
)
self.AnalyzeWord = channel.unary_unary(
'/zemberek.morphology.MorphologyService/AnalyzeWord',
request_serializer=morphology__pb2.WordAnalysisRequest.SerializeToString,
response_deserializer=morphology__pb2.WordAnalysisProto.FromString,
)
class MorphologyServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def AnalyzeSentence(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeWord(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MorphologyServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'AnalyzeSentence': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSentence,
request_deserializer=morphology__pb2.SentenceAnalysisRequest.FromString,
response_serializer=morphology__pb2.SentenceAnalysisProto.SerializeToString,
),
'AnalyzeWord': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeWord,
request_deserializer=morphology__pb2.WordAnalysisRequest.FromString,
response_serializer=morphology__pb2.WordAnalysisProto.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'zemberek.morphology.MorphologyService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,)) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/morphology_pb2_grpc.py | morphology_pb2_grpc.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='normalization.proto',
package='zemberek.normalization',
syntax='proto3',
serialized_options=_b('\n\016zemberek.protoP\001'),
serialized_pb=_b('\n\x13normalization.proto\x12\x16zemberek.normalization\"%\n\x14NormalizationRequest\x12\r\n\x05input\x18\x01 \x01(\t\"@\n\x15NormalizationResponse\x12\x18\n\x10normalized_input\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t2\x80\x01\n\x14NormalizationService\x12h\n\tNormalize\x12,.zemberek.normalization.NormalizationRequest\x1a-.zemberek.normalization.NormalizationResponseB\x12\n\x0ezemberek.protoP\x01\x62\x06proto3')
)
_NORMALIZATIONREQUEST = _descriptor.Descriptor(
name='NormalizationRequest',
full_name='zemberek.normalization.NormalizationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.normalization.NormalizationRequest.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=84,
)
_NORMALIZATIONRESPONSE = _descriptor.Descriptor(
name='NormalizationResponse',
full_name='zemberek.normalization.NormalizationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='normalized_input', full_name='zemberek.normalization.NormalizationResponse.normalized_input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='zemberek.normalization.NormalizationResponse.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=150,
)
DESCRIPTOR.message_types_by_name['NormalizationRequest'] = _NORMALIZATIONREQUEST
DESCRIPTOR.message_types_by_name['NormalizationResponse'] = _NORMALIZATIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NormalizationRequest = _reflection.GeneratedProtocolMessageType('NormalizationRequest', (_message.Message,), dict(
DESCRIPTOR = _NORMALIZATIONREQUEST,
__module__ = 'normalization_pb2'
# @@protoc_insertion_point(class_scope:zemberek.normalization.NormalizationRequest)
))
_sym_db.RegisterMessage(NormalizationRequest)
NormalizationResponse = _reflection.GeneratedProtocolMessageType('NormalizationResponse', (_message.Message,), dict(
DESCRIPTOR = _NORMALIZATIONRESPONSE,
__module__ = 'normalization_pb2'
# @@protoc_insertion_point(class_scope:zemberek.normalization.NormalizationResponse)
))
_sym_db.RegisterMessage(NormalizationResponse)
DESCRIPTOR._options = None
_NORMALIZATIONSERVICE = _descriptor.ServiceDescriptor(
name='NormalizationService',
full_name='zemberek.normalization.NormalizationService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=153,
serialized_end=281,
methods=[
_descriptor.MethodDescriptor(
name='Normalize',
full_name='zemberek.normalization.NormalizationService.Normalize',
index=0,
containing_service=None,
input_type=_NORMALIZATIONREQUEST,
output_type=_NORMALIZATIONRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_NORMALIZATIONSERVICE)
DESCRIPTOR.services_by_name['NormalizationService'] = _NORMALIZATIONSERVICE
# @@protoc_insertion_point(module_scope) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/normalization_pb2.py | normalization_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='preprocess.proto',
package='zemberek.preprocessor',
syntax='proto3',
serialized_options=_b('\n\016zemberek.protoP\001'),
serialized_pb=_b('\n\x10preprocess.proto\x12\x15zemberek.preprocessor\"D\n\x13TokenizationRequest\x12\r\n\x05input\x18\x01 \x01(\t\x12\x1e\n\x16includeTokenBoundaries\x18\x02 \x01(\x08\"E\n\nTokenProto\x12\r\n\x05token\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\r\n\x05start\x18\x03 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x05\"I\n\x14TokenizationResponse\x12\x31\n\x06tokens\x18\x01 \x03(\x0b\x32!.zemberek.preprocessor.TokenProto\"-\n\x19SentenceExtractionRequest\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\"/\n\x1aSentenceExtractionResponse\x12\x11\n\tsentences\x18\x01 \x03(\t2\xf4\x01\n\x14PreprocessingService\x12\x63\n\x08Tokenize\x12*.zemberek.preprocessor.TokenizationRequest\x1a+.zemberek.preprocessor.TokenizationResponse\x12w\n\x10\x45xtractSentences\x12\x30.zemberek.preprocessor.SentenceExtractionRequest\x1a\x31.zemberek.preprocessor.SentenceExtractionResponseB\x12\n\x0ezemberek.protoP\x01\x62\x06proto3')
)
_TOKENIZATIONREQUEST = _descriptor.Descriptor(
name='TokenizationRequest',
full_name='zemberek.preprocessor.TokenizationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='zemberek.preprocessor.TokenizationRequest.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='includeTokenBoundaries', full_name='zemberek.preprocessor.TokenizationRequest.includeTokenBoundaries', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=43,
serialized_end=111,
)
_TOKENPROTO = _descriptor.Descriptor(
name='TokenProto',
full_name='zemberek.preprocessor.TokenProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='zemberek.preprocessor.TokenProto.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='zemberek.preprocessor.TokenProto.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='zemberek.preprocessor.TokenProto.start', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='zemberek.preprocessor.TokenProto.end', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=182,
)
_TOKENIZATIONRESPONSE = _descriptor.Descriptor(
name='TokenizationResponse',
full_name='zemberek.preprocessor.TokenizationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tokens', full_name='zemberek.preprocessor.TokenizationResponse.tokens', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=184,
serialized_end=257,
)
_SENTENCEEXTRACTIONREQUEST = _descriptor.Descriptor(
name='SentenceExtractionRequest',
full_name='zemberek.preprocessor.SentenceExtractionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='document', full_name='zemberek.preprocessor.SentenceExtractionRequest.document', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=304,
)
_SENTENCEEXTRACTIONRESPONSE = _descriptor.Descriptor(
name='SentenceExtractionResponse',
full_name='zemberek.preprocessor.SentenceExtractionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentences', full_name='zemberek.preprocessor.SentenceExtractionResponse.sentences', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=306,
serialized_end=353,
)
_TOKENIZATIONRESPONSE.fields_by_name['tokens'].message_type = _TOKENPROTO
DESCRIPTOR.message_types_by_name['TokenizationRequest'] = _TOKENIZATIONREQUEST
DESCRIPTOR.message_types_by_name['TokenProto'] = _TOKENPROTO
DESCRIPTOR.message_types_by_name['TokenizationResponse'] = _TOKENIZATIONRESPONSE
DESCRIPTOR.message_types_by_name['SentenceExtractionRequest'] = _SENTENCEEXTRACTIONREQUEST
DESCRIPTOR.message_types_by_name['SentenceExtractionResponse'] = _SENTENCEEXTRACTIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TokenizationRequest = _reflection.GeneratedProtocolMessageType('TokenizationRequest', (_message.Message,), dict(
DESCRIPTOR = _TOKENIZATIONREQUEST,
__module__ = 'preprocess_pb2'
# @@protoc_insertion_point(class_scope:zemberek.preprocessor.TokenizationRequest)
))
_sym_db.RegisterMessage(TokenizationRequest)
TokenProto = _reflection.GeneratedProtocolMessageType('TokenProto', (_message.Message,), dict(
DESCRIPTOR = _TOKENPROTO,
__module__ = 'preprocess_pb2'
# @@protoc_insertion_point(class_scope:zemberek.preprocessor.TokenProto)
))
_sym_db.RegisterMessage(TokenProto)
TokenizationResponse = _reflection.GeneratedProtocolMessageType('TokenizationResponse', (_message.Message,), dict(
DESCRIPTOR = _TOKENIZATIONRESPONSE,
__module__ = 'preprocess_pb2'
# @@protoc_insertion_point(class_scope:zemberek.preprocessor.TokenizationResponse)
))
_sym_db.RegisterMessage(TokenizationResponse)
SentenceExtractionRequest = _reflection.GeneratedProtocolMessageType('SentenceExtractionRequest', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEEXTRACTIONREQUEST,
__module__ = 'preprocess_pb2'
# @@protoc_insertion_point(class_scope:zemberek.preprocessor.SentenceExtractionRequest)
))
_sym_db.RegisterMessage(SentenceExtractionRequest)
SentenceExtractionResponse = _reflection.GeneratedProtocolMessageType('SentenceExtractionResponse', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEEXTRACTIONRESPONSE,
__module__ = 'preprocess_pb2'
# @@protoc_insertion_point(class_scope:zemberek.preprocessor.SentenceExtractionResponse)
))
_sym_db.RegisterMessage(SentenceExtractionResponse)
DESCRIPTOR._options = None
_PREPROCESSINGSERVICE = _descriptor.ServiceDescriptor(
name='PreprocessingService',
full_name='zemberek.preprocessor.PreprocessingService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=356,
serialized_end=600,
methods=[
_descriptor.MethodDescriptor(
name='Tokenize',
full_name='zemberek.preprocessor.PreprocessingService.Tokenize',
index=0,
containing_service=None,
input_type=_TOKENIZATIONREQUEST,
output_type=_TOKENIZATIONRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ExtractSentences',
full_name='zemberek.preprocessor.PreprocessingService.ExtractSentences',
index=1,
containing_service=None,
input_type=_SENTENCEEXTRACTIONREQUEST,
output_type=_SENTENCEEXTRACTIONRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PREPROCESSINGSERVICE)
DESCRIPTOR.services_by_name['PreprocessingService'] = _PREPROCESSINGSERVICE
# @@protoc_insertion_point(module_scope) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/preprocess_pb2.py | preprocess_pb2.py |
import grpc
import zemberek_grpc.preprocess_pb2 as preprocess__pb2
class PreprocessingServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Tokenize = channel.unary_unary(
'/zemberek.preprocessor.PreprocessingService/Tokenize',
request_serializer=preprocess__pb2.TokenizationRequest.SerializeToString,
response_deserializer=preprocess__pb2.TokenizationResponse.FromString,
)
self.ExtractSentences = channel.unary_unary(
'/zemberek.preprocessor.PreprocessingService/ExtractSentences',
request_serializer=preprocess__pb2.SentenceExtractionRequest.SerializeToString,
response_deserializer=preprocess__pb2.SentenceExtractionResponse.FromString,
)
class PreprocessingServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def Tokenize(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExtractSentences(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PreprocessingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Tokenize': grpc.unary_unary_rpc_method_handler(
servicer.Tokenize,
request_deserializer=preprocess__pb2.TokenizationRequest.FromString,
response_serializer=preprocess__pb2.TokenizationResponse.SerializeToString,
),
'ExtractSentences': grpc.unary_unary_rpc_method_handler(
servicer.ExtractSentences,
request_deserializer=preprocess__pb2.SentenceExtractionRequest.FromString,
response_serializer=preprocess__pb2.SentenceExtractionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'zemberek.preprocessor.PreprocessingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,)) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/preprocess_pb2_grpc.py | preprocess_pb2_grpc.py |
import grpc
import zemberek_grpc.language_id_pb2 as language__id__pb2
class LanguageIdServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Detect = channel.unary_unary(
'/zemberek.langid.LanguageIdService/Detect',
request_serializer=language__id__pb2.LanguageIdRequest.SerializeToString,
response_deserializer=language__id__pb2.LanguageIdResponse.FromString,
)
self.DetectFast = channel.unary_unary(
'/zemberek.langid.LanguageIdService/DetectFast',
request_serializer=language__id__pb2.LanguageIdRequest.SerializeToString,
response_deserializer=language__id__pb2.LanguageIdResponse.FromString,
)
class LanguageIdServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def Detect(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DetectFast(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LanguageIdServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Detect': grpc.unary_unary_rpc_method_handler(
servicer.Detect,
request_deserializer=language__id__pb2.LanguageIdRequest.FromString,
response_serializer=language__id__pb2.LanguageIdResponse.SerializeToString,
),
'DetectFast': grpc.unary_unary_rpc_method_handler(
servicer.DetectFast,
request_deserializer=language__id__pb2.LanguageIdRequest.FromString,
response_serializer=language__id__pb2.LanguageIdResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'zemberek.langid.LanguageIdService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,)) | zemberek-grpc | /zemberek_grpc-0.16.1.tar.gz/zemberek_grpc-0.16.1/zemberek_grpc/language_id_pb2_grpc.py | language_id_pb2_grpc.py |
# ZEMBEREK-PYTHON
Python implementation of the Natural Language Processing library
for Turkish, [zemberek-nlp](https://github.com/ahmetaa/zemberek-nlp). It is based on
zemberek 0.17.1 and is written entirely in Python, so there is no need to set up
a Java development environment to run it.
*Source Code*
https://github.com/Loodos/zemberek-python
**Dependencies**
* antlr4-python3-runtime==4.8
* numpy>=1.19.0
## Supported Modules
Currently, the following modules are supported.
* Core (Partially)
* TurkishMorphology (Partially)
* Single Word Analysis
* Diacritics Ignored Analysis
* Word Generation
* Sentence Analysis
* Ambiguity Resolution
* Tokenization
* Sentence Boundary Detection
* Tokenization
* Normalization (Partially)
* Spelling Suggestion
* Noisy Text Normalization
## Installation
You can install the package with pip:
pip install zemberek-python
## Examples
Example usages can be found in [examples.py](zemberek/examples.py)
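For a quick start, here is a minimal sketch based on the calls used in
[examples.py](zemberek/examples.py):
    from zemberek import TurkishMorphology, TurkishSentenceNormalizer
    morphology = TurkishMorphology.create_with_defaults()
    normalizer = TurkishSentenceNormalizer(morphology)
    print(normalizer.normalize("Yrn okua gidicem"))  # noisy text normalization
    for analysis in morphology.analyze("kalemin"):   # single word analysis
        print(analysis)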
## Notes
There are some minor changes in places where the original code relies on Java-specific
functionality and data structures. We used Python equivalents as much as we could,
but in some cases we had to change them, which slightly affects performance and accuracy.
In the [MultiLevelMphf](zemberek/core/hash/multi_level_mphf.py) class, the original Java
implementation performs some integer multiplications that could not be reproduced with
vanilla Python 'int'; the results were not the same. Reimplementing them with numpy.int32
and numpy.float32 (Java's default int and float types are 4 bytes) gave the same results
as Java, but these operations often raise a RuntimeWarning because the multiplication
overflows, while Java reports no overflow at all. I could not find a reasonable explanation
for this, nor a better way to implement it, so overflow warnings are suppressed for
MultiLevelMphf. Please be aware that this is not healthy behaviour, and you should be
careful when using this code.
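As a small illustration (not part of the library), numpy's 4-byte integer type wraps
around on overflow exactly like Java's int, but emits a RuntimeWarning while doing so:
    import warnings
    import numpy as np
    warnings.simplefilter("ignore", RuntimeWarning)  # suppress the overflow warning for this demo (the library does the same for MultiLevelMphf)
    a = np.int32(2147483647)   # Integer.MAX_VALUE in Java
    print(a * np.int32(2))     # -2, the same wrap-around a Java int produces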
## Credits
This project is a Python port of [zemberek-nlp](https://github.com/ahmetaa/zemberek-nlp).
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/README.md | README.md |
import time
import logging
from zemberek import (
TurkishSpellChecker,
TurkishSentenceNormalizer,
TurkishSentenceExtractor,
TurkishMorphology,
TurkishTokenizer
)
logger = logging.getLogger(__name__)
examples = ["Yrn okua gidicem",
"Tmm, yarin havuza giricem ve aksama kadar yaticam :)",
"ah aynen ya annemde fark ettı siz evinizden cıkmayın diyo",
"gercek mı bu? Yuh! Artık unutulması bile beklenmiyo",
"Hayır hayat telaşm olmasa alacam buraları gökdelen dikicem.",
"yok hocam kesınlıkle oyle birşey yok",
"herseyi soyle hayatında olmaması gerek bence boyle ınsanların falan baskı yapıyosa",
"email adresim [email protected]",
"Kredi başvrusu yapmk istiyrum.",
"Bankanizin hesp blgilerini ogrenmek istyorum."]
morphology = TurkishMorphology.create_with_defaults()
# SENTENCE NORMALIZATION
start = time.time()
normalizer = TurkishSentenceNormalizer(morphology)
logger.info(f"Normalization instance created in: {time.time() - start} s")
start = time.time()
for example in examples:
print(example)
print(normalizer.normalize(example), "\n")
logger.info(f"Sentences normalized in: {time.time() - start} s")
start = time.time()
sc = TurkishSpellChecker(morphology)
logger.info(f"Spell checker instance created in: {time.time() - start} s")
# SPELLING SUGGESTION
li = ["okuyablirim", "tartısıyor", "Ankar'ada", "knlıca", "yapablrim", "kıredi", "geldm", "geliyom", "aldm", "asln"]
start = time.time()
for word in li:
print(word + " = " + ' '.join(sc.suggest_for_word(word)))
logger.info(f"Spells checked in: {time.time() - start} s")
# SENTENCE BOUNDARY DETECTION
start = time.time()
extractor = TurkishSentenceExtractor()
print("Extractor instance created in: ", time.time() - start, " s")
text = "İnsanoğlu aslında ne para ne sevgi ne kariyer ne şöhret ne de çevre ile sonsuza dek mutlu olabilecek bir " \
"yapıya sahiptir. Dış kaynaklardan gelebilecek bu mutluluklar sadece belirli bir zaman için insanı mutlu " \
"kılıyor. Kişi bu kaynakları elde ettiği zaman belirli bir dönem için kendini iyi hissediyor, ancak alışma " \
"dönemine girdiği andan itibaren bu iyilik hali hızla tükeniyor. Mutlu olma sanatının özü bu değildir. Gerçek " \
"mutluluk, kişinin her türlü olaya ve duruma karşı kendini pozitif tutarak mutlu hissedebilmesi halidir. Bu " \
"davranış şeklini edinen insan, zor günlerde güçlü, mutlu günlerde zevk alan biri olur ve mutluluğu kalıcı " \
"kılar. "
start = time.time()
sentences = extractor.from_paragraph(text)
print(f"Sentences separated in {time.time() - start}s")
for sentence in sentences:
print(sentence)
print("\n")
# SINGLE WORD MORPHOLOGICAL ANALYSIS
results = morphology.analyze("kalemin")
for result in results:
print(result)
print("\n")
# SENTENCE ANALYSIS AND DISAMBIGUATION
sentence = "Yarın kar yağacak."
analysis = morphology.analyze_sentence(sentence)
after = morphology.disambiguate(sentence, analysis)
print("\nBefore disambiguation")
for e in analysis:
print(f"Word = {e.inp}")
for s in e:
print(s.format_string())
print("\nAfter disambiguation")
for s in after.best_analysis():
print(s.format_string())
# TOKENIZATION
tokenizer = TurkishTokenizer.DEFAULT
tokens = tokenizer.tokenize("Saat 12:00.")
for token in tokens:
print('Content = ', token.content)
print('Type = ', token.type_.name)
print('Start = ', token.start)
print('Stop = ', token.end, '\n') | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/examples.py | examples.py |
from typing import Dict, List, Tuple
class Node:
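    """A node of the character graph used for spell checking and normalization.
    Each node stores a single character, an optional word (set when the node ends a
    stem or an ending), its type (EMPTY, WORD, ENDING or GRAPH_ROOT), child nodes
    keyed by character, and optional epsilon connections to other nodes.
    """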
TYPE_EMPTY = 0
TYPE_WORD = 1
TYPE_ENDING = 2
TYPE_GRAPH_ROOT = 3
def __init__(self, index: int, char: str, type_: int, word: str = None):
self.index = index
self.char = char
self.type_ = type_
self.word = word
self.epsilon_nodes = None
self.nodes: Dict[str, Node] = {}
def __str__(self):
sb = "[" + self.char
characters = [c for c in self.nodes.keys()]
characters.sort()
if len(self.nodes) > 0:
sb += " children=" + ', '.join(characters)
if self.word:
sb += " word=" + self.word
sb += "]"
return sb
def __hash__(self):
return self.index
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, Node):
return self.index == other.index
else:
return False
def has_epsilon_connection(self) -> bool:
return self.epsilon_nodes is not None
def has_child(self, c: str) -> bool:
if self.has_immediate_child(c):
return True
elif self.epsilon_nodes is None:
return False
else:
for node in self.epsilon_nodes:
if node.has_immediate_child(c):
return True
return False
def has_immediate_child(self, c: str) -> bool:
return c in self.nodes.keys()
def get_immediate_child(self, c: str) -> 'Node':
return self.nodes.get(c)
def get_immediate_child_nodes(self) -> Tuple['Node']:
return tuple(self.nodes.values())
def get_immediate_child_node_iterable(self) -> Tuple['Node']:
return tuple(self.nodes.values())
def get_all_child_nodes(self) -> Tuple['Node', ...]:
if self.epsilon_nodes is None:
return tuple(self.nodes.values())
else:
node_list = list(self.nodes.values())
for empty_node in self.epsilon_nodes:
for n in empty_node.nodes.values():
node_list.append(n)
return tuple(node_list)
def get_child_list(self, c: str = None, char_array: Tuple[str, ...] = None) -> Tuple['Node', ...]:
children = []
if c:
self.add_if_child_exists(c, children)
if self.epsilon_nodes:
for empty_node in self.epsilon_nodes:
empty_node.add_if_child_exists(c, children)
else: # it means char_array is not None
for c_ in char_array:
self.add_if_child_exists(c_, children)
if self.epsilon_nodes:
for empty_node in self.epsilon_nodes:
empty_node.add_if_child_exists(c_, children)
return tuple(children)
def connect_epsilon(self, node: 'Node') -> bool:
if self.epsilon_nodes is None:
self.epsilon_nodes = [node]
else:
for n in self.epsilon_nodes:
if n == node:
return False
self.epsilon_nodes.append(node)
return True
def add_if_child_exists(self, c: str, node_list: List['Node']):
child = self.nodes.get(c)
if child:
node_list.append(child)
def add_child(self, index: int, c: str, type_: int, word: str = None) -> 'Node':
node = self.nodes.get(c)
if word:
if node is None:
node = Node(index, c, type_, word)
self.nodes[c] = node
else:
node.word = word
node.type_ = type_
else:
if node is None:
node = Node(index, c, type_)
self.nodes[c] = node
return node | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/normalization/node.py | node.py |
from __future__ import annotations
import struct
from enum import Enum, auto
from typing import Dict, Set, Tuple, TYPE_CHECKING
from abc import ABC
if TYPE_CHECKING:
from zemberek.normalization.node import Node
from zemberek.normalization.character_graph import CharacterGraph
from zemberek.core.turkish import TurkishAlphabet
class CharacterGraphDecoder:
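    """Searches a CharacterGraph for words within max_penalty edit operations
    (insertion, deletion, substitution, transposition) of a given input. A
    CharMatcher can be supplied to treat several characters as equivalent,
    e.g. to ignore Turkish diacritics.
    """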
DIACRITICS_IGNORING_MATCHER: 'CharacterGraphDecoder.DiacriticsIgnoringMatcher'
def __init__(self, graph: CharacterGraph):
self.graph = graph
self.max_penalty = 1.0
self.check_near_key_substitution = False
def get_suggestions(self, input_: str, matcher: 'CharacterGraphDecoder.CharMatcher') -> Tuple[str]:
return tuple(CharacterGraphDecoder.Decoder(matcher, self).decode(input_).keys())
class Decoder:
def __init__(self, matcher: 'CharacterGraphDecoder.CharMatcher', outer: 'CharacterGraphDecoder'):
self.finished: Dict[str, float] = {}
self.matcher = matcher
self.outer = outer
def decode(self, inp: str) -> Dict[str, float]:
hyp = CharacterGraphDecoder.Hypothesis(None, self.outer.graph.root, penalty=0.0,
operation=CharacterGraphDecoder.Operation.N_A, word=None,
ending=None)
next_: Set['CharacterGraphDecoder.Hypothesis'] = self.expand(hyp, inp)
while True:
new_hyps: Set['CharacterGraphDecoder.Hypothesis'] = set()
for hypothesis in next_:
expand_: Set['CharacterGraphDecoder.Hypothesis'] = self.expand(hypothesis, inp)
new_hyps |= expand_ # updating new_hyps set with new elements of expand
if len(new_hyps) == 0:
return self.finished
next_ = new_hyps
def expand(self, hypothesis: 'CharacterGraphDecoder.Hypothesis', inp: str) -> \
Set['CharacterGraphDecoder.Hypothesis']:
new_hypotheses: Set['CharacterGraphDecoder.Hypothesis'] = set()
next_index = hypothesis.char_index + 1
next_char = inp[next_index] if next_index < len(inp) else chr(0)
if next_index < len(inp):
cc = None if self.matcher is None else self.matcher.matches(next_char)
if hypothesis.node.has_epsilon_connection():
child_list: Tuple[Node] = hypothesis.node.get_child_list(c=next_char) if cc is None else \
hypothesis.node.get_child_list(char_array=cc)
for child in child_list:
h = hypothesis.get_new_move_forward(child, 0.0, CharacterGraphDecoder.Operation.NO_ERROR)
h.set_word(child)
new_hypotheses.add(h)
if next_index >= len(inp) - 1 and h.node.word:
self.add_hypothesis(h)
elif cc is None:
child: Node = hypothesis.node.get_immediate_child(next_char)
if child:
h = hypothesis.get_new_move_forward(child, 0.0, CharacterGraphDecoder.Operation.NO_ERROR)
h.set_word(child)
new_hypotheses.add(h)
if next_index >= len(inp) - 1 and h.node.word:
self.add_hypothesis(h)
else:
for c in cc:
child: Node = hypothesis.node.get_immediate_child(c)
if child:
h = hypothesis.get_new_move_forward(child, 0.0, CharacterGraphDecoder.Operation.NO_ERROR)
h.set_word(child)
new_hypotheses.add(h)
if next_index >= len(inp) - 1 and h.node.word:
self.add_hypothesis(h)
elif hypothesis.node.word:
self.add_hypothesis(hypothesis)
if hypothesis.penalty >= self.outer.max_penalty:
return new_hypotheses
else:
all_child_notes = hypothesis.node.get_all_child_nodes() if hypothesis.node.has_epsilon_connection() \
else hypothesis.node.get_immediate_child_node_iterable()
if next_index < len(inp):
for child in all_child_notes:
# penalty = 0.0
if self.outer.check_near_key_substitution:
# IMPLEMENT IF NEEDED
raise NotImplementedError("Not implemented, implement if needed")
else:
penalty = 1.0
if penalty > 0.0 and hypothesis.penalty + penalty <= self.outer.max_penalty:
h = hypothesis.get_new_move_forward(child, penalty,
CharacterGraphDecoder.Operation.SUBSTITUTION)
h.set_word(child)
if next_index == len(inp) - 1:
if h.node.word:
self.add_hypothesis(h)
else:
new_hypotheses.add(h)
if hypothesis.penalty + 1.0 > self.outer.max_penalty:
return new_hypotheses
else:
new_hypotheses.add(hypothesis.get_new_move_forward(hypothesis.node, 1.0,
CharacterGraphDecoder.Operation.DELETION))
for child in all_child_notes:
h = hypothesis.get_new(child, 1.0, CharacterGraphDecoder.Operation.INSERTION)
h.set_word(child)
new_hypotheses.add(h)
if len(inp) > 2 and next_index < len(inp) - 1:
transpose: str = inp[next_index + 1]
if self.matcher:
tt: Tuple[str] = self.matcher.matches(transpose)
cc: Tuple[str] = self.matcher.matches(next_char)
for t in tt:
next_nodes: Tuple[Node] = hypothesis.node.get_child_list(c=t)
for next_node in next_nodes:
for c in cc:
if hypothesis.node.has_child(t) and next_node.has_child(c):
for n in next_node.get_child_list(c=c):
h = hypothesis.get_new(n,
1.0,
CharacterGraphDecoder.Operation.TRANSPOSITION,
index=next_index + 1)
h.set_word(n)
if next_index == len(inp) - 1:
if h.node.word:
self.add_hypothesis(h)
else:
new_hypotheses.add(h)
else:
next_nodes: Tuple[Node] = hypothesis.node.get_child_list(c=transpose)
for next_node in next_nodes:
if hypothesis.node.has_child(transpose) and next_node.has_child(next_char):
for n in next_node.get_child_list(c=next_char):
h = hypothesis.get_new(n, 1.0, CharacterGraphDecoder.Operation.TRANSPOSITION,
next_index + 1)
h.set_word(n)
if next_index == len(inp) - 1:
if h.node.word:
self.add_hypothesis(h)
else:
new_hypotheses.add(h)
return new_hypotheses
def add_hypothesis(self, hypothesis: 'CharacterGraphDecoder.Hypothesis'):
hyp_word = hypothesis.get_content()
if hyp_word not in self.finished.keys():
self.finished[hyp_word] = hypothesis.penalty
elif self.finished[hyp_word] > hypothesis.penalty:
self.finished[hyp_word] = hypothesis.penalty
class Hypothesis:
def __init__(self, previous, node: Node, penalty: float,
operation: 'CharacterGraphDecoder.Operation', word, ending, char_index: int = -1):
self.previous = previous # previous: Hypothesis, word: str, ending: str
self.node = node
self.penalty = penalty
self.operation = operation
self.word = word
self.ending = ending
self.char_index = char_index
@staticmethod
def float_to_int_bits(f: float) -> int:
s = struct.pack('>f', f)
return struct.unpack('>l', s)[0]
def __eq__(self, other):
if self is other:
return True
elif other is not None and self.__class__ == other.__class__:
if self.char_index != other.char_index:
return False
elif self.penalty != other.penalty:
return False
elif self.node != other.node:
return False
else:
return False if self.word != other.word else (self.ending == other.ending)
else:
return False
def __hash__(self):
result = self.char_index
result = 31 * result + hash(self.node)
result = 31 * result + (self.float_to_int_bits(self.penalty) if self.penalty != 0.0 else 0)
result = 31 * result + (hash(self.word) if self.word is not None else 0)
result = 31 * result + (hash(self.ending) if self.ending is not None else 0)
return result
def get_new(self, node: Node, penalty_to_add: float, op: 'CharacterGraphDecoder.Operation',
index: int = None) -> 'CharacterGraphDecoder.Hypothesis':
char_index = self.char_index if index is None else index
return CharacterGraphDecoder.Hypothesis(self, node, self.penalty + penalty_to_add, op, self.word,
self.ending, char_index=char_index)
def get_new_move_forward(self, node: Node, penalty_to_add: float, op: 'CharacterGraphDecoder.Operation') -> \
'CharacterGraphDecoder.Hypothesis':
return CharacterGraphDecoder.Hypothesis(self, node, self.penalty + penalty_to_add, op, self.word,
self.ending, char_index=self.char_index + 1)
def get_content(self):
w = "" if self.word is None else self.word
e = "" if self.ending is None else self.ending
return w + e
def set_word(self, node: Node):
if node.word:
if node.type_ == 1:
self.word = node.word
elif node.type_ == 2:
self.ending = node.word
class CharMatcher(ABC):
def matches(self, var1: str) -> Tuple[str, ...]:
raise NotImplementedError
class DiacriticsIgnoringMatcher(CharMatcher):
map_: Dict[int, Tuple[str, ...]] = {}
def __init__(self):
all_letters = TurkishAlphabet.INSTANCE.all_letters + "+.,'-"
for c in all_letters:
self.map_[ord(c)] = (c,)
self.map_[99] = (u'c', u'ç')
self.map_[103] = (u'g', u'ğ')
self.map_[305] = (u'ı', u'i')
self.map_[105] = (u'ı', u'i')
self.map_[111] = (u'o', u'ö')
self.map_[115] = (u's', u'ş')
self.map_[117] = (u'u', u'ü')
self.map_[97] = (u'a', u'â')
self.map_[105] = (u'i', u'î')
self.map_[117] = (u'u', u'û')
self.map_[67] = (u'C', u'Ç')
self.map_[71] = (u'G', u'Ğ')
self.map_[73] = (u'I', u'İ')
self.map_[304] = (u'İ', u'I')
self.map_[79] = (u'O', u'Ö')
self.map_[214] = (u'Ö', u'Ş')
self.map_[85] = (u'U', u'Ü')
self.map_[65] = (u'A', u'Â')
self.map_[304] = (u'İ', u'Î')
self.map_[85] = (u'U', u'Û')
def matches(self, c: str) -> Tuple[str, ...]:
res = self.map_.get(ord(c))
return (c,) if res is None else res
class Operation(Enum):
NO_ERROR = auto()
INSERTION = auto()
DELETION = auto()
SUBSTITUTION = auto()
TRANSPOSITION = auto()
N_A = auto()
CharacterGraphDecoder.DIACRITICS_IGNORING_MATCHER = CharacterGraphDecoder.DiacriticsIgnoringMatcher() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/normalization/character_graph_decoder.py | character_graph_decoder.py |
from __future__ import annotations
import os
import re
import logging
from pkg_resources import resource_filename
from operator import itemgetter
from typing import List, Set, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology import TurkishMorphology
from zemberek.core.turkish import TurkishAlphabet, Turkish
from zemberek.morphology.analysis.word_analysis_surface_formatter import WordAnalysisSurfaceFormatter
from zemberek.lm import SmoothLM
from zemberek.normalization.stem_ending_graph import StemEndingGraph
from zemberek.normalization.character_graph_decoder import CharacterGraphDecoder
logger = logging.getLogger(__name__)
class TurkishSpellChecker:
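    """Produces spelling suggestions for a word by decoding a stem-ending
    character graph, then ranks the candidates with a unigram language model
    or, for normalization, with the surrounding word context.
    """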
formatter = WordAnalysisSurfaceFormatter()
def __init__(self, morphology: TurkishMorphology, matcher: CharacterGraphDecoder.CharMatcher = None,
decoder: CharacterGraphDecoder = None):
self.morphology = morphology
if not decoder:
graph = StemEndingGraph(morphology)
self.decoder = CharacterGraphDecoder(graph.stem_graph)
self.unigram_model: SmoothLM = SmoothLM.builder(
resource=resource_filename("zemberek", os.path.join("resources", "lm-unigram.slm"))).build()
self.char_matcher = matcher
else:
self.decoder = decoder
self.char_matcher = matcher
def suggest_for_word(self, word: str, lm: SmoothLM = None) -> Tuple[str]:
if not lm:
lm = self.unigram_model
unranked: Tuple[str] = self.get_unranked_suggestions(word)
return self.rank_with_unigram_probability(unranked, lm)
def suggest_for_word_for_normalization(self, word: str, left_context: str, right_context: str, lm: SmoothLM) -> \
Tuple[str]:
unranked: Tuple[str] = self.get_unranked_suggestions(word)
if lm is None:
            logger.warning("No language model provided. Returning unranked results.")
return unranked
if lm.order < 2:
logger.warning("Language model order is 1. For context ranking it should be at least 2. "
"Unigram ranking will be applied.")
return self.suggest_for_word(word, lm)
vocabulary = lm.vocabulary
results: List[Tuple[str, float]] = []
for string in unranked:
if left_context is None:
left_context = vocabulary.sentence_start
else:
left_context = self.normalize_for_lm(left_context)
if right_context is None:
right_context = vocabulary.sentence_end
else:
right_context = self.normalize_for_lm(right_context)
w = self.normalize_for_lm(string)
word_index = vocabulary.index_of(w)
left_index = vocabulary.index_of(left_context)
right_index = vocabulary.index_of(right_context)
score: float
if lm.order == 2:
score = lm.get_probability((left_index, word_index)) + lm.get_probability((word_index, right_index))
else:
score = lm.get_probability((left_index, word_index, right_index))
results.append((string, score))
results.sort(key=itemgetter(1), reverse=True)
return tuple(item for item, _ in results)
def get_unranked_suggestions(self, word: str) -> Tuple[str]:
normalized = TurkishAlphabet.INSTANCE.normalize(re.sub("['’]", "", word))
strings: Tuple[str] = self.decoder.get_suggestions(normalized, self.char_matcher)
case_type = self.formatter.guess_case(word)
if case_type == WordAnalysisSurfaceFormatter.CaseType.MIXED_CASE or case_type == \
WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE:
case_type = WordAnalysisSurfaceFormatter.CaseType.DEFAULT_CASE
results: Set[str] = set()
for string in strings:
analyses = self.morphology.analyze(string)
for analysis in analyses:
if analysis.is_unknown():
continue
formatted = self.formatter.format_to_case(analysis, case_type, self.get_apostrophe(word))
results.add(formatted)
return tuple(results)
def rank_with_unigram_probability(self, strings: Tuple[str], lm: SmoothLM) -> Tuple[str]:
if lm is None:
logger.warning("No language model provided, returning unranked results.")
return strings
else:
results: List[Tuple[str, float]] = []
for string in strings:
w = self.normalize_for_lm(string)
word_index = lm.vocabulary.index_of(w)
results.append((w, lm.get_unigram_probability(word_index)))
results.sort(key=itemgetter(1), reverse=True)
return tuple(word for word, _ in results)
@staticmethod
def normalize_for_lm(s: str) -> str:
return Turkish.capitalize(s) if s.find(chr(39)) > 0 else s.translate(TurkishAlphabet.INSTANCE.lower_map).lower()
@staticmethod
def get_apostrophe(inp: str) -> str:
if inp.find(chr(8217)) > 0:
return "’"
else:
return "'" if inp.find(chr(39)) > 0 else None | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/normalization/turkish_spell_checker.py | turkish_spell_checker.py |
import math
from pkg_resources import resource_filename
from typing import List, Tuple, Dict, FrozenSet, Set, Union, OrderedDict as ODict
import os
import numpy as np
from collections import OrderedDict
from zemberek.core.turkish import TurkishAlphabet, SecondaryPos
from zemberek.lm import SmoothLM
from zemberek.morphology import TurkishMorphology
from zemberek.morphology.analysis.word_analysis import WordAnalysis
from zemberek.morphology.analysis.informal_analysis_converter import InformalAnalysisConverter
from zemberek.morphology.generator import WordGenerator
from zemberek.tokenization.turkish_tokenizer import TurkishTokenizer
from zemberek.tokenization.token import Token
from zemberek.normalization.stem_ending_graph import StemEndingGraph
from zemberek.normalization.character_graph_decoder import CharacterGraphDecoder
from zemberek.normalization.turkish_spell_checker import TurkishSpellChecker
from zemberek.normalization.deasciifier.deasciifier import Deasciifier
def load_replacements() -> Dict[str, str]:
with open(
resource_filename("zemberek", os.path.join("resources", "normalization", "multi-word-replacements.txt")),
"r",
encoding="utf-8"
) as f:
replacements: Dict[str, str] = {}
for line in f:
tokens = line.replace('\n', "").split("=")
replacements[tokens[0].strip()] = tokens[1].strip()
return replacements
def load_no_split() -> FrozenSet[str]:
with open(
resource_filename("zemberek", os.path.join("resources", "normalization", "no-split.txt")),
"r",
encoding="utf-8"
) as f:
s = set()
for line in f:
if len(line.replace('\n', "").strip()) > 0:
s.add(line.replace('\n', "").strip())
return frozenset(s)
def load_common_split() -> Dict[str, str]:
common_splits: Dict[str, str] = {}
with open(
resource_filename("zemberek", os.path.join("resources", "normalization", "split.txt")),
"r",
encoding="utf-8"
) as f:
for line in f:
tokens = line.replace('\n', "").split('-')
common_splits[tokens[0].strip()] = tokens[1].strip()
return common_splits
def load_multimap(resource: str) -> ODict[str, Tuple[str]]:
with open(resource, "r", encoding="utf-8") as f:
lines: List[str] = f.read().split('\n')
multimap: OrderedDict[str, Tuple[str, ...]] = OrderedDict()
for i, line in enumerate(lines):
if len(line.strip()) == 0:
continue
index = line.find("=")
if index < 0:
raise Exception(f"Line needs to have `=` symbol. But it is: {i} - {line}")
key, value = line[0:index].strip(), line[index + 1:].strip()
        # collect every value mapped to the key; comma separated values are split into multiple entries
        values = tuple(value.split(',')) if value.find(',') >= 0 else (value,)
        if key in multimap.keys():
            multimap[key] = multimap[key] + values
        else:
            multimap[key] = values
return multimap
class TurkishSentenceNormalizer:
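    """Normalizes noisy Turkish sentences. Candidate replacements for every token
    are collected from manual/graph/ascii lookup tables, informal morphology
    conversion and the spell checker; the best candidate sequence is then selected
    with a bigram language model.
    """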
START: 'TurkishSentenceNormalizer.Candidate'
END: 'TurkishSentenceNormalizer.Candidate'
END_CANDIDATES: 'TurkishSentenceNormalizer.Candidates'
def __init__(self, morphology: TurkishMorphology):
self.morphology = morphology
self.analysis_converter: InformalAnalysisConverter = InformalAnalysisConverter(morphology.word_generator)
self.lm: SmoothLM = SmoothLM.builder(resource_filename("zemberek", os.path.join("resources", "lm.2gram.slm"))). \
log_base(np.e).build()
graph = StemEndingGraph(morphology)
decoder = CharacterGraphDecoder(graph.stem_graph)
self.spell_checker = TurkishSpellChecker(morphology, decoder=decoder,
matcher=CharacterGraphDecoder.DIACRITICS_IGNORING_MATCHER)
self.replacements: Dict[str, str] = load_replacements()
self.no_split_words: FrozenSet[str] = load_no_split()
self.common_splits = load_common_split()
with open(
resource_filename("zemberek", os.path.join("resources", "normalization", "question-suffixes.txt")),
"r",
encoding="utf-8"
) as f:
lines = f.read().split('\n')
del f
self.common_connected_suffixes: FrozenSet[str] = frozenset(lines)
self.always_apply_deasciifier = False
self.lookup_manual: Dict[str, Tuple[str]] = load_multimap(
resource_filename("zemberek", os.path.join("resources", "normalization", "candidates-manual.txt")))
self.lookup_from_graph: Dict[str, Tuple[str]] = load_multimap(
resource_filename("zemberek", os.path.join("resources", "normalization", "lookup-from-graph.txt"))
)
self.lookup_from_ascii: Dict[str, Tuple[str]] = load_multimap(
resource_filename("zemberek", os.path.join("resources", "normalization", "ascii-map.txt")))
for s in self.lookup_manual.keys():
try:
self.lookup_from_graph.pop(s)
except KeyError:
pass
self.informal_ascii_tolerant_morphology = TurkishMorphology.builder(morphology.lexicon) \
.use_informal_analysis().ignore_diacritics_in_analysis_().build()
def normalize(self, sentence: str) -> str:
processed = self.pre_process(sentence)
tokens: Tuple[Token] = tuple(TurkishTokenizer.DEFAULT.tokenize(processed))
candidates_list: List['TurkishSentenceNormalizer.Candidates'] = []
candidates: List[str] = []
candidates_set: Set[str] = set()
for i, current_token in enumerate(tokens):
current = current_token.content
next_ = None if i == len(tokens) - 1 else tokens[i + 1].content
previous = None if i == 0 else tokens[i - 1].content
candidates.clear()
candidates_set.clear()
for c in self.lookup_manual.get(current, ()) + self.lookup_from_graph.get(current, ()) + \
self.lookup_from_ascii.get(current, ()):
if c not in candidates_set:
candidates.append(c)
candidates_set.add(c)
# candidates.update(self.lookup_manual.get(current, ()))
# candidates.update(self.lookup_from_graph.get(current, ()))
# candidates.update(self.lookup_from_ascii.get(current, ()))
analyses: WordAnalysis = self.informal_ascii_tolerant_morphology.analyze(current)
for analysis in analyses:
if analysis.contains_informal_morpheme():
result: Union[WordGenerator.Result, TurkishSentenceNormalizer.Candidates]
result = self.analysis_converter.convert(current, analysis)
if result is not None and result.surface not in candidates_set:
candidates.append(result.surface)
candidates_set.add(result.surface)
else:
results: Tuple[WordGenerator.Result] = self.morphology.word_generator.generate(
item=analysis.item, morphemes=analysis.get_morphemes()
)
for result in results:
if result.surface not in candidates_set:
candidates_set.add(result.surface)
candidates.append(result.surface)
if len(analyses.analysis_results) == 0 and len(current) > 3:
spell_candidates = self.spell_checker.suggest_for_word_for_normalization(
current, previous, next_, self.lm
)
if len(spell_candidates) > 3:
spell_candidates = spell_candidates[:3]
candidates.extend([c for c in spell_candidates if c not in candidates_set])
candidates_set.update(spell_candidates)
if len(candidates) == 0 or self.morphology.analyze(current).is_correct():
if current not in candidates_set:
candidates_set.add(current)
candidates.append(current)
result = TurkishSentenceNormalizer.Candidates(current_token.content,
tuple(TurkishSentenceNormalizer.Candidate(s) for
s in candidates))
candidates_list.append(result)
return ' '.join(self.decode(candidates_list))
def decode(self, candidates_list: List['TurkishSentenceNormalizer.Candidates']) -> Tuple[str]:
current: List['TurkishSentenceNormalizer.Hypothesis'] = []
next_: List['TurkishSentenceNormalizer.Hypothesis'] = []
candidates_list.append(TurkishSentenceNormalizer.END_CANDIDATES)
initial = TurkishSentenceNormalizer.Hypothesis()
lm_order = self.lm.order
initial.history = [TurkishSentenceNormalizer.START] * (lm_order - 1)
initial.current = TurkishSentenceNormalizer.START
initial.score = np.float32(0.)
current.append(initial)
for candidates in candidates_list:
for h in current:
for c in candidates.candidates:
new_hyp = TurkishSentenceNormalizer.Hypothesis()
                    # slide the n-gram history window: drop the oldest entry and append the current candidate
                    hist = h.history[1:] + [h.current]
new_hyp.current = c
new_hyp.history = hist
new_hyp.previous = h
indexes = [0] * lm_order
for j in range(lm_order - 1):
indexes[j] = self.lm.vocabulary.index_of(hist[j].content)
indexes[-1] = self.lm.vocabulary.index_of(c.content)
score = self.lm.get_probability(tuple(indexes))
new_hyp.score = np.float32(h.score + score)
try:
idx = next_.index(new_hyp)
next_[idx] = new_hyp if new_hyp.score > next_[idx].score else next_[idx]
except ValueError:
next_.append(new_hyp)
current = next_
next_ = []
best: 'TurkishSentenceNormalizer.Hypothesis' = self.get_best(current)
seq: List[str] = []
        h = best
        # the best hypothesis ends with the </s> marker; skip it before collecting the words
        h = h.previous
while h and h.current != TurkishSentenceNormalizer.START:
seq.append(h.current.content)
h = h.previous
return tuple(reversed(seq))
@staticmethod
def get_best(li: List['TurkishSentenceNormalizer.Hypothesis']) -> 'TurkishSentenceNormalizer.Hypothesis':
best = None
for t in li:
if t:
if not best or t.score > best.score:
best = t
return best
def pre_process(self, sentence: str) -> str:
sentence = sentence.translate(TurkishAlphabet.lower_map).lower()
tokens: Tuple[Token] = TurkishTokenizer.DEFAULT.tokenize(sentence)
s: str = self.replace_common(tokens)
tokens: Tuple[Token] = TurkishTokenizer.DEFAULT.tokenize(s)
s = self.combine_necessary_words(tokens)
tokens: Tuple[Token] = TurkishTokenizer.DEFAULT.tokenize(s)
s = self.split_necessary_words(tokens, use_look_up=False)
if self.always_apply_deasciifier or self.probably_requires_deasciifier(s):
deasciifier = Deasciifier(s)
s = deasciifier.convert_to_turkish()
tokens: Tuple[Token] = TurkishTokenizer.DEFAULT.tokenize(s)
s = self.combine_necessary_words(tokens)
tokens: Tuple[Token] = TurkishTokenizer.DEFAULT.tokenize(s)
return self.split_necessary_words(tokens, use_look_up=True)
def split_necessary_words(self, tokens: Tuple[Token], use_look_up: bool) -> str:
result: List[str] = []
for token in tokens:
text = token.content
if self.is_word(token):
result.append(self.separate_common(text, use_look_up))
else:
result.append(text)
return ' '.join(result)
def separate_common(self, inp: str, use_look_up: bool) -> str:
if inp in self.no_split_words:
return inp
if use_look_up and inp in self.common_splits:
return self.common_splits[inp]
if not self.has_regular_analysis(inp):
for i in range(len(inp)):
tail = inp[i:]
if tail in self.common_connected_suffixes:
head = inp[0:i]
if len(tail) < 3:
if not self.lm.ngram_exists(self.lm.vocabulary.to_indexes((head, tail))):
return inp
if self.has_regular_analysis(head):
return f"{head} {tail}"
else:
return inp
return inp
@staticmethod
def probably_requires_deasciifier(sentence: str) -> bool:
turkish_spec_count = 0
for c in sentence:
if c != 'ı' and c != 'I' and TurkishAlphabet.INSTANCE.is_turkish_specific(c):
turkish_spec_count += 1
ratio = turkish_spec_count * 1. / len(sentence)
return ratio < 0.1
def combine_necessary_words(self, tokens: Tuple[Token]) -> str:
result: List[str] = []
combined = False
for i in range(len(tokens) - 1):
first: Token = tokens[i]
second: Token = tokens[i + 1]
first_s = first.content
second_s = second.content
if self.is_word(first) and self.is_word(second):
if combined:
combined = False
else:
c = self.combine_common(first_s, second_s)
if len(c) > 0:
result.append(c)
combined = True
else:
result.append(first.content)
combined = False
else:
combined = False
result.append(first_s)
if not combined:
result.append(tokens[-1].content)
return ' '.join(result)
def combine_common(self, i1: str, i2: str) -> str:
combined = i1 + i2
if i2.startswith("'") or i2.startswith("bil"):
w: WordAnalysis = self.morphology.analyze(combined)
if self.has_analysis(w):
return combined
if not self.has_regular_analysis(i2):
w: WordAnalysis = self.morphology.analyze(combined)
if self.has_analysis(w):
return combined
return ""
def has_regular_analysis(self, s: str) -> bool:
a: WordAnalysis = self.morphology.analyze(s)
        for analysis in a:
            if (not analysis.is_unknown()) and (not analysis.is_runtime()) \
                    and analysis.item.secondary_pos != SecondaryPos.ProperNoun \
                    and analysis.item.secondary_pos != SecondaryPos.Abbreviation:
return True
return False
@staticmethod
def has_analysis(w: WordAnalysis) -> bool:
for s in w:
if (not s.is_runtime()) and (not s.is_unknown()):
return True
return False
@staticmethod
def is_word(token: Token) -> bool:
typ: Token.Type = token.type_
return typ == Token.Type.Word or typ == Token.Type.WordWithSymbol or typ == Token.Type.WordAlphanumerical \
or typ == Token.Type.UnknownWord
def replace_common(self, tokens: Tuple[Token]) -> str:
result: List[str] = []
for token in tokens:
text = token.content
result.append(self.replacements.get(text, text))
return ' '.join(result)
class Hypothesis:
def __init__(self):
self.history: Union[List['TurkishSentenceNormalizer.Candidate'], None] = None
self.current: Union['TurkishSentenceNormalizer.Candidate', None] = None
self.previous: Union['TurkishSentenceNormalizer.Hypothesis', None] = None
self.score: Union[np.float32, None] = None
def __eq__(self, other):
if self is other:
return True
if isinstance(other, TurkishSentenceNormalizer.Hypothesis):
return False if self.history != other.history else self.current == other.current
return False
def __hash__(self):
result = 0
for c in self.history:
result = 31 * result + (hash(c) if c else 0)
result = 31 * result + hash(self.current)
return result
def __str__(self):
return "Hypothesis{history=" + f"{' '.join([str(s) for s in self.history])}" + f", current={self.current}" \
f", score={self.score}" + '}'
class Candidate:
def __init__(self, content: str):
self.content = content
self.score = np.float32(1.0)
def __eq__(self, other):
if self is other:
return True
if isinstance(other, TurkishSentenceNormalizer.Candidate):
return self.content == other.content
return False
def __hash__(self):
return hash(self.content)
def __str__(self):
return "Candidate{content='" + self.content + f"', score={self.score}" + '}'
class Candidates:
def __init__(self, word: str, candidates: Tuple['TurkishSentenceNormalizer.Candidate']):
self.word = word
self.candidates = candidates
def __str__(self):
return "Candidates{word='" + self.word + "', candidates=" + ' '.join(str(self.candidates)) + '}'
TurkishSentenceNormalizer.START = TurkishSentenceNormalizer.Candidate(content="<s>")
TurkishSentenceNormalizer.END = TurkishSentenceNormalizer.Candidate(content="</s>")
TurkishSentenceNormalizer.END_CANDIDATES = TurkishSentenceNormalizer.Candidates(word="</s>",
candidates=(
TurkishSentenceNormalizer.END,
)) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/normalization/turkish_sentence_normalizer.py | turkish_sentence_normalizer.py |
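# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). It shows a
# typical way to wire the normalizer defined above, assuming the bundled lexicon
# and 2-gram language model resources are available; the example sentence and
# its normalized output are hypothetical.
#
#   from zemberek import TurkishMorphology, TurkishSentenceNormalizer
#
#   morphology = TurkishMorphology.create_with_defaults()
#   normalizer = TurkishSentenceNormalizer(morphology)
#   print(normalizer.normalize("yrn okula gidicem"))   # e.g. "yarın okula gideceğim"
# ---------------------------------------------------------------------------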
import pickle
import os
from pkg_resources import resource_filename
from typing import Dict
class Deasciifier:
turkish_context_size = 10
with open(
resource_filename("zemberek", os.path.join("resources", "normalization", "turkish_pattern_table.pickle")),
"rb"
) as f:
turkish_pattern_table: Dict[str, Dict] = pickle.load(f)
del f
turkish_asciify_table = {u'ç': u'c', u'Ç': u'C', u'ğ': u'g', u'Ğ': u'G', u'ö': u'o',
u'Ö': u'O', u'ı': u'i', u'İ': u'I', u'ş': u's', u'Ş': u'S'}
uppercase_letters = (u"A", u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"I", u"J", u"K", u"L", u"M", u"N", u"O",
u"P", u"Q", u"R", u"S", u"T", u"U", u"V", u"W", u"X", u"Y", u"Z")
turkish_downcase_asciify_table: Dict[str, str] = {u'ç': u'c', u"Ç": u"c", u"ğ": u"g", u"Ğ": u"g", u"ö": u"o",
u"Ö": u"o", u"ı": u"i", u"İ": u"i", u"ş": u"s", u"Ş": u"s",
u"ü": u"u", u"Ü": u"u"}
for c in uppercase_letters:
turkish_downcase_asciify_table[c] = c.lower()
turkish_downcase_asciify_table[c.lower()] = c.lower()
turkish_upcase_accents_table: Dict[str, str] = {}
for c in uppercase_letters:
turkish_upcase_accents_table[c] = c.lower()
turkish_upcase_accents_table[c.lower()] = c.lower()
turkish_upcase_accents_table[u'ç'] = u'C'
turkish_upcase_accents_table[u'Ç'] = u'C'
turkish_upcase_accents_table[u'ğ'] = u'G'
turkish_upcase_accents_table[u'Ğ'] = u'G'
turkish_upcase_accents_table[u'ö'] = u'O'
turkish_upcase_accents_table[u'Ö'] = u'O'
turkish_upcase_accents_table[u'ı'] = u'I'
turkish_upcase_accents_table[u'İ'] = u'i'
turkish_upcase_accents_table[u'ş'] = u'S'
turkish_upcase_accents_table[u'Ş'] = u'S'
turkish_upcase_accents_table[u'ü'] = u'U'
turkish_upcase_accents_table[u'Ü'] = u'U'
turkish_toggle_accent_table = {u'c': u'ç', u'C': u'Ç', u'g': u'ğ', u'G': u'Ğ', u'o': u'ö', u'O': u'Ö', u'u': u'ü',
u'U': u'Ü', u'i': u'ı', u'I': u'İ', u's': u'ş', u'S': u'Ş', u'ç': u'c', u'Ç': u'C',
u'ğ': u'g', u'Ğ': u'G', u'ö': u'o', u'Ö': u'O', u'ü': u'u', u'Ü': u'U', u'ı': u'i',
u'İ': u'I', u'ş': u's', u'Ş': u'S'}
def __init__(self, ascii_string: str):
self.ascii_string = ascii_string
self.turkish_string = ascii_string
    def convert_to_turkish(self) -> str:
        # walk the input once; only characters that need a diacritic change are rewritten in place
        for i, c in enumerate(self.turkish_string):
            if self.turkish_need_correction(c, i):
                self.turkish_string = self.turkish_string[:i] + self.turkish_toggle_accent_table.get(c, c) + \
                    self.turkish_string[i + 1:]
return self.turkish_string
def turkish_need_correction(self, c: str, point: int) -> bool:
tr = self.turkish_asciify_table.get(c)
if not tr:
tr = c
pl: Dict[str, int] = self.turkish_pattern_table.get(tr.lower())
m = False
if pl:
m = self.turkish_match_pattern(pl, point)
if tr == u'I':
if c == tr:
return not m
else:
return m
else:
if c == tr:
return m
else:
return not m
def turkish_match_pattern(self, dlist: Dict[str, int], point: int) -> bool:
rank = len(dlist) * 2
string = self.turkish_get_context(self.turkish_context_size, point)
start = 0
_len = len(string)
while start <= self.turkish_context_size:
end = self.turkish_context_size + 1
while end <= _len:
s = string[start:end]
r = dlist.get(s)
if r is not None and abs(r) < abs(rank):
rank = r
end += 1
start += 1
return rank > 0
def turkish_get_context(self, size: int, point: int):
s = ' ' * (1 + (2 * size))
s = s[:size] + 'X' + s[size + 1:]
i = size + 1
space = False
index = point + 1
while i < len(s) and not space and index < len(self.ascii_string):
current_char = self.turkish_string[index]
x = self.turkish_downcase_asciify_table.get(current_char, False)
if not x:
if not space:
i = i + 1
space = True
else:
s = s[:i] + x + s[i+1:]
i = i + 1
space = False
index = index + 1
s = s[:i]
index = point - 1
i = size - 1
space = False
while i >= 0 and index >= 0:
current_char = self.turkish_string[index]
x = self.turkish_upcase_accents_table.get(current_char, False)
if not x:
if not space:
i = i - 1
space = True
else:
s = s[:i] + x + s[i+1:]
i = i - 1
space = False
index = index - 1
return s | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/normalization/deasciifier/deasciifier.py | deasciifier.py |
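# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the deasciifier
# restores Turkish diacritics in plain-ASCII text by scoring each candidate
# character against the pattern table loaded above. Output is illustrative.
#
#   d = Deasciifier("Turkce yazilari duzeltir")
#   print(d.convert_to_turkish())   # e.g. "Türkçe yazıları düzeltir"
# ---------------------------------------------------------------------------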
from __future__ import annotations
import time
import logging
import os
from typing import Tuple, TYPE_CHECKING, List, Optional
from functools import lru_cache
from pkg_resources import resource_filename
if TYPE_CHECKING:
from zemberek.tokenization.token import Token
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
from zemberek.morphology.ambiguity.ambiguity_resolver import AmbiguityResolver
from zemberek.tokenization import TurkishTokenizer
from zemberek.core.turkish import TurkishAlphabet, StemAndEnding, PrimaryPos
from zemberek.core.text import TextUtil
from zemberek.morphology.analysis.word_analysis import WordAnalysis
from zemberek.morphology.analysis.sentence_analysis import SentenceAnalysis
from zemberek.morphology.analysis.rule_based_analyzer import RuleBasedAnalyzer
from zemberek.morphology.analysis.unidentified_token_analyzer import UnidentifiedTokenAnalyzer
from zemberek.morphology.generator import WordGenerator
from zemberek.morphology.lexicon import RootLexicon
from zemberek.morphology.morphotactics import TurkishMorphotactics, InformalTurkishMorphotactics
from zemberek.morphology.ambiguity.perceptron_ambiguity_resolver import PerceptronAmbiguityResolver
logger = logging.getLogger(__name__)
class TurkishMorphology:
def __init__(self, builder: 'TurkishMorphology.Builder'):
self.lexicon = builder.lexicon
self.morphotactics = InformalTurkishMorphotactics(self.lexicon) if builder.informal_analysis \
else TurkishMorphotactics(self.lexicon)
self.analyzer = RuleBasedAnalyzer.ignore_diacritics_instance(self.morphotactics) if \
builder.ignore_diacritics_in_analysis else RuleBasedAnalyzer.instance(self.morphotactics)
self.unidentified_token_analyzer = UnidentifiedTokenAnalyzer(self.analyzer)
self.tokenizer = builder.tokenizer
self.word_generator = WordGenerator(self.morphotactics)
self.use_unidentified_token_analyzer = builder.use_unidentifiedTokenAnalyzer
if builder.ambiguity_resolver is None:
resource_path = resource_filename("zemberek", os.path.join("resources", "ambiguity", "model-compressed"))
try:
self.ambiguity_resolver = PerceptronAmbiguityResolver.from_resource(resource_path)
except IOError as e:
logger.error(e)
raise RuntimeError(f"Cannot initialize PerceptronAmbiguityResolver from resource {resource_path}")
else:
self.ambiguity_resolver = builder.ambiguity_resolver
@staticmethod
def builder(lexicon: RootLexicon) -> 'TurkishMorphology.Builder':
return TurkishMorphology.Builder(lexicon)
@staticmethod
def create_with_defaults() -> 'TurkishMorphology':
start_time = time.time()
instance = TurkishMorphology.Builder(RootLexicon.get_default()).build()
logger.info(f"TurkishMorphology instance initialized in {time.time() - start_time}")
return instance
@lru_cache(maxsize=250)
def analyze(self, word: str = None, token: Token = None) -> WordAnalysis:
return self.analyze_without_cache(word=word, token=token)
@staticmethod
def normalize_for_analysis(word: str) -> str:
s = word.translate(TurkishAlphabet.INSTANCE.lower_map).lower()
s = TurkishAlphabet.INSTANCE.normalize_circumflex(s)
no_dot = s.replace(".", "")
if len(no_dot) == 0:
no_dot = s
return TextUtil.normalize_apostrophes(no_dot)
def analyze_sentence(self, sentence: str) -> List[WordAnalysis]:
normalized = TextUtil.normalize_quotes_hyphens(sentence)
result = [
self.analyze(token=t) for t in self.tokenizer.tokenize(normalized)
]
return result
def disambiguate(self, sentence: str, sentence_analysis: List[WordAnalysis]) -> SentenceAnalysis:
return self.ambiguity_resolver.disambiguate(sentence, sentence_analysis)
def analyze_and_disambiguate(self, sentence: str) -> SentenceAnalysis:
return self.disambiguate(sentence, self.analyze_sentence(sentence))
def analyze_without_cache(self, word: str = None, token: Token = None) -> WordAnalysis:
if word:
tokens: Tuple[Token] = self.tokenizer.tokenize(word)
return WordAnalysis(word, (), normalized_input=word) if len(tokens) != 1 else \
self.analyze_without_cache(token=tokens[0])
else: # token is not None
word = token.content # equal to token.getText()
s = self.normalize_for_analysis(word)
if len(s) == 0:
return WordAnalysis.EMPTY_INPUT_RESULT
else:
if TurkishAlphabet.INSTANCE.contains_apostrophe(s):
s = TurkishAlphabet.INSTANCE.normalize_apostrophe(s)
result = self.analyze_words_with_apostrophe(s)
else:
result = self.analyzer.analyze(s)
if len(result) == 0 and self.use_unidentified_token_analyzer:
result = self.unidentified_token_analyzer.analyze(token)
if len(result) == 1 and result[0].item.is_unknown():
result = ()
return WordAnalysis(word, normalized_input=s, analysis_results=result)
def analyze_words_with_apostrophe(self, word: str) -> Tuple[SingleAnalysis, ...]:
index = word.find(chr(39))
if index > 0 and index != len(word) - 1:
se = StemAndEnding(word[0:index], word[index + 1:])
stem = TurkishAlphabet.INSTANCE.normalize(se.stem)
without_quote = word.replace("'", "")
no_quotes_parses = self.analyzer.analyze(without_quote)
return () if len(no_quotes_parses) == 0 else \
tuple(p for p in no_quotes_parses if p.item.primary_pos == PrimaryPos.Noun and
(p.contains_morpheme(TurkishMorphotactics.p3sg) or p.get_stem() == stem))
else:
return ()
class Builder:
use_unidentifiedTokenAnalyzer = True
def __init__(self, lexicon: RootLexicon):
self.tokenizer = TurkishTokenizer.DEFAULT
self.lexicon = lexicon
self.informal_analysis = False
self.ignore_diacritics_in_analysis = False
self.ambiguity_resolver: Optional['AmbiguityResolver'] = None
def set_lexicon(self, lexicon: RootLexicon) -> 'TurkishMorphology.Builder':
self.lexicon = lexicon
return self
def use_informal_analysis(self) -> 'TurkishMorphology.Builder':
self.informal_analysis = True
return self
def ignore_diacritics_in_analysis_(self) -> 'TurkishMorphology.Builder':
self.ignore_diacritics_in_analysis = True
return self
def build(self) -> 'TurkishMorphology':
return TurkishMorphology(self) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/turkish_morphology.py | turkish_morphology.py |
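# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). Two common ways
# of constructing the analyzer defined above: the convenience factory with the
# default lexicon, and the builder for the informal, diacritics-tolerant setup
# used by TurkishSentenceNormalizer.
#
#   morphology = TurkishMorphology.create_with_defaults()
#   for analysis in morphology.analyze("kalemi"):
#       print(analysis)
#
#   informal = TurkishMorphology.builder(morphology.lexicon) \
#       .use_informal_analysis().ignore_diacritics_in_analysis_().build()
# ---------------------------------------------------------------------------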
from __future__ import annotations
import numpy as np
from typing import List, Dict, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.analysis.search_path import SearchPath
from zemberek.core.turkish import RootAttribute, SecondaryPos
from zemberek.morphology.lexicon import DictionaryItem
from zemberek.morphology.morphotactics import TurkishMorphotactics
from zemberek.morphology.morphotactics.morpheme import Morpheme
class SingleAnalysis:
empty_morpheme_cache: Dict[Morpheme, 'SingleAnalysis.MorphemeData'] = {}
def __init__(self, item: DictionaryItem, morpheme_data_list: List['SingleAnalysis.MorphemeData'],
group_boundaries: np.ndarray):
self.item = item
self.morpheme_data_list = morpheme_data_list
self.group_boundaries = group_boundaries
self.hash_ = 0
self.hash_ = hash(self)
def __str__(self):
return self.format_string()
def format_string(self) -> str:
sb = [f"[{self.item.lemma}:{self.item.primary_pos.short_form}"]
if self.item.secondary_pos != SecondaryPos.None_:
sb.append(", " + self.item.secondary_pos.short_form)
sb.extend(["] "] + self.format_morpheme_string())
return ''.join(sb)
def format_morpheme_string(self):
surfaces = self.morpheme_data_list
sb = [f"{self.get_stem()}:{surfaces[0].morpheme.id_}"]
if len(surfaces) > 1 and not surfaces[1].morpheme.derivational_:
sb.append("+")
for i in range(1, len(surfaces)):
s = surfaces[i]
morpheme = s.morpheme
if morpheme.derivational_:
sb.append("|")
if len(s.surface) > 0:
sb.append(s.surface + ':')
            sb.append(s.morpheme.id_)
if s.morpheme.derivational_:
sb.append('→')
elif i < len(surfaces) - 1 and not surfaces[i+1].morpheme.derivational_:
sb.append('+')
return sb
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, SingleAnalysis):
if self.hash_ != other.hash_:
return False
else:
return False if self.item != other.item else self.morpheme_data_list == other.morpheme_data_list
else:
return False
def __hash__(self):
if self.hash_ != 0:
return self.hash_
else:
result = hash(self.item)
for h in self.morpheme_data_list:
result = 31 * result + (hash(h) if h is not None else 0)
result = 31 * result + self.hash_
return result
def is_runtime(self) -> bool:
return self.item.has_attribute(RootAttribute.Runtime)
def is_unknown(self) -> bool:
return self.item.is_unknown()
def get_ending(self) -> str:
return ''.join([m_surface.surface for m_surface in self.morpheme_data_list[1:]])
def get_stem(self) -> str:
return self.morpheme_data_list[0].surface
def surface_form(self) -> str:
return self.get_stem() + self.get_ending()
def get_morphemes(self) -> List[Morpheme]:
return [s.morpheme for s in self.morpheme_data_list]
def contains_informal_morpheme(self) -> bool:
for m in self.morpheme_data_list:
if m.morpheme.informal:
return True
return False
def get_group(self, group_index: int) -> 'SingleAnalysis.MorphemeGroup':
        if group_index < 0 or group_index >= self.group_boundaries.shape[0]:
raise ValueError(f"There are only {self.group_boundaries.shape[0]} morpheme groups. "
f"But input is {group_index}")
end_index = len(self.morpheme_data_list) if group_index == self.group_boundaries.shape[0] - 1 else \
self.group_boundaries[group_index + 1]
return SingleAnalysis.MorphemeGroup(self.morpheme_data_list[self.group_boundaries[group_index]: end_index])
def contains_morpheme(self, morpheme: Morpheme) -> bool:
for morpheme_data in self.morpheme_data_list:
if morpheme_data.morpheme == morpheme:
return True
return False
def copy_for(self, item: DictionaryItem, stem: str) -> 'SingleAnalysis':
data: List['SingleAnalysis.MorphemeData'] = self.morpheme_data_list.copy()
data[0] = SingleAnalysis.MorphemeData(data[0].morpheme, stem)
return SingleAnalysis(item, data, self.group_boundaries.copy())
@classmethod
def unknown(cls, input_: str) -> 'SingleAnalysis':
item = DictionaryItem.UNKNOWN
s = cls.MorphemeData(Morpheme.UNKNOWN, input_)
boundaries = np.asarray([0], dtype=np.int32)
return cls(item, [s], boundaries)
@staticmethod
def dummy(inp: str, item: DictionaryItem) -> 'SingleAnalysis':
s = SingleAnalysis.MorphemeData(Morpheme.UNKNOWN, inp)
boundaries: np.ndarray = np.zeros(1, dtype=np.int32)
return SingleAnalysis(item, [s], boundaries)
@staticmethod
def from_search_path(search_path: SearchPath) -> 'SingleAnalysis':
morphemes: List['SingleAnalysis.MorphemeData'] = []
derivation_count = 0
for transition in search_path.transitions:
if transition.is_derivative():
derivation_count += 1
morpheme = transition.get_morpheme()
if morpheme != TurkishMorphotactics.nom and morpheme != TurkishMorphotactics.pnon:
if len(transition.surface) == 0:
morpheme_data = SingleAnalysis.empty_morpheme_cache.get(morpheme)
if morpheme_data is None:
morpheme_data = SingleAnalysis.MorphemeData(morpheme, "")
SingleAnalysis.empty_morpheme_cache[morpheme] = morpheme_data
morphemes.append(morpheme_data)
else:
morpheme_data = SingleAnalysis.MorphemeData(morpheme, transition.surface)
morphemes.append(morpheme_data)
group_boundaries: np.ndarray = np.zeros(derivation_count + 1, dtype=np.int32)
morpheme_counter = 0
derivation_counter = 1
for morpheme_data in morphemes:
if morpheme_data.morpheme.derivational_:
group_boundaries[derivation_counter] = morpheme_counter
derivation_counter += 1
morpheme_counter += 1
item = search_path.get_dictionary_item()
if item.has_attribute(RootAttribute.Dummy):
item = item.reference_item
return SingleAnalysis(item, morphemes, group_boundaries)
class MorphemeData:
def __init__(self, morpheme: Morpheme, surface: str):
self.morpheme = morpheme
self.surface = surface
def __str__(self):
return self.to_morpheme_string()
def to_morpheme_string(self) -> str:
return f"{self.surface_string()}{self.morpheme.id_}"
def surface_string(self) -> str:
return "" if len(self.surface) == 0 else f"{self.surface}:"
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, SingleAnalysis.MorphemeData):
return False if self.morpheme != other.morpheme else self.surface == other.surface
else:
return False
def __hash__(self):
result = hash(self.morpheme)
result = 31 * result + hash(self.surface)
return result
class MorphemeGroup:
def __init__(self, morphemes: List['SingleAnalysis.MorphemeData']):
self.morphemes = morphemes
def lexical_form(self) -> str:
sb = [m.morpheme.id_ for m in self.morphemes]
return ''.join(sb) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/single_analysis.py | single_analysis.py |
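# ---------------------------------------------------------------------------
# Format sketch (illustrative, not part of the original module): format_string()
# renders an analysis as "[lemma:POS] stem:RootMorpheme+surface:Morpheme+...",
# with '|' marking derivation boundaries and '→' marking derivational morphemes.
# For the word "kalemi", one analysis would print roughly as
#
#   [kalem:Noun] kalem:Noun+A3sg+i:Acc
#
# where the exact morpheme ids come from TurkishMorphotactics.
# ---------------------------------------------------------------------------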
from copy import deepcopy
from typing import Set
from zemberek.core.turkish import TurkishAlphabet, PhoneticAttribute
class AttributesHelper:
alphabet = TurkishAlphabet.INSTANCE
NO_VOWEL_ATTRIBUTES = (PhoneticAttribute.LastLetterConsonant, PhoneticAttribute.FirstLetterConsonant,
PhoneticAttribute.HasNoVowel)
@classmethod
def get_morphemic_attributes(cls, seq: str, predecessor_attrs: Set[PhoneticAttribute] = None) -> \
Set[PhoneticAttribute]:
if predecessor_attrs is None:
predecessor_attrs = set()
if not seq:
return deepcopy(predecessor_attrs)
else:
attrs = set()
if cls.alphabet.contains_vowel(seq):
last = cls.alphabet.get_last_letter(seq)
if last.is_vowel():
attrs.add(PhoneticAttribute.LastLetterVowel)
else:
attrs.add(PhoneticAttribute.LastLetterConsonant)
last_vowel = last if last.is_vowel() else cls.alphabet.get_last_vowel(seq)
if last_vowel.is_frontal():
attrs.add(PhoneticAttribute.LastVowelFrontal)
else:
attrs.add(PhoneticAttribute.LastVowelBack)
if last_vowel.is_rounded():
attrs.add(PhoneticAttribute.LastVowelRounded)
else:
attrs.add(PhoneticAttribute.LastVowelUnrounded)
if cls.alphabet.get_first_letter(seq).is_vowel():
attrs.add(PhoneticAttribute.FirstLetterVowel)
else:
attrs.add(PhoneticAttribute.FirstLetterConsonant)
else:
attrs = deepcopy(predecessor_attrs)
attrs.update(cls.NO_VOWEL_ATTRIBUTES)
attrs.discard(PhoneticAttribute.LastLetterVowel)
attrs.discard(PhoneticAttribute.ExpectsConsonant)
last = cls.alphabet.get_last_letter(seq)
if last.is_voiceless():
attrs.add(PhoneticAttribute.LastLetterVoiceless)
if last.is_stop_consonant():
attrs.add(PhoneticAttribute.LastLetterVoicelessStop)
else:
attrs.add(PhoneticAttribute.LastLetterVoiced)
return attrs | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/attributes_helper.py | attributes_helper.py |
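# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): the helper
# derives the phonetic attributes that drive vowel harmony and voicing during
# surface generation.
#
#   attrs = AttributesHelper.get_morphemic_attributes("kalem")
#   # expected to contain, among others:
#   #   PhoneticAttribute.LastLetterConsonant, PhoneticAttribute.LastVowelFrontal,
#   #   PhoneticAttribute.LastVowelUnrounded, PhoneticAttribute.FirstLetterConsonant
# ---------------------------------------------------------------------------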
from __future__ import annotations
import re
from enum import Enum
from typing import List, Dict, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from .rule_based_analyzer import RuleBasedAnalyzer
from zemberek.core.turkish import PrimaryPos, SecondaryPos, TurkishAlphabet, StemAndEnding, RootAttribute, Turkish
from zemberek.tokenization.token import Token
from zemberek.morphology.lexicon import DictionaryItem
from zemberek.morphology.analysis.tr import TurkishNumbers, TurkishNumeralEndingMachine, PronunciationGuesser
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
class UnidentifiedTokenAnalyzer:
ALPHABET = TurkishAlphabet.INSTANCE
non_letter_pattern = re.compile("[^" + ALPHABET.all_letters + "]")
ordinal_map: Dict[str, str] = TurkishNumbers.ordinal_map
def __init__(self, analyzer: RuleBasedAnalyzer):
self.analyzer = analyzer
self.numeral_ending_machine = TurkishNumeralEndingMachine()
self.guesser = PronunciationGuesser()
self.lexicon = analyzer.lexicon
def analyze(self, token: Token) -> Tuple[SingleAnalysis, ...]:
s_pos: SecondaryPos = self.guess_secondary_pos_type(token)
word = token.content
if s_pos == SecondaryPos.None_:
if "?" in word:
return ()
else:
return self.try_numeral(token) if self.ALPHABET.contains_digit(word) else \
self.analyze_word(word, SecondaryPos.Abbreviation if "." in word else SecondaryPos.ProperNoun)
elif s_pos == SecondaryPos.RomanNumeral:
return self.get_for_roman_numeral(token)
elif s_pos != SecondaryPos.Date and s_pos != SecondaryPos.Clock:
normalized = re.sub(self.non_letter_pattern, "", word)
item = DictionaryItem(word, word, PrimaryPos.Noun, s_pos, pronunciation=normalized)
if s_pos != SecondaryPos.HashTag and s_pos != SecondaryPos.Email and s_pos != SecondaryPos.Url and \
s_pos != SecondaryPos.Mention:
item_does_not_exist = item not in self.lexicon
if item_does_not_exist:
item.attributes.add(RootAttribute.Runtime)
self.analyzer.stem_transitions.add_dictionary_item(item)
results: Tuple[SingleAnalysis] = self.analyzer.analyze(word)
if item_does_not_exist:
self.analyzer.stem_transitions.remove_dictionary_item(item)
return results
else:
return self.analyze_word(word, s_pos)
else:
return self.try_numeral(token)
def get_for_roman_numeral(self, token: Token) -> Tuple[SingleAnalysis, ...]:
content = token.content
if "'" in content:
i = content.find(chr(39))
se = StemAndEnding(content[0:i], content[i + 1:])
else:
se = StemAndEnding(content, "")
ss = se.stem
if se.stem.endswith("."):
ss = se.stem[:-1]
decimal = TurkishNumbers.roman_to_decimal(ss)
if decimal == -1:
return ()
else:
if se.stem.endswith("."):
lemma = self.numeral_ending_machine.find(str(decimal))
lemma = self.ordinal_map.get(lemma)
else:
lemma = self.numeral_ending_machine.find(str(decimal))
results: List[SingleAnalysis] = []
if len(se.ending) > 0 and lemma == "dört" and self.ALPHABET.is_vowel(se.ending[0]):
to_parse = "dörd" + se.ending
else:
to_parse = lemma + se.ending
res = self.analyzer.analyze(to_parse)
for re_ in res:
if re_.item.primary_pos == PrimaryPos.Numeral:
run_time_item = DictionaryItem(se.stem, se.stem, PrimaryPos.Numeral, SecondaryPos.RomanNumeral,
pronunciation=content + lemma)
run_time_item.attributes.add(RootAttribute.Runtime)
results.append(re_.copy_for(run_time_item, se.stem))
return tuple(results)
def analyze_word(self, word: str, secondary_pos: SecondaryPos) -> Tuple[SingleAnalysis, ...]:
if word.find(chr(39)) >= 0:
return self.try_word_with_apostrophe(word, secondary_pos)
else:
            return self.try_without_apostrophe(word, secondary_pos) \
                if secondary_pos != SecondaryPos.ProperNoun and secondary_pos != SecondaryPos.Abbreviation else ()
def try_without_apostrophe(self, word: str, secondary_pos: SecondaryPos) -> Tuple[SingleAnalysis]:
normalized = None
if self.ALPHABET.contains_foreign_diacritics(word):
normalized = self.ALPHABET.foreign_diacritics_to_turkish(word)
normalized = self.ALPHABET.normalize(word) if normalized is None else self.ALPHABET.normalize(normalized)
capitalize: bool = secondary_pos == SecondaryPos.ProperNoun or secondary_pos == SecondaryPos.Abbreviation
pronunciation = self.guess_pronunciation(normalized.replace(".", ""))
item = DictionaryItem(Turkish.capitalize(normalized) if capitalize else normalized, normalized, PrimaryPos.Noun,
secondary_pos, pronunciation=pronunciation)
if self.ALPHABET.contains_vowel(pronunciation):
result = (SingleAnalysis.dummy(word, item),)
return result
else:
item_does_not_exist: bool = item not in self.lexicon
if item_does_not_exist:
item.attributes.add(RootAttribute.Runtime)
self.analyzer.stem_transitions.add_dictionary_item(item)
results: Tuple[SingleAnalysis] = self.analyzer.analyze(normalized)
if item_does_not_exist:
self.analyzer.stem_transitions.remove_dictionary_item(item)
return results
def try_word_with_apostrophe(self, word: str, secondary_pos: SecondaryPos) -> Tuple[SingleAnalysis, ...]:
normalized = self.ALPHABET.normalize_apostrophe(word)
index = normalized.find(chr(39))
if index > 0 and index != len(normalized) - 1:
stem = normalized[0: index]
ending = normalized[index + 1:]
se = StemAndEnding(stem, ending)
stem_normalized = self.ALPHABET.normalize(se.stem).replace(".", "")
ending_normalized = self.ALPHABET.normalize(se.ending)
pronunciation = self.guess_pronunciation(stem_normalized)
capitalize: bool = secondary_pos == SecondaryPos.ProperNoun or secondary_pos == SecondaryPos.Abbreviation
pronunciation_possible: bool = self.ALPHABET.contains_vowel(pronunciation)
item = DictionaryItem(
Turkish.capitalize(normalized) if capitalize else stem if pronunciation_possible else word,
stem_normalized, PrimaryPos.Noun, secondary_pos, pronunciation=pronunciation)
if not pronunciation_possible:
result = (SingleAnalysis.dummy(word, item),)
return result
else:
item_does_not_exist: bool = item not in self.lexicon
if item_does_not_exist:
item.attributes.add(RootAttribute.Runtime)
self.analyzer.stem_transitions.add_dictionary_item(item)
to_parse = stem_normalized + ending_normalized
no_quotes_parses: Tuple[SingleAnalysis] = self.analyzer.analyze(to_parse)
if item_does_not_exist:
self.analyzer.stem_transitions.remove_dictionary_item(item)
analyses: Tuple[SingleAnalysis] = tuple(no_quotes_parse for no_quotes_parse in no_quotes_parses if
no_quotes_parse.get_stem() == stem_normalized)
return analyses
else:
return ()
def guess_pronunciation(self, stem: str) -> str:
return self.guesser.to_turkish_letter_pronunciations(stem) if not self.ALPHABET.contains_vowel(stem) else stem
def try_numeral(self, token: Token) -> Tuple[SingleAnalysis]:
s = token.content
s = s.translate(self.ALPHABET.lower_map).lower()
se: StemAndEnding = self.get_from_numeral(s)
if se.stem.endswith("."):
ss = se.stem[:-1]
lemma = self.numeral_ending_machine.find(ss)
lemma = self.ordinal_map.get(lemma)
else:
lemma = self.numeral_ending_machine.find(se.stem)
results: List[SingleAnalysis] = []
for numerals in UnidentifiedTokenAnalyzer.Numerals:
m = numerals.pattern.search(se.stem)
if m:
if len(se.ending) > 0 and lemma == "dört" and self.ALPHABET.is_vowel(se.ending[0]):
to_parse = "dört" + se.ending
else:
to_parse = lemma + se.ending
res: Tuple[SingleAnalysis] = self.analyzer.analyze(to_parse)
for re_ in res:
if re_.item.primary_pos == PrimaryPos.Numeral:
run_time_item = DictionaryItem(se.stem, se.stem, pronunciation=s + lemma,
primary_pos=PrimaryPos.Numeral,
secondary_pos=numerals.secondary_pos)
run_time_item.attributes.add(RootAttribute.Runtime)
results.append(re_.copy_for(run_time_item, se.stem))
return tuple(results)
@staticmethod
def get_from_numeral(s: str) -> StemAndEnding:
if "'" in s:
j = s.find("'")
return StemAndEnding(s[0:j], s[j + 1:])
else:
j = 0
for cut_point in range(len(s) - 1, -1, -1):
c = s[cut_point]
k = ord(c) - 48
if c == '.' or 0 <= k <= 9:
break
j += 1
cut_point = len(s) - j
            return StemAndEnding(s[0: cut_point], s[cut_point:])  # note: this split may be wrong, double-check
@staticmethod
def guess_secondary_pos_type(token: Token) -> SecondaryPos:
if token.type_ == Token.Type.Email:
return SecondaryPos.Email
elif token.type_ == Token.Type.URL:
return SecondaryPos.Url
elif token.type_ == Token.Type.HashTag:
return SecondaryPos.HashTag
elif token.type_ == Token.Type.Mention:
return SecondaryPos.Mention
elif token.type_ == Token.Type.Emoticon:
return SecondaryPos.Emoticon
elif token.type_ == Token.Type.RomanNumeral:
return SecondaryPos.RomanNumeral
elif token.type_ == Token.Type.Abbreviation:
return SecondaryPos.Abbreviation
elif token.type_ == Token.Type.Date:
return SecondaryPos.Date
elif token.type_ == Token.Type.Time:
return SecondaryPos.Clock
else:
return SecondaryPos.None_
class Numerals(Enum):
CARDINAL = ("#", "^[+\\-]?\\d+$", SecondaryPos.Cardinal)
ORDINAL = ("#.", "^[+\\-]?[0-9]+[.]$", SecondaryPos.Ordinal)
RANGE = ("#-#", "^[+\\-]?[0-9]+-[0-9]+$", SecondaryPos.Range)
RATIO = ("#/#", "^[+\\-]?[0-9]+/[0-9]+$", SecondaryPos.Ratio)
REAL = ("#,#", "^[+\\-]?[0-9]+[,][0-9]+$|^[+\\-]?[0-9]+[.][0-9]+$", SecondaryPos.Real)
DISTRIB = ("#DIS", "^\\d+[^0-9]+$", SecondaryPos.Distribution)
PERCENTAGE_BEFORE = ("%#", "(^|[+\\-])(%)(\\d+)((([.]|[,])(\\d+))|)$", SecondaryPos.Percentage)
TIME = ("#:#", "^([012][0-9]|[1-9])([.]|[:])([0-5][0-9])$", SecondaryPos.Clock)
DATE = ("##.##.####", "^([0-3][0-9]|[1-9])([.]|[/])([01][0-9]|[1-9])([.]|[/])(\\d{4})$", SecondaryPos.Date)
def __init__(self, lemma: str, pattern_str: str, secondary_pos: SecondaryPos):
self.lemma = lemma
self.pattern: re.Pattern = re.compile(pattern_str)
self.secondary_pos = secondary_pos | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/unidentified_token_analyzer.py | unidentified_token_analyzer.py |
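# ---------------------------------------------------------------------------
# Pattern sketch (illustrative, not part of the original module): the Numerals
# enum above classifies numeric tokens before an ending is generated, e.g.
#
#   "12"    -> CARDINAL     "3."    -> ORDINAL      "5-10"        -> RANGE
#   "1/3"   -> RATIO        "3,5"   -> REAL         "%12"         -> PERCENTAGE_BEFORE
#   "17:30" -> TIME         "12.03.2021"            -> DATE
#
# For an input like "5'i", get_from_numeral() splits it at the apostrophe into
# stem "5" and ending "i"; the numeral ending machine then yields the lemma
# "beş", so "beşi" is analyzed instead of the raw digits.
# ---------------------------------------------------------------------------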
from __future__ import annotations
from copy import deepcopy
from typing import List, Set, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.morphotactics.morpheme_state import MorphemeState
from zemberek.morphology.morphotactics.stem_transition import StemTransition
from zemberek.core.turkish import PhoneticAttribute
from zemberek.morphology.analysis.surface_transitions import SurfaceTransition
class SearchPath:
def __init__(self, tail: str, current_state: MorphemeState, transitions: List[SurfaceTransition],
phonetic_attributes: Set[PhoneticAttribute], terminal: bool):
self.tail = tail
self.current_state = current_state
self.transitions = transitions
self.phonetic_attributes = phonetic_attributes
self.terminal = terminal
self.contains_derivation = False
self.contains_suffix_with_surface = False
def has_dictionary_item(self, item) -> bool:
return item == self.get_stem_transition().item
def contains_suffix_with_surface_(self):
return self.contains_suffix_with_surface
def get_stem_transition(self):
return self.transitions[0].lexical_transition
def get_last_transition(self) -> SurfaceTransition:
return self.transitions[-1]
def get_dictionary_item(self):
return self.get_stem_transition().item
def get_previous_state(self) -> MorphemeState:
return None if len(self.transitions) < 2 else self.transitions[len(self.transitions) - 2].get_state()
def get_copy_for_generation(self, surface_node: SurfaceTransition, phonetic_attributes: Set[PhoneticAttribute]) -> \
'SearchPath':
is_terminal = surface_node.get_state().terminal_
hist: List[SurfaceTransition] = list(self.transitions)
hist.append(surface_node)
path = SearchPath(self.tail, surface_node.get_state(), hist, phonetic_attributes, is_terminal)
path.contains_suffix_with_surface = self.contains_suffix_with_surface or len(surface_node.surface) != 0
path.contains_derivation = self.contains_derivation or surface_node.get_state().derivative
return path
def get_copy(self, surface_node: SurfaceTransition, phonetic_attributes: Set[PhoneticAttribute]) -> 'SearchPath':
is_terminal = surface_node.get_state().terminal_
hist: List[SurfaceTransition] = self.transitions + [surface_node]
new_tail = self.tail[len(surface_node.surface):]
path: 'SearchPath' = SearchPath(new_tail, surface_node.get_state(), hist, phonetic_attributes, is_terminal)
path.contains_suffix_with_surface = self.contains_suffix_with_surface or len(surface_node.surface) != 0
path.contains_derivation = self.contains_derivation or surface_node.get_state().derivative
return path
@staticmethod
def initial_path(stem_transition: StemTransition, tail: str) -> 'SearchPath':
morphemes: List[SurfaceTransition] = []
root = SurfaceTransition(stem_transition.surface, stem_transition)
morphemes.append(root)
return SearchPath(tail, stem_transition.to, morphemes, deepcopy(stem_transition.phonetic_attributes),
stem_transition.to.terminal_)
def __str__(self):
st = self.get_stem_transition()
morpheme_str = " + ".join(str(s) for s in self.transitions)
return "[(" + st.item.id_ + ")(-" + self.tail + ") " + morpheme_str + "]" | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/search_path.py | search_path.py |
from __future__ import annotations
import logging
from copy import deepcopy
from typing import List, Dict, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.morphotactics import TurkishMorphotactics
from zemberek.core.turkish import PhoneticAttribute, TurkishAlphabet
from zemberek.morphology.analysis.surface_transitions import SurfaceTransition
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
from zemberek.morphology.analysis.search_path import SearchPath
from zemberek.morphology.analysis.attributes_helper import AttributesHelper
logger = logging.getLogger(__name__)
class RuleBasedAnalyzer:
def __init__(self, morphotactics: TurkishMorphotactics):
self.lexicon = morphotactics.get_root_lexicon()
self.stem_transitions = morphotactics.get_stem_transitions()
self.morphotactics = morphotactics
self.debug_mode = False
self.ascii_tolerant = False
@staticmethod
def instance(morphotactics: TurkishMorphotactics) -> 'RuleBasedAnalyzer':
return RuleBasedAnalyzer(morphotactics)
@staticmethod
def ignore_diacritics_instance(morphotactics: TurkishMorphotactics) -> 'RuleBasedAnalyzer':
analyzer = RuleBasedAnalyzer.instance(morphotactics)
analyzer.ascii_tolerant = True
return analyzer
def analyze(self, inp: str) -> Tuple[SingleAnalysis, ...]:
if self.debug_mode:
raise NotImplementedError("Debug mode is not implemented")
candidates = self.stem_transitions.get_prefix_matches(inp, self.ascii_tolerant)
paths: List[SearchPath] = []
for candidate in candidates:
length = len(candidate.surface)
tail = inp[length:]
paths.append(SearchPath.initial_path(candidate, tail))
result_paths: Tuple[SearchPath] = self.search(paths)
result: List[SingleAnalysis] = []
for path in result_paths:
analysis: SingleAnalysis = SingleAnalysis.from_search_path(path)
result.append(analysis)
return tuple(result)
def search(self, current_paths: List[SearchPath]) -> Tuple[SearchPath, ...]:
if len(current_paths) > 30:
current_paths = self.prune_cyclic_paths(current_paths)
result = []
while len(current_paths) > 0:
all_new_paths = []
for path in current_paths:
if len(path.tail) == 0:
if path.terminal and PhoneticAttribute.CannotTerminate not in path.phonetic_attributes:
result.append(path)
continue
if self.debug_mode:
raise NotImplementedError
new_paths = self.advance(path)
all_new_paths.extend(new_paths)
current_paths = all_new_paths
return tuple(result)
def advance(self, path: SearchPath) -> List[SearchPath]:
new_paths: List[SearchPath] = []
for transition in path.current_state.outgoing:
# assert transition.__class__ == SuffixTransition
suffix_transition = transition
if len(path.tail) == 0 and suffix_transition.has_surface_form():
# NO DEBUG
continue
else:
surface = SurfaceTransition.generate_surface(suffix_transition, path.phonetic_attributes)
tail_starts_with = TurkishAlphabet.INSTANCE.starts_with_ignore_diacritics(path.tail, surface) if\
self.ascii_tolerant else path.tail.startswith(surface)
if not tail_starts_with:
if self.debug_mode:
raise NotImplementedError("Not implemented debug_mode")
else:
if self.debug_mode:
raise NotImplementedError("Not implemented debug_mode")
if suffix_transition.can_pass(path):
if not suffix_transition.has_surface_form():
new_paths.append(path.get_copy(SurfaceTransition("", suffix_transition),
path.phonetic_attributes))
else:
surface_transition = SurfaceTransition(surface, suffix_transition)
tail_equals_surface = TurkishAlphabet.INSTANCE.equals_ignore_diacritics(path.tail, surface)\
if self.ascii_tolerant else path.tail == surface
attributes = deepcopy(path.phonetic_attributes) if tail_equals_surface else \
AttributesHelper.get_morphemic_attributes(surface, path.phonetic_attributes)
attributes.discard(PhoneticAttribute.CannotTerminate)
last_token = suffix_transition.get_last_template_token()
if last_token.type_ == SurfaceTransition.TemplateTokenType.LAST_VOICED:
attributes.add(PhoneticAttribute.ExpectsConsonant)
elif last_token.type_ == SurfaceTransition.TemplateTokenType.LAST_NOT_VOICED:
attributes.add(PhoneticAttribute.ExpectsVowel)
attributes.add(PhoneticAttribute.CannotTerminate)
p: SearchPath = path.get_copy(surface_transition, attributes)
new_paths.append(p)
return new_paths
@staticmethod
def prune_cyclic_paths(tokens: List[SearchPath]) -> List[SearchPath]:
def add_or_increment(dict_: Dict[str, int], key: str):
if key in dict_.keys():
dict_[key] += 1
return dict_[key]
else:
dict_[key] = 1
return 1
result: List[SearchPath] = []
for token in tokens:
remove = False
type_counts: Dict[str, int] = {}
for node in token.transitions:
if add_or_increment(type_counts, node.get_state().id_) > 3:
remove = True
break
if not remove:
result.append(token)
return result | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/rule_based_analyzer.py | rule_based_analyzer.py |
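# ---------------------------------------------------------------------------
# Flow sketch (illustrative, not part of the original module): analyze() first
# collects dictionary stems that are prefixes of the input, creates one
# SearchPath per stem candidate, and then advances every path morpheme by
# morpheme until the tail is fully consumed in a terminal state.
#
#   analyzer = RuleBasedAnalyzer.instance(TurkishMorphotactics(lexicon))
#   analyses = analyzer.analyze("kitaplarda")
#   # each SingleAnalysis covers the whole surface form, e.g. the stem "kitap"
#   # followed by plural and locative morphemes
# ---------------------------------------------------------------------------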
from __future__ import annotations
from typing import Set, Union, TYPE_CHECKING
from enum import Enum, auto
if TYPE_CHECKING:
from zemberek.morphology.morphotactics.suffix_transition import SuffixTransition
from zemberek.morphology.morphotactics.morpheme_transition import MorphemeTransition
from zemberek.morphology.morphotactics.stem_transition import StemTransition
from zemberek.core.turkish import PhoneticAttribute, TurkishAlphabet
from zemberek.morphology.analysis.attributes_helper import AttributesHelper
class SurfaceTransition:
alphabet = TurkishAlphabet.INSTANCE
def __init__(self, surface: str, transition: Union[SuffixTransition, StemTransition, MorphemeTransition]):
self.surface = surface
self.lexical_transition = transition
def is_derivative(self) -> bool:
return self.lexical_transition.to.derivative
def get_morpheme(self):
return self.lexical_transition.to.morpheme
def get_state(self): # -> MorphemeState
return self.lexical_transition.to
def __str__(self):
return ("" if len(self.surface) == 0 else self.surface + ":") + self.get_state().id_
@staticmethod
def generate_surface(transition: SuffixTransition, phonetic_attributes: Set[PhoneticAttribute]):
cached: str = transition.get_from_surface_cache(phonetic_attributes)
if cached:
return cached
else:
sb = ""
for index, token in enumerate(transition.token_list):
attrs: Set[PhoneticAttribute] = AttributesHelper.get_morphemic_attributes(sb, phonetic_attributes)
if token.type_ == SurfaceTransition.TemplateTokenType.LETTER:
sb += token.letter
elif token.type_ == SurfaceTransition.TemplateTokenType.A_WOVEL:
if index != 0 or PhoneticAttribute.LastLetterVowel not in phonetic_attributes:
if PhoneticAttribute.LastVowelBack in attrs:
sb += 'a'
else:
if PhoneticAttribute.LastVowelFrontal not in attrs:
raise ValueError("Cannot generate A form!")
sb += 'e'
elif token.type_ == SurfaceTransition.TemplateTokenType.I_WOVEL:
if index != 0 or PhoneticAttribute.LastLetterVowel not in phonetic_attributes:
if PhoneticAttribute.LastVowelFrontal in attrs and PhoneticAttribute.LastVowelUnrounded in attrs:
sb += 'i'
elif PhoneticAttribute.LastVowelBack in attrs and PhoneticAttribute.LastVowelUnrounded in attrs:
sb += "ı"
elif PhoneticAttribute.LastVowelBack in attrs and PhoneticAttribute.LastVowelRounded in attrs:
sb += "u"
else:
if PhoneticAttribute.LastVowelFrontal not in attrs or PhoneticAttribute.LastVowelRounded not in attrs:
raise ValueError("Cannot generate I form!")
sb += "ü"
elif token.type_ == SurfaceTransition.TemplateTokenType.APPEND:
if PhoneticAttribute.LastLetterVowel in attrs:
sb += token.letter
elif token.type_ == SurfaceTransition.TemplateTokenType.DEVOICE_FIRST:
ld = token.letter
if PhoneticAttribute.LastLetterVoiceless in attrs:
ld = SurfaceTransition.alphabet.devoice(ld)
sb += ld
elif token.type_ == SurfaceTransition.TemplateTokenType.LAST_VOICED or token.type_ == SurfaceTransition.TemplateTokenType.LAST_NOT_VOICED:
ld = token.letter
sb += ld
transition.add_to_surface_cache(phonetic_attributes, sb)
return sb
class SuffixTemplateTokenizer:
def __init__(self, generation_word: str):
self.generation_word = generation_word
self.pointer = 0
def has_next(self) -> bool:
return self.generation_word is not None and self.pointer < len(self.generation_word)
def __iter__(self):
return self
def __next__(self) -> 'SurfaceTransition.SuffixTemplateToken':
if not self.has_next():
raise StopIteration
else:
c = self.generation_word[self.pointer]
self.pointer += 1
c_next = 0 # char
if self.pointer < len(self.generation_word):
c_next = ord(self.generation_word[self.pointer])
undefined = 0 # char
if c == "!":
self.pointer += 1
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.LAST_NOT_VOICED,
chr(c_next))
elif c == "+":
self.pointer += 1
if c_next == ord("I"):
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.I_WOVEL,
chr(undefined), True)
elif c_next == ord("A"):
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.A_WOVEL,
chr(undefined), True)
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.APPEND,
chr(c_next))
elif c == ">":
self.pointer += 1
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.DEVOICE_FIRST,
chr(c_next))
elif c == "A":
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.A_WOVEL,
chr(undefined))
elif c == "I":
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.I_WOVEL,
chr(undefined))
elif c == "~":
self.pointer += 1
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.LAST_VOICED,
chr(c_next))
else:
return SurfaceTransition.SuffixTemplateToken(SurfaceTransition.TemplateTokenType.LETTER, c)
class SuffixTemplateToken:
def __init__(self, type_: 'SurfaceTransition.TemplateTokenType', letter: str, append: bool = False):
self.type_ = type_
self.letter = letter
self.append = append
def get_type(self) -> 'SurfaceTransition.TemplateTokenType':
return self.type_
def get_letter(self) -> str:
return self.letter
class TemplateTokenType(Enum):
I_WOVEL = auto()
A_WOVEL = auto()
DEVOICE_FIRST = auto()
LAST_VOICED = auto()
LAST_NOT_VOICED = auto()
APPEND = auto()
LETTER = auto() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/surface_transitions.py | surface_transitions.py |
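# ---------------------------------------------------------------------------
# Template sketch (illustrative, not part of the original module): suffix
# surface templates use capital letters for harmony vowels and marker prefixes
# for optional or voicing-sensitive letters. For the possessive template "+Im":
#
#   tokens = list(SurfaceTransition.SuffixTemplateTokenizer("+Im"))
#   # -> [I_WOVEL (only appended after a consonant), LETTER 'm']
#
# generate_surface() then chooses ı/i/u/ü for the I_WOVEL from the LastVowel*
# attributes of the preceding surface, e.g. "ev" -> "im", "okul" -> "um",
# while a vowel-final stem such as "araba" only receives the "m".
# ---------------------------------------------------------------------------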
from typing import Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
class SingleAnalysisIterator:
def __init__(self, analysis_results: Tuple['SingleAnalysis', ...]):
self.analysis_results = analysis_results
self.index = 0
def __next__(self):
if self.index < len(self.analysis_results):
result = self.analysis_results[self.index]
self.index += 1
return result
raise StopIteration
class WordAnalysis:
EMPTY_INPUT_RESULT: 'WordAnalysis' = None
def __init__(self, inp: str, analysis_results: Tuple['SingleAnalysis', ...], normalized_input: str = None):
self.inp = inp
self.analysis_results = analysis_results
self.normalized_input = self.inp if normalized_input is None else normalized_input
def is_correct(self) -> bool:
return len(self.analysis_results) > 0 and not self.analysis_results[0].is_unknown()
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, WordAnalysis):
if self.inp != other.inp:
return False
else:
return False if self.normalized_input != other.normalized_input else \
self.analysis_results == other.analysis_results
else:
return False
def __hash__(self):
result = hash(self.inp)
result = 31 * result + hash(self.normalized_input)
for x in self.analysis_results:
result = 31 * result + (hash(x) if x else 0)
return result
def __str__(self):
return "WordAnalysis{input='" + self.inp + '\'' + ", normalizedInput='" + self.normalized_input + '\'' + \
", analysisResults=" + ' '.join([str(a) for a in self.analysis_results]) + '}'
def __iter__(self):
return SingleAnalysisIterator(self.analysis_results)
WordAnalysis.EMPTY_INPUT_RESULT = WordAnalysis("", ()) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/word_analysis.py | word_analysis.py |
from enum import Enum, auto
from zemberek.core.turkish import PrimaryPos, SecondaryPos, RootAttribute, TurkishAlphabet, Turkish
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
class WordAnalysisSurfaceFormatter:
ALPHABET = TurkishAlphabet.INSTANCE
def format_(self, analysis: SingleAnalysis, apostrophe: str) -> str:
item = analysis.item
ending = analysis.get_ending()
if apostrophe is None and not self.apostrophe_required(analysis):
return item.normalized_lemma() + ending if RootAttribute.NoQuote in item.attributes else \
analysis.get_stem() + ending
else:
if apostrophe is None:
apostrophe = "'"
return item.normalized_lemma() + apostrophe + ending if len(ending) > 0 else item.normalized_lemma()
def format_to_case(self, analysis: SingleAnalysis, type_: 'WordAnalysisSurfaceFormatter.CaseType',
apostrophe: str) -> str:
formatted = self.format_(analysis, apostrophe)
if type_ == WordAnalysisSurfaceFormatter.CaseType.DEFAULT_CASE:
return formatted
if type_ == WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE:
return formatted.translate(self.ALPHABET.lower_map).lower()
if type_ == WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE:
return formatted.translate(self.ALPHABET.upper_map).upper()
if type_ == WordAnalysisSurfaceFormatter.CaseType.TITLE_CASE:
return Turkish.capitalize(formatted)
if type_ == WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING:
ending = analysis.get_ending()
lemma_upper = analysis.item.normalized_lemma().translate(self.ALPHABET.upper_map).upper()
if len(ending) == 0:
return lemma_upper
else:
if apostrophe is None and not self.apostrophe_required(analysis):
return lemma_upper + ending
if apostrophe is None:
apostrophe = "'"
return lemma_upper + apostrophe + ending
return ""
@staticmethod
def apostrophe_required(analysis: SingleAnalysis) -> bool:
item = analysis.item
return (item.secondary_pos == SecondaryPos.ProperNoun and RootAttribute.NoQuote not in item.attributes) \
or (item.primary_pos == PrimaryPos.Numeral and item.has_attribute(RootAttribute.Runtime)) \
or item.secondary_pos == SecondaryPos.Date
def guess_case(self, inp: str) -> 'WordAnalysisSurfaceFormatter.CaseType':
first_letter_upper_case = False
lower_case_count = 0
upper_case_count = 0
letter_count = 0
        for i, c in enumerate(inp):
            if c.isalpha():
                if i == 0:
first_letter_upper_case = c.isupper()
if first_letter_upper_case:
upper_case_count += 1
else:
lower_case_count += 1
elif c.isupper():
upper_case_count += 1
elif c.islower():
lower_case_count += 1
letter_count += 1
if letter_count == 0:
return WordAnalysisSurfaceFormatter.CaseType.DEFAULT_CASE
elif letter_count == lower_case_count:
return WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE
elif letter_count == upper_case_count:
return WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE
elif first_letter_upper_case and letter_count == lower_case_count + 1:
return WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE if letter_count == 1 else \
WordAnalysisSurfaceFormatter.CaseType.TITLE_CASE
else:
apostrophe_index = inp.find(chr(39)) # chr(39) = "'"
if 0 < apostrophe_index < len(inp) - 1 and self.guess_case(inp[0:apostrophe_index]) == \
WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE and self.guess_case(inp[apostrophe_index + 1:]) == \
WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE:
return WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING
else:
return WordAnalysisSurfaceFormatter.CaseType.MIXED_CASE
class CaseType(Enum):
DEFAULT_CASE = auto()
LOWER_CASE = auto()
UPPER_CASE = auto()
TITLE_CASE = auto()
UPPER_CASE_ROOT_LOWER_CASE_ENDING = auto()
MIXED_CASE = auto() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/word_analysis_surface_formatter.py | word_analysis_surface_formatter.py |
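# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): guess_case()
# classifies the casing of the raw input so the formatted analysis can be
# re-cased consistently.
#
#   f = WordAnalysisSurfaceFormatter()
#   f.guess_case("ankara")      # -> CaseType.LOWER_CASE
#   f.guess_case("ANKARA")      # -> CaseType.UPPER_CASE
#   f.guess_case("Ankara")      # -> CaseType.TITLE_CASE
#   f.guess_case("ANKARA'da")   # -> CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING
# ---------------------------------------------------------------------------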
from enum import Enum
from typing import List, Union
class TurkishNumeralEndingMachine:
def __init__(self):
self.ROOT = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.ROOT)
self.states1 = (TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.SIFIR),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.BIR),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.IKI),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.UC),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.DORT),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.BES),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.ALTI),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.YEDI),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.SEKIZ),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.DOKUZ))
self.states10 = (None, TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.ON),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.YIRMI),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.OTUZ),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.KIRK),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.ELLI),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.ALTMIS),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.YETMIS),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.SEKSEN),
TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.DOKSAN))
self.SIFIR = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.SIFIR)
self.YUZ = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.YUZ)
self.BIN_1 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.BIN)
self.BIN_2 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.BIN)
self.BIN_3 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.BIN)
self.MILYON_1 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.MILYON)
self.MILYON_2 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.MILYON)
self.MILYON_3 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.MILYON)
self.MILYAR_1 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.MILYAR)
self.MILYAR_2 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.MILYAR)
self.MILYAR_3 = TurkishNumeralEndingMachine.State(TurkishNumeralEndingMachine.StateId.MILYAR)
self.zero_states = [self.SIFIR, self.YUZ, self.BIN_1, self.BIN_2, self.BIN_3, self.MILYON_1, self.MILYON_2,
self.MILYON_3, self.MILYAR_1, self.MILYAR_2, self.MILYAR_3]
self.build()
def build(self):
        # Only the states reachable through a trailing run of zeros ("zero states",
        # including SIFIR) keep consuming further digits; every other state ends the scan.
        for large_state in self.zero_states:
            large_state.zero_state = True
for i, ten_state in enumerate(self.states1[1:], 1):
self.ROOT.add_(i, ten_state)
for i, ten_state in enumerate(self.states10[1:], 1):
self.SIFIR.add_(i, ten_state)
self.ROOT.add_(0, self.SIFIR)
self.SIFIR.add_(0, self.YUZ)
self.YUZ.add_(0, self.BIN_1)
self.BIN_1.add_(0, self.BIN_2)
self.BIN_2.add_(0, self.BIN_3)
self.BIN_3.add_(0, self.MILYON_1)
self.MILYON_1.add_(0, self.MILYON_2)
self.MILYON_2.add_(0, self.MILYON_3)
self.MILYON_3.add_(0, self.MILYAR_1)
self.MILYAR_1.add_(0, self.MILYAR_2)
self.MILYAR_2.add_(0, self.MILYAR_3)
def find(self, num_str: str) -> str:
current: 'TurkishNumeralEndingMachine.State' = self.ROOT
for c in reversed(num_str):
k = ord(c) - 48
if k < 0 or k > 9:
if current.zero_state:
return TurkishNumeralEndingMachine.StateId.SIFIR.lemma
break
if k > 0 and current.zero_state:
if current == self.SIFIR:
return current.transitions[k].id_.lemma
break
current = current.transitions[k]
if current is None:
return TurkishNumeralEndingMachine.StateId.ERROR.lemma
if not current.zero_state:
break
return current.id_.lemma
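    # Illustrative lookups (not exercised as doctests): find("3") -> "üç",
    # find("40") -> "kırk", find("100") -> "yüz", find("1000") -> "bin".
    # Digits are consumed right-to-left and the scan stops at the first state
    # that is not a "zero state".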
class State:
def __init__(self, id_: 'TurkishNumeralEndingMachine.StateId'):
self.id_ = id_
self.zero_state = None
self.transitions: List[Union[None, 'TurkishNumeralEndingMachine.State']] = [None] * 10
def add_(self, i: int, state: 'TurkishNumeralEndingMachine.State'):
self.transitions[i] = state
class StateId(Enum):
ROOT = ""
ERROR = ""
SIFIR = "sıfır"
BIR = "bir"
IKI = "iki"
UC = "üç"
DORT = "dört"
BES = "beş"
ALTI = "altı"
YEDI = "yedi"
SEKIZ = "sekiz"
DOKUZ = "dokuz"
ON = "on"
YIRMI = "yirmi"
OTUZ = "otuz"
KIRK = "kırk"
ELLI = "elli"
ALTMIS = "altmış"
YETMIS = "yetmiş"
SEKSEN = "seksen"
DOKSAN = "doksan"
YUZ = "yüz"
BIN = "bin"
MILYON = "milyon"
MILYAR = "milyar"
def __init__(self, lemma: str):
            self.lemma = lemma
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/tr/turkish_numeral_ending_machine.py | turkish_numeral_ending_machine.py
import logging
import os
import re
from pkg_resources import resource_filename
from typing import Dict, List
from zemberek.core.turkish import TurkishSyllableExtractor, TurkishAlphabet
from zemberek.morphology.analysis.tr.turkish_numbers import TurkishNumbers
logger = logging.getLogger(__name__)
def load_map(resource: str) -> Dict[str, str]:
d: Dict[str, str] = {}
with open(resource, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("##"):
continue
key, value = line.split("=")
d[key.strip()] = value.strip()
return d
class PronunciationGuesser:
alphabet = TurkishAlphabet.INSTANCE
turkish_letter_prons: Dict[str, str] = load_map(
resource_filename("zemberek", os.path.join("resources", "phonetics", "turkish-letter-names.txt")))
english_letter_prons: Dict[str, str] = load_map(
resource_filename("zemberek", os.path.join("resources", "phonetics", "english-letter-names.txt")))
english_phones_to_turkish: Dict[str, str] = load_map(
resource_filename("zemberek", os.path.join("resources", "phonetics", "english-phones-to-turkish.txt")))
extractor_for_abbrv: TurkishSyllableExtractor = TurkishSyllableExtractor.STRICT
def to_turkish_letter_pronunciations(self, w: str) -> str:
if self.alphabet.contains_digit(w):
return self.to_turkish_letter_pronunciation_with_digit(w)
else:
sb = []
for i, c in enumerate(w):
if c != '-':
if c in self.turkish_letter_prons.keys():
if i == len(w) - 1 and c == "k":
sb.append("ka")
else:
sb.append(self.turkish_letter_prons.get(c))
else:
logger.debug("Cannot guess pronunciation of letter [" + c + "] in :[" + w + "]")
return ''.join(sb)
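    # Rough behaviour sketch (illustrative, assuming the letter-name resource maps e.g.
    # "b" -> "be" and "z" -> "ze"): an abbreviation such as "bkz" is spelled out letter by
    # letter, and a word-final "k" is read as "ka" rather than its plain letter name.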
def to_turkish_letter_pronunciation_with_digit(self, w: str) -> str:
pieces: List[str] = TurkishNumbers.separate_numbers(w)
sb = []
for i, piece in enumerate(pieces):
if self.alphabet.contains_digit(piece):
sb.append(TurkishNumbers.convert_number_to_string(piece))
else:
if i < len(pieces) - 1:
sb.append(self.to_turkish_letter_pronunciations(piece))
else:
sb.append(self.replace_english_specific_chars(piece))
return re.sub("[ ]+", "", ''.join(sb))
@staticmethod
def replace_english_specific_chars(w: str) -> str:
sb = []
for c in w:
if c == '\'' or c == '-':
continue
elif c == 'q':
sb.append("k")
elif c == 'w':
sb.append("v")
elif c == 'x':
sb.append("ks")
else:
sb.append(c)
        return ''.join(sb)
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/tr/pronunciation_guesser.py | pronunciation_guesser.py
import os
import re
from pkg_resources import resource_filename
from typing import List, Tuple, Dict
class TurkishNumbers:
NUMBER_SEPARATION = re.compile("[0-9]+|[^0-9 ]+")
thousands: Tuple[str] = ("", "bin", "milyon", "milyar", "trilyon", "katrilyon")
single_digit_numbers: Tuple[str] = ("", "bir", "iki", "üç", "dört", "beş", "altı", "yedi", "sekiz", "dokuz")
ten_to_ninety: Tuple[str] = ("", "on", "yirmi", "otuz", "kırk", "elli", "altmış", "yetmiş", "seksen", "doksan")
roman_numeral_pattern = re.compile("^(M{0,3})(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$", flags=2)
ordinal_map: Dict[str, str] = {}
path = resource_filename("zemberek", os.path.join("resources", "turkish-ordinal-numbers.txt"))
with open(path, "r", encoding="utf-8") as f:
for line in f:
key, value = line.split(':')
            ordinal_map[key.strip()] = value.strip()
@staticmethod
def roman_to_decimal(s: str) -> int:
if s and len(s) > 0 and TurkishNumbers.roman_numeral_pattern.match(s):
            # findall visits every token in order; the subtractive pairs ("CM", "CD", "XC",
            # "XL", "IX", "IV") appear in the alternation before the letter they start with,
            # so they are matched first.
            tokens = re.findall("M|CM|D|CD|C|XC|L|XL|X|IX|V|IV|I", s)
            decimal_values: Tuple = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
            roman_numerals: Tuple = ("M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I")
            result = 0
            for token in tokens:
                for i, n in enumerate(roman_numerals):
                    if n == token:
                        result += decimal_values[i]
return result
else:
return -1
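    # Illustrative expectations for roman_to_decimal (not exercised as doctests):
    # roman_to_decimal("XIV") -> 14, roman_to_decimal("MCMXCIV") -> 1994, and any string
    # rejected by roman_numeral_pattern (e.g. "XIIII") -> -1.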
@staticmethod
def separate_numbers(s: str) -> List[str]:
return TurkishNumbers.NUMBER_SEPARATION.findall(s)
@staticmethod
def convert_number_to_string(inp: str) -> str:
if inp.startswith("+"):
inp = inp[1:]
sb: List[str] = []
i = 0
        while i < len(inp) and inp[i] == '0':  # compare against the character '0', not the integer 0
sb.append("sıfır")
i += 1
rest = inp[i:]
if len(rest) > 0:
sb.append(TurkishNumbers.convert_to_string(int(rest)))
return " ".join(sb)
@staticmethod
def convert_to_string(inp: int) -> str:
if inp == 0:
return "sıfır"
elif -999999999999999999 <= inp <= 999999999999999999:
result = ""
giris_pos = abs(inp)
sayac = 0
while giris_pos > 0:
uclu = giris_pos % 1000
if uclu != 0:
if uclu == 1 and sayac == 1:
result = f"{TurkishNumbers.thousands[sayac]} {result}"
else:
result = f"{TurkishNumbers.convert_three_digits(uclu)} {TurkishNumbers.thousands[sayac]}" \
f" {result}"
sayac += 1
giris_pos //= 1000
if inp < 0:
return f"eksi {result.strip()}"
else:
return result.strip()
else:
raise ValueError("Number is out of bounds:" + str(inp))
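    # Illustrative expectations (not doctests): convert_to_string(1971)
    # -> "bin dokuz yüz yetmiş bir" and convert_to_string(-12) -> "eksi on iki".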
@staticmethod
def convert_three_digits(three_digit_number: int) -> str:
sonuc = ""
hundreds = three_digit_number // 100
tens = three_digit_number // 10 % 10
single_digit = three_digit_number % 10
if hundreds != 0:
sonuc = "yüz"
if hundreds > 1:
sonuc = f"{TurkishNumbers.single_digit_numbers[hundreds]} {sonuc}"
sonuc = f"{sonuc} {TurkishNumbers.ten_to_ninety[tens]} {TurkishNumbers.single_digit_numbers[single_digit]}"
        return sonuc.strip()
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/analysis/tr/turkish_numbers.py | turkish_numbers.py
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.core.turkish import PrimaryPos
class Morpheme:
UNKNOWN: 'Morpheme'
def __init__(self, builder: 'Morpheme.Builder'):
self.name = builder.name
self.id_ = builder.id_
self.informal = builder.informal
self.derivational_ = builder.derivational
self.pos = builder.pos
self.mapped_morpheme = builder.mapped_morpheme
def __str__(self):
return self.name + ':' + self.id_
def __hash__(self):
return hash(self.id_)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, Morpheme):
return self.id_ == other.id_
else:
return False
@staticmethod
def instance(name: str, id_: str, pos: PrimaryPos = None) -> 'Morpheme':
return Morpheme.Builder(name, id_, pos=pos).build()
@staticmethod
def builder(name: str, id_: str) -> 'Morpheme.Builder':
return Morpheme.Builder(name, id_)
@staticmethod
def derivational(name: str, id_: str) -> 'Morpheme':
return Morpheme.Builder(name, id_, derivational=True).build()
class Builder:
def __init__(self, name: str, id_: str, derivational: bool = False, informal: bool = False,
pos: PrimaryPos = None, mapped_morpheme: 'Morpheme' = None):
self.name = name
self.id_ = id_
self.derivational = derivational
self.informal = informal
self.pos = pos
self.mapped_morpheme = mapped_morpheme
def informal_(self) -> 'Morpheme.Builder':
self.informal = True
return self
def mapped_morpheme_(self, morpheme: 'Morpheme') -> 'Morpheme.Builder':
self.mapped_morpheme = morpheme
return self
def build(self) -> 'Morpheme':
return Morpheme(self)
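# Usage sketch (the identifiers mirror the inventory defined in turkish_morphotactics.py):
# Morpheme.instance("Dative", "Dat") builds a plain morpheme and Morpheme.derivational("Diminutive", "Dim")
# a derivational one; two Morpheme objects compare equal whenever their id_ fields match.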
Morpheme.UNKNOWN = Morpheme.Builder(name="Unknown", id_="Unknown").build()
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/morphotactics/morpheme.py | morpheme.py
from __future__ import annotations
from typing import TYPE_CHECKING, Set
if TYPE_CHECKING:
from zemberek.morphology.analysis.search_path import SearchPath
from zemberek.morphology.morphotactics.morpheme_state import MorphemeState
from zemberek.core.turkish import TurkishAlphabet, PhoneticAttribute
from zemberek.morphology.analysis.surface_transitions import SurfaceTransition
from zemberek.morphology.morphotactics.conditions import Conditions
from zemberek.morphology.morphotactics.attribute_to_surface_cache import AttributeToSurfaceCache
from zemberek.morphology.morphotactics.morpheme_transition import MorphemeTransition
class SuffixTransition(MorphemeTransition):
def __init__(self, builder: 'SuffixTransition.Builder' = None, surface_template: str = None):
super().__init__()
if surface_template is not None:
self.surface_template = surface_template
else:
self.from_: MorphemeState = builder.from_
self.to: MorphemeState = builder.to
self.surface_template = "" if builder.surface_template is None else builder.surface_template
self.condition = builder.condition
self.conditions_from_template(self.surface_template)
self.token_list = [item for item in SurfaceTransition.SuffixTemplateTokenizer(self.surface_template)]
self.condition_count = self.count_conditions()
self.surface_cache = AttributeToSurfaceCache()
def __str__(self):
return "[" + self.from_.id_ + "→" + self.to.id_ + \
("" if len(self.surface_template) == 0 else ":" + self.surface_template) + "]"
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, SuffixTransition):
this_condition: str = "" if self.condition is None else str(self.condition)
that_condition: str = "" if other.condition is None else str(other.condition)
return self.surface_template == other.surface_template and self.from_ == other.from_ and \
self.to == other.to and this_condition == that_condition
else:
return False
def __hash__(self):
this_condition = "" if self.condition is None else str(self.condition)
result = hash(self.surface_template)
result = 31 * result + hash(self.from_)
result = 31 * result + hash(self.to)
result = 31 * result + hash(this_condition)
return result
def can_pass(self, path: SearchPath) -> bool:
return self.condition is None or self.condition.accept_(path)
def get_copy(self) -> 'SuffixTransition':
st = SuffixTransition(surface_template=self.surface_template)
st.from_ = self.from_
st.to = self.to
st.condition = self.condition
st.token_list = self.token_list.copy()
st.surface_cache = self.surface_cache
return st
def connect(self):
self.from_.add_outgoing((self,))
self.to.add_incoming((self,))
def count_conditions(self) -> int:
if self.condition is None:
return 0
return self.condition.count() if isinstance(self.condition, Conditions.CombinedCondition) else 1
def conditions_from_template(self, template: str):
lower_map = {ord(u'I'): u'ı', ord(u'İ'): u'i'}
if template is not None and len(template) != 0:
lower = template.translate(lower_map).lower()
c = None
first_char_vowel: bool = TurkishAlphabet.INSTANCE.is_vowel(lower[0])
if lower.startswith(">") or not first_char_vowel:
c = Conditions.not_have(p_attribute=PhoneticAttribute.ExpectsVowel)
if lower.startswith("+") and TurkishAlphabet.INSTANCE.is_vowel(lower[2]) or first_char_vowel:
c = Conditions.not_have(p_attribute=PhoneticAttribute.ExpectsConsonant)
if c:
if self.condition is None:
self.condition = c
else:
self.condition = c.and_(self.condition)
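    # Surface-template notation sketch (as used throughout the morphotactics graph, e.g.
    # "lAr", "+yA", ">dAn"): capital A and I stand for vowel-harmony variants, a leading "+"
    # marks an optional buffer letter, and ">" marks a consonant that may surface voiced or
    # devoiced depending on the stem. conditions_from_template above only inspects the leading
    # characters to decide whether the suffix may follow a stem expecting a vowel or a consonant.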
def add_to_surface_cache(self, attributes: Set[PhoneticAttribute], value: str):
self.surface_cache.add_surface(attributes=attributes, surface=value)
def get_from_surface_cache(self, attributes: Set[PhoneticAttribute]) -> str:
return self.surface_cache.get_surface(attributes=attributes)
def get_last_template_token(self) -> SurfaceTransition.SuffixTemplateToken:
return None if len(self.token_list) == 0 else self.token_list[-1]
def has_surface_form(self) -> bool:
return len(self.token_list) > 0
class Builder:
def __init__(self, from_: MorphemeState, to: MorphemeState, surface_template: str = None, condition=None):
self.from_ = from_
self.to = to
self.surface_template = surface_template
self.condition = condition
def empty(self) -> 'SuffixTransition.Builder':
self.surface_template = ""
return self
def build(self) -> 'SuffixTransition':
transition = SuffixTransition(builder=self)
transition.connect()
            return transition
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/morphotactics/suffix_transition.py | suffix_transition.py
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.lexicon import RootLexicon
from zemberek.core.turkish import PhoneticAttribute
from zemberek.morphology.morphotactics.morpheme import Morpheme
from zemberek.morphology.morphotactics.morpheme_state import MorphemeState
from zemberek.morphology.morphotactics.turkish_morphotactics import TurkishMorphotactics, StemTransitionsMapBased
from zemberek.morphology.morphotactics.conditions import Conditions
class InformalTurkishMorphotactics(TurkishMorphotactics):
def __init__(self, lexicon: RootLexicon):
super().__init__(lexicon)
self.lexicon = lexicon
self.a1plInformal = self.add_to_morpheme_map(
Morpheme.builder("A1pl_Informal", "A1pl_Informal").informal_().mapped_morpheme_(self.a1pl).build())
self.a1sgInformal = self.add_to_morpheme_map(
Morpheme.builder("A1sg_Informal", "A1sg_Informal").informal_().mapped_morpheme_(self.a1sg).build())
self.prog1Informal = self.add_to_morpheme_map(
Morpheme.builder("Prog1_Informal", "Prog1_Informal").informal_().mapped_morpheme_(self.prog1).build())
self.futInformal = self.add_to_morpheme_map(
Morpheme.builder("Fut_Informal", "Fut_Informal").informal_().mapped_morpheme_(self.fut).build())
self.quesSuffixInformal = self.add_to_morpheme_map(
Morpheme.builder("QuesSuffix_Informal", "QuesSuffix_Informal").informal_().mapped_morpheme_(
self.ques).build())
self.negInformal = self.add_to_morpheme_map(
Morpheme.builder("Neg_Informal", "Neg_Informal").informal_().mapped_morpheme_(self.neg).build())
self.unableInformal = self.add_to_morpheme_map(
Morpheme.builder("Unable_Informal", "Unable_Informal").informal_().mapped_morpheme_(self.unable).build())
self.optInformal = self.add_to_morpheme_map(
Morpheme.builder("Opt_Informal", "Opt_Informal").informal_().mapped_morpheme_(self.opt).build())
self.vA1pl_ST_Inf = MorphemeState.terminal("vA1pl_ST_Inf", self.a1plInformal)
self.vA1sg_ST_Inf = MorphemeState.terminal("vA1sg_ST_Inf", self.a1sgInformal)
self.vProgYor_S_Inf = MorphemeState.non_terminal("vProgYor_S_Inf", self.prog1Informal)
self.vFut_S_Inf = MorphemeState.non_terminal("vFut_S_Inf", self.futInformal)
self.vFut_S_Inf2 = MorphemeState.non_terminal("vFut_S_Inf2", self.futInformal)
self.vFut_S_Inf3 = MorphemeState.non_terminal("vFut_S_Inf3", self.futInformal)
self.vQues_S_Inf = MorphemeState.non_terminal("vQues_S_Inf", self.quesSuffixInformal)
self.vNeg_S_Inf = MorphemeState.non_terminal("vNeg_S_Inf", self.negInformal)
self.vUnable_S_Inf = MorphemeState.non_terminal("vUnable_S_Inf", self.unableInformal)
self.vOpt_S_Inf = MorphemeState.non_terminal("vOpt_S_Inf", self.optInformal)
self.vOpt_S_Empty_Inf = MorphemeState.non_terminal("vOpt_S_Empty_Inf", self.optInformal)
self.vOpt_S_Empty_Inf2 = MorphemeState.non_terminal("vOpt_S_Empty_Inf2", self.optInformal)
# self.make_graph()
self.add_graph()
# self.stem_transitions = StemTransitionsMapBased(lexicon, self)
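    # super().__init__(lexicon) has already built the formal graph and the stem transitions,
    # so add_graph below only grafts informal/spoken-language variants onto that graph
    # (e.g. "-Iyo" for the "-Iyor" progressive), letting colloquial forms such as
    # "geliyom" be analysed alongside the formal ones.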
def add_graph(self):
self.verbRoot_S.add_(self.vProgYor_S_Inf, "Iyo",
Conditions.not_have(p_attribute=PhoneticAttribute.LastLetterVowel))
self.verbRoot_VowelDrop_S.add_(self.vProgYor_S_Inf, "Iyo")
self.vProgYor_S_Inf.add_(self.vA1sg_ST, "m").add_(self.vA2sg_ST, "sun").add_(self.vA2sg_ST, "n").add_empty(
self.vA3sg_ST).add_(self.vA1pl_ST, "z").add_(self.vA2pl_ST, "sunuz").add_(self.vA2pl_ST, "nuz").add_(
self.vA3pl_ST, "lar").add_(self.vCond_S, "sa").add_(self.vPastAfterTense_S, "du").add_(
self.vNarrAfterTense_S, "muş").add_(self.vCopBeforeA3pl_S, "dur").add_(self.vWhile_S, "ken")
self.vNegProg1_S.add_(self.vProgYor_S_Inf, "Iyo")
self.vUnableProg1_S.add_(self.vProgYor_S_Inf, "Iyo")
diYiCondition: Conditions.RootSurfaceIsAny = Conditions.RootSurfaceIsAny(("di", "yi"))
self.vDeYeRoot_S.add_(self.vProgYor_S_Inf, "yo", diYiCondition)
self.vOpt_S.add_(self.vA1pl_ST_Inf, "k")
self.verbRoot_S.add_(self.vNeg_S_Inf, "mI")
self.verbRoot_S.add_(self.vUnable_S_Inf, "+yAmI")
self.verbRoot_S.add_(self.vFut_S_Inf, "+ycA~k").add_(self.vFut_S_Inf, "+ycA!ğ").add_(self.vFut_S_Inf2,
"+ycA").add_(
self.vFut_S_Inf2, "+yIcA").add_(self.vFut_S_Inf2, "+yAcA")
self.vNeg_S_Inf.add_(self.vFut_S, "yAcA~k").add_(self.vFut_S, "yAcA!ğ").add_(self.vFut_S_Inf, "ycA~k").add_(
self.vFut_S_Inf, "ycA!ğ").add_(self.vFut_S_Inf2, "ycA")
self.vUnable_S_Inf.add_(self.vFut_S, "yAcA~k").add_(self.vFut_S, "yAcA!ğ").add_(self.vFut_S_Inf, "ycA~k").add_(
self.vFut_S_Inf, "ycA!ğ").add_(self.vFut_S_Inf2, "ycA")
self.vNeg_S.add_(self.vFut_S_Inf, "yAcA").add_(self.vFut_S_Inf, "yAcAk")
self.vUnable_S.add_(self.vFut_S_Inf, "yAcA").add_(self.vFut_S_Inf, "yAcAk")
self.vFut_S_Inf.add_(self.vA1sg_ST, "+Im").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(
self.vA1pl_ST, "Iz").add_(self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr")
self.vFut_S_Inf2.add_(self.vA1sg_ST, "m").add_(self.vA2sg_ST, "n").add_(self.vA1pl_ST, "z").add_(self.vA1pl_ST,
"nIz")
self.vFut_S_Inf.add_(self.vCond_S, "sA")
self.vFut_S_Inf.add_(self.vPastAfterTense_S, "tI")
self.vFut_S_Inf.add_(self.vNarrAfterTense_S, "mIş")
self.vFut_S_Inf.add_(self.vCopBeforeA3pl_S, "tIr")
self.vFut_S_Inf.add_(self.vWhile_S, "ken")
self.verbRoot_S.add_(self.vOpt_S_Inf, "I", Conditions.has(p_attribute=PhoneticAttribute.LastLetterConsonant))
self.verbRoot_VowelDrop_S.add_(self.vOpt_S_Inf, "I")
self.verbRoot_S.add_empty(self.vOpt_S_Empty_Inf, Conditions.has(p_attribute=PhoneticAttribute.LastLetterVowel))
self.vOpt_S_Inf.add_(self.vA1sg_ST_Inf, "+yIm")
self.vOpt_S_Inf.add_(self.vA1sg_ST_Inf, "+yim")
        self.vOpt_S_Empty_Inf.add_(self.vA1sg_ST_Inf, "+yim")
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/morphotactics/informal_turkish_morphotactics.py | informal_turkish_morphotactics.py
from __future__ import annotations
import logging
from typing import List, Union, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.morphotactics.morpheme import Morpheme
from zemberek.morphology.morphotactics.conditions import Conditions
from zemberek.morphology.morphotactics.morpheme_transition import MorphemeTransition
from zemberek.morphology.morphotactics.suffix_transition import SuffixTransition
logger = logging.getLogger(__name__)
class MorphemeState:
def __init__(self, id_: str, morpheme: Morpheme, terminal: bool, derivative: bool, pos_root: bool):
self.id_ = id_
self.morpheme = morpheme
self.terminal_ = terminal
self.derivative = derivative
self.pos_root = pos_root
self.outgoing: List[Union[SuffixTransition, MorphemeTransition]] = []
self.incoming: List[Union[SuffixTransition, MorphemeTransition]] = []
def __str__(self):
return "[" + self.id_ + ":" + self.morpheme.id_ + "]"
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, MorphemeState):
return self.id_ == other.id_
else:
return False
def __hash__(self):
return hash(self.id_)
@staticmethod
def builder(_id: str, morpheme: Morpheme, pos_root: bool = False):
return MorphemeState.Builder(_id, morpheme, _pos_root=pos_root)
@staticmethod
def terminal(_id: str, morpheme: Morpheme, pos_root: bool = False) -> 'MorphemeState':
return MorphemeState.Builder(_id, morpheme, _pos_root=pos_root, _terminal=True).build()
@staticmethod
def non_terminal(_id: str, morpheme: Morpheme, pos_root: bool = False) -> 'MorphemeState':
return MorphemeState.Builder(_id, morpheme, _pos_root=pos_root).build()
@staticmethod
def terminal_derivative(_id: str, morpheme: Morpheme, pos_root: bool = False) -> 'MorphemeState':
return MorphemeState.Builder(_id, morpheme, _pos_root=pos_root, _terminal=True, _derivative=True).build()
@staticmethod
def non_terminal_derivative(_id: str, morpheme: Morpheme, pos_root: bool = False) -> 'MorphemeState':
return MorphemeState.Builder(_id, morpheme, _pos_root=pos_root, _derivative=True).build()
def add_(self, to: 'MorphemeState', template: str, condition: Conditions.Condition = None) -> 'MorphemeState':
if condition:
SuffixTransition.Builder(from_=self, to=to, surface_template=template, condition=condition).build()
else:
SuffixTransition.Builder(from_=self, to=to, surface_template=template).build()
return self
def add_empty(self, to: 'MorphemeState', condition: Conditions.Condition = None) -> 'MorphemeState':
if condition:
SuffixTransition.Builder(from_=self, to=to, condition=condition).build()
else:
SuffixTransition.Builder(from_=self, to=to).build()
return self
def copy_outgoing_transitions_from(self, state: 'MorphemeState'):
for transition in state.outgoing:
copy = transition.get_copy()
copy.from_ = self
            self.add_outgoing((copy,))  # attach the re-parented copy, not the original transition
def add_outgoing(self, suffix_transitions: Tuple[MorphemeTransition, ...]) -> 'MorphemeState':
for suffix_transition in suffix_transitions:
if suffix_transition in self.outgoing:
logger.debug(f"Outgoing transition already exists{str(suffix_transition)}")
self.outgoing.append(suffix_transition)
return self
def add_incoming(self, suffix_transitions: Tuple[MorphemeTransition, ...]) -> 'MorphemeState':
for suffix_transition in suffix_transitions:
if suffix_transition in self.incoming:
logger.debug(f"Incoming transition already exists{str(suffix_transition)}")
self.incoming.append(suffix_transition)
return self
def remove_transitions_to(self, morpheme: Morpheme):
transitions: List[MorphemeTransition] = []
for transition in self.outgoing:
if transition.to.morpheme == morpheme:
transitions.append(transition)
for item in transitions:
self.outgoing.remove(item)
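    # Minimal wiring sketch (the morpheme names here are illustrative): a state is built with
    # the factory helpers, e.g. noun_S = MorphemeState.builder("noun_S", noun, pos_root=True).build(),
    # and suffix transitions are attached with chained add_("lAr")/add_empty() calls;
    # TurkishMorphotactics constructs its whole graph this way.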
class Builder:
def __init__(self, _id: str, _morpheme: Morpheme, _terminal: bool = False, _derivative: bool = False,
_pos_root: bool = False):
self._id = _id
self._morpheme = _morpheme
self._terminal = _terminal
self._derivative = _derivative
self._pos_root = _pos_root
def build(self):
            return MorphemeState(self._id, self._morpheme, self._terminal, self._derivative, self._pos_root)
| zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/morphotactics/morpheme_state.py | morpheme_state.py
from __future__ import annotations
import logging
from copy import deepcopy
from typing import Dict, Set, List, Tuple, Union, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.lexicon import RootLexicon
from zemberek.core.utils import ReadWriteLock
from zemberek.core.turkish import PrimaryPos, SecondaryPos, RootAttribute, PhoneticAttribute, TurkishAlphabet
from zemberek.morphology.analysis.attributes_helper import AttributesHelper
from zemberek.morphology.lexicon import DictionaryItem
from zemberek.morphology.morphotactics.morpheme import Morpheme
from zemberek.morphology.morphotactics.morpheme_state import MorphemeState
from zemberek.morphology.morphotactics.conditions import Conditions
from zemberek.morphology.morphotactics.stem_transition import StemTransition
logger = logging.getLogger(__name__)
morpheme_map: Dict[str, Morpheme] = dict()
def add_morpheme(morpheme: Morpheme) -> Morpheme:
morpheme_map[morpheme.id_] = morpheme
return morpheme
def get_morpheme_map() -> Dict[str, Morpheme]:
return morpheme_map
class TurkishMorphotactics:
root = add_morpheme(Morpheme.instance("Root", "Root"))
noun = add_morpheme(Morpheme.instance("Noun", "Noun", PrimaryPos.Noun))
adj = add_morpheme(Morpheme.instance("Adjective", "Adj", PrimaryPos.Adjective))
verb = add_morpheme(Morpheme.instance("Verb", "Verb", PrimaryPos.Verb))
pron = add_morpheme(Morpheme.instance("Pronoun", "Pron", PrimaryPos.Pronoun))
adv = add_morpheme(Morpheme.instance("Adverb", "Adv", PrimaryPos.Adverb))
conj = add_morpheme(Morpheme.instance("Conjunction", "Conj", PrimaryPos.Conjunction))
punc = add_morpheme(Morpheme.instance("Punctuation", "Punc", PrimaryPos.Punctuation))
ques = add_morpheme(Morpheme.instance("Question", "Ques", PrimaryPos.Question))
postp = add_morpheme(Morpheme.instance("PostPositive", "Postp", PrimaryPos.PostPositive))
det = add_morpheme(Morpheme.instance("Determiner", "Det", PrimaryPos.Determiner))
num = add_morpheme(Morpheme.instance("Numeral", "Num", PrimaryPos.Numeral))
dup = add_morpheme(Morpheme.instance("Duplicator", "Dup", PrimaryPos.Duplicator))
interj = add_morpheme(Morpheme.instance("Interjection", "Interj", PrimaryPos.Interjection))
a1sg = add_morpheme(Morpheme.instance("FirstPersonSingular", "A1sg"))
a2sg = add_morpheme(Morpheme.instance("SecondPersonSingular", "A2sg"))
a3sg = add_morpheme(Morpheme.instance("ThirdPersonSingular", "A3sg"))
a1pl = add_morpheme(Morpheme.instance("FirstPersonPlural", "A1pl"))
a2pl = add_morpheme(Morpheme.instance("SecondPersonPlural", "A2pl"))
a3pl = add_morpheme(Morpheme.instance("ThirdPersonPlural", "A3pl"))
pnon = add_morpheme(Morpheme.instance("NoPosession", "Pnon"))
p1sg = add_morpheme(Morpheme.instance("FirstPersonSingularPossessive", "P1sg"))
p2sg = add_morpheme(Morpheme.instance("SecondPersonSingularPossessive", "P2sg"))
p3sg = add_morpheme(Morpheme.instance("ThirdPersonSingularPossessive", "P3sg"))
p1pl = add_morpheme(Morpheme.instance("FirstPersonPluralPossessive", "P1pl"))
p2pl = add_morpheme(Morpheme.instance("SecondPersonPluralPossessive", "P2pl"))
p3pl = add_morpheme(Morpheme.instance("ThirdPersonPluralPossessive", "P3pl"))
nom = add_morpheme(Morpheme.instance("Nominal", "Nom"))
dat = add_morpheme(Morpheme.instance("Dative", "Dat"))
acc = add_morpheme(Morpheme.instance("Accusative", "Acc"))
abl = add_morpheme(Morpheme.instance("Ablative", "Abl"))
loc = add_morpheme(Morpheme.instance("Locative", "Loc"))
ins = add_morpheme(Morpheme.instance("Instrumental", "Ins"))
gen = add_morpheme(Morpheme.instance("Genitive", "Gen"))
equ = add_morpheme(Morpheme.instance("Equ", "Equ"))
dim = add_morpheme(Morpheme.derivational("Diminutive", "Dim"))
ness = add_morpheme(Morpheme.derivational("Ness", "Ness"))
with_ = add_morpheme(Morpheme.derivational("With", "With"))
without = add_morpheme(Morpheme.derivational("Without", "Without"))
related = add_morpheme(Morpheme.derivational("Related", "Related"))
justLike = add_morpheme(Morpheme.derivational("JustLike", "JustLike"))
rel = add_morpheme(Morpheme.derivational("Relation", "Rel"))
agt = add_morpheme(Morpheme.derivational("Agentive", "Agt"))
become = add_morpheme(Morpheme.derivational("Become", "Become"))
acquire = add_morpheme(Morpheme.derivational("Acquire", "Acquire"))
ly = add_morpheme(Morpheme.derivational("Ly", "Ly"))
caus = add_morpheme(Morpheme.derivational("Causative", "Caus"))
recip = add_morpheme(Morpheme.derivational("Reciprocal", "Recip"))
reflex = add_morpheme(Morpheme.derivational("Reflexive", "Reflex"))
able = add_morpheme(Morpheme.derivational("Ability", "Able"))
pass_ = add_morpheme(Morpheme.derivational("Passive", "Pass"))
inf1 = add_morpheme(Morpheme.derivational("Infinitive1", "Inf1"))
inf2 = add_morpheme(Morpheme.derivational("Infinitive2", "Inf2"))
inf3 = add_morpheme(Morpheme.derivational("Infinitive3", "Inf3"))
actOf = add_morpheme(Morpheme.derivational("ActOf", "ActOf"))
pastPart = add_morpheme(Morpheme.derivational("PastParticiple", "PastPart"))
narrPart = add_morpheme(Morpheme.derivational("NarrativeParticiple", "NarrPart"))
futPart = add_morpheme(Morpheme.derivational("FutureParticiple", "FutPart"))
presPart = add_morpheme(Morpheme.derivational("PresentParticiple", "PresPart"))
aorPart = add_morpheme(Morpheme.derivational("AoristParticiple", "AorPart"))
notState = add_morpheme(Morpheme.derivational("NotState", "NotState"))
feelLike = add_morpheme(Morpheme.derivational("FeelLike", "FeelLike"))
everSince = add_morpheme(Morpheme.derivational("EverSince", "EverSince"))
repeat = add_morpheme(Morpheme.derivational("Repeat", "Repeat"))
almost = add_morpheme(Morpheme.derivational("Almost", "Almost"))
hastily = add_morpheme(Morpheme.derivational("Hastily", "Hastily"))
stay = add_morpheme(Morpheme.derivational("Stay", "Stay"))
start = add_morpheme(Morpheme.derivational("Start", "Start"))
asIf = add_morpheme(Morpheme.derivational("AsIf", "AsIf"))
while_ = add_morpheme(Morpheme.derivational("While", "While"))
when = add_morpheme(Morpheme.derivational("When", "When"))
sinceDoingSo = add_morpheme(Morpheme.derivational("SinceDoingSo", "SinceDoingSo"))
asLongAs = add_morpheme(Morpheme.derivational("AsLongAs", "AsLongAs"))
byDoingSo = add_morpheme(Morpheme.derivational("ByDoingSo", "ByDoingSo"))
adamantly = add_morpheme(Morpheme.derivational("Adamantly", "Adamantly"))
afterDoingSo = add_morpheme(Morpheme.derivational("AfterDoingSo", "AfterDoingSo"))
withoutHavingDoneSo = add_morpheme(Morpheme.derivational("WithoutHavingDoneSo", "WithoutHavingDoneSo"))
withoutBeingAbleToHaveDoneSo = add_morpheme(
Morpheme.derivational("WithoutBeingAbleToHaveDoneSo", "WithoutBeingAbleToHaveDoneSo"))
zero = add_morpheme(Morpheme.derivational("Zero", "Zero"))
cop = add_morpheme(Morpheme.instance("Copula", "Cop"))
neg = add_morpheme(Morpheme.instance("Negative", "Neg"))
unable = add_morpheme(Morpheme.instance("Unable", "Unable"))
pres = add_morpheme(Morpheme.instance("PresentTense", "Pres"))
past = add_morpheme(Morpheme.instance("PastTense", "Past"))
narr = add_morpheme(Morpheme.instance("NarrativeTense", "Narr"))
cond = add_morpheme(Morpheme.instance("Condition", "Cond"))
prog1 = add_morpheme(Morpheme.instance("Progressive1", "Prog1"))
prog2 = add_morpheme(Morpheme.instance("Progressive2", "Prog2"))
aor = add_morpheme(Morpheme.instance("Aorist", "Aor"))
fut = add_morpheme(Morpheme.instance("Future", "Fut"))
imp = add_morpheme(Morpheme.instance("Imparative", "Imp"))
opt = add_morpheme(Morpheme.instance("Optative", "Opt"))
desr = add_morpheme(Morpheme.instance("Desire", "Desr"))
neces = add_morpheme(Morpheme.instance("Necessity", "Neces"))
morpheme_map = get_morpheme_map()
def __init__(self, lexicon: RootLexicon):
self.root_S = MorphemeState.non_terminal("root_S", self.root)
self.puncRoot_ST = MorphemeState.terminal("puncRoot_ST", self.punc, pos_root=True)
self.noun_S = MorphemeState.builder("noun_S", self.noun, pos_root=True).build()
self.nounCompoundRoot_S = MorphemeState.builder("nounCompoundRoot_S", self.noun, pos_root=True).build()
self.nounSuRoot_S = MorphemeState.builder("nounSuRoot_S", self.noun, pos_root=True).build()
self.nounInf1Root_S = MorphemeState.builder("nounInf1Root_S", self.noun, pos_root=True).build()
self.nounActOfRoot_S = MorphemeState.builder("nounActOfRoot_S", self.noun, pos_root=True).build()
self.a3sg_S = MorphemeState.non_terminal("a3sg_S", self.a3sg)
self.a3sgSu_S = MorphemeState.non_terminal("a3sgSu_S", self.a3sg)
self.a3sgCompound_S = MorphemeState.non_terminal("a3sgCompound_S", self.a3sg)
self.a3sgInf1_S = MorphemeState.non_terminal("a3sgInf1_S", self.a3sg)
self.a3sgActOf_S = MorphemeState.non_terminal("a3sgActOf_S", self.a3sg)
self.a3pl_S = MorphemeState.non_terminal("a3pl_S", self.a3pl)
self.a3plActOf_S = MorphemeState.non_terminal("a3plActOf_S", self.a3pl)
self.a3plCompound_S = MorphemeState.non_terminal("a3plCompound_S", self.a3pl)
self.a3plCompound2_S = MorphemeState.non_terminal("a3plCompound2_S", self.a3pl)
self.pnon_S = MorphemeState.non_terminal("pnon_S", self.pnon)
self.pnonCompound_S = MorphemeState.non_terminal("pnonCompound_S", self.pnon)
self.pnonCompound2_S = MorphemeState.non_terminal("pnonCompound2_S", self.pnon)
self.pnonInf1_S = MorphemeState.non_terminal("pnonInf1_S", self.pnon)
self.pnonActOf = MorphemeState.non_terminal("pnonActOf", self.pnon)
self.p1sg_S = MorphemeState.non_terminal("p1sg_S", self.p1sg)
self.p2sg_S = MorphemeState.non_terminal("p2sg_S", self.p2sg)
self.p3sg_S = MorphemeState.non_terminal("p3sg_S", self.p3sg)
self.p1pl_S = MorphemeState.non_terminal("p1pl_S", self.p1pl)
self.p2pl_S = MorphemeState.non_terminal("p2pl_S", self.p2pl)
self.p3pl_S = MorphemeState.non_terminal("p3pl_S", self.p3pl)
self.nom_ST = MorphemeState.terminal("nom_ST", self.nom)
self.nom_S = MorphemeState.non_terminal("nom_S", self.nom)
self.dat_ST = MorphemeState.terminal("dat_ST", self.dat)
self.abl_ST = MorphemeState.terminal("abl_ST", self.abl)
self.loc_ST = MorphemeState.terminal("loc_ST", self.loc)
self.ins_ST = MorphemeState.terminal("ins_ST", self.ins)
self.acc_ST = MorphemeState.terminal("acc_ST", self.acc)
self.gen_ST = MorphemeState.terminal("gen_ST", self.gen)
self.equ_ST = MorphemeState.terminal("equ_ST", self.equ)
self.dim_S = MorphemeState.non_terminal_derivative("dim_S", self.dim)
self.ness_S = MorphemeState.non_terminal_derivative("ness_S", self.ness)
self.agt_S = MorphemeState.non_terminal_derivative("agt_S", self.agt)
self.related_S = MorphemeState.non_terminal_derivative("related_S", self.related)
self.rel_S = MorphemeState.non_terminal_derivative("rel_S", self.rel)
self.relToPron_S = MorphemeState.non_terminal_derivative("relToPron_S", self.rel)
self.with_S = MorphemeState.non_terminal_derivative("with_S", self.with_)
self.without_S = MorphemeState.non_terminal_derivative("without_S", self.without)
self.justLike_S = MorphemeState.non_terminal_derivative("justLike_S", self.justLike)
self.nounZeroDeriv_S = MorphemeState.non_terminal_derivative("nounZeroDeriv_S", self.zero)
self.become_S = MorphemeState.non_terminal_derivative("become_S", self.become)
self.acquire_S = MorphemeState.non_terminal_derivative("acquire_S", self.acquire)
self.nounLastVowelDropRoot_S = MorphemeState.builder("nounLastVowelDropRoot_S", self.noun, True).build()
self.adjLastVowelDropRoot_S = MorphemeState.builder("adjLastVowelDropRoot_S", self.adj, True).build()
self.postpLastVowelDropRoot_S = MorphemeState.builder("postpLastVowelDropRoot_S", self.postp, True).build()
self.a3PlLastVowelDrop_S = MorphemeState.non_terminal("a3PlLastVowelDrop_S", self.a3pl)
self.a3sgLastVowelDrop_S = MorphemeState.non_terminal("a3sgLastVowelDrop_S", self.a3sg)
self.pNonLastVowelDrop_S = MorphemeState.non_terminal("pNonLastVowelDrop_S", self.pnon)
self.zeroLastVowelDrop_S = MorphemeState.non_terminal_derivative("zeroLastVowelDrop_S", self.zero)
self.nounProper_S = MorphemeState.builder("nounProper_S", self.noun, pos_root=True).build()
self.nounAbbrv_S = MorphemeState.builder("nounAbbrv_S", self.noun, pos_root=True).build()
self.puncProperSeparator_S = MorphemeState.non_terminal("puncProperSeparator_S", self.punc)
self.nounNoSuffix_S = MorphemeState.builder("nounNoSuffix_S", self.noun, pos_root=True).build()
self.nounA3sgNoSuffix_S = MorphemeState.non_terminal("nounA3sgNoSuffix_S", self.a3sg)
self.nounPnonNoSuffix_S = MorphemeState.non_terminal("nounPnonNoSuffix_S", self.pnon)
self.nounNomNoSuffix_ST = MorphemeState.terminal("nounNomNoSuffix_S", self.nom)
self.adjectiveRoot_ST = MorphemeState.terminal("adjectiveRoot_ST", self.adj, pos_root=True)
self.adjAfterVerb_S = MorphemeState.builder("adjAfterVerb_S", self.adj, pos_root=True).build()
self.adjAfterVerb_ST = MorphemeState.terminal("adjAfterVerb_ST", self.adj, pos_root=True)
self.adjZeroDeriv_S = MorphemeState.non_terminal_derivative("adjZeroDeriv_S", self.zero)
self.aPnon_ST = MorphemeState.terminal("aPnon_ST", self.pnon)
self.aP1sg_ST = MorphemeState.terminal("aP1sg_ST", self.p1sg)
self.aP2sg_ST = MorphemeState.terminal("aP2sg_ST", self.p2sg)
self.aP3sg_ST = MorphemeState.terminal("aP3sg_ST", self.p3sg)
        self.aP1pl_ST = MorphemeState.terminal("aP1pl_ST", self.p1pl)
self.aP2pl_ST = MorphemeState.terminal("aP2pl_ST", self.p2pl)
self.aP3pl_ST = MorphemeState.terminal("aP3pl_ST", self.p3pl)
self.aLy_S = MorphemeState.non_terminal_derivative("aLy_S", self.ly)
self.aAsIf_S = MorphemeState.non_terminal_derivative("aAsIf_S", self.asIf)
self.aAgt_S = MorphemeState.non_terminal_derivative("aAgt_S", self.agt)
self.numeralRoot_ST = MorphemeState.terminal("numeralRoot_ST", self.num, pos_root=True)
self.numZeroDeriv_S = MorphemeState.non_terminal_derivative("numZeroDeriv_S", self.zero)
self.nVerb_S = MorphemeState.builder("nVerb_S", self.verb, pos_root=True).build()
self.nVerbDegil_S = MorphemeState.builder("nVerbDegil_S", self.verb, pos_root=True).build()
self.nPresent_S = MorphemeState.non_terminal("nPresent_S", self.pres)
self.nPast_S = MorphemeState.non_terminal("nPast_S", self.past)
self.nNarr_S = MorphemeState.non_terminal("nNarr_S", self.narr)
self.nCond_S = MorphemeState.non_terminal("nCond_S", self.cond)
self.nA1sg_ST = MorphemeState.terminal("nA1sg_ST", self.a1sg)
self.nA2sg_ST = MorphemeState.terminal("nA2sg_ST", self.a2sg)
self.nA1pl_ST = MorphemeState.terminal("nA1pl_ST", self.a1pl)
self.nA2pl_ST = MorphemeState.terminal("nA2pl_ST", self.a2pl)
self.nA3sg_ST = MorphemeState.terminal("nA3sg_ST", self.a3sg)
self.nA3sg_S = MorphemeState.non_terminal("nA3sg_S", self.a3sg)
self.nA3pl_ST = MorphemeState.terminal("nA3pl_ST", self.a3pl)
self.nCop_ST = MorphemeState.terminal("nCop_ST", self.cop)
self.nCopBeforeA3pl_S = MorphemeState.non_terminal("nCopBeforeA3pl_S", self.cop)
self.nNeg_S = MorphemeState.non_terminal("nNeg_S", self.neg)
self.pronPers_S = MorphemeState.builder("pronPers_S", self.pron, pos_root=True).build()
self.pronDemons_S = MorphemeState.builder("pronDemons_S", self.pron, pos_root=True).build()
self.pronQuant_S = MorphemeState.builder("pronQuant_S", self.pron, pos_root=True).build()
self.pronQuantModified_S = MorphemeState.builder("pronQuantModified_S", self.pron, pos_root=True).build()
self.pronQues_S = MorphemeState.builder("pronQues_S", self.pron, pos_root=True).build()
self.pronReflex_S = MorphemeState.builder("pronReflex_S", self.pron, pos_root=True).build()
self.pronPers_Mod_S = MorphemeState.builder("pronPers_Mod_S", self.pron, pos_root=True).build()
self.pronAfterRel_S = MorphemeState.builder("pronAfterRel_S", self.pron, pos_root=True).build()
self.pA1sg_S = MorphemeState.non_terminal("pA1sg_S", self.a1sg)
self.pA2sg_S = MorphemeState.non_terminal("pA2sg_S", self.a2sg)
self.pA1sgMod_S = MorphemeState.non_terminal("pA1sgMod_S", self.a1sg)
self.pA2sgMod_S = MorphemeState.non_terminal("pA2sgMod_S", self.a2sg)
self.pA3sg_S = MorphemeState.non_terminal("pA3sg_S", self.a3sg)
self.pA3sgRel_S = MorphemeState.non_terminal("pA3sgRel_S", self.a3sg)
self.pA1pl_S = MorphemeState.non_terminal("pA1pl_S", self.a1pl)
self.pA2pl_S = MorphemeState.non_terminal("pA2pl_S", self.a2pl)
self.pA3pl_S = MorphemeState.non_terminal("pA3pl_S", self.a3pl)
self.pA3plRel_S = MorphemeState.non_terminal("pA3plRel_S", self.a3pl)
self.pQuantA3sg_S = MorphemeState.non_terminal("pQuantA3sg_S", self.a3sg)
self.pQuantA3pl_S = MorphemeState.non_terminal("pQuantA3pl_S", self.a3pl)
self.pQuantModA3pl_S = MorphemeState.non_terminal("pQuantModA3pl_S", self.a3pl)
self.pQuantA1pl_S = MorphemeState.non_terminal("pQuantA1pl_S", self.a1pl)
self.pQuantA2pl_S = MorphemeState.non_terminal("pQuantA2pl_S", self.a2pl)
self.pQuesA3sg_S = MorphemeState.non_terminal("pQuesA3sg_S", self.a3sg)
self.pQuesA3pl_S = MorphemeState.non_terminal("pQuesA3pl_S", self.a3pl)
self.pReflexA3sg_S = MorphemeState.non_terminal("pReflexA3sg_S", self.a3sg)
self.pReflexA3pl_S = MorphemeState.non_terminal("pReflexA3pl_S", self.a3pl)
self.pReflexA1sg_S = MorphemeState.non_terminal("pReflexA1sg_S", self.a1sg)
self.pReflexA2sg_S = MorphemeState.non_terminal("pReflexA2sg_S", self.a2sg)
self.pReflexA1pl_S = MorphemeState.non_terminal("pReflexA1pl_S", self.a1pl)
self.pReflexA2pl_S = MorphemeState.non_terminal("pReflexA2pl_S", self.a2pl)
self.pPnon_S = MorphemeState.non_terminal("pPnon_S", self.pnon)
self.pPnonRel_S = MorphemeState.non_terminal("pPnonRel_S", self.pnon)
self.pPnonMod_S = MorphemeState.non_terminal("pPnonMod_S", self.pnon)
self.pP1sg_S = MorphemeState.non_terminal("pP1sg_S", self.p1sg)
self.pP2sg_S = MorphemeState.non_terminal("pP2sg_S", self.p2sg)
self.pP3sg_S = MorphemeState.non_terminal("pP3sg_S", self.p3sg)
self.pP1pl_S = MorphemeState.non_terminal("pP1pl_S", self.p1pl)
self.pP2pl_S = MorphemeState.non_terminal("pP2pl_S", self.p2pl)
self.pP3pl_S = MorphemeState.non_terminal("pP3pl_S", self.p3pl)
self.pNom_ST = MorphemeState.terminal("pNom_ST", self.nom)
self.pDat_ST = MorphemeState.terminal("pDat_ST", self.dat)
self.pAcc_ST = MorphemeState.terminal("pAcc_ST", self.acc)
self.pAbl_ST = MorphemeState.terminal("pAbl_ST", self.abl)
self.pLoc_ST = MorphemeState.terminal("pLoc_ST", self.loc)
self.pGen_ST = MorphemeState.terminal("pGen_ST", self.gen)
self.pIns_ST = MorphemeState.terminal("pIns_ST", self.ins)
self.pEqu_ST = MorphemeState.terminal("pEqu_ST", self.equ)
self.pronZeroDeriv_S = MorphemeState.non_terminal_derivative("pronZeroDeriv_S", self.zero)
self.pvPresent_S = MorphemeState.non_terminal("pvPresent_S", self.pres)
self.pvPast_S = MorphemeState.non_terminal("pvPast_S", self.past)
self.pvNarr_S = MorphemeState.non_terminal("pvNarr_S", self.narr)
self.pvCond_S = MorphemeState.non_terminal("pvCond_S", self.cond)
self.pvA1sg_ST = MorphemeState.terminal("pvA1sg_ST", self.a1sg)
self.pvA2sg_ST = MorphemeState.terminal("pvA2sg_ST", self.a2sg)
self.pvA3sg_ST = MorphemeState.terminal("pvA3sg_ST", self.a3sg)
self.pvA3sg_S = MorphemeState.non_terminal("pvA3sg_S", self.a3sg)
self.pvA1pl_ST = MorphemeState.terminal("pvA1pl_ST", self.a1pl)
self.pvA2pl_ST = MorphemeState.terminal("pvA2pl_ST", self.a2pl)
self.pvA3pl_ST = MorphemeState.terminal("pvA3pl_ST", self.a3pl)
self.pvCopBeforeA3pl_S = MorphemeState.non_terminal("pvCopBeforeA3pl_S", self.cop)
self.pvCop_ST = MorphemeState.terminal("pvCop_ST", self.cop)
self.pvVerbRoot_S = MorphemeState.builder("pvVerbRoot_S", self.verb, pos_root=True).build()
self.advRoot_ST = MorphemeState.terminal("advRoot_ST", self.adv, pos_root=True)
self.advNounRoot_ST = MorphemeState.terminal("advRoot_ST", self.adv, pos_root=True)
self.advForVerbDeriv_ST = MorphemeState.terminal("advForVerbDeriv_ST", self.adv, pos_root=True)
self.avNounAfterAdvRoot_ST = MorphemeState.builder("advToNounRoot_ST", self.noun, pos_root=True).build()
self.avA3sg_S = MorphemeState.non_terminal("avA3sg_S", self.a3sg)
self.avPnon_S = MorphemeState.non_terminal("avPnon_S", self.pnon)
self.avDat_ST = MorphemeState.terminal("avDat_ST", self.dat)
self.avZero_S = MorphemeState.non_terminal_derivative("avZero_S", self.zero)
self.avZeroToVerb_S = MorphemeState.non_terminal_derivative("avZeroToVerb_S", self.zero)
self.conjRoot_ST = MorphemeState.terminal("conjRoot_ST", self.conj, pos_root=True)
self.interjRoot_ST = MorphemeState.terminal("interjRoot_ST", self.interj, pos_root=True)
self.detRoot_ST = MorphemeState.terminal("detRoot_ST", self.det, pos_root=True)
self.dupRoot_ST = MorphemeState.terminal("dupRoot_ST", self.dup, pos_root=True)
self.postpRoot_ST = MorphemeState.terminal("postpRoot_ST", self.postp, pos_root=True)
self.postpZero_S = MorphemeState.non_terminal_derivative("postpZero_S", self.zero)
self.po2nRoot_S = MorphemeState.non_terminal("po2nRoot_S", self.noun)
self.po2nA3sg_S = MorphemeState.non_terminal("po2nA3sg_S", self.a3sg)
self.po2nA3pl_S = MorphemeState.non_terminal("po2nA3pl_S", self.a3pl)
self.po2nP3sg_S = MorphemeState.non_terminal("po2nP3sg_S", self.p3sg)
self.po2nP1sg_S = MorphemeState.non_terminal("po2nP1sg_S", self.p1sg)
self.po2nP2sg_S = MorphemeState.non_terminal("po2nP2sg_S", self.p2sg)
self.po2nP1pl_S = MorphemeState.non_terminal("po2nP1pl_S", self.p1pl)
self.po2nP2pl_S = MorphemeState.non_terminal("po2nP2pl_S", self.p2pl)
self.po2nPnon_S = MorphemeState.non_terminal("po2nPnon_S", self.pnon)
self.po2nNom_ST = MorphemeState.terminal("po2nNom_ST", self.nom)
self.po2nDat_ST = MorphemeState.terminal("po2nDat_ST", self.dat)
self.po2nAbl_ST = MorphemeState.terminal("po2nAbl_ST", self.abl)
self.po2nLoc_ST = MorphemeState.terminal("po2nLoc_ST", self.loc)
self.po2nIns_ST = MorphemeState.terminal("po2nIns_ST", self.ins)
self.po2nAcc_ST = MorphemeState.terminal("po2nAcc_ST", self.acc)
self.po2nGen_ST = MorphemeState.terminal("po2nGen_ST", self.gen)
self.po2nEqu_ST = MorphemeState.terminal("po2nEqu_ST", self.equ)
self.verbRoot_S = MorphemeState.builder("verbRoot_S", self.verb, pos_root=True).build()
self.verbLastVowelDropModRoot_S = MorphemeState.builder("verbLastVowelDropModRoot_S",
self.verb, pos_root=True).build()
self.verbLastVowelDropUnmodRoot_S = MorphemeState.builder("verbLastVowelDropUnmodRoot_S", self.verb,
pos_root=True).build()
self.vA1sg_ST = MorphemeState.terminal("vA1sg_ST", self.a1sg)
self.vA2sg_ST = MorphemeState.terminal("vA2sg_ST", self.a2sg)
self.vA3sg_ST = MorphemeState.terminal("vA3sg_ST", self.a3sg)
self.vA1pl_ST = MorphemeState.terminal("vA1pl_ST", self.a1pl)
self.vA2pl_ST = MorphemeState.terminal("vA2pl_ST", self.a2pl)
self.vA3pl_ST = MorphemeState.terminal("vA3pl_ST", self.a3pl)
self.vPast_S = MorphemeState.non_terminal("vPast_S", self.past)
self.vNarr_S = MorphemeState.non_terminal("vNarr_S", self.narr)
self.vCond_S = MorphemeState.non_terminal("vCond_S", self.cond)
self.vCondAfterPerson_ST = MorphemeState.terminal("vCondAfterPerson_ST", self.cond)
self.vPastAfterTense_S = MorphemeState.non_terminal("vPastAfterTense_S", self.past)
self.vNarrAfterTense_S = MorphemeState.non_terminal("vNarrAfterTense_S", self.narr)
self.vPastAfterTense_ST = MorphemeState.terminal("vPastAfterTense_ST", self.past)
self.vNarrAfterTense_ST = MorphemeState.terminal("vNarrAfterTense_ST", self.narr)
self.vCond_ST = MorphemeState.terminal("vCond_ST", self.cond)
self.vProgYor_S = MorphemeState.non_terminal("vProgYor_S", self.prog1)
self.vProgMakta_S = MorphemeState.non_terminal("vProgMakta_S", self.prog2)
self.vFut_S = MorphemeState.non_terminal("vFut_S", self.fut)
self.vCop_ST = MorphemeState.terminal("vCop_ST", self.cop)
self.vCopBeforeA3pl_S = MorphemeState.non_terminal("vCopBeforeA3pl_S", self.cop)
self.vNeg_S = MorphemeState.non_terminal("vNeg_S", self.neg)
self.vUnable_S = MorphemeState.non_terminal("vUnable_S", self.unable)
self.vNegProg1_S = MorphemeState.non_terminal("vNegProg1_S", self.neg)
self.vUnableProg1_S = MorphemeState.non_terminal("vUnableProg1_S", self.unable)
self.vImp_S = MorphemeState.non_terminal("vImp_S", self.imp)
self.vImpYemekYi_S = MorphemeState.non_terminal("vImpYemekYi_S", self.imp)
self.vImpYemekYe_S = MorphemeState.non_terminal("vImpYemekYe_S", self.imp)
self.vCausT_S = MorphemeState.non_terminal_derivative("vCaus_S", self.caus)
self.vCausTir_S = MorphemeState.non_terminal_derivative("vCausTır_S", self.caus) # original is vCausTır_S
self.vRecip_S = MorphemeState.non_terminal_derivative("vRecip_S", self.recip)
self.vImplicitRecipRoot_S = MorphemeState.builder("vImplicitRecipRoot_S", self.verb, pos_root=True).build()
self.vReflex_S = MorphemeState.non_terminal_derivative("vReflex_S", self.reflex)
self.vImplicitReflexRoot_S = MorphemeState.builder("vImplicitReflexRoot_S", self.verb, pos_root=True).build()
self.verbRoot_VowelDrop_S = MorphemeState.builder("verbRoot_VowelDrop_S", self.verb, pos_root=True).build()
self.vAor_S = MorphemeState.non_terminal("vAor_S", self.aor)
self.vAorNeg_S = MorphemeState.non_terminal("vAorNeg_S", self.aor)
self.vAorNegEmpty_S = MorphemeState.non_terminal("vAorNegEmpty_S", self.aor)
self.vAorPartNeg_S = MorphemeState.non_terminal_derivative("vAorPartNeg_S", self.aorPart)
self.vAorPart_S = MorphemeState.non_terminal_derivative("vAorPart_S", self.aorPart)
self.vAble_S = MorphemeState.non_terminal_derivative("vAble_S", self.able)
self.vAbleNeg_S = MorphemeState.non_terminal_derivative("vAbleNeg_S", self.able)
self.vAbleNegDerivRoot_S = MorphemeState.builder("vAbleNegDerivRoot_S", self.verb, pos_root=True).build()
self.vPass_S = MorphemeState.non_terminal_derivative("vPass_S", self.pass_)
self.vOpt_S = MorphemeState.non_terminal("vOpt_S", self.opt)
self.vDesr_S = MorphemeState.non_terminal("vDesr_S", self.desr)
self.vNeces_S = MorphemeState.non_terminal("vNeces_S", self.neces)
self.vInf1_S = MorphemeState.non_terminal_derivative("vInf1_S", self.inf1)
self.vInf2_S = MorphemeState.non_terminal_derivative("vInf2_S", self.inf2)
self.vInf3_S = MorphemeState.non_terminal_derivative("vInf3_S", self.inf3)
self.vAgt_S = MorphemeState.non_terminal_derivative("vAgt_S", self.agt)
self.vActOf_S = MorphemeState.non_terminal_derivative("vActOf_S", self.actOf)
self.vPastPart_S = MorphemeState.non_terminal_derivative("vPastPart_S", self.pastPart)
self.vFutPart_S = MorphemeState.non_terminal_derivative("vFutPart_S", self.futPart)
self.vPresPart_S = MorphemeState.non_terminal_derivative("vPresPart_S", self.presPart)
self.vNarrPart_S = MorphemeState.non_terminal_derivative("vNarrPart_S", self.narrPart)
self.vFeelLike_S = MorphemeState.non_terminal_derivative("vFeelLike_S", self.feelLike)
self.vNotState_S = MorphemeState.non_terminal_derivative("vNotState_S", self.notState)
self.vEverSince_S = MorphemeState.non_terminal_derivative("vEverSince_S", self.everSince)
self.vRepeat_S = MorphemeState.non_terminal_derivative("vRepeat_S", self.repeat)
self.vAlmost_S = MorphemeState.non_terminal_derivative("vAlmost_S", self.almost)
self.vHastily_S = MorphemeState.non_terminal_derivative("vHastily_S", self.hastily)
self.vStay_S = MorphemeState.non_terminal_derivative("vStay_S", self.stay)
self.vStart_S = MorphemeState.non_terminal_derivative("vStart_S", self.start)
self.vWhile_S = MorphemeState.non_terminal_derivative("vWhile_S", self.while_)
self.vWhen_S = MorphemeState.non_terminal_derivative("vWhen_S", self.when)
self.vAsIf_S = MorphemeState.non_terminal_derivative("vAsIf_S", self.asIf)
self.vSinceDoingSo_S = MorphemeState.non_terminal_derivative("vSinceDoingSo_S", self.sinceDoingSo)
self.vAsLongAs_S = MorphemeState.non_terminal_derivative("vAsLongAs_S", self.asLongAs)
self.vByDoingSo_S = MorphemeState.non_terminal_derivative("vByDoingSo_S", self.byDoingSo)
self.vAdamantly_S = MorphemeState.non_terminal_derivative("vAdamantly_S", self.adamantly)
self.vAfterDoing_S = MorphemeState.non_terminal_derivative("vAfterDoing_S", self.afterDoingSo)
self.vWithoutHavingDoneSo_S = MorphemeState.non_terminal_derivative("vWithoutHavingDoneSo_S",
self.withoutHavingDoneSo)
self.vWithoutBeingAbleToHaveDoneSo_S = MorphemeState.non_terminal_derivative("vWithoutBeingAbleToHaveDoneSo_S",
self.withoutBeingAbleToHaveDoneSo)
self.vDeYeRoot_S = MorphemeState.builder("vDeYeRoot_S", self.verb, pos_root=True).build()
self.qPresent_S = MorphemeState.non_terminal("qPresent_S", self.pres)
self.qPast_S = MorphemeState.non_terminal("qPast_S", self.past)
self.qNarr_S = MorphemeState.non_terminal("qNarr_S", self.narr)
self.qA1sg_ST = MorphemeState.terminal("qA1sg_ST", self.a1sg)
self.qA2sg_ST = MorphemeState.terminal("qA2sg_ST", self.a2sg)
self.qA3sg_ST = MorphemeState.terminal("qA3sg_ST", self.a3sg)
self.qA1pl_ST = MorphemeState.terminal("qA1pl_ST", self.a1pl)
self.qA2pl_ST = MorphemeState.terminal("qA2pl_ST", self.a2pl)
self.qA3pl_ST = MorphemeState.terminal("qA3pl_ST", self.a3pl)
self.qCopBeforeA3pl_S = MorphemeState.non_terminal("qCopBeforeA3pl_S", self.cop)
self.qCop_ST = MorphemeState.terminal("qCop_ST", self.cop)
self.questionRoot_S = MorphemeState.builder("questionRoot_S", self.ques, pos_root=True).build()
self.imekRoot_S = MorphemeState.builder("imekRoot_S", self.verb, pos_root=True).build()
self.imekPast_S = MorphemeState.non_terminal("imekPast_S", self.past)
self.imekNarr_S = MorphemeState.non_terminal("imekNarr_S", self.narr)
self.imekCond_S = MorphemeState.non_terminal("imekCond_S", self.cond)
self.imekA1sg_ST = MorphemeState.terminal("imekA1sg_ST", self.a1sg)
self.imekA2sg_ST = MorphemeState.terminal("imekA2sg_ST", self.a2sg)
self.imekA3sg_ST = MorphemeState.terminal("imekA3sg_ST", self.a3sg)
self.imekA1pl_ST = MorphemeState.terminal("imekA1pl_ST", self.a1pl)
self.imekA2pl_ST = MorphemeState.terminal("imekA2pl_ST", self.a2pl)
self.imekA3pl_ST = MorphemeState.terminal("imekA3pl_ST", self.a3pl)
self.imekCop_ST = MorphemeState.terminal("qCop_ST", self.cop)
self.item_root_state_map = {}
self.lexicon = lexicon
self.make_graph()
self.stem_transitions = StemTransitionsMapBased(lexicon, self)
def get_stem_transitions(self) -> StemTransitionsMapBased:
return self.stem_transitions
def get_root_lexicon(self) -> RootLexicon:
return self.lexicon
def add_to_morpheme_map(self, morpheme: Morpheme) -> Morpheme:
self.morpheme_map[morpheme.id_] = morpheme
return morpheme
def make_graph(self):
self.map_special_items_to_root_state()
self.connect_noun_states()
self.connect_proper_nouns_and_abbreviations()
self.connect_adjective_states()
self.connect_numeral_states()
self.connect_verb_after_noun_adj_states()
self.connect_pronoun_states()
self.connect_verb_after_pronoun()
self.connect_verbs()
self.connect_question()
self.connect_adverbs()
self.connect_last_vowel_drop_words()
self.connect_postpositives()
self.connect_imek()
self.handle_post_processing_connections()
def map_special_items_to_root_state(self):
self.item_root_state_map["değil_Verb"] = self.nVerbDegil_S
self.item_root_state_map["imek_Verb"] = self.imekRoot_S
self.item_root_state_map["su_Noun"] = self.nounSuRoot_S
self.item_root_state_map["akarsu_Noun"] = self.nounSuRoot_S
self.item_root_state_map["öyle_Adv"] = self.advForVerbDeriv_ST
self.item_root_state_map["böyle_Adv"] = self.advForVerbDeriv_ST
self.item_root_state_map["şöyle_Adv"] = self.advForVerbDeriv_ST
def connect_noun_states(self):
self.noun_S.add_empty(self.a3sg_S, Conditions.not_have(r_attribute=RootAttribute.ImplicitPlural))
self.noun_S.add_(self.a3pl_S, "lAr", Conditions.not_have(r_attribute=RootAttribute.ImplicitPlural).and_(
Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg)))
self.noun_S.add_empty(self.a3pl_S, Conditions.has(r_attribute=RootAttribute.ImplicitPlural))
self.nounCompoundRoot_S.add_empty(self.a3sgCompound_S,
Conditions.has(r_attribute=RootAttribute.CompoundP3sgRoot))
self.a3sgCompound_S.add_empty(self.pnonCompound_S)
self.a3sgCompound_S.add_(self.p3pl_S, "lArI")
self.pnonCompound_S.add_empty(self.nom_S)
self.nom_S.add_(self.become_S, "lAş")
self.nom_S.add_(self.acquire_S, "lAn")
self.nom_S.add_(self.with_S, "lI", (Conditions.ContainsMorpheme((self.with_, self.without))).not_())
self.nom_S.add_(self.without_S, "sIz", (Conditions.ContainsMorpheme((self.with_, self.without))).not_())
containsNess: Conditions.ContainsMorpheme = Conditions.ContainsMorpheme((self.ness,))
self.nom_S.add_(self.ness_S, "lI~k", Conditions.not_(containsNess))
self.nom_S.add_(self.ness_S, "lI!ğ", Conditions.not_(containsNess))
self.nom_S.add_(self.agt_S, ">cI", Conditions.not_(Conditions.ContainsMorpheme((self.agt,))))
self.nom_S.add_(self.justLike_S, "+msI", Conditions.not_(Conditions.ContainsMorpheme((self.justLike,))))
self.nom_S.add_(self.dim_S, ">cI~k", Conditions.HAS_NO_SURFACE.and_not(
Conditions.ContainsMorpheme((self.dim,))))
self.nom_S.add_(self.dim_S, ">cI!ğ", Conditions.HAS_NO_SURFACE.and_not(
Conditions.ContainsMorpheme((self.dim,))))
self.nom_S.add_(self.dim_S, "cAğIz", Conditions.HAS_NO_SURFACE)
self.nounCompoundRoot_S.add_(self.a3plCompound_S, "lAr",
Conditions.has(r_attribute=RootAttribute.CompoundP3sgRoot))
self.nounCompoundRoot_S.add_(self.a3plCompound2_S, "lArI",
Conditions.has(r_attribute=RootAttribute.CompoundP3sgRoot))
self.a3plCompound_S.add_(self.p3sg_S, "I").add_(self.p2sg_S, "In").add_(self.p1sg_S, "Im").add_(
self.p1pl_S, "ImIz").add_(self.p2pl_S, "InIz").add_(self.p3pl_S, "I")
self.a3plCompound2_S.add_empty(self.pnonCompound2_S)
self.pnonCompound2_S.add_empty(self.nom_ST)
rootIsAbbrv: Conditions.Condition = Conditions.SecondaryPosIs(SecondaryPos.Abbreviation)
possessionCond: Conditions.Condition = Conditions.not_have(r_attribute=RootAttribute.FamilyMember).and_not(
rootIsAbbrv)
self.a3sg_S.add_empty(self.pnon_S,
Conditions.not_have(r_attribute=RootAttribute.FamilyMember)
).add_(self.p1sg_S,
"Im",
possessionCond
).add_(self.p2sg_S,
"In",
possessionCond.and_not(
Conditions.PreviousGroupContainsMorpheme((self.justLike,)))).add_(
self.p3sg_S, "+sI", possessionCond).add_empty(self.p3sg_S,
Conditions.has(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.p1pl_S, "ImIz", possessionCond).add_(
self.p2pl_S, "InIz",
possessionCond.and_not(Conditions.PreviousGroupContainsMorpheme((self.justLike,)))).add_(
self.p3pl_S, "lArI", possessionCond)
self.a3pl_S.add_empty(self.pnon_S, Conditions.not_have(r_attribute=RootAttribute.FamilyMember))
self.a3pl_S.add_(self.p1sg_S, "Im", possessionCond).add_(self.p2sg_S, "In", possessionCond).add_empty(
self.p1sg_S, Conditions.has(r_attribute=RootAttribute.ImplicitP1sg)).add_empty(
self.p2sg_S, Conditions.has(r_attribute=RootAttribute.ImplicitP2sg)).add_(self.p3sg_S, "I",
possessionCond).add_(
self.p1pl_S, "ImIz", possessionCond).add_(self.p2pl_S, "InIz", possessionCond).add_(
self.p3pl_S, "I", possessionCond)
self.nounSuRoot_S.add_empty(self.a3sgSu_S)
self.nounSuRoot_S.add_(self.a3pl_S, "lar")
self.a3sgSu_S.add_empty(self.pnon_S).add_(self.p1sg_S, "yum").add_(self.p2sg_S, "yun").add_(self.p3sg_S,
"yu").add_(
self.p1pl_S, "yumuz").add_(self.p2pl_S, "yunuz").add_(self.p3pl_S, "lArI")
self.pnon_S.add_empty(self.nom_ST, Conditions.not_have(r_attribute=RootAttribute.FamilyMember))
equCond: Conditions.Condition = Conditions.prvious_morpheme_is(self.a3pl).or_(
(Conditions.ContainsMorpheme(
(self.adj, self.futPart, self.presPart, self.narrPart, self.pastPart))).not_()).or_(
Conditions.ContainsMorphemeSequence((self.able, self.verb, self.pastPart)))
self.pnon_S.add_(self.dat_ST, "+yA", Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.abl_ST, ">dAn", Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.loc_ST, ">dA", Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.acc_ST, "+yI", Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.gen_ST, "+nIn", Conditions.previous_state_is_not(self.a3sgSu_S)).add_(
self.gen_ST, "yIn", Conditions.previous_state_is(self.a3sgSu_S)).add_(
self.equ_ST, ">cA", Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg).and_(equCond)).add_(
self.ins_ST, "+ylA")
self.pnon_S.add_(self.dat_ST, "+nA", Conditions.has(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.abl_ST, "+ndAn", Conditions.has(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.loc_ST, "+ndA", Conditions.has(r_attribute=RootAttribute.CompoundP3sg)).add_(
self.equ_ST, "+ncA", Conditions.has(r_attribute=RootAttribute.CompoundP3sg).and_(equCond)).add_(
self.acc_ST, "+nI", Conditions.has(r_attribute=RootAttribute.CompoundP3sg))
self.pnon_S.add_empty(self.dat_ST, Conditions.has(r_attribute=RootAttribute.ImplicitDative))
self.p1sg_S.add_empty(self.nom_ST).add_(self.dat_ST, "A").add_(self.loc_ST, "dA").add_(self.abl_ST, "dAn").add_(
self.ins_ST, "lA").add_(self.gen_ST, "In").add_(
self.equ_ST, "cA", equCond.or_(Conditions.ContainsMorpheme((self.pastPart,)))).add_(self.acc_ST, "I")
self.p2sg_S.add_empty(self.nom_ST).add_(self.dat_ST, "A").add_(self.loc_ST, "dA").add_(self.abl_ST, "dAn").add_(
self.ins_ST, "lA").add_(self.gen_ST, "In").add_(
self.equ_ST, "cA", equCond.or_(Conditions.ContainsMorpheme((self.pastPart,)))).add_(self.acc_ST, "I")
self.p3sg_S.add_empty(self.nom_ST).add_(self.dat_ST, "nA").add_(self.loc_ST, "ndA").add_(self.abl_ST,
"ndAn").add_(
self.ins_ST, "ylA").add_(self.gen_ST, "nIn").add_(
self.equ_ST, "ncA", equCond.or_(Conditions.ContainsMorpheme((self.pastPart,)))).add_(self.acc_ST, "nI")
self.p1pl_S.add_empty(self.nom_ST).add_(self.dat_ST, "A").add_(self.loc_ST, "dA").add_(self.abl_ST, "dAn").add_(
self.ins_ST, "lA").add_(self.gen_ST, "In").add_(
self.equ_ST, "cA", equCond.or_(Conditions.ContainsMorpheme((self.pastPart,)))).add_(self.acc_ST, "I")
self.p2pl_S.add_empty(self.nom_ST).add_(self.dat_ST, "A").add_(self.loc_ST, "dA").add_(self.abl_ST, "dAn").add_(
self.ins_ST, "lA").add_(self.gen_ST, "In").add_(
self.equ_ST, "cA", equCond.or_(Conditions.ContainsMorpheme((self.pastPart,)))).add_(self.acc_ST, "I")
self.p3pl_S.add_empty(self.nom_ST).add_(self.dat_ST, "nA").add_(self.loc_ST, "ndA").add_(self.abl_ST,
"ndAn").add_(
self.ins_ST, "ylA").add_(self.gen_ST, "nIn").add_(self.equ_ST, "+ncA").add_(self.acc_ST, "nI")
self.nom_ST.add_(self.dim_S, ">cI~k", Conditions.HAS_NO_SURFACE.and_not(rootIsAbbrv))
self.nom_ST.add_(self.dim_S, ">cI!ğ", Conditions.HAS_NO_SURFACE.and_not(rootIsAbbrv))
self.nom_ST.add_(self.dim_S, "cAğIz", Conditions.HAS_NO_SURFACE.and_not(rootIsAbbrv))
self.dim_S.add_empty(self.noun_S)
emptyAdjNounSeq: Conditions.Condition = Conditions.ContainsMorphemeSequence(
(self.adj, self.zero, self.noun, self.a3sg, self.pnon, self.nom))
self.nom_ST.add_(self.ness_S, "lI~k", Conditions.CURRENT_GROUP_EMPTY.and_not(containsNess).and_not(
emptyAdjNounSeq).and_not(rootIsAbbrv))
self.nom_ST.add_(self.ness_S, "lI!ğ", Conditions.CURRENT_GROUP_EMPTY.and_not(containsNess).and_not(
emptyAdjNounSeq).and_not(rootIsAbbrv))
self.ness_S.add_empty(self.noun_S)
self.nom_ST.add_(self.agt_S, ">cI",
Conditions.CURRENT_GROUP_EMPTY.and_not(Conditions.ContainsMorpheme((self.adj, self.agt))))
self.agt_S.add_empty(self.noun_S)
noun2VerbZeroDerivationCondition: Conditions.Condition = Conditions.HAS_TAIL.and_not(
Conditions.CURRENT_GROUP_EMPTY.and_(Conditions.LastDerivationIs(self.adjZeroDeriv_S)))
self.nom_ST.add_empty(self.nounZeroDeriv_S, noun2VerbZeroDerivationCondition)
self.dat_ST.add_empty(self.nounZeroDeriv_S, noun2VerbZeroDerivationCondition)
self.abl_ST.add_empty(self.nounZeroDeriv_S, noun2VerbZeroDerivationCondition)
self.loc_ST.add_empty(self.nounZeroDeriv_S, noun2VerbZeroDerivationCondition)
self.ins_ST.add_empty(self.nounZeroDeriv_S, noun2VerbZeroDerivationCondition)
self.gen_ST.add_empty(self.nounZeroDeriv_S, noun2VerbZeroDerivationCondition)
self.nounZeroDeriv_S.add_empty(self.nVerb_S)
noSurfaceAfterDerivation: Conditions.Condition = Conditions.NoSurfaceAfterDerivation()
self.nom_ST.add_(self.with_S, "lI", noSurfaceAfterDerivation.and_not(
Conditions.ContainsMorpheme((self.with_, self.without))).and_not(rootIsAbbrv))
self.nom_ST.add_(self.without_S, "sIz", noSurfaceAfterDerivation.and_not(
Conditions.ContainsMorpheme((self.with_, self.without, self.inf1))).and_not(rootIsAbbrv))
self.nom_ST.add_(self.justLike_S, "+msI", noSurfaceAfterDerivation.and_not(
Conditions.ContainsMorpheme((self.justLike, self.futPart, self.pastPart, self.presPart, self.adj))).and_not(
rootIsAbbrv))
self.nom_ST.add_(self.justLike_S, "ImsI",
Conditions.not_have(p_attribute=PhoneticAttribute.LastLetterVowel).and_(
noSurfaceAfterDerivation).and_not(
Conditions.ContainsMorpheme(
(self.justLike, self.futPart, self.pastPart, self.presPart, self.adj))).and_not(
rootIsAbbrv))
self.nom_ST.add_(self.related_S, "sAl", noSurfaceAfterDerivation.and_not(
Conditions.ContainsMorpheme((self.with_, self.without, self.related))).and_not(rootIsAbbrv))
self.with_S.add_empty(self.adjectiveRoot_ST)
self.without_S.add_empty(self.adjectiveRoot_ST)
self.related_S.add_empty(self.adjectiveRoot_ST)
self.justLike_S.add_empty(self.adjectiveRoot_ST)
notRelRepetition: Conditions.Condition = (Conditions.HasTailSequence((self.rel, self.adj, self.zero, self.noun,
self.a3sg, self.pnon, self.loc))).not_()
self.loc_ST.add_(self.rel_S, "ki", notRelRepetition)
self.rel_S.add_empty(self.adjectiveRoot_ST)
time: Conditions.Condition = Conditions.CURRENT_GROUP_EMPTY.and_(Conditions.SecondaryPosIs(SecondaryPos.Time))
dun: DictionaryItem = self.lexicon.get_item_by_id("dün_Noun_Time")
gun: DictionaryItem = self.lexicon.get_item_by_id("gün_Noun_Time")
bugun: DictionaryItem = self.lexicon.get_item_by_id("bugün_Noun_Time")
ileri: DictionaryItem = self.lexicon.get_item_by_id("ileri_Noun")
geri: DictionaryItem = self.lexicon.get_item_by_id("geri_Noun")
ote: DictionaryItem = self.lexicon.get_item_by_id("öte_Noun")
beri: DictionaryItem = self.lexicon.get_item_by_id("beri_Noun")
time2: Conditions.Condition = Conditions.root_is_any((dun, gun, bugun))
self.nom_ST.add_(self.rel_S, "ki", time.and_not(time2))
self.nom_ST.add_(self.rel_S, "ki", Conditions.root_is_any((ileri, geri, ote, beri)))
self.nom_ST.add_(self.rel_S, "kü", time2.and_(time))
self.gen_ST.add_(self.relToPron_S, "ki")
self.relToPron_S.add_empty(self.pronAfterRel_S)
verbDeriv: Conditions.ContainsMorpheme = Conditions.ContainsMorpheme((self.inf1, self.inf2, self.inf3,
self.pastPart, self.futPart))
self.nom_ST.add_(self.become_S, "lAş",
noSurfaceAfterDerivation.and_not(Conditions.ContainsMorpheme((self.adj,))).and_not(
verbDeriv).and_not(rootIsAbbrv))
self.become_S.add_empty(self.verbRoot_S)
self.nom_ST.add_(self.acquire_S, "lAn",
noSurfaceAfterDerivation.and_not(Conditions.ContainsMorpheme((self.adj,))).and_not(
verbDeriv).and_not(rootIsAbbrv))
self.acquire_S.add_empty(self.verbRoot_S)
self.nounInf1Root_S.add_empty(self.a3sgInf1_S)
self.a3sgInf1_S.add_empty(self.pnonInf1_S)
self.pnonInf1_S.add_empty(self.nom_ST)
self.pnonInf1_S.add_(self.abl_ST, "tAn")
self.pnonInf1_S.add_(self.loc_ST, "tA")
self.pnonInf1_S.add_(self.ins_ST, "lA")
self.nounActOfRoot_S.add_empty(self.a3sgActOf_S)
self.nounActOfRoot_S.add_(self.a3plActOf_S, "lar")
self.a3sgActOf_S.add_empty(self.pnonActOf)
self.a3plActOf_S.add_empty(self.pnonActOf)
self.pnonActOf.add_empty(self.nom_ST)
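
    # Proper nouns, abbreviations and "no suffix" tokens (e.g. emoticons and
    # Roman numerals) get their own minimal entry points into noun inflection.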
def connect_proper_nouns_and_abbreviations(self):
self.nounProper_S.add_empty(self.a3sg_S)
self.nounProper_S.add_(self.a3pl_S, "lAr")
self.puncProperSeparator_S.add_empty(self.a3sg_S)
self.puncProperSeparator_S.add_(self.a3pl_S, "lAr")
self.nounAbbrv_S.add_empty(self.a3sg_S)
self.nounAbbrv_S.add_(self.a3pl_S, "lAr")
self.nounNoSuffix_S.add_empty(self.nounA3sgNoSuffix_S)
self.nounA3sgNoSuffix_S.add_empty(self.nounPnonNoSuffix_S)
self.nounPnonNoSuffix_S.add_empty(self.nounNomNoSuffix_ST)
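
    # Adjective inflection/derivation: zero derivation to noun and to the copular
    # verb, -CA adverb and "as if" forms, -CI, -(I)msI "just like", -lAş/-lAn,
    # -lIk, and participle + possessive forms after verbs (adjAfterVerb states).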
def connect_adjective_states(self):
self.adjectiveRoot_ST.add_empty(self.adjZeroDeriv_S, Conditions.HAS_TAIL)
self.adjZeroDeriv_S.add_empty(self.noun_S)
self.adjZeroDeriv_S.add_empty(self.nVerb_S)
self.adjectiveRoot_ST.add_(self.aLy_S, ">cA")
self.aLy_S.add_empty(self.advRoot_ST)
self.adjectiveRoot_ST.add_(self.aAsIf_S, ">cA",
(Conditions.ContainsMorpheme(
(self.asIf, self.ly, self.agt, self.with_, self.justLike))).not_())
self.aAsIf_S.add_empty(self.adjectiveRoot_ST)
self.adjectiveRoot_ST.add_(self.aAgt_S, ">cI",
(Conditions.ContainsMorpheme(
(self.asIf, self.ly, self.agt, self.with_, self.justLike))).not_())
self.aAgt_S.add_empty(self.noun_S)
self.adjectiveRoot_ST.add_(self.justLike_S, "+msI", (Conditions.NoSurfaceAfterDerivation()).and_(
(Conditions.ContainsMorpheme((self.justLike,))).not_()))
self.adjectiveRoot_ST.add_(self.justLike_S, "ImsI",
Conditions.not_have(p_attribute=PhoneticAttribute.LastLetterVowel).and_(
Conditions.NoSurfaceAfterDerivation()).and_(
Conditions.ContainsMorpheme((self.justLike,)).not_()))
self.adjectiveRoot_ST.add_(self.become_S, "lAş", Conditions.NoSurfaceAfterDerivation())
self.adjectiveRoot_ST.add_(self.acquire_S, "lAn", Conditions.NoSurfaceAfterDerivation())
c1: Conditions.Condition = Conditions.PreviousMorphemeIsAny((self.futPart, self.pastPart))
self.adjAfterVerb_S.add_empty(self.aPnon_ST, c1)
self.adjAfterVerb_S.add_(self.aP1sg_ST, "Im", c1)
self.adjAfterVerb_S.add_(self.aP2sg_ST, "In", c1)
self.adjAfterVerb_S.add_(self.aP3sg_ST, "I", c1)
self.adjAfterVerb_S.add_(self.aP1pl_ST, "ImIz", c1)
self.adjAfterVerb_S.add_(self.aP2pl_ST, "InIz", c1)
self.adjAfterVerb_S.add_(self.aP3pl_ST, "lArI", c1)
self.adjectiveRoot_ST.add_(self.ness_S, "lI~k")
self.adjectiveRoot_ST.add_(self.ness_S, "lI!ğ")
self.adjAfterVerb_ST.add_(self.ness_S, "lI~k", Conditions.PreviousMorphemeIs(self.aorPart))
self.adjAfterVerb_ST.add_(self.ness_S, "lI!ğ", Conditions.PreviousMorphemeIs(self.aorPart))
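
    # Numerals: -lIk derivation, zero derivation to noun / copular verb, and the
    # "just like" (-msI / -ImsI) derivation.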
def connect_numeral_states(self):
self.numeralRoot_ST.add_(self.ness_S, "lI~k")
self.numeralRoot_ST.add_(self.ness_S, "lI!ğ")
self.numeralRoot_ST.add_empty(self.numZeroDeriv_S, Conditions.HAS_TAIL)
self.numZeroDeriv_S.add_empty(self.noun_S)
self.numZeroDeriv_S.add_empty(self.nVerb_S)
self.numeralRoot_ST.add_(self.justLike_S, "+msI", Conditions.NoSurfaceAfterDerivation().and_(
Conditions.ContainsMorpheme((self.justLike,)).not_()))
self.numeralRoot_ST.add_(self.justLike_S, "ImsI",
Conditions.not_have(p_attribute=PhoneticAttribute.LastLetterVowel).and_(
Conditions.NoSurfaceAfterDerivation()).and_(
Conditions.ContainsMorpheme((self.justLike,)).not_()))
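
    # Copula after nouns and adjectives (nominal predicates): present, past
    # (+y>dI), evidential (+ymIş), conditional (+ysA) and "iken", plus the
    # negative predicate "değil". Illustrative: öğretmen-di-m "I was a teacher"
    # roughly follows nVerb_S -> nPast_S "+y>dI" -> nA1sg_ST "m".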
def connect_verb_after_noun_adj_states(self):
self.nVerb_S.add_empty(self.nPresent_S)
self.nVerb_S.add_(self.nPast_S, "+y>dI")
self.nVerb_S.add_(self.nNarr_S, "+ymIş")
self.nVerb_S.add_(self.nCond_S, "+ysA")
self.nVerb_S.add_(self.vWhile_S, "+yken")
degilRoot: DictionaryItem = self.lexicon.get_item_by_id("değil_Verb")
self.nVerbDegil_S.add_empty(self.nNeg_S, Conditions.root_is(degilRoot))
self.nNeg_S.copy_outgoing_transitions_from(self.nVerb_S)
noFamily: Conditions.Condition = Conditions.not_have(r_attribute=RootAttribute.FamilyMember)
verbDeriv: Conditions.ContainsMorpheme = Conditions.ContainsMorpheme(
(self.inf1, self.inf2, self.inf3, self.pastPart, self.futPart))
allowA1sgTrans: Conditions.Condition = noFamily.and_not(
Conditions.ContainsMorphemeSequence((self.p1sg, self.nom))).and_not(verbDeriv)
allowA2sgTrans: Conditions.Condition = noFamily.and_not(
Conditions.ContainsMorphemeSequence((self.p2sg, self.nom))).and_not(verbDeriv)
allowA3plTrans: Conditions.Condition = noFamily.and_not(
Conditions.PreviousGroupContains((self.a3pl_S,))).and_not(
Conditions.ContainsMorphemeSequence((self.p3pl, self.nom))).and_not(verbDeriv)
allowA2plTrans: Conditions.Condition = noFamily.and_not(
Conditions.ContainsMorphemeSequence((self.p2pl, self.nom))).and_not(verbDeriv)
allowA1plTrans: Conditions.Condition = noFamily.and_not(
Conditions.ContainsMorphemeSequence((self.p1sg, self.nom))).and_not(
Conditions.ContainsMorphemeSequence((self.p1pl, self.nom))).and_not(verbDeriv)
self.nPresent_S.add_(self.nA1sg_ST, "+yIm", allowA1sgTrans)
self.nPresent_S.add_(self.nA2sg_ST, "sIn", allowA2sgTrans)
self.nPresent_S.add_empty(self.nA3sg_S)
self.nPresent_S.add_empty(self.nA3sg_ST, Conditions.root_is(degilRoot))
self.nPresent_S.add_(self.nA3pl_ST, "lAr", Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg).and_not(
Conditions.PreviousGroupContainsMorpheme((self.inf1,))).and_(allowA3plTrans))
self.nPast_S.add_(self.nA1sg_ST, "m", allowA1sgTrans)
self.nNarr_S.add_(self.nA1sg_ST, "Im", allowA1sgTrans)
self.nPast_S.add_(self.nA2sg_ST, "n", allowA2sgTrans)
self.nNarr_S.add_(self.nA2sg_ST, "sIn", allowA2sgTrans)
self.nPast_S.add_(self.nA1pl_ST, "k", allowA1plTrans)
self.nNarr_S.add_(self.nA1pl_ST, "Iz", allowA1plTrans)
self.nPresent_S.add_(self.nA1pl_ST, "+yIz", allowA1plTrans)
self.nPast_S.add_(self.nA2pl_ST, "InIz", allowA2plTrans)
self.nNarr_S.add_(self.nA2pl_ST, "sInIz", allowA2plTrans)
self.nPresent_S.add_(self.nA2pl_ST, "sInIz", allowA2plTrans)
self.nPast_S.add_(self.nA3pl_ST, "lAr",
Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg).and_(allowA3plTrans))
self.nNarr_S.add_(self.nA3pl_ST, "lAr",
Conditions.not_have(r_attribute=RootAttribute.CompoundP3sg).and_(allowA3plTrans))
self.nPast_S.add_empty(self.nA3sg_ST)
self.nNarr_S.add_empty(self.nA3sg_ST)
self.nNarr_S.add_(self.nCond_S, "sA")
self.nCond_S.add_(self.nA1sg_ST, "m", allowA1sgTrans)
self.nCond_S.add_(self.nA2sg_ST, "n", allowA2sgTrans)
self.nCond_S.add_(self.nA1pl_ST, "k", allowA1plTrans)
self.nCond_S.add_(self.nA2pl_ST, "nIz", allowA2plTrans)
self.nCond_S.add_empty(self.nA3sg_ST)
self.nCond_S.add_(self.nA3pl_ST, "lAr")
rejectNoCopula: Conditions.Condition = (
Conditions.CurrentGroupContainsAny((self.nPast_S, self.nCond_S, self.nCopBeforeA3pl_S))).not_()
self.nA1sg_ST.add_(self.nCop_ST, "dIr", rejectNoCopula)
self.nA2sg_ST.add_(self.nCop_ST, "dIr", rejectNoCopula)
self.nA1pl_ST.add_(self.nCop_ST, "dIr", rejectNoCopula)
self.nA2pl_ST.add_(self.nCop_ST, "dIr", rejectNoCopula)
self.nA3sg_S.add_(self.nCop_ST, ">dIr", rejectNoCopula)
self.nA3pl_ST.add_(self.nCop_ST, "dIr", rejectNoCopula)
asIfCond: Conditions.PreviousMorphemeIsAny = Conditions.PreviousMorphemeIsAny((self.narr,))
self.nA3sg_ST.add_(self.vAsIf_S, ">cAsInA", asIfCond)
self.nA1sg_ST.add_(self.vAsIf_S, ">cAsInA", asIfCond)
self.nA2sg_ST.add_(self.vAsIf_S, ">cAsInA", asIfCond)
self.nA1pl_ST.add_(self.vAsIf_S, ">cAsInA", asIfCond)
self.nA2pl_ST.add_(self.vAsIf_S, ">cAsInA", asIfCond)
self.nA3pl_ST.add_(self.vAsIf_S, ">cAsInA", asIfCond)
self.nPresent_S.add_(self.nCopBeforeA3pl_S, ">dIr")
self.nCopBeforeA3pl_S.add_(self.nA3pl_ST, "lAr")
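
    # Pronoun inflection: personal, demonstrative, quantifier, question and
    # reflexive pronouns, with many root-specific restrictions (ben/sen/o/biz/
    # siz, kendi, kim, ne, ...). Illustrative: ben-im (genitive of "ben"),
    # kendi-m-e (reflexive possessive + dative).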
def connect_pronoun_states(self):
ben: DictionaryItem = self.lexicon.get_item_by_id("ben_Pron_Pers")
sen: DictionaryItem = self.lexicon.get_item_by_id("sen_Pron_Pers")
o: DictionaryItem = self.lexicon.get_item_by_id("o_Pron_Pers")
biz: DictionaryItem = self.lexicon.get_item_by_id("biz_Pron_Pers")
siz: DictionaryItem = self.lexicon.get_item_by_id("siz_Pron_Pers")
falan: DictionaryItem = self.lexicon.get_item_by_id("falan_Pron_Pers")
falanca: DictionaryItem = self.lexicon.get_item_by_id("falanca_Pron_Pers")
self.pronPers_S.add_empty(self.pA1sg_S, Conditions.root_is(ben))
self.pronPers_S.add_empty(self.pA2sg_S, Conditions.root_is(sen))
self.pronPers_S.add_empty(self.pA3sg_S, Conditions.root_is_any((o, falan, falanca)))
self.pronPers_S.add_(self.pA3pl_S, "nlAr", Conditions.root_is(o))
self.pronPers_S.add_(self.pA3pl_S, "lAr", Conditions.root_is_any((falan, falanca)))
self.pronPers_S.add_empty(self.pA1pl_S, Conditions.root_is(biz))
self.pronPers_S.add_(self.pA1pl_S, "lAr", Conditions.root_is(biz))
self.pronPers_S.add_empty(self.pA2pl_S, Conditions.root_is(siz))
self.pronPers_S.add_(self.pA2pl_S, "lAr", Conditions.root_is(siz))
self.pronPers_Mod_S.add_empty(self.pA1sgMod_S, Conditions.root_is(ben))
self.pronPers_Mod_S.add_empty(self.pA2sgMod_S, Conditions.root_is(sen))
self.pA1sgMod_S.add_empty(self.pPnonMod_S)
self.pA2sgMod_S.add_empty(self.pPnonMod_S)
self.pPnonMod_S.add_(self.pDat_ST, "A")
self.pA1sg_S.add_empty(self.pPnon_S)
self.pA2sg_S.add_empty(self.pPnon_S)
self.pA3sg_S.add_empty(self.pPnon_S)
self.pA1pl_S.add_empty(self.pPnon_S)
self.pA2pl_S.add_empty(self.pPnon_S)
self.pA3pl_S.add_empty(self.pPnon_S)
self.pronAfterRel_S.add_empty(self.pA3sgRel_S)
self.pronAfterRel_S.add_(self.pA3plRel_S, "lAr")
self.pA3sgRel_S.add_empty(self.pPnonRel_S)
self.pA3plRel_S.add_empty(self.pPnonRel_S)
self.pPnonRel_S.add_empty(self.pNom_ST)
self.pPnonRel_S.add_(self.pDat_ST, "+nA")
self.pPnonRel_S.add_(self.pAcc_ST, "+nI")
self.pPnonRel_S.add_(self.pAbl_ST, "+ndAn")
self.pPnonRel_S.add_(self.pLoc_ST, "+ndA")
self.pPnonRel_S.add_(self.pIns_ST, "+ylA")
self.pPnonRel_S.add_(self.pGen_ST, "+nIn")
bu: DictionaryItem = self.lexicon.get_item_by_id("bu_Pron_Demons")
su: DictionaryItem = self.lexicon.get_item_by_id("şu_Pron_Demons")
o_demons: DictionaryItem = self.lexicon.get_item_by_id("o_Pron_Demons")
self.pronDemons_S.add_empty(self.pA3sg_S)
self.pronDemons_S.add_(self.pA3pl_S, "nlAr")
birbiri: DictionaryItem = self.lexicon.get_item_by_id("birbiri_Pron_Quant")
biri: DictionaryItem = self.lexicon.get_item_by_id("biri_Pron_Quant")
bazi: DictionaryItem = self.lexicon.get_item_by_id("bazı_Pron_Quant")
bircogu: DictionaryItem = self.lexicon.get_item_by_id("birçoğu_Pron_Quant")
birkaci: DictionaryItem = self.lexicon.get_item_by_id("birkaçı_Pron_Quant")
beriki: DictionaryItem = self.lexicon.get_item_by_id("beriki_Pron_Quant")
cogu: DictionaryItem = self.lexicon.get_item_by_id("çoğu_Pron_Quant")
cumlesi: DictionaryItem = self.lexicon.get_item_by_id("cümlesi_Pron_Quant")
hep: DictionaryItem = self.lexicon.get_item_by_id("hep_Pron_Quant")
herbiri: DictionaryItem = self.lexicon.get_item_by_id("herbiri_Pron_Quant")
herkes: DictionaryItem = self.lexicon.get_item_by_id("herkes_Pron_Quant")
hicbiri: DictionaryItem = self.lexicon.get_item_by_id("hiçbiri_Pron_Quant")
hepsi: DictionaryItem = self.lexicon.get_item_by_id("hepsi_Pron_Quant")
kimi: DictionaryItem = self.lexicon.get_item_by_id("kimi_Pron_Quant")
kimse: DictionaryItem = self.lexicon.get_item_by_id("kimse_Pron_Quant")
oburku: DictionaryItem = self.lexicon.get_item_by_id("öbürkü_Pron_Quant")
oburu: DictionaryItem = self.lexicon.get_item_by_id("öbürü_Pron_Quant")
tumu: DictionaryItem = self.lexicon.get_item_by_id("tümü_Pron_Quant")
topu: DictionaryItem = self.lexicon.get_item_by_id("topu_Pron_Quant")
umum: DictionaryItem = self.lexicon.get_item_by_id("umum_Pron_Quant")
self.pronQuant_S.add_empty(self.pQuantA3sg_S,
Conditions.root_is_none((herkes, umum, hepsi, cumlesi, hep, tumu, birkaci, topu)))
self.pronQuant_S.add_(self.pQuantA3pl_S, "lAr", Conditions.root_is_none(
(hep, hepsi, birkaci, umum, cumlesi, cogu, bircogu, herbiri, tumu, hicbiri, topu, oburu)))
self.pronQuant_S.add_(self.pQuantA1pl_S, "lAr", Conditions.root_is_any((bazi,)))
self.pronQuant_S.add_(self.pQuantA2pl_S, "lAr", Conditions.root_is_any((bazi,)))
self.pronQuant_S.add_empty(self.pQuantA3pl_S, Conditions.root_is_any(
(herkes, umum, birkaci, hepsi, cumlesi, cogu, bircogu, tumu, topu)))
self.pronQuant_S.add_empty(self.a3sg_S, Conditions.root_is(kimse))
self.pronQuant_S.add_(self.a3pl_S, "lAr", Conditions.root_is_any((kimse,)))
self.pronQuant_S.add_empty(self.pQuantA1pl_S, Conditions.root_is_any(
(biri, bazi, birbiri, birkaci, herbiri, hep, kimi, cogu, bircogu, tumu, topu, hicbiri)))
self.pronQuant_S.add_empty(self.pQuantA2pl_S, Conditions.root_is_any(
(biri, bazi, birbiri, birkaci, herbiri, hep, kimi, cogu, bircogu, tumu, topu, hicbiri)))
self.pronQuantModified_S.add_empty(self.pQuantModA3pl_S)
self.pQuantModA3pl_S.add_(self.pP3pl_S, "lArI")
self.pQuantA3sg_S.add_empty(self.pP3sg_S, Conditions.root_is_any(
(biri, birbiri, kimi, herbiri, hicbiri, oburu, oburku, beriki)).and_(
Conditions.not_have(p_attribute=PhoneticAttribute.ModifiedPronoun)))
self.pQuantA3sg_S.add_(self.pP3sg_S, "sI",
Conditions.root_is_any((biri, bazi, kimi, birbiri, herbiri, hicbiri, oburku)).and_(
Conditions.not_have(p_attribute=PhoneticAttribute.ModifiedPronoun)))
self.pQuantA3pl_S.add_(self.pP3pl_S, "I", Conditions.root_is_any((biri, bazi, birbiri, kimi, oburku, beriki)))
self.pQuantA3pl_S.add_empty(self.pP3pl_S,
Conditions.root_is_any((hepsi, birkaci, cumlesi, cogu, tumu, topu, bircogu)))
self.pQuantA3pl_S.add_empty(self.pPnon_S, Conditions.root_is_any((herkes, umum, oburku, beriki)))
self.pQuantA1pl_S.add_(self.pP1pl_S, "ImIz")
self.pQuantA2pl_S.add_(self.pP2pl_S, "InIz")
ne: DictionaryItem = self.lexicon.get_item_by_id("ne_Pron_Ques")
nere: DictionaryItem = self.lexicon.get_item_by_id("nere_Pron_Ques")
kim: DictionaryItem = self.lexicon.get_item_by_id("kim_Pron_Ques")
self.pronQues_S.add_empty(self.pQuesA3sg_S)
self.pronQues_S.add_(self.pQuesA3pl_S, "lAr")
self.pQuesA3sg_S.add_empty(self.pPnon_S).add_(self.pP3sg_S, "+sI").add_(self.pP1sg_S, "Im",
Conditions.root_is_not(ne)).add_(
self.pP1sg_S, "yIm", Conditions.root_is(ne)).add_(self.pP2sg_S, "In", Conditions.root_is_not(ne)).add_(
self.pP2sg_S, "yIn", Conditions.root_is(ne)).add_(self.pP1pl_S, "ImIz", Conditions.root_is_not(ne)).add_(
self.pP1pl_S, "yImIz", Conditions.root_is(ne))
self.pQuesA3pl_S.add_empty(self.pPnon_S).add_(self.pP3sg_S, "I").add_(self.pP1sg_S, "Im").add_(self.pP1pl_S,
"ImIz")
kendi: DictionaryItem = self.lexicon.get_item_by_id("kendi_Pron_Reflex")
self.pronReflex_S.add_empty(self.pReflexA1sg_S).add_empty(self.pReflexA2sg_S).add_empty(
self.pReflexA3sg_S).add_empty(self.pReflexA1pl_S).add_empty(self.pReflexA2pl_S).add_empty(
self.pReflexA3pl_S)
self.pReflexA1sg_S.add_(self.pP1sg_S, "Im")
self.pReflexA2sg_S.add_(self.pP2sg_S, "In")
self.pReflexA3sg_S.add_(self.pP3sg_S, "+sI").add_empty(self.pP3sg_S)
self.pReflexA1pl_S.add_(self.pP1pl_S, "ImIz")
self.pReflexA2pl_S.add_(self.pP2pl_S, "InIz")
self.pReflexA3pl_S.add_(self.pP3pl_S, "lArI")
nGroup: Conditions.Condition = Conditions.root_is_none((ne, nere, falan, falanca, hep, herkes))
yGroup: Conditions.Condition = Conditions.root_is_any((ne, nere, falan, falanca, hep, herkes))
self.pPnon_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", Conditions.root_is_none(
(ben, sen, ne, nere, falan, falanca, herkes))).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(
self.pLoc_ST, "+ndA", nGroup).add_(self.pLoc_ST, ">dA", yGroup).add_(self.pAbl_ST, "+ndAn", nGroup).add_(
self.pAbl_ST, ">dAn", yGroup).add_(self.pGen_ST, "+nIn",
nGroup.and_(Conditions.root_is_none((biz, ben, sen)))).add_(
self.pGen_ST, "im", Conditions.root_is_any((ben, biz))).add_(self.pGen_ST, "in",
Conditions.root_is(sen)).add_(
self.pGen_ST, "+yIn", yGroup.and_(Conditions.root_is_none((biz,)))).add_(self.pEqu_ST, ">cA", yGroup).add_(
self.pEqu_ST, ">cA", nGroup).add_(self.pIns_ST, "+ylA", yGroup).add_(self.pIns_ST, "+nlA", nGroup).add_(
self.pIns_ST, "+nInlA", nGroup.and_(Conditions.root_is_any((bu, su, o, sen)))).add_(
self.pIns_ST, "inle", Conditions.root_is(siz)).add_(self.pIns_ST, "imle",
Conditions.root_is_any((biz, ben)))
conditionpP1sg_S: Conditions.Condition = Conditions.root_is_any((kim, ben, ne, nere, kendi))
self.pP1sg_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", nGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(self.pLoc_ST, "+ndA",
Conditions.root_is_any((kendi,))).add_(
self.pAbl_ST, "+ndAn", Conditions.root_is_any((kendi,))).add_(self.pEqu_ST, "+ncA",
Conditions.root_is_any((kendi,))).add_(
self.pIns_ST, "+nlA", conditionpP1sg_S).add_(self.pGen_ST, "+nIn", conditionpP1sg_S)
conditionP2sg: Conditions.Condition = Conditions.root_is_any((kim, sen, ne, nere, kendi))
self.pP2sg_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", nGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(self.pLoc_ST, "+ndA",
Conditions.root_is_any((kendi,))).add_(
self.pAbl_ST, "+ndAn", Conditions.root_is_any((kendi,))).add_(self.pEqu_ST, "+ncA",
Conditions.root_is_any((kendi,))).add_(
self.pIns_ST, "+nlA", conditionP2sg).add_(self.pGen_ST, "+nIn", conditionP2sg)
p3sgCond: Conditions.Condition = Conditions.root_is_any(
(kendi, kim, ne, nere, o, bazi, biri, birbiri, herbiri, hep, kimi, hicbiri))
self.pP3sg_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", nGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(self.pLoc_ST, "+ndA", p3sgCond).add_(
self.pAbl_ST, "+ndAn", p3sgCond).add_(self.pGen_ST, "+nIn", p3sgCond).add_(self.pEqu_ST, "ncA",
p3sgCond).add_(
self.pIns_ST, "+ylA", p3sgCond)
hepCnd: Conditions.Condition = Conditions.root_is_any((kendi, kim, ne, nere, biz, siz, biri, birbiri, birkaci,
herbiri, hep, kimi, cogu, bircogu, tumu, topu, bazi,
hicbiri))
self.pP1pl_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", nGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(self.pLoc_ST, "+ndA", hepCnd).add_(
self.pAbl_ST, "+ndAn", hepCnd).add_(self.pGen_ST, "+nIn", hepCnd).add_(self.pEqu_ST, "+ncA", hepCnd).add_(
self.pIns_ST, "+nlA", hepCnd)
self.pP2pl_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", nGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(self.pLoc_ST, "+ndA", hepCnd).add_(
self.pAbl_ST, "+ndAn", hepCnd).add_(self.pGen_ST, "+nIn", hepCnd).add_(self.pEqu_ST, "+ncA", hepCnd).add_(
self.pIns_ST, "+nlA", hepCnd)
hepsiCnd: Conditions.Condition = Conditions.root_is_any((kendi, kim, ne, nere, o, bazi, biri, herkes, umum,
birkaci, hepsi, cumlesi, cogu, bircogu, birbiri, tumu,
kimi, topu))
self.pP3pl_S.add_empty(self.pNom_ST).add_(self.pDat_ST, "+nA", nGroup).add_(self.pAcc_ST, "+nI", nGroup).add_(
self.pDat_ST, "+yA", yGroup).add_(self.pAcc_ST, "+yI", yGroup).add_(self.pLoc_ST, "+ndA", hepsiCnd).add_(
self.pAbl_ST, "+ndAn", hepsiCnd).add_(self.pGen_ST, "+nIn",
hepsiCnd.or_(Conditions.root_is_any((sen, siz)))).add_(
self.pEqu_ST, "+ncA", hepsiCnd).add_(self.pIns_ST, "+ylA", hepsiCnd)
self.pNom_ST.add_(self.with_S, "+nlI", Conditions.root_is_any((bu, su, o_demons, ben, sen, o, biz, siz)))
self.pNom_ST.add_(self.with_S, "lI", Conditions.root_is_any((nere,)))
self.pNom_ST.add_(self.with_S, "+ylI", Conditions.root_is_any((ne,)))
self.pNom_ST.add_(self.without_S, "+nsIz",
Conditions.root_is_any((nere, bu, su, o_demons, ben, sen, o, biz, siz)))
self.pNom_ST.add_(self.without_S, "+ysIz", Conditions.root_is_any((ne,)))
self.pGen_ST.add_(self.rel_S, "ki", Conditions.root_is_any((nere, bu, su, o_demons, ne, sen, o, biz, siz)))
notRelRepetition: Conditions.Condition = (Conditions.HasTailSequence((self.rel, self.adj, self.zero, self.noun,
self.a3sg, self.pnon, self.loc))).not_()
self.pLoc_ST.add_(self.rel_S, "ki", notRelRepetition)
self.pIns_ST.add_(self.vWhile_S, "+yken")
self.pNom_ST.add_empty(self.pronZeroDeriv_S, Conditions.HAS_TAIL)
self.pDat_ST.add_empty(self.pronZeroDeriv_S, Conditions.HAS_TAIL)
self.pLoc_ST.add_empty(self.pronZeroDeriv_S, Conditions.HAS_TAIL)
self.pAbl_ST.add_empty(self.pronZeroDeriv_S, Conditions.HAS_TAIL)
self.pGen_ST.add_empty(self.pronZeroDeriv_S, Conditions.HAS_TAIL)
self.pIns_ST.add_empty(self.pronZeroDeriv_S, Conditions.HAS_TAIL)
self.pronZeroDeriv_S.add_empty(self.pvVerbRoot_S)
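
    # Copular verb forms on pronoun-derived predicates (zero derivation from a
    # pronoun to a verb). Illustrative: ben-di-m "it was me", sen-miş-sin.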
def connect_verb_after_pronoun(self):
self.pvVerbRoot_S.add_empty(self.pvPresent_S)
self.pvVerbRoot_S.add_(self.vWhile_S, "+yken")
self.pvVerbRoot_S.add_(self.pvPast_S, "+ydI")
self.pvVerbRoot_S.add_(self.pvNarr_S, "+ymIş")
self.pvVerbRoot_S.add_(self.pvCond_S, "+ysA")
allowA1sgTrans = (Conditions.PreviousGroupContains((self.pA1pl_S, self.pP1sg_S))).not_()
allowA1plTrans = (
Conditions.PreviousGroupContains((self.pA1sg_S, self.pA2sg_S, self.pP1sg_S, self.pP2sg_S))).not_()
allowA2sgTrans = (Conditions.PreviousGroupContains((self.pA2pl_S, self.pP2sg_S))).not_()
allowA2plTrans = (Conditions.PreviousGroupContains((self.pA2sg_S, self.pP2pl_S))).not_()
self.pvPresent_S.add_(self.pvA1sg_ST, "+yIm", allowA1sgTrans)
self.pvPresent_S.add_(self.pvA2sg_ST, "sIn", allowA2sgTrans)
self.pvPresent_S.add_empty(self.nA3sg_S)
self.pvPresent_S.add_(self.pvA1pl_ST, "+yIz", allowA1plTrans)
self.pvPresent_S.add_(self.pvA2pl_ST, "sInIz")
self.pvPresent_S.add_(self.pvA3pl_ST, "lAr", Conditions.PreviousGroupContains((self.pLoc_ST,)))
self.pvPast_S.add_(self.pvA1sg_ST, "m", allowA1sgTrans)
self.pvPast_S.add_(self.pvA2sg_ST, "n", allowA2sgTrans)
self.pvPast_S.add_(self.pvA1pl_ST, "k", allowA1plTrans)
self.pvPast_S.add_(self.pvA2pl_ST, "InIz")
self.pvPast_S.add_(self.pvA3pl_ST, "lAr")
self.pvPast_S.add_empty(self.pvA3sg_ST)
self.pvNarr_S.add_(self.pvA1sg_ST, "Im", allowA1sgTrans)
self.pvNarr_S.add_(self.pvA2sg_ST, "sIn", allowA2sgTrans)
self.pvNarr_S.add_(self.pvA1pl_ST, "Iz", allowA1plTrans)
self.pvNarr_S.add_(self.pvA2pl_ST, "sInIz")
self.pvNarr_S.add_(self.pvA3pl_ST, "lAr")
self.pvNarr_S.add_empty(self.pvA3sg_ST)
self.pvNarr_S.add_(self.pvCond_S, "sA")
self.pvCond_S.add_(self.pvA1sg_ST, "m", allowA1sgTrans)
self.pvCond_S.add_(self.pvA2sg_ST, "n", allowA2sgTrans)
self.pvCond_S.add_(self.pvA1pl_ST, "k", allowA1plTrans)
self.pvCond_S.add_(self.pvA2pl_ST, "nIz", allowA2plTrans)
self.pvCond_S.add_empty(self.pvA3sg_ST)
self.pvCond_S.add_(self.pvA3pl_ST, "lAr")
rejectNoCopula = (
Conditions.CurrentGroupContainsAny((self.pvPast_S, self.pvCond_S, self.pvCopBeforeA3pl_S))).not_()
self.pvA1sg_ST.add_(self.pvCop_ST, "dIr", rejectNoCopula)
self.pvA2sg_ST.add_(self.pvCop_ST, "dIr", rejectNoCopula)
self.pvA1pl_ST.add_(self.pvCop_ST, "dIr", rejectNoCopula)
self.pvA2pl_ST.add_(self.pvCop_ST, "dIr", rejectNoCopula)
self.pvA3sg_S.add_(self.pvCop_ST, ">dIr", rejectNoCopula)
self.pvA3pl_ST.add_(self.pvCop_ST, "dIr", rejectNoCopula)
self.pvPresent_S.add_(self.pvCopBeforeA3pl_S, ">dIr")
self.pvCopBeforeA3pl_S.add_(self.pvA3pl_ST, "lAr")
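
    # Verb inflection and derivation: voice (causative, passive), negation,
    # tense/aspect/mood, person agreement, participles, infinitives and converbs,
    # plus the irregular de-/ye- stems handled through vDeYeRoot_S.
    # Illustrative chain: gel-iyor-um "I am coming" roughly follows
    # verbRoot_S -> vProgYor_S "Iyor" -> vA1sg_ST "um".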
def connect_verbs(self):
self.verbRoot_S.add_empty(self.vImp_S)
self.vImp_S.add_empty(self.vA2sg_ST).add_(self.vA2sg_ST, "sAnA").add_(self.vA3sg_ST, "sIn").add_(self.vA2pl_ST,
"+yIn").add_(
self.vA2pl_ST, "+yInIz").add_(self.vA2pl_ST, "sAnIzA").add_(self.vA3pl_ST, "sInlAr")
self.verbRoot_S.add_(self.vCausT_S, "t", Conditions.has(r_attribute=RootAttribute.Causative_t).or_(
Conditions.LastDerivationIs(self.vCausTir_S)).and_not(
Conditions.LastDerivationIsAny((self.vCausT_S, self.vPass_S, self.vAble_S))))
self.verbRoot_S.add_(self.vCausTir_S, ">dIr",
Conditions.has(p_attribute=PhoneticAttribute.LastLetterConsonant).and_not(
Conditions.LastDerivationIsAny((self.vCausTir_S, self.vPass_S, self.vAble_S))))
self.vCausT_S.add_empty(self.verbRoot_S)
self.vCausTir_S.add_empty(self.verbRoot_S)
self.verbRoot_S.add_(self.vProgYor_S, "Iyor",
Conditions.not_have(p_attribute=PhoneticAttribute.LastLetterVowel))
self.verbRoot_VowelDrop_S.add_(self.vProgYor_S, "Iyor")
self.vProgYor_S.add_(self.vA1sg_ST, "um").add_(self.vA2sg_ST, "sun").add_empty(self.vA3sg_ST).add_(
self.vA1pl_ST, "uz").add_(self.vA2pl_ST, "sunuz").add_(self.vA3pl_ST, "lar").add_(self.vCond_S, "sa").add_(
self.vPastAfterTense_S, "du").add_(self.vNarrAfterTense_S, "muş").add_(self.vCopBeforeA3pl_S, "dur").add_(
self.vWhile_S, "ken")
self.verbRoot_S.add_(self.vProgMakta_S, "mAktA")
self.vProgMakta_S.add_(self.vA1sg_ST, "yIm").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(
self.vA1pl_ST, "yIz").add_(self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr").add_(self.vCond_S,
"ysA").add_(
self.vPastAfterTense_S, "ydI").add_(self.vNarrAfterTense_S, "ymIş").add_(self.vCopBeforeA3pl_S, "dIr").add_(
self.vWhile_S, "yken")
self.verbRoot_S.add_(self.vAor_S, "Ir",
Conditions.has(r_attribute=RootAttribute.Aorist_I).or_(Conditions.HAS_SURFACE))
self.verbRoot_S.add_(self.vAor_S, "Ar",
Conditions.has(r_attribute=RootAttribute.Aorist_A).and_(Conditions.HAS_NO_SURFACE))
self.vAor_S.add_(self.vA1sg_ST, "Im").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"Iz").add_(
self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr").add_(self.vPastAfterTense_S, "dI").add_(
self.vNarrAfterTense_S, "mIş").add_(self.vCond_S, "sA").add_(self.vCopBeforeA3pl_S, "dIr").add_(
self.vWhile_S, "ken")
self.verbRoot_S.add_(self.vNeg_S, "mA", Conditions.previous_morpheme_is_not(self.able))
self.vNeg_S.add_empty(self.vImp_S).add_(self.vPast_S, "dI").add_(self.vFut_S, "yAcA~k").add_(self.vFut_S,
"yAcA!ğ").add_(
self.vNarr_S, "mIş").add_(self.vProgMakta_S, "mAktA").add_(self.vOpt_S, "yA").add_(self.vDesr_S, "sA").add_(
self.vNeces_S, "mAlI").add_(self.vInf1_S, "mAk").add_(self.vInf2_S, "mA").add_(self.vInf3_S, "yIş").add_(
self.vActOf_S, "mAcA").add_(self.vPastPart_S, "dI~k").add_(self.vPastPart_S, "dI!ğ").add_(self.vFutPart_S,
"yAcA~k").add_(
self.vFutPart_S, "yAcA!ğ").add_(self.vPresPart_S, "yAn").add_(self.vNarrPart_S, "mIş").add_(
self.vSinceDoingSo_S, "yAlI").add_(self.vByDoingSo_S, "yArAk").add_(self.vHastily_S, "yIver").add_(
self.vEverSince_S, "yAgör").add_(self.vAfterDoing_S, "yIp").add_(self.vWhen_S, "yIncA").add_(
self.vAsLongAs_S, "dIkçA").add_(self.vNotState_S, "mAzlI~k").add_(self.vNotState_S, "mAzlI!ğ").add_(
self.vFeelLike_S, "yAsI")
self.verbRoot_S.add_(self.vNegProg1_S, "m")
self.vNegProg1_S.add_(self.vProgYor_S, "Iyor")
self.vNeg_S.add_(self.vAorNeg_S, "z")
self.vNeg_S.add_empty(self.vAorNegEmpty_S)
self.vAorNeg_S.add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(self.vA2pl_ST, "sInIz").add_(
self.vA3pl_ST, "lAr").add_(self.vPastAfterTense_S, "dI").add_(self.vNarrAfterTense_S, "mIş").add_(
self.vCond_S, "sA").add_(self.vCopBeforeA3pl_S, "dIr").add_(self.vWhile_S, "ken")
self.vAorNegEmpty_S.add_(self.vA1sg_ST, "m").add_(self.vA1pl_ST, "yIz")
self.vNeg_S.add_(self.vAorPartNeg_S, "z")
self.vAorPartNeg_S.add_empty(self.adjAfterVerb_ST)
self.verbRoot_S.add_(self.vAble_S, "+yAbil", Conditions.last_derivation_is(self.vAble_S).not_())
self.vAble_S.add_empty(self.verbRoot_S)
self.vAbleNeg_S.add_empty(self.vAbleNegDerivRoot_S)
self.vAbleNegDerivRoot_S.add_(self.vNeg_S, "mA")
self.vAbleNegDerivRoot_S.add_(self.vNegProg1_S, "m")
self.vNeg_S.add_(self.vAble_S, "yAbil")
self.verbRoot_S.add_(self.vUnable_S, "+yAmA", Conditions.previous_morpheme_is_not(self.able))
self.vUnable_S.copy_outgoing_transitions_from(self.vNeg_S)
self.verbRoot_S.add_(self.vUnableProg1_S, "+yAm")
self.vUnableProg1_S.add_(self.vProgYor_S, "Iyor")
self.verbRoot_S.add_(self.vInf1_S, "mA~k")
self.vInf1_S.add_empty(self.nounInf1Root_S)
self.verbRoot_S.add_(self.vInf2_S, "mA")
self.vInf2_S.add_empty(self.noun_S)
self.verbRoot_S.add_(self.vInf3_S, "+yIş")
self.vInf3_S.add_empty(self.noun_S)
self.verbRoot_S.add_(self.vAgt_S, "+yIcI")
self.vAgt_S.add_empty(self.noun_S)
self.vAgt_S.add_empty(self.adjAfterVerb_ST)
self.verbRoot_S.add_(self.vActOf_S, "mAcA")
self.vActOf_S.add_empty(self.nounActOfRoot_S)
self.verbRoot_S.add_(self.vPastPart_S, ">dI~k")
self.verbRoot_S.add_(self.vPastPart_S, ">dI!ğ")
self.vPastPart_S.add_empty(self.noun_S)
self.vPastPart_S.add_empty(self.adjAfterVerb_S)
self.verbRoot_S.add_(self.vFutPart_S, "+yAcA~k")
self.verbRoot_S.add_(self.vFutPart_S, "+yAcA!ğ")
self.vFutPart_S.add_empty(self.noun_S, Conditions.HAS_TAIL)
self.vFutPart_S.add_empty(self.adjAfterVerb_S)
self.verbRoot_S.add_(self.vNarrPart_S, "mIş")
self.vNarrPart_S.add_empty(self.adjectiveRoot_ST)
self.verbRoot_S.add_(self.vAorPart_S, "Ir",
Conditions.has(r_attribute=RootAttribute.Aorist_I).or_(Conditions.HAS_SURFACE))
self.verbRoot_S.add_(self.vAorPart_S, "Ar",
Conditions.has(r_attribute=RootAttribute.Aorist_A).and_(Conditions.HAS_NO_SURFACE))
self.vAorPart_S.add_empty(self.adjAfterVerb_ST)
self.verbRoot_S.add_(self.vPresPart_S, "+yAn")
self.vPresPart_S.add_empty(self.noun_S, Conditions.HAS_TAIL)
self.vPresPart_S.add_empty(self.adjAfterVerb_ST)
self.verbRoot_S.add_(self.vFeelLike_S, "+yAsI")
self.vFeelLike_S.add_empty(self.noun_S, Conditions.HAS_TAIL)
self.vFeelLike_S.add_empty(self.adjAfterVerb_ST)
self.verbRoot_S.add_(self.vNotState_S, "mAzlI~k")
self.verbRoot_S.add_(self.vNotState_S, "mAzlI!ğ")
self.vNotState_S.add_empty(self.noun_S)
self.vRecip_S.add_empty(self.verbRoot_S)
self.vImplicitRecipRoot_S.add_empty(self.vRecip_S)
self.vImplicitReflexRoot_S.add_empty(self.vReflex_S)
self.vReflex_S.add_empty(self.verbRoot_S)
self.verbRoot_S.add_(self.vPass_S, "In", Conditions.has(r_attribute=RootAttribute.Passive_In).and_not(
Conditions.ContainsMorpheme((self.pass_,))))
self.verbRoot_S.add_(self.vPass_S, "InIl", Conditions.has(r_attribute=RootAttribute.Passive_In).and_not(
Conditions.ContainsMorpheme((self.pass_,))))
self.verbRoot_S.add_(self.vPass_S, "+nIl",
Conditions.PreviousStateIsAny((self.vCausT_S, self.vCausTir_S)).or_(
Conditions.not_have(r_attribute=RootAttribute.Passive_In).and_not(
Conditions.ContainsMorpheme((self.pass_,)))))
self.vPass_S.add_empty(self.verbRoot_S)
self.vCond_S.add_(self.vA1sg_ST, "m").add_(self.vA2sg_ST, "n").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"k").add_(
self.vA2pl_ST, "nIz").add_(self.vA3pl_ST, "lAr")
self.verbRoot_S.add_(self.vPast_S, ">dI")
self.vPast_S.add_(self.vA1sg_ST, "m").add_(self.vA2sg_ST, "n").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"k").add_(
self.vA2pl_ST, "nIz").add_(self.vA3pl_ST, "lAr")
self.vPast_S.add_(self.vCond_S, "ysA")
self.verbRoot_S.add_(self.vNarr_S, "mIş")
self.vNarr_S.add_(self.vA1sg_ST, "Im").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"Iz").add_(
self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr")
self.vNarr_S.add_(self.vCond_S, "sA")
self.vNarr_S.add_(self.vPastAfterTense_S, "tI")
self.vNarr_S.add_(self.vCopBeforeA3pl_S, "tIr")
self.vNarr_S.add_(self.vWhile_S, "ken")
self.vNarr_S.add_(self.vNarrAfterTense_S, "mIş")
self.vPastAfterTense_S.add_(self.vA1sg_ST, "m").add_(self.vA2sg_ST, "n").add_empty(self.vA3sg_ST).add_(
self.vA1pl_ST, "k").add_(self.vA2pl_ST, "nIz").add_(self.vA3pl_ST, "lAr")
self.vNarrAfterTense_S.add_(self.vA1sg_ST, "Im").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(
self.vA1pl_ST, "Iz").add_(self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr")
self.vNarrAfterTense_S.add_(self.vWhile_S, "ken")
self.vNarrAfterTense_S.add_(self.vCopBeforeA3pl_S, "tIr")
self.verbRoot_S.add_(self.vFut_S, "+yAcA~k")
self.verbRoot_S.add_(self.vFut_S, "+yAcA!ğ")
self.vFut_S.add_(self.vA1sg_ST, "Im").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"Iz").add_(
self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr")
self.vFut_S.add_(self.vCond_S, "sA")
self.vFut_S.add_(self.vPastAfterTense_S, "tI")
self.vFut_S.add_(self.vNarrAfterTense_S, "mIş")
self.vFut_S.add_(self.vCopBeforeA3pl_S, "tIr")
self.vFut_S.add_(self.vWhile_S, "ken")
diYiCondition = Conditions.RootSurfaceIsAny(("di", "yi"))
deYeCondition = Conditions.RootSurfaceIsAny(("de", "ye"))
cMultiVerb = Conditions.PreviousMorphemeIsAny(
(self.everSince, self.repeat, self.almost, self.hastily, self.stay, self.start)).not_()
self.vDeYeRoot_S.add_(self.vFut_S, "yece~k", diYiCondition).add_(self.vFut_S, "yece!ğ", diYiCondition).add_(
self.vProgYor_S, "yor", diYiCondition).add_(self.vAble_S, "yebil", diYiCondition).add_(self.vAbleNeg_S,
"ye",
diYiCondition).add_(
self.vInf3_S, "yiş", Conditions.RootSurfaceIsAny(("yi",))).add_(self.vFutPart_S, "yece~k",
diYiCondition).add_(self.vFutPart_S,
"yece!ğ",
diYiCondition).add_(
self.vPresPart_S, "yen", diYiCondition).add_(self.vEverSince_S, "yegel",
diYiCondition.and_(cMultiVerb)).add_(self.vRepeat_S, "yedur",
diYiCondition.and_(
cMultiVerb)).add_(
self.vRepeat_S, "yegör", diYiCondition.and_(cMultiVerb)).add_(self.vAlmost_S, "yeyaz",
diYiCondition.and_(cMultiVerb)).add_(
self.vStart_S, "yekoy", diYiCondition.and_(cMultiVerb)).add_(self.vSinceDoingSo_S, "yeli",
diYiCondition).add_(self.vByDoingSo_S, "yerek",
diYiCondition).add_(
self.vFeelLike_S, "yesi", diYiCondition).add_(self.vAfterDoing_S, "yip", diYiCondition).add_(
self.vWithoutBeingAbleToHaveDoneSo_S, "yemeden", diYiCondition).add_(self.vOpt_S, "ye", diYiCondition)
self.vDeYeRoot_S.add_(self.vCausTir_S, "dir", deYeCondition).add_(self.vPass_S, "n", deYeCondition).add_(
self.vPass_S, "nil", deYeCondition).add_(self.vPast_S, "di", deYeCondition).add_(self.vNarr_S, "miş",
deYeCondition).add_(
self.vAor_S, "r", deYeCondition).add_(self.vNeg_S, "me", deYeCondition).add_(self.vNegProg1_S, "m",
deYeCondition).add_(
self.vProgMakta_S, "mekte", deYeCondition).add_(self.vDesr_S, "se", deYeCondition).add_(self.vInf1_S,
"mek",
deYeCondition).add_(
self.vInf2_S, "me", deYeCondition).add_(self.vInf3_S, "yiş", Conditions.RootSurfaceIsAny(("de",))).add_(
self.vPastPart_S, "di~k", deYeCondition).add_(
self.vPastPart_S, "di!ğ", deYeCondition).add_(self.vNarrPart_S, "miş", deYeCondition).add_(
self.vHastily_S, "yiver", diYiCondition.and_(cMultiVerb)).add_(
self.vAsLongAs_S, "dikçe").add_(self.vWithoutHavingDoneSo_S, "meden").add_(self.vWithoutHavingDoneSo_S,
"meksizin").add_(
self.vNeces_S, "meli").add_(self.vNotState_S, "mezli~k").add_(self.vNotState_S,
"mezli!ğ").add_empty(
self.vImp_S, Conditions.RootSurfaceIs("de")).add_empty(self.vImpYemekYe_S,
Conditions.RootSurfaceIs("ye")).add_empty(
self.vImpYemekYi_S, Conditions.RootSurfaceIs("yi"))
self.vImpYemekYi_S.add_(self.vA2pl_ST, "yin").add_(self.vA2pl_ST, "yiniz")
self.vImpYemekYe_S.add_empty(self.vA2sg_ST).add_(self.vA2sg_ST, "sene").add_(self.vA3sg_ST, "sin").add_(
self.vA2pl_ST, "senize").add_(self.vA3pl_ST, "sinler")
self.verbRoot_S.add_(self.vOpt_S, "+yA")
self.vOpt_S.add_(self.vA1sg_ST, "yIm").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"lIm").add_(
self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr").add_(self.vPastAfterTense_S, "ydI").add_(
self.vNarrAfterTense_S, "ymIş")
self.verbRoot_S.add_(self.vDesr_S, "sA")
self.vDesr_S.add_(self.vA1sg_ST, "m").add_(self.vA2sg_ST, "n").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"k").add_(
self.vA2pl_ST, "nIz").add_(self.vA3pl_ST, "lAr").add_(self.vPastAfterTense_S, "ydI").add_(
self.vNarrAfterTense_S, "ymIş")
self.verbRoot_S.add_(self.vNeces_S, "mAlI")
self.vNeces_S.add_(self.vA1sg_ST, "yIm").add_(self.vA2sg_ST, "sIn").add_empty(self.vA3sg_ST).add_(self.vA1pl_ST,
"yIz").add_(
self.vA2pl_ST, "sInIz").add_(self.vA3pl_ST, "lAr").add_(self.vPastAfterTense_S, "ydI").add_(self.vCond_S,
"ysA").add_(
self.vNarrAfterTense_S, "ymIş").add_(self.vCopBeforeA3pl_S, "dIr").add_(self.vWhile_S, "yken")
previousNotPastNarrCond = (
Conditions.PreviousStateIsAny((self.vPastAfterTense_S, self.vNarrAfterTense_S, self.vCond_S))).not_()
self.vA3pl_ST.add_(self.vPastAfterTense_ST, "dI", previousNotPastNarrCond)
self.vA3pl_ST.add_(self.vNarrAfterTense_ST, "mIş", previousNotPastNarrCond)
self.vA3pl_ST.add_(self.vCond_ST, "sA", previousNotPastNarrCond)
a3plCopWhile = Conditions.PreviousMorphemeIsAny(
(self.prog1, self.prog2, self.neces, self.fut, self.narr, self.aor))
self.vA3pl_ST.add_(self.vCop_ST, "dIr", a3plCopWhile)
self.vA3pl_ST.add_(self.vWhile_S, "ken", a3plCopWhile)
a3sgCopWhile = Conditions.PreviousMorphemeIsAny(
(self.prog1, self.prog2, self.neces, self.fut, self.narr, self.aor))
self.vA1sg_ST.add_(self.vCop_ST, "dIr", a3sgCopWhile)
self.vA2sg_ST.add_(self.vCop_ST, "dIr", a3sgCopWhile)
self.vA3sg_ST.add_(self.vCop_ST, ">dIr", a3sgCopWhile)
self.vA1pl_ST.add_(self.vCop_ST, "dIr", a3sgCopWhile)
self.vA2pl_ST.add_(self.vCop_ST, "dIr", a3sgCopWhile)
self.vCopBeforeA3pl_S.add_(self.vA3pl_ST, "lAr")
previousPast = Conditions.PreviousMorphemeIs(self.past).and_not(
Conditions.ContainsMorpheme((self.cond, self.desr)))
self.vA2pl_ST.add_(self.vCondAfterPerson_ST, "sA", previousPast)
self.vA2sg_ST.add_(self.vCondAfterPerson_ST, "sA", previousPast)
self.vA1sg_ST.add_(self.vCondAfterPerson_ST, "sA", previousPast)
self.vA1pl_ST.add_(self.vCondAfterPerson_ST, "sA", previousPast)
self.verbRoot_S.add_(self.vEverSince_S, "+yAgel", cMultiVerb)
self.verbRoot_S.add_(self.vRepeat_S, "+yAdur", cMultiVerb)
self.verbRoot_S.add_(self.vRepeat_S, "+yAgör", cMultiVerb)
self.verbRoot_S.add_(self.vAlmost_S, "+yAyaz", cMultiVerb)
self.verbRoot_S.add_(self.vHastily_S, "+yIver", cMultiVerb)
self.verbRoot_S.add_(self.vStay_S, "+yAkal", cMultiVerb)
self.verbRoot_S.add_(self.vStart_S, "+yAkoy", cMultiVerb)
self.vEverSince_S.add_empty(self.verbRoot_S)
self.vRepeat_S.add_empty(self.verbRoot_S)
self.vAlmost_S.add_empty(self.verbRoot_S)
self.vHastily_S.add_empty(self.verbRoot_S)
self.vStay_S.add_empty(self.verbRoot_S)
self.vStart_S.add_empty(self.verbRoot_S)
self.vA3sg_ST.add_(self.vAsIf_S, ">cAsInA", Conditions.PreviousMorphemeIsAny((self.aor, self.narr)))
self.verbRoot_S.add_(self.vWhen_S, "+yIncA")
self.verbRoot_S.add_(self.vSinceDoingSo_S, "+yAlI")
self.verbRoot_S.add_(self.vByDoingSo_S, "+yArAk")
self.verbRoot_S.add_(self.vAdamantly_S, "+yAsIyA")
self.verbRoot_S.add_(self.vAfterDoing_S, "+yIp")
self.verbRoot_S.add_(self.vWithoutBeingAbleToHaveDoneSo_S, "+yAmAdAn")
self.verbRoot_S.add_(self.vAsLongAs_S, ">dIkçA")
self.verbRoot_S.add_(self.vWithoutHavingDoneSo_S, "mAdAn")
self.verbRoot_S.add_(self.vWithoutHavingDoneSo_S, "mAksIzIn")
self.vAsIf_S.add_empty(self.advRoot_ST)
self.vSinceDoingSo_S.add_empty(self.advRoot_ST)
self.vByDoingSo_S.add_empty(self.advRoot_ST)
self.vAdamantly_S.add_empty(self.advRoot_ST)
self.vAfterDoing_S.add_empty(self.advRoot_ST)
self.vWithoutBeingAbleToHaveDoneSo_S.add_empty(self.advRoot_ST)
self.vAsLongAs_S.add_empty(self.advRoot_ST)
self.vWithoutHavingDoneSo_S.add_empty(self.advRoot_ST)
self.vWhile_S.add_empty(self.advRoot_ST)
self.vWhen_S.add_empty(self.advNounRoot_ST)
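
    # Question particle "mı/mi/mu/mü": present, past (ydI) and evidential (ymIş)
    # forms with person agreement and copula. Illustrative: mı-ydı-m, mı-dır.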
def connect_question(self):
self.questionRoot_S.add_empty(self.qPresent_S)
self.questionRoot_S.add_(self.qPast_S, "ydI")
self.questionRoot_S.add_(self.qNarr_S, "ymIş")
self.qPresent_S.add_(self.qA1sg_ST, "yIm")
self.qPresent_S.add_(self.qA2sg_ST, "sIn")
self.qPresent_S.add_empty(self.qA3sg_ST)
self.qPast_S.add_(self.qA1sg_ST, "m")
self.qNarr_S.add_(self.qA1sg_ST, "Im")
self.qPast_S.add_(self.qA2sg_ST, "n")
self.qNarr_S.add_(self.qA2sg_ST, "sIn")
self.qPast_S.add_(self.qA1pl_ST, "k")
self.qNarr_S.add_(self.qA1pl_ST, "Iz")
self.qPresent_S.add_(self.qA1pl_ST, "+yIz")
self.qPast_S.add_(self.qA2pl_ST, "InIz")
self.qNarr_S.add_(self.qA2pl_ST, "sInIz")
self.qPresent_S.add_(self.qA2pl_ST, "sInIz")
self.qPast_S.add_(self.qA3pl_ST, "lAr")
self.qNarr_S.add_(self.qA3pl_ST, "lAr")
self.qPast_S.add_empty(self.qA3sg_ST)
self.qNarr_S.add_empty(self.qA3sg_ST)
reject_no_copula = Conditions.CurrentGroupContainsAny((self.qPast_S,)).not_()
self.qA1sg_ST.add_(self.qCop_ST, "dIr", reject_no_copula)
self.qA2sg_ST.add_(self.qCop_ST, "dIr", reject_no_copula)
self.qA3sg_ST.add_(self.qCop_ST, ">dIr", reject_no_copula)
self.qA1pl_ST.add_(self.qCop_ST, "dIr", reject_no_copula)
self.qA2pl_ST.add_(self.qCop_ST, "dIr", reject_no_copula)
        self.qPresent_S.add_(self.qCopBeforeA3pl_S, "dIr")
self.qCopBeforeA3pl_S.add_(self.qA3pl_ST, "lAr")
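
    # Adverbs: verb-derived adverbs (-(y)IncA etc.) can behave like nouns and
    # take the dative (+yA), and öyle/böyle/şöyle can take copular verb endings
    # through a zero derivation (advForVerbDeriv_ST -> nVerb_S).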
def connect_adverbs(self):
self.advNounRoot_ST.add_empty(self.avZero_S)
self.avZero_S.add_empty(self.avNounAfterAdvRoot_ST)
self.avNounAfterAdvRoot_ST.add_empty(self.avA3sg_S)
self.avA3sg_S.add_empty(self.avPnon_S)
self.avPnon_S.add_(self.avDat_ST, "+yA")
self.advForVerbDeriv_ST.add_empty(self.avZeroToVerb_S)
self.avZeroToVerb_S.add_empty(self.nVerb_S)
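
    # Words like içeri, dışarı, yukarı whose final vowel may drop before the
    # locative/ablative, e.g. (illustrative) içer-de, yukar-dan. The reduced
    # stems themselves are expected to come from stem transition generation
    # (see the special roots below), not from this method.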
def connect_last_vowel_drop_words(self):
self.nounLastVowelDropRoot_S.add_empty(self.a3sgLastVowelDrop_S)
self.nounLastVowelDropRoot_S.add_(self.a3PlLastVowelDrop_S, "lAr")
self.a3sgLastVowelDrop_S.add_empty(self.pNonLastVowelDrop_S)
self.a3PlLastVowelDrop_S.add_empty(self.pNonLastVowelDrop_S)
self.pNonLastVowelDrop_S.add_(self.loc_ST, ">dA")
self.pNonLastVowelDrop_S.add_(self.abl_ST, ">dAn")
self.adjLastVowelDropRoot_S.add_empty(self.zeroLastVowelDrop_S)
self.postpLastVowelDropRoot_S.add_empty(self.zeroLastVowelDrop_S)
self.zeroLastVowelDrop_S.add_empty(self.nounLastVowelDropRoot_S)
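
    # Postpositions: they can act as nominal predicates via a zero derivation,
    # and "gibi" / "sonra" can additionally behave like nouns, taking possessive
    # and case suffixes (illustrative: gibi-si, sonra-sı, gibi-si-ne).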
def connect_postpositives(self):
self.postpRoot_ST.add_empty(self.postpZero_S)
self.postpZero_S.add_empty(self.nVerb_S)
gibi_gen: DictionaryItem = self.lexicon.get_item_by_id("gibi_Postp_PCGen")
gibi_nom: DictionaryItem = self.lexicon.get_item_by_id("gibi_Postp_PCNom")
sonra_abl: DictionaryItem = self.lexicon.get_item_by_id("sonra_Postp_PCAbl")
self.postpZero_S.add_empty(self.po2nRoot_S, Conditions.root_is_any((gibi_gen, gibi_nom, sonra_abl)))
self.po2nRoot_S.add_empty(self.po2nA3sg_S)
self.po2nRoot_S.add_(self.po2nA3pl_S, "lAr")
self.po2nA3sg_S.add_(self.po2nP3sg_S, "+sI")
self.po2nA3sg_S.add_(self.po2nP1sg_S, "m", Conditions.root_is_any((gibi_gen, gibi_nom)))
self.po2nA3sg_S.add_(self.po2nP2sg_S, "n", Conditions.root_is_any((gibi_gen, gibi_nom)))
self.po2nA3sg_S.add_(self.po2nP1pl_S, "miz", Conditions.root_is_any((gibi_gen, gibi_nom)))
self.po2nA3sg_S.add_(self.po2nP2pl_S, "niz", Conditions.root_is_any((gibi_gen, gibi_nom)))
self.po2nA3pl_S.add_(self.po2nP3sg_S, "+sI")
self.po2nA3pl_S.add_empty(self.po2nPnon_S)
self.po2nP3sg_S.add_empty(self.po2nNom_ST).add_(self.po2nDat_ST, "nA").add_(self.po2nLoc_ST, "ndA").add_(
self.po2nAbl_ST, "ndAn").add_(self.po2nIns_ST, "ylA").add_(self.po2nGen_ST, "nIn").add_(self.po2nAcc_ST,
"nI")
self.po2nPnon_S.add_empty(self.po2nNom_ST).add_(self.po2nDat_ST, "A").add_(self.po2nLoc_ST, "dA").add_(
self.po2nAbl_ST, "dAn").add_(self.po2nIns_ST, "lA").add_(self.po2nGen_ST, "In").add_(self.po2nEqu_ST,
"cA").add_(
self.po2nAcc_ST, "I")
self.po2nP1sg_S.add_(self.po2nDat_ST, "e")
self.po2nP2sg_S.add_(self.po2nDat_ST, "e")
self.po2nP1pl_S.add_(self.po2nDat_ST, "e")
self.po2nP2pl_S.add_(self.po2nDat_ST, "e")
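
    # The copular verb "i-" (imek) written as a separate word: i-di (past),
    # i-miş (evidential), i-se (conditional) plus person endings and copula,
    # e.g. (illustrative) i-di-m "I was".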
def connect_imek(self):
self.imekRoot_S.add_(self.imekPast_S, "di")
self.imekRoot_S.add_(self.imekNarr_S, "miş")
self.imekRoot_S.add_(self.imekCond_S, "se")
self.imekPast_S.add_(self.imekA1sg_ST, "m")
self.imekPast_S.add_(self.imekA2sg_ST, "n")
self.imekPast_S.add_empty(self.imekA3sg_ST)
self.imekPast_S.add_(self.imekA1pl_ST, "k")
self.imekPast_S.add_(self.imekA2pl_ST, "niz")
self.imekPast_S.add_(self.imekA3pl_ST, "ler")
self.imekNarr_S.add_(self.imekA1sg_ST, "im")
self.imekNarr_S.add_(self.imekA2sg_ST, "sin")
self.imekNarr_S.add_empty(self.imekA3sg_ST)
self.imekNarr_S.add_(self.imekA1pl_ST, "iz")
self.imekNarr_S.add_(self.imekA2pl_ST, "siniz")
self.imekNarr_S.add_(self.imekA3pl_ST, "ler")
self.imekPast_S.add_(self.imekCond_S, "yse")
self.imekNarr_S.add_(self.imekCond_S, "se")
self.imekCond_S.add_(self.imekA1sg_ST, "m")
self.imekCond_S.add_(self.imekA2sg_ST, "n")
self.imekCond_S.add_empty(self.imekA3sg_ST)
self.imekCond_S.add_(self.imekA1pl_ST, "k")
self.imekCond_S.add_(self.imekA2pl_ST, "niz")
self.imekCond_S.add_(self.imekA3pl_ST, "ler")
reject_no_copula: Conditions.Condition = Conditions.CurrentGroupContainsAny((self.imekPast_S,)).not_()
self.imekA1sg_ST.add_(self.imekCop_ST, "dir", reject_no_copula)
self.imekA2sg_ST.add_(self.imekCop_ST, "dir", reject_no_copula)
self.imekA3sg_ST.add_(self.imekCop_ST, "tir", reject_no_copula)
self.imekA1pl_ST.add_(self.imekCop_ST, "dir", reject_no_copula)
self.imekA2pl_ST.add_(self.imekCop_ST, "dir", reject_no_copula)
self.imekA3pl_ST.add_(self.imekCop_ST, "dir", reject_no_copula)
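
    # Special wiring for verbs whose last stem vowel drops before the passive,
    # e.g. (illustrative) ayır- -> ayr-ıl-; the unmodified root keeps all regular
    # verb transitions except the passive ones.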
def handle_post_processing_connections(self):
self.verbLastVowelDropModRoot_S.add_(self.vPass_S, template="Il")
self.verbLastVowelDropUnmodRoot_S.copy_outgoing_transitions_from(self.verbRoot_S)
self.verbLastVowelDropUnmodRoot_S.remove_transitions_to(self.pass_)
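
    # Chooses the root MorphemeState for a dictionary item: explicit overrides in
    # item_root_state_map first, then phonetic / root attributes
    # (LastLetterDropped, Reciprocal, Reflexive), and finally primary/secondary
    # POS. Illustrative sketch (mirrors StemTransitionsBase.generate below):
    #   attrs = AttributesHelper.get_morphemic_attributes(item.pronunciation)
    #   root_state = self.get_root_state(item, attrs)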
def get_root_state(self, item: DictionaryItem, phonetic_attributes: Set[PhoneticAttribute]) -> MorphemeState:
root: MorphemeState = self.item_root_state_map.get(item.id_)
if root is not None:
return root
elif PhoneticAttribute.LastLetterDropped in phonetic_attributes:
return self.verbRoot_VowelDrop_S
elif item.has_attribute(RootAttribute.Reciprocal):
return self.vImplicitRecipRoot_S
elif item.has_attribute(RootAttribute.Reflexive):
return self.vImplicitReflexRoot_S
else:
if item.primary_pos == PrimaryPos.Noun:
if item.secondary_pos == SecondaryPos.ProperNoun:
return self.nounProper_S
elif item.secondary_pos == SecondaryPos.Abbreviation:
return self.nounAbbrv_S
elif (item.secondary_pos == SecondaryPos.Email) or (item.secondary_pos == SecondaryPos.Url) or \
(item.secondary_pos == SecondaryPos.HashTag) or (item.secondary_pos == SecondaryPos.Mention):
return self.nounProper_S
elif item.secondary_pos == SecondaryPos.Emoticon or item.secondary_pos == SecondaryPos.RomanNumeral:
return self.nounNoSuffix_S
else:
if item.has_attribute(RootAttribute.CompoundP3sg):
return self.nounCompoundRoot_S
return self.noun_S
elif item.primary_pos == PrimaryPos.Adjective:
return self.adjectiveRoot_ST
elif item.primary_pos == PrimaryPos.Pronoun:
if item.secondary_pos == SecondaryPos.PersonalPron:
return self.pronPers_S
elif item.secondary_pos == SecondaryPos.DemonstrativePron:
return self.pronDemons_S
elif item.secondary_pos == SecondaryPos.QuantitivePron:
return self.pronQuant_S
elif item.secondary_pos == SecondaryPos.QuestionPron:
return self.pronQues_S
elif item.secondary_pos == SecondaryPos.ReflexivePron:
return self.pronReflex_S
else:
return self.pronQuant_S
elif item.primary_pos == PrimaryPos.Adverb:
return self.advRoot_ST
elif item.primary_pos == PrimaryPos.Conjunction:
return self.conjRoot_ST
elif item.primary_pos == PrimaryPos.Question:
return self.questionRoot_S
elif item.primary_pos == PrimaryPos.Interjection:
return self.interjRoot_ST
elif item.primary_pos == PrimaryPos.Verb:
return self.verbRoot_S
elif item.primary_pos == PrimaryPos.Punctuation:
return self.puncRoot_ST
elif item.primary_pos == PrimaryPos.Determiner:
return self.detRoot_ST
elif item.primary_pos == PrimaryPos.PostPositive:
return self.postpRoot_ST
elif item.primary_pos == PrimaryPos.Numeral:
return self.numeralRoot_ST
elif item.primary_pos == PrimaryPos.Duplicator:
return self.dupRoot_ST
else:
return self.noun_S
# This class originally lived under morphology/analysis/, but it has a circular import
# dependency with the TurkishMorphotactics class. Because of Python's restrictions on
# circular imports, it was moved into this module.
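# StemTransitionsBase generates the StemTransition objects (root surface forms plus
# their phonetic attributes) for dictionary items: special roots go through dedicated
# handling, items with modifier attributes (Voicing, Doubling, LastVowelDrop, ...)
# get modified/unmodified root variants, and everything else yields a single
# transition built from the item's pronunciation.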
class StemTransitionsBase:
def __init__(self, morphotactics: TurkishMorphotactics):
self.alphabet = TurkishAlphabet.INSTANCE
self.morphotactics = morphotactics
self.modifiers = {RootAttribute.Doubling, RootAttribute.LastVowelDrop, RootAttribute.ProgressiveVowelDrop,
RootAttribute.InverseHarmony, RootAttribute.Voicing, RootAttribute.CompoundP3sg,
RootAttribute.CompoundP3sgRoot}
self.special_roots = {"içeri_Noun", "içeri_Adj", "dışarı_Adj", "şura_Noun", "bura_Noun", "ora_Noun",
"dışarı_Noun", "dışarı_Postp", "yukarı_Noun", "yukarı_Adj", "ileri_Noun", "ben_Pron_Pers",
"sen_Pron_Pers", "demek_Verb", "yemek_Verb", "imek_Verb", "birbiri_Pron_Quant",
"çoğu_Pron_Quant", "öbürü_Pron_Quant", "birçoğu_Pron_Quant"}
def generate(self, item: DictionaryItem) -> Tuple[StemTransition, ...]:
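        """
        Generates the StemTransition objects for a dictionary item. Items listed in
        special_roots are handled separately; items carrying a stem-modifying attribute
        (Doubling, LastVowelDrop, Voicing, ...) produce an original and a modified stem
        transition; all remaining items produce a single transition.
        """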
if item.id_ in self.special_roots:
return self.handle_special_roots(item)
elif self.has_modifier_attribute(item):
return self.generate_modified_root_nodes(item)
else:
phonetic_attributes: Set[PhoneticAttribute] = self.calculate_attributes(item.pronunciation)
transition = StemTransition(item.root, item, phonetic_attributes,
self.morphotactics.get_root_state(item, phonetic_attributes))
return (transition,)
def has_modifier_attribute(self, item: DictionaryItem) -> bool:
if self.modifiers & item.attributes:
return True
return False
@staticmethod
def calculate_attributes(input_: str) -> Set[PhoneticAttribute]:
return AttributesHelper.get_morphemic_attributes(input_)
def generate_modified_root_nodes(self, dic_item: DictionaryItem) -> Tuple[StemTransition, ...]:
modified_seq = dic_item.pronunciation
original_attrs = self.calculate_attributes(dic_item.pronunciation)
modified_attrs = deepcopy(original_attrs)
modified_root_state = None # MorphemeState
unmodified_root_state = None # MorphemeState
for attribute in dic_item.attributes:
if attribute == RootAttribute.Voicing:
last = self.alphabet.last_char(modified_seq)
voiced = self.alphabet.voice(last)
if last == voiced:
raise Exception("Voicing letter is not proper in: " + dic_item.id_)
if dic_item.lemma.endswith("nk"):
voiced = 'g'
modified_seq = modified_seq[: -1] + voiced
try:
modified_attrs.remove(PhoneticAttribute.LastLetterVoicelessStop)
except KeyError as ke:
logger.debug("Key error in modified_attrs in Voicing branch: " + str(ke))
original_attrs.add(PhoneticAttribute.ExpectsConsonant)
modified_attrs.add(PhoneticAttribute.ExpectsVowel)
modified_attrs.add(PhoneticAttribute.CannotTerminate)
elif attribute == RootAttribute.Doubling:
modified_seq = modified_seq + self.alphabet.last_char(modified_seq)
original_attrs.add(PhoneticAttribute.ExpectsConsonant)
modified_attrs.add(PhoneticAttribute.ExpectsVowel)
modified_attrs.add(PhoneticAttribute.CannotTerminate)
elif attribute == RootAttribute.LastVowelDrop:
last_letter = self.alphabet.get_last_letter(modified_seq)
if last_letter.is_vowel():
modified_seq = modified_seq[:-1]
modified_attrs.add(PhoneticAttribute.ExpectsConsonant)
modified_attrs.add(PhoneticAttribute.CannotTerminate)
else:
modified_seq = modified_seq[: -2] + modified_seq[-1:]
if dic_item.primary_pos != PrimaryPos.Verb:
original_attrs.add(PhoneticAttribute.ExpectsConsonant)
else:
unmodified_root_state = self.morphotactics.verbLastVowelDropUnmodRoot_S
modified_root_state = self.morphotactics.verbLastVowelDropModRoot_S
modified_attrs.add(PhoneticAttribute.ExpectsVowel)
modified_attrs.add(PhoneticAttribute.CannotTerminate)
elif attribute == RootAttribute.InverseHarmony:
original_attrs.add(PhoneticAttribute.LastVowelFrontal)
modified_attrs.add(PhoneticAttribute.LastVowelFrontal)
try:
original_attrs.remove(PhoneticAttribute.LastVowelBack)
except KeyError as ke:
logger.debug("Non existent key original_attrs: " + str(ke))
try:
modified_attrs.remove(PhoneticAttribute.LastVowelBack)
except KeyError as ke:
logger.debug("Non existent key modified_attrs: " + str(ke))
elif attribute == RootAttribute.ProgressiveVowelDrop:
if len(modified_seq) > 1:
modified_seq = modified_seq[:-1]
if self.alphabet.contains_vowel(modified_seq):
modified_attrs = self.calculate_attributes(modified_seq)
modified_attrs.add(PhoneticAttribute.LastLetterDropped)
if unmodified_root_state is None:
unmodified_root_state = self.morphotactics.get_root_state(dic_item, original_attrs)
original: StemTransition = StemTransition(dic_item.root, dic_item, original_attrs, unmodified_root_state)
if modified_root_state is None:
modified_root_state = self.morphotactics.get_root_state(dic_item, modified_attrs)
modified: StemTransition = StemTransition(modified_seq, dic_item, modified_attrs, modified_root_state)
if original == modified:
return (original,)
else:
return original, modified
def handle_special_roots(self, item: DictionaryItem) -> Tuple[StemTransition, ...]:
id_ = item.id_
original_attrs = self.calculate_attributes(item.pronunciation)
unmodified_root_state = self.morphotactics.get_root_state(item, original_attrs)
if id_ == "içeri_Noun" or id_ == "içeri_Adj" or id_ == "dışarı_Adj" or id_ == "dışarı_Noun" or \
id_ == "dışarı_Postp" or id_ == "yukarı_Noun" or id_ == "ileri_Noun" or id_ == "yukarı_Adj" or \
id_ == "şura_Noun" or id_ == "bura_Noun" or id_ == "ora_Noun":
original = StemTransition(item.root, item, original_attrs, unmodified_root_state)
if item.primary_pos == PrimaryPos.Noun:
root_for_modified = self.morphotactics.nounLastVowelDropRoot_S
elif item.primary_pos == PrimaryPos.Adjective:
root_for_modified = self.morphotactics.adjLastVowelDropRoot_S
elif item.primary_pos == PrimaryPos.PostPositive:
root_for_modified = self.morphotactics.adjLastVowelDropRoot_S
else:
raise Exception("No root morpheme state found for " + item.id_)
m = item.root[:-1]
modified = StemTransition(m, item, self.calculate_attributes(m), root_for_modified)
modified.phonetic_attributes.add(PhoneticAttribute.ExpectsConsonant)
modified.phonetic_attributes.add(PhoneticAttribute.CannotTerminate)
return original, modified
elif id_ == "ben_Pron_Pers" or id_ == "sen_Pron_Pers":
original = StemTransition(item.root, item, original_attrs, unmodified_root_state)
if item.lemma == "ben":
modified = StemTransition("ban", item, self.calculate_attributes("ban"),
self.morphotactics.pronPers_Mod_S)
else:
modified = StemTransition("san", item, self.calculate_attributes("san"),
self.morphotactics.pronPers_Mod_S)
original.phonetic_attributes.add(PhoneticAttribute.UnModifiedPronoun)
modified.phonetic_attributes.add(PhoneticAttribute.ModifiedPronoun)
return original, modified
elif id_ == "demek_Verb" or id_ == "yemek_Verb":
original = StemTransition(item.root, item, original_attrs, self.morphotactics.vDeYeRoot_S)
if item.lemma == "demek":
modified = StemTransition("di", item, self.calculate_attributes("di"),
self.morphotactics.vDeYeRoot_S)
else:
modified = StemTransition("yi", item, self.calculate_attributes("yi"),
self.morphotactics.vDeYeRoot_S)
return original, modified
elif id_ == "imek_Verb":
original = StemTransition(item.root, item, original_attrs, self.morphotactics.imekRoot_S)
return (original,)
elif id_ == "birbiri_Pron_Quant" or id_ == "çoğu_Pron_Quant" or id_ == "öbürü_Pron_Quant" or \
id_ == "birçoğu_Pron_Quant":
original = StemTransition(item.root, item, original_attrs, self.morphotactics.pronQuant_S)
if item.lemma == "birbiri":
modified = StemTransition("birbir", item, self.calculate_attributes("birbir"),
self.morphotactics.pronQuantModified_S)
elif item.lemma == "çoğu":
modified = StemTransition("çok", item, self.calculate_attributes("çok"),
self.morphotactics.pronQuantModified_S)
elif item.lemma == "öbürü":
modified = StemTransition("öbür", item, self.calculate_attributes("öbür"),
self.morphotactics.pronQuantModified_S)
else:
modified = StemTransition("birçok", item, self.calculate_attributes("birçok"),
self.morphotactics.pronQuantModified_S)
original.phonetic_attributes.add(PhoneticAttribute.UnModifiedPronoun)
modified.phonetic_attributes.add(PhoneticAttribute.ModifiedPronoun)
return original, modified
else:
raise Exception("Lexicon Item with special stem change cannot be handled:" + item.id_)
# This class was originally under morphology/analysis/, but it has a circular import
# dependency with the TurkishMorphotactics class; because of Python's restrictions on
# circular imports it had to be moved here.
class StemTransitionsMapBased(StemTransitionsBase):
def __init__(self, lexicon: RootLexicon, morphotactics: TurkishMorphotactics):
super().__init__(morphotactics)
self.lexicon = lexicon
self.morphotactics = morphotactics
self.multi_stems: Dict[str, List[StemTransition]] = dict()
self.single_stems: Dict[str, StemTransition] = dict()
self.different_stem_items: Dict[DictionaryItem, List[StemTransition]] = dict()
self.ascii_keys = None # MultiMap <String, String>
self.lock = ReadWriteLock()
for item in lexicon:
self.add_dictionary_item(item)
def add_dictionary_item(self, item: DictionaryItem):
self.lock.acquire_write()
try:
transitions: Tuple[StemTransition] = self.generate(item)
for transition in transitions:
self.add_stem_transition(transition)
if len(transitions) > 1 or (len(transitions) == 1 and transitions[0].surface != item.root):
if item in self.different_stem_items.keys():
self.different_stem_items[item].extend(transitions)
else:
self.different_stem_items[item] = list(transitions)
except ValueError:
            logger.debug(f"Cannot generate stem transition for {item.id_}")
finally:
self.lock.release_write()
def remove_dictionary_item(self, item):
self.lock.acquire_write()
try:
transitions: Tuple[StemTransition] = self.generate(item)
for transition in transitions:
self.remove_stem_node(transition)
if item in self.different_stem_items.keys():
self.different_stem_items.pop(item)
except ValueError as e:
logger.warning("Cannot remove" + str(e))
finally:
self.lock.release_write()
def remove_stem_node(self, stem_transition: StemTransition):
surface_form = stem_transition.surface
if surface_form in self.multi_stems.keys():
self.multi_stems[surface_form].remove(stem_transition)
elif surface_form in self.single_stems.keys() and self.single_stems.get(
surface_form).item == stem_transition.item:
self.single_stems.pop(surface_form)
        # NOTE (translated from Turkish): there may be an error here, be careful.
        # There was a negation that negated the whole if statement below; check the results.
if not (stem_transition.item in self.different_stem_items.keys() and stem_transition in
self.different_stem_items[stem_transition.item]):
try:
self.different_stem_items[stem_transition.item].remove(stem_transition)
except KeyError:
                logger.debug(f"Error: {str(stem_transition.item)}")
def add_stem_transition(self, stem_transition: StemTransition):
surface_form = stem_transition.surface
if surface_form in self.multi_stems.keys():
self.multi_stems[surface_form].append(stem_transition)
elif surface_form in self.single_stems.keys():
self.multi_stems[surface_form] = [self.single_stems[surface_form], stem_transition]
self.single_stems.pop(surface_form)
else:
self.single_stems[surface_form] = stem_transition
def get_transitions(self, stem: str = None) -> Union[Set[StemTransition], Tuple[StemTransition, ...]]:
if not stem:
result = set(self.single_stems.values())
for value in self.multi_stems.values():
result |= set(value)
return result
else:
self.lock.acquire_read()
try:
if stem in self.single_stems.keys():
return (self.single_stems[stem],)
if stem in self.multi_stems.keys():
return tuple(self.multi_stems[stem])
finally:
self.lock.release_read()
return ()
def get_transitions_for_item(self, item: DictionaryItem) -> Tuple[StemTransition]:
self.lock.acquire_read()
try:
if item in self.different_stem_items.keys():
return tuple(self.different_stem_items[item])
else:
transitions: Tuple[StemTransition] = self.get_transitions(item.root)
return tuple(s for s in transitions if s.item == item)
finally:
self.lock.release_read()
def get_transitions_ascii_tolerant(self, stem: str) -> Set[StemTransition]:
self.lock.acquire_read()
try:
result: Set[StemTransition] = set()
if stem in self.single_stems.keys():
result.add(self.single_stems[stem])
elif stem in self.multi_stems.keys():
result |= set(self.multi_stems[stem])
ascii_stems: Set[str] = set(self.ascii_keys.get(TurkishAlphabet.INSTANCE.to_ascii(stem), []))
for st in ascii_stems:
if st in self.single_stems.keys():
result.add(self.single_stems[st])
elif st in self.multi_stems.keys():
result |= set(self.multi_stems[st])
return result
finally:
self.lock.release_read()
def get_prefix_matches(self, inp: str, ascii_tolerant: bool) -> Tuple[StemTransition]:
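        """
        Returns every StemTransition whose surface form is a prefix of `inp`. All prefixes
        of the input (lengths 1..len(inp)) are looked up; when ascii_tolerant is True,
        Turkish-specific characters are also matched through their ASCII equivalents using
        a lazily built ascii key map.
        """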
if self.ascii_keys is None and ascii_tolerant:
self.generate_ascii_tolerant_map()
self.lock.acquire_read()
try:
matches: List[StemTransition] = []
for i in range(1, len(inp) + 1):
stem = inp[0:i]
if ascii_tolerant:
matches.extend(self.get_transitions_ascii_tolerant(stem))
else:
matches.extend(self.get_transitions(stem=stem))
return tuple(matches)
finally:
self.lock.release_read()
def generate_ascii_tolerant_map(self):
self.lock.acquire_write()
self.ascii_keys: Dict[str, List[str]] = {}
try:
for s in self.single_stems.keys():
ascii_ = TurkishAlphabet.INSTANCE.to_ascii(s)
if TurkishAlphabet.INSTANCE.contains_ascii_related(s):
if ascii_ not in self.ascii_keys.keys():
self.ascii_keys[ascii_] = [s]
else:
self.ascii_keys[ascii_].append(s)
for sts in self.multi_stems.values():
for st in sts:
s = st.surface
ascii_ = TurkishAlphabet.INSTANCE.to_ascii(s)
if TurkishAlphabet.INSTANCE.contains_ascii_related(s):
if ascii_ in self.ascii_keys.keys():
self.ascii_keys[ascii_].append(s)
else:
self.ascii_keys[ascii_] = [s]
finally:
self.lock.release_write() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/morphotactics/turkish_morphotactics.py | turkish_morphotactics.py |
from __future__ import annotations
from typing import List, Tuple, TYPE_CHECKING
from abc import ABC
if TYPE_CHECKING:
from zemberek.core.turkish import SecondaryPos, RootAttribute, PhoneticAttribute
from zemberek.morphology.lexicon import DictionaryItem
from zemberek.morphology.analysis.search_path import SearchPath
from zemberek.morphology.morphotactics.morpheme import Morpheme
from zemberek.morphology.morphotactics.morpheme_state import MorphemeState
from zemberek.morphology.morphotactics.operator import Operator
class Conditions(ABC):
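    """
    Container for the condition objects that guard suffix transitions. Conditions are
    combined with and_/or_/not_ into CombinedCondition trees and evaluated against a
    SearchPath during analysis.

    A minimal usage sketch (assumes PhoneticAttribute and RootAttribute are imported in
    the caller's scope; the chosen attribute values are only illustrative):

        cond = (Conditions.has(p_attribute=PhoneticAttribute.LastVowelFrontal)
                .and_not(Conditions.has(r_attribute=RootAttribute.CompoundP3sg)))
        # cond.accept_(search_path) -> bool
    """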
HAS_TAIL = None
HAS_SURFACE = None
HAS_NO_SURFACE = None
CURRENT_GROUP_EMPTY = None
@staticmethod
def not_(condition):
return Conditions.NotCondition(condition)
@staticmethod
def not_have(p_attribute: PhoneticAttribute = None, r_attribute: RootAttribute = None) -> 'Conditions.Condition':
if r_attribute:
return Conditions.HasRootAttribute(r_attribute).not_()
return Conditions.HasPhoneticAttribute(p_attribute).not_()
@staticmethod
def condition(operator: Operator, left: 'Conditions.Condition', right: 'Conditions.Condition') -> \
'Conditions.Condition':
return Conditions.CombinedCondition.of(operator, left, right)
@staticmethod
def and_(left: 'Conditions.Condition', right: 'Conditions.Condition'):
return Conditions.condition(Operator.AND, left, right)
@staticmethod
def root_is(item: DictionaryItem) -> 'Conditions.Condition':
return Conditions.DictionaryItemIs(item)
@staticmethod
def root_is_not(item: DictionaryItem) -> 'Conditions.Condition':
return Conditions.DictionaryItemIs(item).not_()
@staticmethod
def root_is_any(items: Tuple[DictionaryItem, ...]) -> 'Conditions.Condition':
return Conditions.DictionaryItemIsAny(items)
@staticmethod
def root_is_none(items: Tuple[DictionaryItem, ...]) -> 'Conditions.Condition':
return Conditions.DictionaryItemIsNone(items)
@staticmethod
def has(r_attribute: RootAttribute = None, p_attribute: PhoneticAttribute = None) -> 'Conditions.Condition':
if p_attribute:
return Conditions.HasPhoneticAttribute(p_attribute)
return Conditions.HasRootAttribute(r_attribute)
@staticmethod
def or_(left: 'Conditions.Condition', right: 'Conditions.Condition') -> 'Conditions.Condition':
return Conditions.condition(Operator.OR, left, right)
@staticmethod
def prvious_morpheme_is(morpheme: Morpheme) -> 'Conditions.Condition':
return Conditions.PreviousMorphemeIs(morpheme)
@staticmethod
def previous_morpheme_is_not(morpheme: Morpheme) -> 'Conditions.Condition':
return Conditions.PreviousMorphemeIs(morpheme).not_()
@staticmethod
def previous_state_is(state) -> 'Conditions.Condition': # state: MorphemeState
return Conditions.PreviousStateIs(state)
@staticmethod
def previous_state_is_not(state) -> 'Conditions.Condition': # state: MorphemeState
return Conditions.PreviousStateIsNot(state)
@staticmethod
def last_derivation_is(state) -> 'Conditions.Condition': # state: MorphemeState
return Conditions.LastDerivationIs(state)
class Condition(ABC):
def not_(self):
raise NotImplementedError
def and_(self, other: 'Conditions.Condition') -> 'Conditions.Condition':
raise NotImplementedError
def or_(self, other: 'Conditions.Condition') -> 'Conditions.Condition':
raise NotImplementedError
def and_not(self, other: 'Conditions.Condition') -> 'Conditions.Condition':
raise NotImplementedError
def accept_(self, path: SearchPath) -> bool:
raise NotImplementedError
class AbstractCondition(Condition):
def not_(self) -> 'Conditions.Condition':
return Conditions.not_(self)
def and_(self, other: 'Conditions.Condition') -> 'Conditions.Condition':
return Conditions.and_(self, other)
def or_(self, other: 'Conditions.Condition') -> 'Conditions.Condition':
return Conditions.or_(self, other)
def and_not(self, other: 'Conditions.Condition') -> 'Conditions.Condition':
return self.and_(other.not_())
def accept_(self, path: SearchPath) -> bool:
raise NotImplementedError
class HasPhoneticAttribute(AbstractCondition):
def __init__(self, attribute: PhoneticAttribute):
super().__init__()
self.attribute = attribute
def accept_(self, visitor: SearchPath) -> bool:
return self.attribute in visitor.phonetic_attributes
def __str__(self):
return "HasPhoneticAttribute{" + self.attribute.name + '}'
class NotCondition(AbstractCondition):
def __init__(self, condition: 'Conditions.Condition'):
super().__init__()
self.condition = condition
def accept_(self, visitor: SearchPath) -> bool:
return not self.condition.accept_(visitor)
def __str__(self):
return "Not(" + str(self.condition) + ")"
class CombinedCondition(AbstractCondition):
def __init__(self, operator: Operator, left: 'Conditions.Condition', right: 'Conditions.Condition'):
super().__init__()
self.operator = operator
self.conditions = []
self.add_(operator, left)
self.add_(operator, right)
@classmethod
def convert_to_combined(cls, obj) -> 'Conditions.CombinedCondition':
obj.__class__ = Conditions.CombinedCondition
return obj
def add_(self, op: Operator, condition: 'Conditions.Condition') -> 'Conditions.CombinedCondition':
if isinstance(condition, Conditions.CombinedCondition):
combined_condition = Conditions.CombinedCondition.convert_to_combined(condition)
if combined_condition.operator == op:
self.conditions.extend(combined_condition.conditions)
else:
self.conditions.append(condition)
else:
if condition is None:
raise ValueError("The argument 'conditions' must not contain none")
self.conditions.append(condition)
return self
@staticmethod
def of(operator: Operator, left: 'Conditions.Condition', right: 'Conditions.Condition') -> \
'Conditions.Condition':
return Conditions.CombinedCondition(operator, left, right)
def count(self) -> int:
if len(self.conditions) == 0:
return 0
elif len(self.conditions) == 1:
first = self.conditions[0]
return Conditions.CombinedCondition.convert_to_combined(first).count() if \
isinstance(first, Conditions.CombinedCondition) else 1
else:
cnt = 0
for condition in self.conditions:
if isinstance(condition, Conditions.CombinedCondition):
cnt += Conditions.CombinedCondition.convert_to_combined(condition).count()
else:
cnt += 1
return cnt
def accept_(self, path: SearchPath) -> bool:
if len(self.conditions) == 0:
return True
elif len(self.conditions) == 1:
return self.conditions[0].accept_(path)
else:
if self.operator == Operator.AND:
for condition in self.conditions:
if not condition.accept_(path):
return False
return True
else:
for condition in self.conditions:
if condition.accept_(path):
return True
return False
def __str__(self):
if len(self.conditions) == 0:
return "[No-Condition]"
elif len(self.conditions) == 1:
return str(self.conditions[0])
else:
                separator = " AND " if self.operator == Operator.AND else " OR "
                return separator.join(str(condition) for condition in self.conditions)
class CurrentGroupContainsAny(AbstractCondition):
def __init__(self, states: Tuple[MorphemeState, ...]): # states: Tuple[MorphemeState]
self.states = set(states)
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
for sf in reversed(suffixes[1:]):
if sf.get_state() in self.states:
return True
if sf.get_state().derivative:
return False
return False
def __str__(self):
return "CurrentGroupContainsAny{" + str(self.states) + "}"
class DictionaryItemIsAny(AbstractCondition):
def __init__(self, items: Tuple[DictionaryItem, ...]):
self.items = set(items)
def accept_(self, visitor: SearchPath) -> bool:
return visitor.get_dictionary_item() in self.items
def __str__(self):
return "DictionaryItemIsAny{" + str(self.items) + "}"
class HasRootAttribute(AbstractCondition):
def __init__(self, attribute: RootAttribute):
self.attribute = attribute
def accept_(self, visitor: SearchPath) -> bool:
return visitor.get_dictionary_item().has_attribute(self.attribute)
def __str__(self):
return "HasRootAttribute{" + self.attribute.name + "}"
class LastDerivationIs(AbstractCondition):
def __init__(self, state): # state: MorphemeState
self.state = state
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
for sf in reversed(suffixes[1:]):
if sf.get_state().derivative:
return sf.get_state() == self.state
return False
def __str__(self):
return "LastDerivationIs{" + str(self.state) + "}"
class LastDerivationIsAny(AbstractCondition):
def __init__(self, states: Tuple[MorphemeState, ...]): # states: Tuple[MorphemeState]
self.states = set(states)
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
for sf in reversed(suffixes[1:]):
if sf.get_state().derivative:
return sf.get_state() in self.states
return False
def __str__(self):
return "LastDerivationIsAny{" + str(self.states) + "}"
class HasAnySuffixSurface(AbstractCondition):
def __init__(self):
pass
def accept_(self, visitor: SearchPath) -> bool:
return visitor.contains_suffix_with_surface_()
def __str__(self):
return "HasAnySuffixSurface{}"
class PreviousMorphemeIs(AbstractCondition):
def __init__(self, morpheme: Morpheme):
self.morpheme = morpheme
def accept_(self, visitor: SearchPath) -> bool:
previous_state = visitor.get_previous_state()
return previous_state is not None and previous_state.morpheme == self.morpheme
def __str__(self):
return "PreviousMorphemeIs{" + str(self.morpheme) + "}"
class HasTail(AbstractCondition):
def __init__(self):
pass
def accept_(self, visitor: SearchPath) -> bool:
return len(visitor.tail) != 0
def __str__(self):
return "HasTail{}"
class ContainsMorpheme(AbstractCondition):
def __init__(self, morphemes: Tuple[Morpheme, ...]):
self.morphemes = set(morphemes)
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
for suffix in suffixes:
if suffix.get_state().morpheme in self.morphemes:
return True
return False
def __str__(self):
return "ContainsMorpheme{" + str(self.morphemes) + "}"
class PreviousStateIs(AbstractCondition):
def __init__(self, state): # state: MorphemeState
self.state = state
def accept_(self, visitor: SearchPath) -> bool:
previous_state = visitor.get_previous_state()
return previous_state is not None and previous_state == self.state
def __str__(self):
return "PreviousStateIs{" + str(self.state) + "}"
class PreviousStateIsNot(AbstractCondition):
def __init__(self, state): # state: MorphemeState
self.state = state
def accept_(self, visitor: SearchPath) -> bool:
previous_state = visitor.get_previous_state()
return previous_state is None or not previous_state == self.state
def __str__(self):
return "PreviousStateIsNot{" + str(self.state) + "}"
class PreviousStateIsAny(AbstractCondition):
def __init__(self, states: Tuple[MorphemeState, ...]): # state: MorphemeState
self.states = set(states)
def accept_(self, visitor: SearchPath) -> bool:
previous_state = visitor.get_previous_state()
return previous_state is not None and previous_state in self.states
def __str__(self):
return "PreviousStateIsAny{}"
class RootSurfaceIs(AbstractCondition):
def __init__(self, surface: str):
self.surface = surface
def accept_(self, visitor: SearchPath) -> bool:
return visitor.get_stem_transition().surface == self.surface
def __str__(self):
            return "RootSurfaceIs{" + self.surface + "}"
class RootSurfaceIsAny(AbstractCondition):
def __init__(self, surfaces: Tuple[str, ...]):
self.surfaces = surfaces
def accept_(self, visitor: SearchPath) -> bool:
for s in self.surfaces:
if visitor.get_stem_transition().surface == s:
return True
return False
def __str__(self):
return "RootSurfaceIsAny{" + str(self.surfaces) + "}"
class PreviousMorphemeIsAny(AbstractCondition):
def __init__(self, morphemes: Tuple[Morpheme, ...]):
self.morphemes = morphemes
def accept_(self, visitor: SearchPath) -> bool:
previous_state = visitor.get_previous_state()
return previous_state is not None and previous_state.morpheme in self.morphemes
def __str__(self):
return "PreviousMorphemeIsAny{" + str(self.morphemes) + "}"
class PreviousGroupContains(AbstractCondition):
def __init__(self, states: Tuple[MorphemeState, ...]): # state: MorphemeState
self.states = states
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
last_index = len(suffixes) - 1
sf = suffixes[last_index]
while not sf.get_state().derivative:
if last_index == 0:
return False
last_index -= 1
sf = suffixes[last_index]
for i in range(last_index - 1, 0, -1):
sf = suffixes[i]
if sf.get_state() in self.states:
return True
if sf.get_state().derivative:
return False
return False
def __str__(self):
return "PreviousGroupContains{" + str(self.states) + "}"
class DictionaryItemIs(AbstractCondition):
def __init__(self, item: DictionaryItem):
self.item = item
def accept_(self, visitor: SearchPath) -> bool:
return self.item is not None and visitor.has_dictionary_item(self.item)
def __str__(self):
return "DictionaryItemIs{" + str(self.item) + "}"
class DictionaryItemIsNone(AbstractCondition):
def __init__(self, items: Tuple[DictionaryItem, ...]):
self.items = items
def accept_(self, visitor: SearchPath) -> bool:
return visitor.get_dictionary_item() not in self.items
def __str__(self):
return "DictionaryItemIsNone{" + str(self.items) + "}"
class HasTailSequence(AbstractCondition):
def __init__(self, morphemes: Tuple[Morpheme, ...]):
self.morphemes = morphemes
def accept_(self, visitor: SearchPath) -> bool:
forms = visitor.transitions
if len(forms) < len(self.morphemes):
return False
else:
i = 0
j = len(forms) - len(self.morphemes)
if i >= len(self.morphemes):
return True
while self.morphemes[i] == forms[j].get_morpheme():
i += 1
j += 1
if i >= len(self.morphemes):
return True
return False
def __str__(self):
return "HasTailSequence{" + str(self.morphemes) + "}"
class ContainsMorphemeSequence(AbstractCondition):
def __init__(self, morphemes: Tuple[Morpheme, ...]):
self.morphemes = morphemes
def accept_(self, visitor: SearchPath) -> bool:
forms = visitor.transitions
if len(forms) < len(self.morphemes):
return False
else:
m = 0
for form in forms:
if form.get_morpheme() == self.morphemes[m]:
m += 1
if m == len(self.morphemes):
return True
else:
m = 0
return False
def __str__(self):
return "ContainsMorphemeSequence{" + str(self.morphemes) + "}"
class PreviousGroupContainsMorpheme(AbstractCondition):
def __init__(self, morphemes: Tuple[Morpheme, ...]):
self.morphemes = morphemes
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
last_index = len(suffixes) - 1
sf = suffixes[last_index]
while not sf.get_state().derivative:
if last_index == 0:
return False
last_index -= 1
sf = suffixes[last_index]
for i in range(last_index - 1, 0, -1):
sf = suffixes[i]
if sf.get_state().morpheme in self.morphemes:
return True
if sf.get_state().derivative:
return False
return False
def __str__(self):
return "PreviousGroupContainsMorpheme" + str(self.morphemes) + "}"
class NoSurfaceAfterDerivation(AbstractCondition):
def __init__(self):
pass
def accept_(self, visitor: SearchPath) -> bool:
suffixes = visitor.transitions
for sf in reversed(suffixes[1:]):
if sf.get_state().derivative:
return True
if len(sf.surface) != 0:
return False
return True
def __str__(self):
return "NoSurfaceAfterDerivation{}"
class SecondaryPosIs(AbstractCondition):
def __init__(self, pos: SecondaryPos):
self.pos = pos
def accept_(self, visitor: SearchPath) -> bool:
return visitor.get_dictionary_item().secondary_pos == self.pos
def __str__(self):
return "SecondaryPosIs{" + self.pos.name + "}"
Conditions.HAS_TAIL = Conditions.HasTail()
Conditions.HAS_SURFACE = Conditions.HasAnySuffixSurface()
Conditions.HAS_NO_SURFACE = Conditions.HasAnySuffixSurface().not_()
Conditions.CURRENT_GROUP_EMPTY = Conditions.NoSurfaceAfterDerivation() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/morphotactics/conditions.py | conditions.py |
from __future__ import annotations
from typing import List, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from zemberek.morphology.morphotactics import TurkishMorphotactics
from zemberek.morphology.morphotactics.stem_transition import StemTransition
from zemberek.morphology.morphotactics.suffix_transition import SuffixTransition
from zemberek.morphology.morphotactics.morpheme import Morpheme
from zemberek.morphology.lexicon import DictionaryItem
from zemberek.core.turkish import PhoneticAttribute
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
from zemberek.morphology.analysis.search_path import SearchPath
from zemberek.morphology.analysis.surface_transitions import SurfaceTransition
from zemberek.morphology.analysis.attributes_helper import AttributesHelper
class WordGenerator:
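    """
    Generates surface word forms by walking the morphotactics graph with a dictionary
    item and a sequence of morphemes.

    A minimal sketch (assumes `morphotactics`, `lexicon` and `morphemes` are provided by
    the caller; the item id is only illustrative):

        generator = WordGenerator(morphotactics)
        item = lexicon.get_item_by_id("kalem_Noun")
        for result in generator.generate(item=item, morphemes=morphemes):
            print(result.surface, result.analysis)
    """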
def __init__(self, morphotactics: TurkishMorphotactics):
self.morphotactics = morphotactics
self.stem_transitions = morphotactics.stem_transitions
def generate(self, item: DictionaryItem = None, morphemes: Tuple[Morpheme, ...] = None,
candidates: Tuple[StemTransition, ...] = None) -> Tuple['WordGenerator.Result', ...]:
if item:
candidates_st: Tuple[StemTransition, ...] = self.stem_transitions.get_transitions_for_item(item)
return self.generate(candidates=candidates_st, morphemes=morphemes)
# no item means generate(List<StemTransition> candidates, List<Morpheme> morphemes) is called
paths: List['WordGenerator.GenerationPath'] = []
for candidate in candidates:
search_path: SearchPath = SearchPath.initial_path(candidate, " ")
# morphemes_in_path: Tuple[Morpheme]
if len(morphemes) > 0:
if morphemes[0] == search_path.current_state.morpheme:
morphemes_in_path = morphemes[1:]
else:
morphemes_in_path = morphemes
else:
morphemes_in_path = ()
paths.append(WordGenerator.GenerationPath(search_path, morphemes_in_path))
# search graph
result_paths: Tuple['WordGenerator.GenerationPath'] = self.search(paths)
result: List['WordGenerator.Result'] = []
for path in result_paths:
analysis = SingleAnalysis.from_search_path(path.path)
result.append(WordGenerator.Result(analysis.surface_form(), analysis))
return tuple(result)
def search(self, current_paths: List['WordGenerator.GenerationPath']) -> Tuple['WordGenerator.GenerationPath', ...]:
result: List['WordGenerator.GenerationPath'] = []
while len(current_paths) > 0:
all_new_paths: List['WordGenerator.GenerationPath'] = []
for path in current_paths:
if len(path.morphemes) == 0:
if path.path.terminal and PhoneticAttribute.CannotTerminate not in path.path.phonetic_attributes:
result.append(path)
continue
new_paths: List['WordGenerator.GenerationPath'] = self.advance(path)
all_new_paths.extend(new_paths)
current_paths = all_new_paths
return tuple(result)
@staticmethod
def advance(g_path: 'WordGenerator.GenerationPath') -> List['WordGenerator.GenerationPath']:
new_paths: List['WordGenerator.GenerationPath'] = []
for transition in g_path.path.current_state.outgoing:
suffix_transition = transition
if len(g_path.morphemes) == 0 and suffix_transition.has_surface_form():
continue
if not g_path.matches(suffix_transition):
continue
if not suffix_transition.can_pass(g_path.path):
continue
if not suffix_transition.has_surface_form():
p_copy: SearchPath = g_path.path.get_copy_for_generation(SurfaceTransition("", suffix_transition),
g_path.path.phonetic_attributes)
new_paths.append(g_path.copy_(p_copy))
continue
surface = SurfaceTransition.generate_surface(suffix_transition, g_path.path.phonetic_attributes)
surface_transition = SurfaceTransition(surface, suffix_transition)
attributes = AttributesHelper.get_morphemic_attributes(surface, g_path.path.phonetic_attributes)
attributes.discard(PhoneticAttribute.CannotTerminate)
last_token: SurfaceTransition.SuffixTemplateToken = suffix_transition.get_last_template_token()
if last_token.type_ == SurfaceTransition.TemplateTokenType.LAST_VOICED:
attributes.add(PhoneticAttribute.ExpectsConsonant)
elif last_token.type_ == SurfaceTransition.TemplateTokenType.LAST_NOT_VOICED:
attributes.add(PhoneticAttribute.ExpectsVowel)
attributes.add(PhoneticAttribute.CannotTerminate)
p: SearchPath = g_path.path.get_copy_for_generation(surface_transition, attributes)
new_paths.append(g_path.copy_(p))
return new_paths
class Result:
def __init__(self, surface: str, analysis: SingleAnalysis):
self.surface = surface
self.analysis = analysis
def __str__(self):
return self.surface + "-" + str(self.analysis)
class GenerationPath:
def __init__(self, path: SearchPath, morphemes: Tuple[Morpheme]):
self.path = path
self.morphemes = morphemes
def copy_(self, path: SearchPath) -> 'WordGenerator.GenerationPath':
last_transition: SurfaceTransition = path.get_last_transition()
m: Morpheme = last_transition.get_morpheme()
if len(last_transition.surface) == 0:
if len(self.morphemes) == 0:
return WordGenerator.GenerationPath(path, self.morphemes)
if m == self.morphemes[0]:
return WordGenerator.GenerationPath(path, self.morphemes[1:])
else:
return WordGenerator.GenerationPath(path, self.morphemes)
if m != self.morphemes[0]:
                raise Exception("Cannot create a GenerationPath copy because the transition morpheme and the first "
                                "morpheme to consume do not match.")
return WordGenerator.GenerationPath(path, self.morphemes[1:])
def matches(self, transition: SuffixTransition):
if not transition.has_surface_form():
return True
if len(self.morphemes) > 0 and transition.to.morpheme == self.morphemes[0]:
return True
return False | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/generator/word_generator.py | word_generator.py |
from __future__ import annotations
from typing import List, TYPE_CHECKING, DefaultDict, Any, Optional, Tuple, Dict
from operator import attrgetter
from collections import defaultdict, OrderedDict
import numpy as np
if TYPE_CHECKING:
from zemberek.core.data.weight_lookup import WeightLookup
from zemberek.core.data.compressed_weights import CompressedWeights
from zemberek.core.turkish.secondary_pos import SecondaryPos
from zemberek.morphology.ambiguity.ambiguity_resolver import AmbiguityResolver
from zemberek.morphology.analysis.sentence_analysis import SentenceAnalysis
from zemberek.morphology.analysis.word_analysis import WordAnalysis
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
from zemberek.morphology.analysis.sentence_word_analysis import SentenceWordAnalysis
class PerceptronAmbiguityResolver(AmbiguityResolver):
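    """
    Morphological ambiguity resolver based on an averaged perceptron model. Candidate
    analyses of each word are scored with trigram features extracted from the
    (previous, current, candidate) analyses, and the highest scoring analysis sequence
    over the whole sentence is selected (see Decoder.best_path).
    """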
sentence_begin: SingleAnalysis = SingleAnalysis.unknown("<s>")
sentence_end: SingleAnalysis = SingleAnalysis.unknown("</s>")
def __init__(self, averaged_model: WeightLookup, extractor: 'PerceptronAmbiguityResolver.FeatureExtractor'):
self.decoder = PerceptronAmbiguityResolver.Decoder(averaged_model, extractor)
@classmethod
def from_resource(cls, resource_path: str) -> 'PerceptronAmbiguityResolver':
lookup = CompressedWeights.deserialize(resource_path)
extractor = cls.FeatureExtractor(use_cache=False)
return cls(lookup, extractor)
def disambiguate(self, sentence: str, all_analyses: List[WordAnalysis]) -> SentenceAnalysis:
best: PerceptronAmbiguityResolver.DecodeResult = self.decoder.best_path(all_analyses)
l: List[SentenceWordAnalysis] = [
SentenceWordAnalysis(best.best_parse[i], word_analysis) for i, word_analysis in enumerate(all_analyses)
]
return SentenceAnalysis(sentence, l)
class WordData:
def __init__(self, lemma: str, igs: List[str]):
self.lemma = lemma
self.igs = igs
@classmethod
def from_analysis(cls, sa: SingleAnalysis) -> 'PerceptronAmbiguityResolver.WordData':
lemma = sa.item.lemma
sec_pos: SecondaryPos = sa.item.secondary_pos
sp: str = '' if sec_pos == SecondaryPos.None_ else sec_pos.name
igs: List[str] = []
for i in range(sa.group_boundaries.shape[0]):
s: str = sa.get_group(0).lexical_form()
if i == 0:
s = sp + s
igs.append(s)
return cls(lemma, igs)
def last_group(self) -> str:
return self.igs[-1]
class FeatureExtractor:
feature_cache: Dict[Tuple[SingleAnalysis, ...], DefaultDict[Any, np.int32]] = dict()
def __init__(self, use_cache: bool):
self.use_cache = use_cache
def extract_from_trigram(self, trigram: List[SingleAnalysis]) -> DefaultDict[Any, np.int32]:
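            """
            Extracts sparse perceptron features from a trigram of analyses. Feature keys
            combine the lemmas and the joined inflectional-group strings of the three
            analyses, prefixed with a feature-template id (e.g. "4:", "10:", "15:"); the
            returned dict maps every feature key to its count in the trigram.
            """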
if self.use_cache:
# raise ValueError(f"feature cache for FeatureExtractor has not been implemented yet!")
cached = self.feature_cache.get(tuple(trigram))
if cached is not None:
return cached
feats = defaultdict(np.int32)
w1: 'PerceptronAmbiguityResolver.WordData' = PerceptronAmbiguityResolver.WordData.from_analysis(
trigram[0]
)
w2: 'PerceptronAmbiguityResolver.WordData' = PerceptronAmbiguityResolver.WordData.from_analysis(
trigram[1]
)
w3: 'PerceptronAmbiguityResolver.WordData' = PerceptronAmbiguityResolver.WordData.from_analysis(
trigram[2]
)
r1: str = w1.lemma
r2: str = w2.lemma
r3: str = w3.lemma
# ig1: str = '+'.join(w1.igs)
ig2: str = '+'.join(w2.igs)
ig3: str = '+'.join(w3.igs)
# r1Ig1 = f"{r1}+{ig1}"
r2Ig2 = f"{r2}+{ig2}"
r3Ig3 = f"{r3}+{ig3}"
feats["2:" + r1 + ig2 + r3Ig3] += 1
feats["3:" + r2Ig2 + "-" + r3Ig3] += 1
feats["4:" + r3Ig3] += 1
feats["9:" + r2 + "-" + r3] += 1
feats["10:" + r3] += 1
feats["10b:" + r2] += 1
feats["10c:" + r1] += 1
w1_last_group: str = w1.last_group()
w2_last_group: str = w2.last_group()
for ig in w3.igs:
feats["15:" + w1_last_group + "-" + w2_last_group + "-" + ig] += 1
feats["17:" + w2_last_group + ig] += 1
for k, ig in enumerate(w3.igs):
feats["20:" + str(k) + "-" + ig] += 1
feats[f"22:{trigram[2].group_boundaries.shape[0]}"] += 1
# do this outside
# for k in feats.keys():
# feats[k] = np.int32(feats[k])
if self.use_cache:
self.feature_cache[tuple(trigram)] = feats
return feats
class Decoder:
def __init__(self, model: WeightLookup, extractor: 'PerceptronAmbiguityResolver.FeatureExtractor'):
self.model = model
self.extractor = extractor
def best_path(self, sentence: List[WordAnalysis]) -> 'PerceptronAmbiguityResolver.DecodeResult':
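            """
            Viterbi-style decoding over the sentence: each hypothesis keeps the last two
            analyses, new hypotheses are scored by adding the weights of their trigram
            features, and for identical (prev, current) pairs only the best scoring
            hypothesis is kept. The best final hypothesis is traced back to obtain the
            selected analysis sequence.
            """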
if len(sentence) == 0:
raise ValueError("bestPath cannot be called with empty sentence.")
current_list: List['PerceptronAmbiguityResolver.Hypothesis'] = [
PerceptronAmbiguityResolver.Hypothesis(
PerceptronAmbiguityResolver.sentence_begin,
PerceptronAmbiguityResolver.sentence_begin,
previous=None,
score=np.float32(0)
)
]
# current_list: OrderedDict['PerceptronAmbiguityResolver.Hypothesis', np.float32] = OrderedDict(
# [
# (PerceptronAmbiguityResolver.Hypothesis(
# PerceptronAmbiguityResolver.sentence_begin,
# PerceptronAmbiguityResolver.sentence_begin,
# previous=None,
# score=np.float32(0)
# ), np.float32(0))
# ]
# )
for analysis_data in sentence:
next_list: List['PerceptronAmbiguityResolver.Hypothesis'] = []
# next_list: OrderedDict['PerceptronAmbiguityResolver.Hypothesis', np.float32] = OrderedDict()
analyses: List[SingleAnalysis] = list(analysis_data.analysis_results)
if len(analyses) == 0:
analyses = [SingleAnalysis.unknown(analysis_data.inp)]
for analysis in analyses:
for h in current_list:
trigram: List[SingleAnalysis] = [h.prev, h.current, analysis]
features = self.extractor.extract_from_trigram(trigram)
trigram_score = np.float32(0)
for key in features.keys():
trigram_score += np.float32(self.model.get_(key) * np.float32(features.get(key)))
new_hyp = PerceptronAmbiguityResolver.Hypothesis(
h.current,
analysis,
h,
score=np.float32(h.score + trigram_score)
)
i, found = next(((i, c) for i, c in enumerate(next_list) if new_hyp == c), (None, None))
if found is not None and new_hyp.score > found.score:
next_list[i] = new_hyp
elif found is None:
next_list.append(new_hyp)
# if new_hyp in next_list:
# new_hyp.score = max(next_list[new_hyp], new_hyp.score)
# next_list[new_hyp] = new_hyp.score
# next_list.append(new_hyp)
current_list = next_list
for h in current_list:
trigram: List[SingleAnalysis] = [h.prev, h.current, PerceptronAmbiguityResolver.sentence_end]
features = self.extractor.extract_from_trigram(trigram)
trigram_score = np.float32(0)
for key in features.keys():
trigram_score += np.float32(self.model.get_(key) * np.float32(features.get(key)))
h.score += trigram_score
best = max(current_list, key=attrgetter('score'))
best_score = best.score
result: List[SingleAnalysis] = []
while best.previous is not None:
result.append(best.current)
best = best.previous
return PerceptronAmbiguityResolver.DecodeResult(list(reversed(result)), best_score)
class DecodeResult:
def __init__(self, best_parse: List[SingleAnalysis], score: np.float32):
self.best_parse = best_parse
self.score = score
class Hypothesis:
def __init__(
self,
prev: SingleAnalysis,
current: SingleAnalysis,
previous: Optional['PerceptronAmbiguityResolver.Hypothesis'],
score: np.float32
):
self.prev = prev
self.current = current
self.previous = previous
self.score = score
def __hash__(self) -> int:
result = hash(self.prev)
result = 31 * result + hash(self.current)
return result
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, PerceptronAmbiguityResolver.Hypothesis):
if self.prev != other.prev:
return False
return self.current == other.current
else:
return False
def __str__(self):
return f"Hypothesis[prev='{self.prev}', current='{self.current}', score={self.score}]" | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/ambiguity/perceptron_ambiguity_resolver.py | perceptron_ambiguity_resolver.py |
from typing import Set
from zemberek.core.turkish import RootAttribute, PrimaryPos, SecondaryPos
class DictionaryItem:
"""
A class used to represent a word and its properties
in the lexicon dictionary
...
Attributes
----------
lemma : str
the lemma of the word
root : str
the root of the word
primary_pos : PrimaryPos
primary POS tag assigned to the word
secondary_pos : SecondaryPos
secondary POS tag assigned to word if exists
Default is SecondaryPos.None_
attributes : Set[RootAttribute]
attributes that are related to the word
pronunciation : str
pronunciation of the word. Default is None
index : int
non-unique index to words. Default is 0
reference_item : DictionaryItem
a reference object associated with current object
Methods
-------
    has_attribute(attribute)
        Checks whether the given RootAttribute is present on this word
    set_reference_item(reference_item)
        Sets another DictionaryItem as the reference item of this word
    normalized_lemma()
        Returns the lemma without the -mek/-mak infinitive suffix for verbs
"""
UNKNOWN: 'DictionaryItem'
def __init__(self, lemma: str, root: str, primary_pos: PrimaryPos,
secondary_pos: SecondaryPos, attributes: Set[RootAttribute] = None,
pronunciation: str = None, index: int = None):
"""
Initializes a DictionaryItem object with given parameters that were read
from lexicon
:param lemma: lemma of the word
:param root: root of the word
:param primary_pos: primary POS tag for the word
:param secondary_pos: secondary POS tag for the word
:param attributes: set of predefined attributes given to word. Default is None
:param pronunciation: pronunciation of the word. Default is None
:param index: non-unique index for each word. Default is None
"""
self.lemma = lemma
self.root = root
self.primary_pos = primary_pos
self.secondary_pos = secondary_pos
self.pronunciation = root if pronunciation is None else pronunciation
self.index = 0 if index is None else index
self.attributes = set() if attributes is None else attributes
self.reference_item = None
self.id_: str = self.generate_id(lemma, primary_pos, secondary_pos, self.index)
@staticmethod
def generate_id(lemma: str, pos: PrimaryPos, spos: SecondaryPos,
index: int) -> str:
"""
        Generates an id for a word with the given parameters.
        The id has the form lemma_ppos[_spos][_index]: the secondary POS part is added
        only when it is not SecondaryPos.None_, and the index part only when the index
        is greater than zero, e.g.
        kalem -> kalem_Noun
:param lemma: lemma of the word
:param pos: primary POS tag pre-assigned to word
:param spos: secondary POS tag pre-assigned to word
:param index: index of the word
:return: generated string id
"""
item_id = f"{lemma}_{pos.short_form}"
if spos and spos != SecondaryPos.None_:
item_id = f"{item_id}_{spos.short_form}"
if index > 0:
item_id = f"{item_id}_{str(index)}"
return item_id
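    # Illustrative example (not executed): generate_id("kalem", PrimaryPos.Noun, SecondaryPos.None_, 0)
    # returns "kalem_Noun"; a homonym with index 1 would get "kalem_Noun_1".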
def has_attribute(self, attribute: RootAttribute) -> bool:
return attribute in self.attributes
def set_reference_item(self, reference_item: 'DictionaryItem'):
"""
sets a reference word for the given word
        :param reference_item: another DictionaryItem object, i.e. another word
        related to the current word
"""
self.reference_item = reference_item
def __str__(self):
# return self.lemma + self.root + self.id
string = self.lemma + " [P:" + self.primary_pos.short_form
if self.secondary_pos and self.secondary_pos != SecondaryPos.None_:
string += ", " + self.secondary_pos.short_form
        if self.attributes is None or len(self.attributes) == 0:
string += "]"
else:
string = self.print_attributes(string, self.attributes)
return string
def normalized_lemma(self) -> str:
return self.lemma[0: len(self.lemma) - 3] if self.primary_pos == PrimaryPos.Verb else self.lemma
@staticmethod
def print_attributes(string: str, attrs: Set[RootAttribute]) -> str:
if attrs and len(attrs) > 0:
string += "; A:"
i = 0
for attribute in attrs:
string += attribute.name
if i < len(attrs) - 1:
string += ", "
i += 1
string += "]"
return string
def is_unknown(self) -> bool:
return self == DictionaryItem.UNKNOWN
def __hash__(self):
return hash(self.id_)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, DictionaryItem):
return self.id_ == other.id_
return False
DictionaryItem.UNKNOWN = DictionaryItem(lemma="UNK", root="UNK", pronunciation="UNK", primary_pos=PrimaryPos.Unknown,
secondary_pos=SecondaryPos.UnknownSec) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/lexicon/dictionary_item.py | dictionary_item.py |
import csv
import os
import time
from pkg_resources import resource_filename
from typing import List, Dict, Set, Tuple
from logging import Logger
from zemberek.core.turkish import RootAttribute, SecondaryPos, PrimaryPos
from zemberek.morphology.lexicon.dictionary_item import DictionaryItem
logger = Logger("logger")
class DictionaryReader:
"""
A class that reads the lexicon into memory and creates a RootLexicon
object
...
Methods
-------
load_from_resources(resource_path: str) -> RootLexicon
Reads the lexicon dictionary from lexicon.csv file in the given path
"""
def __init__(self):
pass
@staticmethod
def load_from_resources(resource_path: str) -> 'RootLexicon':
"""
Reads the lexicon.csv file in the given path and creates and returns
a RootLexicon object
:param resource_path: path to the lexicon.csv file to be read
:return: RootLexicon instance with the read lexicon dictionary
"""
items = list()
csv.field_size_limit(100000000)
# relative path: os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), "resources/lexicon.csv"
with open(resource_path, 'r', encoding='utf-8') as f:
lex = list(csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE))
for i, line in enumerate(lex):
item = DictionaryReader.make_dict_item_from_line(line)
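                # Column 7 holds the id of another lexicon entry (the reference item).
                # When it is not 'null', scan the lines for the entry whose first column
                # matches that id and attach it as the reference item.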
if line[7] != 'null':
reference_item_line = None
iterator = iter(lex)
while reference_item_line is None:
line_ref = next(iterator)
if line[7] == line_ref[0]:
reference_item_line = line_ref
assert reference_item_line is not None
item.set_reference_item(DictionaryReader.make_dict_item_from_line(reference_item_line))
items.append(item)
return RootLexicon(items)
@staticmethod
def make_dict_item_from_line(line: List[str]) -> 'DictionaryItem':
"""
Creates a DictionaryItem instance for a given lexicon.csv file line
:param line: list of elements of a line in lexicon.csv file
:return: a DictionaryItem instance with the parameters parsed from the line
"""
item_lemma, item_root, item_pron = line[1], line[2], line[5]
item_ppos, item_spos = PrimaryPos(line[3]), SecondaryPos(line[4])
item_index = int(line[6])
if line[8] == '0':
item_attrs = None
else:
item_attrs = set([RootAttribute[attr] for attr in line[8].split()])
return DictionaryItem(item_lemma, item_root, item_ppos, item_spos,
item_attrs, item_pron, item_index)
class DictionaryItemIterator:
"""
An iterator class to iterate over RootLexicon instance
Attributes
----------
dict_items : Tuple[DictionaryItem]
tuple of DictionaryItem (words) to iterate over
index : int
current item's position
"""
def __init__(self, dict_items: Tuple[DictionaryItem]):
self.dict_items = dict_items
self.index = 0
def __next__(self):
"""
returns the next item from the list if exists
else raises StopIteration exception
:return: next item in the list
"""
if self.index < len(self.dict_items):
_item = self.dict_items[self.index]
self.index += 1
return _item
raise StopIteration
class RootLexicon:
"""
An iterable class to represent the lexicon dictionary
Attributes
----------
item_list : List[DictionaryItem]
list of DictionaryItem (words) to store words read from a lexicon file
"""
def __init__(self, item_list: List[DictionaryItem]):
self.id_map: Dict[str, DictionaryItem] = dict()
self.item_set: Set[DictionaryItem] = set()
self.item_map: Dict[str, List[DictionaryItem]] = dict()
for item in item_list:
self.add_(item)
def __len__(self):
return len(self.item_set)
def __iter__(self):
return DictionaryItemIterator(tuple(self.item_set))
def __contains__(self, item):
return item in self.item_set
def add_(self, item: DictionaryItem):
if item in self.item_set:
logger.warning("Duplicated item")
elif item.id_ in self.id_map.keys():
logger.warning(f"Duplicated item. ID {self.id_map.get(item.id_)}")
else:
self.item_set.add(item)
self.id_map[item.id_] = item
if item.lemma in self.item_map.keys():
self.item_map[item.lemma].append(item)
else:
self.item_map[item.lemma] = [item]
def get_item_by_id(self, id_: str) -> DictionaryItem:
return self.id_map.get(id_)
@staticmethod
def get_default() -> 'RootLexicon':
start = time.time()
lexicon_path = resource_filename("zemberek", os.path.join("resources", "lexicon.csv"))
lexicon = DictionaryReader.load_from_resources(lexicon_path)
logger.debug(f"Dictionary generated in {time.time() - start} seconds")
return lexicon | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/morphology/lexicon/root_lexicon.py | root_lexicon.py |
from typing import List, Dict
from zemberek.core.text import TextUtil
from zemberek.core.turkish.turkic_letter import TurkicLetter
class TurkishAlphabet:
r"""
The class that represents Turkish alphabet. It stores all the letters and
special characters in Turkish.
Attributes:
lower_map Dict[str, str]:
A dictionary to map capital "I" and "İ" to their correct lowers.
upper_map Dict[str, str]:
A dictionary to map "i" to its correct capital
"""
INSTANCE: 'TurkishAlphabet'
lower_map = {ord(u'I'): u'ı', ord(u"İ"): u"i"}
upper_map = {ord(u'i'): u'İ'}
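    # Example (illustrative): "DİYARBAKIR".translate(TurkishAlphabet.INSTANCE.lower_map).lower()
    # yields "diyarbakır", whereas plain str.lower() would mishandle the dotted/dotless I.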
def __init__(self):
self.lowercase = "abcçdefgğhıijklmnoöprsştuüvyzxwqâîû"
self.uppercase = self.lowercase.translate(self.upper_map).upper()
self.all_letters = self.lowercase + self.uppercase
self.vowels_lowercase = "aeıioöuüâîû"
self.vowels_uppercase = self.vowels_lowercase.translate(self.upper_map).upper()
self.vowels = set(self.vowels_lowercase + self.vowels_uppercase)
self.circumflex = "âîû"
self.circumflex_upper = "ÂÎÛ"
self.circumflexes = set(self.circumflex + self.circumflex_upper)
self.apostrophe = set("′´`’‘'")
self.stop_consonants = "çkptÇKPT"
self.voiceless_consonants = "çfhkpsştÇFHKPSŞT"
self.turkish_specific = "çÇğĞıİöÖşŞüÜâîûÂÎÛ"
self.turkish_specific_lookup = set("çÇğĞıİöÖşŞüÜâîûÂÎÛ")
self.turkish_ascii = "cCgGiIoOsSuUaiuAIU"
self.ascii_eq_tr = "cCgGiIoOsSuUçÇğĞıİöÖşŞüÜ"
self.ascii_eq_tr_set = set(self.ascii_eq_tr)
self.ascii_eq = "çÇğĞıİöÖşŞüÜcCgGiIoOsSuU"
self.foreign_diacritics = "ÀÁÂÃÄÅÈÉÊËÌÍÎÏÑÒÓÔÕÙÚÛàáâãäåèéêëìíîïñòóôõùúû"
self.diacritics_to_turkish = "AAAAAAEEEEIIIINOOOOUUUaaaaaaeeeeiiiinoooouuu"
self.voicing_map = {}
self.devoicing_map = {}
self.circumflex_map = {}
self.letter_map = {}
letters = self.generate_letters()
for letter in letters:
self.letter_map[letter.char_value] = letter
self.ascii_equal_map: Dict[str, str] = {}
for in_, out_ in zip(self.ascii_eq_tr, self.ascii_eq):
self.ascii_equal_map[in_] = out_
self.turkish_to_ascii_map = {}
self.foreign_diacritics_map = {}
self.generate_voicing_devoicing_lookups()
self.populate_dict(self.turkish_to_ascii_map, self.turkish_specific, self.turkish_ascii)
self.populate_dict(self.foreign_diacritics_map, self.foreign_diacritics, self.diacritics_to_turkish)
def is_turkish_specific(self, c: str) -> bool:
return c in self.turkish_specific_lookup
def contains_ascii_related(self, s: str) -> bool:
for c in s:
if c in self.ascii_eq_tr_set:
return True
return False
def to_ascii(self, inp: str) -> str:
sb = []
for c in inp:
res = self.turkish_to_ascii_map.get(c)
map_ = c if res is None else res
sb.append(map_)
return ''.join(sb)
def is_ascii_equal(self, c1: str, c2: str) -> bool:
if c1 == c2:
return True
a1 = self.ascii_equal_map.get(c1)
if a1 is None:
return False
return a1 == c2
def equals_ignore_diacritics(self, s1: str, s2: str) -> bool:
if s1 is None or s2 is None:
return False
if len(s1) != len(s2):
return False
for c1, c2 in zip(s1, s2):
if not self.is_ascii_equal(c1, c2):
return False
return True
def starts_with_ignore_diacritics(self, s1: str, s2: str) -> bool:
if s1 is None or s2 is None:
return False
if len(s1) < len(s2):
return False
for c1, c2 in zip(s1, s2):
if not self.is_ascii_equal(c1, c2):
return False
return True
@staticmethod
def contains_digit(s: str) -> bool:
if len(s) == 0:
return False
else:
for c in s:
if "0" <= c <= "9":
return True
return False
def contains_apostrophe(self, s: str) -> bool:
for c in s:
if c in self.apostrophe:
return True
return False
def normalize_apostrophe(self, s: str) -> str:
if not self.contains_apostrophe(s):
return s
else:
sb = []
for c in s:
if c in self.apostrophe:
sb.append("\'")
else:
sb.append(c)
return ''.join(sb)
def contains_foreign_diacritics(self, s: str) -> bool:
for c in s:
if c in self.foreign_diacritics:
return True
return False
def foreign_diacritics_to_turkish(self, inp: str) -> str:
sb = ""
for c in inp:
res = self.foreign_diacritics_map.get(c)
map_ = c if res is None else res
sb += map_
return sb
def contains_circumflex(self, s: str) -> bool:
for c in s:
if c in self.circumflexes:
return True
return False
def normalize_circumflex(self, s: str) -> str:
if len(s) == 1:
res = self.circumflex_map.get(s)
return s if res is None else res
else:
if not self.contains_circumflex(s):
return s
else:
sb = []
for c in s:
if c in self.circumflexes:
sb.append(self.circumflex_map.get(c))
else:
sb.append(c)
return ''.join(sb)
def normalize(self, inp: str) -> str:
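        """
        Lower-cases the input with Turkish-aware casing, normalizes apostrophes, and
        replaces every character that is not a known alphabet letter, '.' or '-' with '?'.
        """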
inp = TextUtil.normalize_apostrophes(inp.translate(self.lower_map).lower())
sb = []
for c in inp:
if c in self.letter_map.keys() or c == '.' or c == '-':
sb.append(c)
else:
sb.append("?")
return ''.join(sb)
def is_vowel(self, c: str) -> bool:
return c in self.vowels
def contains_vowel(self, s: str) -> bool:
if len(s) == 0:
return False
for c in s:
if self.is_vowel(c):
return True
return False
def generate_voicing_devoicing_lookups(self):
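        """
        Builds the consonant voicing (ç->c, g->ğ, k->ğ, p->b, t->d), devoicing
        (b->p, c->ç, d->t, g->k, ğ->k) and circumflex normalization (â->a, î->i, û->u)
        lookup tables for both lower and upper case letters.
        """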
voicing_in = "çgkpt"
voicing_out = "cğğbd"
devoicing_in = "bcdgğ"
devoicing_out = "pçtkk"
self.populate_dict(self.voicing_map, voicing_in + voicing_in.upper(), voicing_out + voicing_out.upper())
self.populate_dict(self.devoicing_map, devoicing_in + devoicing_in.upper(),
devoicing_out + devoicing_out.upper())
circumflex_normalized = "aiu"
self.populate_dict(self.circumflex_map, self.circumflex + self.circumflex.upper(),
circumflex_normalized + circumflex_normalized.translate(self.upper_map).upper())
@staticmethod
def populate_dict(dictionary: Dict, in_str: str, out_str: str):
for in_, out in zip(in_str, out_str):
dictionary[in_] = out
@staticmethod
def generate_letters() -> List[TurkicLetter]:
letters = [TurkicLetter('a', vowel=True), TurkicLetter('e', vowel=True, frontal=True),
TurkicLetter('ı', vowel=True), TurkicLetter('i', vowel=True, frontal=True),
TurkicLetter('o', vowel=True, rounded=True),
TurkicLetter('ö', vowel=True, frontal=True, rounded=True),
TurkicLetter('u', vowel=True, rounded=True),
TurkicLetter('ü', vowel=True, rounded=True, frontal=True), TurkicLetter('â', vowel=True),
TurkicLetter('î', vowel=True, frontal=True),
TurkicLetter('û', vowel=True, frontal=True, rounded=True), TurkicLetter('b'), TurkicLetter('c'),
TurkicLetter('ç', voiceless=True), TurkicLetter('d'),
TurkicLetter('f', continuant=True, voiceless=True), TurkicLetter('g'),
TurkicLetter('ğ', continuant=True), TurkicLetter('h', continuant=True, voiceless=True),
TurkicLetter('j', continuant=True), TurkicLetter('k', voiceless=True),
TurkicLetter('l', continuant=True), TurkicLetter('m', continuant=True),
TurkicLetter('n', continuant=True), TurkicLetter('p', voiceless=True),
TurkicLetter('r', continuant=True), TurkicLetter('s', continuant=True, voiceless=True),
TurkicLetter('ş', continuant=True, voiceless=True), TurkicLetter('t', voiceless=True),
TurkicLetter('v', continuant=True), TurkicLetter('y', continuant=True),
TurkicLetter('z', continuant=True), TurkicLetter('q'), TurkicLetter('w'), TurkicLetter('x')]
capitals = []
for letter in letters:
upper = 'İ' if letter.char_value == 'i' else letter.char_value.upper()
capitals.append(letter.copy_for(upper))
letters.extend(capitals)
return letters
def get_last_letter(self, s: str) -> TurkicLetter:
"""
Returns the last letter of the input as "TurkicLetter". If input is empty or the last character
does not belong to alphabet, returns TurkicLetter.UNDEFINED.
:param str s: input string
:return: last letter of input as TurkicLetter
"""
return TurkicLetter.UNDEFINED if len(s) == 0 else self.get_letter(s[-1])
def get_letter(self, c: str) -> TurkicLetter:
letter = self.letter_map.get(c)
return TurkicLetter.UNDEFINED if letter is None else letter
def get_last_vowel(self, s: str) -> TurkicLetter:
if len(s) == 0:
return TurkicLetter.UNDEFINED
else:
for c in reversed(s):
if self.is_vowel(c):
return self.get_letter(c)
return TurkicLetter.UNDEFINED
def get_first_letter(self, s: str) -> TurkicLetter:
"""
Returns the first letter of the input as "TurkicLetter". If input is empty or the first
character does not belong to alphabet, returns TurkicLetter.UNDEFINED.
:param str s: input string
:return: first letter of input as TurkicLetter
"""
if not s:
return TurkicLetter.UNDEFINED
else:
return self.get_letter(s[0])
@staticmethod
def last_char(s: str) -> str:
return s[-1]
def voice(self, c: str) -> str:
res = self.voicing_map.get(c)
return c if res is None else res
def devoice(self, c: str) -> str:
res = self.devoicing_map.get(c)
return c if res is None else res
TurkishAlphabet.INSTANCE = TurkishAlphabet() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/core/turkish/turkish_alphabet.py | turkish_alphabet.py |
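# Illustrative usage sketch (not part of the original module): it only exercises methods defined
# above through the singleton created on the previous line. The sample strings are assumptions
# chosen for demonstration.
if __name__ == "__main__":
    alphabet = TurkishAlphabet.INSTANCE
    print(alphabet.voice("k"))                    # consonant voicing: k -> ğ
    print(alphabet.devoice("d"))                  # consonant devoicing: d -> t
    print(alphabet.normalize_circumflex("kâr"))   # circumflexed vowels mapped to plain ones
    print(alphabet.contains_vowel("krk"))         # False, no Turkish vowel in the string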
import numpy as np
from typing import List, BinaryIO, Tuple, Optional, Union
from struct import unpack
from zemberek.core.hash.mphf import Mphf
np.seterr(over='ignore')
class MultiLevelMphf(Mphf):
"""
    Minimal Perfect Hash Function (MPHF) implementation. A detailed explanation can be found in the original zemberek (Java) source.
"""
HASH_MULTIPLIER: np.int32 = np.int32(16777619)
INITIAL_HASH_SEED: np.int32 = np.int32(-2128831035)
BIT_MASK_21: np.int32 = np.int32(2097151) # np.int32((1 << 21) - 1)
def __init__(self, hash_level_data: Tuple['MultiLevelMphf.HashIndexes']):
self.hash_level_data = hash_level_data
@staticmethod
def deserialize(f: BinaryIO) -> 'MultiLevelMphf':
level_count, = unpack('>i', f.read(4))
indexes: List['MultiLevelMphf.HashIndexes'] = []
for i in range(level_count):
key_count, = unpack('>i', f.read(4))
bucket_amount, = unpack('>i', f.read(4))
hash_seed_values: bytes = f.read(bucket_amount)
failed_indexes_count, = unpack('>i', f.read(4))
failed_indexes: np.ndarray = np.zeros(failed_indexes_count, dtype=np.int32)
for j in range(failed_indexes_count):
failed_indexes[j], = unpack('>i', f.read(4))
indexes.append(MultiLevelMphf.HashIndexes(key_count, bucket_amount, hash_seed_values, failed_indexes))
return MultiLevelMphf(tuple(indexes))
@staticmethod
def hash_for_str(data: str, seed: int) -> np.int32:
d = np.int32(seed) if seed > 0 else MultiLevelMphf.INITIAL_HASH_SEED
for c in data:
            # this line produces a RuntimeWarning caused by an int32 overflow during multiplication;
            # the wrap-around gives the same results as the original Java implementation, but relying
            # on it is fragile and may cause problems in some environments
d = (d ^ np.int32(ord(c))) * MultiLevelMphf.HASH_MULTIPLIER
return d & np.int32(0x7fffffff)
@staticmethod
def hash_for_int_tuple(data: Tuple[int, ...], seed: int) -> np.int32:
d = np.int32(seed) if seed > 0 else MultiLevelMphf.INITIAL_HASH_SEED
for a in np.asarray(data, dtype=np.int32):
d = (d ^ a) * MultiLevelMphf.HASH_MULTIPLIER
return d & np.int32(0x7fffffff)
@staticmethod
def hash_(
data: Union[Tuple[int, ...], str],
seed: int
) -> np.int32:
if isinstance(data, str):
return MultiLevelMphf.hash_for_str(data, seed)
elif isinstance(data, tuple):
return MultiLevelMphf.hash_for_int_tuple(data, seed)
else:
raise ValueError(f"(data) parameter type not supported: {type(data)}")
def get_for_str(self, key: str, initial_hash: Optional[int] = None):
if initial_hash is None:
initial_hash = self.hash_for_str(key, seed=-1)
for i, hd in enumerate(self.hash_level_data):
seed = hd.get_seed(initial_hash)
if seed != 0:
if i == 0:
return self.hash_for_str(key, seed) % self.hash_level_data[0].key_amount
else:
return self.hash_level_data[i - 1].failed_indexes[self.hash_for_str(key, seed) %
self.hash_level_data[i].key_amount]
        raise BaseException("Cannot be here.")
def get_for_tuple(self, key: Tuple[int, ...], initial_hash: int) -> np.int32:
for i in range(len(self.hash_level_data)):
seed = self.hash_level_data[i].get_seed(initial_hash)
if seed != 0:
if i == 0:
return self.hash_(key, seed) % self.hash_level_data[0].key_amount
else:
return self.hash_level_data[i - 1].failed_indexes[self.hash_(key, seed) %
self.hash_level_data[i].key_amount]
raise BaseException("Cannot be here.")
def get_(
self,
key: Union[Tuple[int, ...], str],
initial_hash: int = None
) -> np.int32:
if isinstance(key, str):
return self.get_for_str(key, initial_hash)
elif isinstance(key, tuple):
return self.get_for_tuple(key, initial_hash)
else:
raise ValueError(f"(key) parameter type not supported: {type(key)}")
class HashIndexes:
def __init__(self, key_amount: int, bucket_amount: int, bucket_hash_seed_values: bytes,
failed_indexes: np.ndarray):
self.key_amount = key_amount
self.bucket_amount = bucket_amount
self.bucket_hash_seed_values = bucket_hash_seed_values
self.failed_indexes = failed_indexes
def get_seed(self, finger_print: int) -> int:
return (self.bucket_hash_seed_values[finger_print % self.bucket_amount]) & 0xFF | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/core/hash/multi_level_mphf.py | multi_level_mphf.py |
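# Illustrative sketch (not part of the original module): hash_for_str is an FNV-1a style hash
# masked to 31 bits. The lookup methods additionally require hash level data produced by
# deserialize(), so only the standalone hash helper is shown here.
if __name__ == "__main__":
    print(MultiLevelMphf.hash_for_str("merhaba", seed=-1))  # a non-negative np.int32 fingerprint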
from __future__ import annotations
from typing import TYPE_CHECKING, BinaryIO
import struct
import numpy as np
if TYPE_CHECKING:
from zemberek.core.hash.mphf import Mphf
from zemberek.core.hash.multi_level_mphf import MultiLevelMphf
class LossyIntLookup:
MAGIC: np.int32 = np.int32(-889274641)
def __init__(self, mphf: Mphf, data: np.ndarray):
self.mphf = mphf
self.data = data
def get_(self, s: str) -> np.int32:
index = self.mphf.get_(s) * 2
fingerprint: int = LossyIntLookup.get_fingerprint(s)
if fingerprint == self.data[index]:
return self.data[index + 1]
else:
return np.int32(0)
def size_(self) -> int:
return self.data.shape[0] // 2
    def get_as_float(self, s: str) -> np.float32:
return self.java_int_bits_to_float(self.get_(s))
@staticmethod
def get_fingerprint(s: str) -> np.int32:
"""
This method performs a bitwise and operation for the hash of a string. It uses java's string hash method
(hashCode()) therefore we implemented java's hash code in python. From java doc:
s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1]
using int arithmetic, where s[i] is the ith character of the string, n is the length of the string,
and ^ indicates exponentiation. (The hash value of the empty string is zero.)
:param s:
:return:
"""
return LossyIntLookup.java_hash_code(s) & 0x7ffffff
@staticmethod
def java_int_bits_to_float(b: np.int32) -> np.float32:
s = struct.pack('>i', b)
return np.float32(struct.unpack('>f', s)[0])
@staticmethod
def java_hash_code(s: str) -> np.int32:
arr = np.asarray([ord(c) for c in s], dtype=np.int32)
powers = np.arange(arr.shape[0], dtype=np.int32)[::-1]
bases = np.full((arr.shape[0],), 31, dtype=np.int32)
result = np.sum(arr * (np.power(bases, powers)), dtype=np.int32)
return np.int32(result)
@classmethod
def deserialize(cls, dis: BinaryIO) -> 'LossyIntLookup':
magic = np.int32(struct.unpack('>i', dis.read(4))[0])
if magic != cls.MAGIC:
raise ValueError(f"File does not carry expected value in the beginning. magic != LossyIntLookup.magid")
length = np.int32(struct.unpack('>i', dis.read(4))[0])
data = np.empty((length, ), dtype=np.int32)
for i in range(length):
data[i] = struct.unpack('>i', dis.read(4))[0]
mphf: 'Mphf' = MultiLevelMphf.deserialize(dis)
return cls(mphf, data) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/core/compression/lossy_int_lookup.py | lossy_int_lookup.py |
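# Illustrative sketch (not part of the original module): java_hash_code reproduces Java's
# String.hashCode(), i.e. s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1]. For "ab" this is
# 97*31 + 98 = 3105; get_fingerprint simply masks that hash down to a fingerprint.
if __name__ == "__main__":
    assert LossyIntLookup.java_hash_code("ab") == 3105
    print(LossyIntLookup.get_fingerprint("ab"))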
import re
from typing import List, Dict, Set, Tuple
from zemberek.core.turkish import TurkishAlphabet
from zemberek.tokenization.perceptron_segmenter import PerceptronSegmenter
from zemberek.tokenization.span import Span
class TurkishSentenceExtractor(PerceptronSegmenter):
r"""
    A class that splits text into sentences by combining the averaged perceptron model it
    inherits from PerceptronSegmenter with rule-based checks.
    Args:
        do_not_split_in_double_quotes bool:
            when True, boundary characters inside double quotes are meant to be ignored
    Attributes:
        BOUNDARY_CHARS Set[str]:
            most frequently used sentence ending characters
        double_quotes Set[str]:
special quoting characters
"""
BOUNDARY_CHARS = set(".!?…")
double_quotes = set("\"”“»«")
def __init__(self, do_not_split_in_double_quotes: bool = False):
super().__init__()
self.weights: Dict[str, float] = self.load_weights_from_csv()
self.do_not_split_in_double_quotes = do_not_split_in_double_quotes
self.abbr_set = self.load_abbreviations()
def extract_to_spans(self, paragraph: str) -> List[Span]:
"""
        function that divides a paragraph into spans: for each candidate boundary character, the
        features extracted around it are scored with the perceptron weights, and a positive total
        score closes the current span
        :param paragraph: the paragraph to be divided
        :return: the list of Span objects marking potential sentences
"""
spans = []
quote_spans = None
# if self.do_not_split_in_double_quotes:
# quote_spans = self.double_quote_spans(paragraph)
begin = 0
for j, ch in enumerate(paragraph):
if ch in self.BOUNDARY_CHARS and \
(not self.do_not_split_in_double_quotes or quote_spans is None or not self.in_span(j, quote_spans)):
boundary_data = TurkishSentenceExtractor.BoundaryData(paragraph, j, self.abbr_set)
if not boundary_data.non_boundary_check():
features = boundary_data.extract_features()
score = 0.0
for feature in features:
score += self.get_weight(feature)
if score > 0.0:
span = Span(begin, j + 1)
if span.get_length() > 0:
spans.append(span)
begin = j + 1
if begin < len(paragraph):
span = Span(begin, len(paragraph))
if span.get_length() > 0:
spans.append(span)
return spans
def from_paragraph(self, paragraph: str) -> List[str]:
"""
function that extracts the potential sentences from paragraph
:param paragraph: string that holds paragraph to be analyzed
:return: the list of sentences extracted from paragraph
"""
spans = self.extract_to_spans(paragraph)
sentences = []
for span_ in spans:
sentence = span_.get_sub_string(paragraph).strip()
if len(sentence) > 0:
sentences.append(sentence)
return sentences
@staticmethod
def in_span(index: int, spans: List[Span]) -> bool:
"""
function that checks whether the specified index is in any of the spans
that were previously found
:param index: index value to be checked
:param spans: list of spans already found
:return: returns true if the index falls into boundaries of any span, false otherwise
"""
for span_ in spans:
if span_.start > index:
return False
if span_.in_span(index):
return True
return False
def get_weight(self, key: str) -> float:
        return self.weights.get(key, 0.0)
class BoundaryData:
r"""
Class that represents various features for the character specified with index of a string
It uses previous and next unigram/bigram characters related to current character, finds previous
and next boundary characters (space or one od BOUNDARY_CHARS), current word, previous and next
parts of the word related to the current char, current word with no punctuations, and next word.
"""
def __init__(self, input_string: str, pointer: int, abbr_set: Set[str]):
self.previous_letter = input_string[pointer-1] if pointer > 0 else '_'
self.next_letter = input_string[pointer+1] if pointer < (len(input_string) - 1) else '_'
self.previous_two_letters = input_string[pointer-2:pointer] if pointer > 2 else '__'
self.next_two_letters = input_string[pointer+1:pointer+3] if pointer < (len(input_string) - 3) else '__'
self.previous_space = self.find_backwards_space_or_char(input_string, pointer)
self.left_chunk = input_string[self.previous_space:pointer]
self.previous_boundary_or_space = self.find_backwards_space_or_char(input_string, pointer, '.')
self.left_chunk_until_boundary = self.left_chunk if self.previous_space == self.previous_boundary_or_space \
else input_string[self.previous_boundary_or_space:pointer]
self.next_space = self.find_forwards_space_or_char(input_string, pointer)
self.right_chunk = input_string[pointer+1:self.next_space] if pointer < (len(input_string) - 1) else ""
self.next_boundary_or_space = self.find_forwards_space_or_char(input_string, pointer, '.')
self.right_chunk_until_boundary = self.right_chunk if self.next_space == self.next_boundary_or_space \
else input_string[pointer+1:self.next_boundary_or_space]
self.current_char = input_string[pointer]
self.current_word = self.left_chunk + self.current_char + self.right_chunk
self.current_word_no_punctuation = re.sub(r"[.!?…]", "", self.current_word)
            next_word_exists = input_string[self.next_space + 1:].find(' ')
            if next_word_exists == -1:  # no space character ahead
                self.next_word = input_string[self.next_space + 1:]
            else:
                # find() returns an offset relative to the slice, so convert it back to an absolute
                # index into input_string before slicing
                self.next_word = input_string[self.next_space + 1: self.next_space + 1 + next_word_exists]
self.abbr_set = abbr_set
@staticmethod
def find_backwards_space_or_char(string: str, pos: int, char: str = ' ') -> int:
for i in range(pos - 1, -1, -1):
if string[i] == ' ' or string[i] == char:
return i + 1
return 0
@staticmethod
def find_forwards_space_or_char(string: str, pos: int, char: str = ' '):
for i in range(pos + 1, len(string)):
if string[i] == ' ' or string[i] == char:
return i
return len(string)
def non_boundary_check(self) -> bool:
"""
            function that checks whether the current position can be ruled out as a sentence boundary,
            e.g. because it is part of an abbreviation or a web address
            :return: True if the position is definitely not a sentence boundary, False otherwise
"""
return len(self.left_chunk_until_boundary) == 1 or self.next_letter == '\'' or \
self.next_letter in TurkishSentenceExtractor.BOUNDARY_CHARS or \
self.current_word in self.abbr_set or self.left_chunk_until_boundary in self.abbr_set or \
PerceptronSegmenter.potential_website(self.current_word)
def extract_features(self) -> Tuple[str]:
"""
            function that extracts features according to a set of rules defined by the original
            zemberek authors. Each feature name produced here has a learned weight in
            TurkishSentenceExtractor.weights, so the names must match the trained model exactly.
            :return: the tuple of features extracted from the current position of the paragraph
"""
features = list()
features.append("1:" + ("true" if self.previous_letter.isupper() else "false"))
features.append("1b:" + ("true" if self.next_letter.isspace() else "false"))
features.append("1a:" + self.previous_letter)
features.append("1b:" + self.next_letter)
features.append("2p:" + self.previous_two_letters)
features.append("2n:" + self.next_two_letters)
if len(self.current_word) > 0:
features.append("7c:" + ("true" if self.current_word[0].isupper() else "false"))
features.append("9c:" + PerceptronSegmenter.get_meta_char(self.current_word))
if len(self.right_chunk) > 0:
features.append("7r:" + ("true" if self.right_chunk[0].isupper() else "false"))
features.append("9r:" + PerceptronSegmenter.get_meta_char(self.right_chunk))
if len(self.left_chunk) > 0 and not TurkishAlphabet.INSTANCE.contains_vowel(self.left_chunk):
features.append("lcc:true")
if len(self.current_word_no_punctuation) > 0:
all_up = True
all_digit = True
for c in self.current_word_no_punctuation:
if not c.isupper():
all_up = False
if not c.isdigit():
all_digit = False
if all_up:
features.append("11u:true")
if all_digit:
features.append("11d:true")
return tuple(features) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/tokenization/turkish_sentence_extractor.py | turkish_sentence_extractor.py |
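# Illustrative usage sketch (not part of the original module): the extractor loads its perceptron
# weights and abbreviation list from the package resources on construction. The sample paragraph
# is an assumption chosen for demonstration.
if __name__ == "__main__":
    extractor = TurkishSentenceExtractor()
    for sentence in extractor.from_paragraph("Dr. Ayşe eve geldi. Saat çok geç olmuştu."):
        print(sentence)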
from typing import List, Union, Tuple
from antlr4.InputStream import InputStream
from antlr4.error.ErrorListener import ConsoleErrorListener
from antlr4.Token import Token as Token_
from zemberek.tokenization.token import Token
from zemberek.tokenization.antlr.turkish_lexer import TurkishLexer
class TurkishTokenizer:
DEFAULT: Union['TurkishTokenizer', None] = None
IGNORING_ERROR_LISTENER = ConsoleErrorListener()
def __init__(self, accepted_type_bits: int):
self.accepted_type_bits = accepted_type_bits
def tokenize(self, word: str) -> Tuple[Token, ...]:
return self.get_all_tokens(self.lexer_instance(InputStream(word)))
def get_all_tokens(self, lexer: TurkishLexer) -> Tuple[Token, ...]:
tokens = []
token: Token_ = lexer.nextToken()
while token.type != -1:
type_: Token.Type = self.convert_type(token)
if not self.type_ignored(type_):
tokens.append(self.convert(token))
token = lexer.nextToken()
return tuple(tokens)
@staticmethod
def convert(token: Token_) -> Token:
return Token(token.text, TurkishTokenizer.convert_type(token), token.start, token.stop)
def type_ignored(self, i: Token.Type) -> bool:
return (self.accepted_type_bits & 1 << (i.value - 1)) == 0
@staticmethod
def convert_type(token: Token_) -> Token.Type:
if token.type == 1:
return Token.Type.Abbreviation
elif token.type == 2:
return Token.Type.SpaceTab
elif token.type == 3:
return Token.Type.NewLine
elif token.type == 4:
return Token.Type.Time
elif token.type == 5:
return Token.Type.Date
elif token.type == 6:
return Token.Type.PercentNumeral
elif token.type == 7:
return Token.Type.Number
elif token.type == 8:
return Token.Type.URL
elif token.type == 9:
return Token.Type.Email
elif token.type == 10:
return Token.Type.HashTag
elif token.type == 11:
return Token.Type.Mention
elif token.type == 12:
return Token.Type.MetaTag
elif token.type == 13:
return Token.Type.Emoticon
elif token.type == 14:
return Token.Type.RomanNumeral
elif token.type == 15:
return Token.Type.AbbreviationWithDots
elif token.type == 16:
return Token.Type.Word
elif token.type == 17:
return Token.Type.WordAlphanumerical
elif token.type == 18:
return Token.Type.WordWithSymbol
elif token.type == 19:
return Token.Type.Punctuation
elif token.type == 20:
return Token.Type.UnknownWord
elif token.type == 21:
return Token.Type.Unknown
else:
raise TypeError("Unidentified token type = " + token.text)
@staticmethod
def lexer_instance(input_stream: InputStream) -> TurkishLexer:
lexer = TurkishLexer(input_stream)
lexer.removeErrorListeners()
lexer.addErrorListener(TurkishTokenizer.IGNORING_ERROR_LISTENER)
return lexer
@staticmethod
def builder() -> 'TurkishTokenizer.Builder':
return TurkishTokenizer.Builder()
class Builder:
def __init__(self):
self.accepted_type_bits = -1
def accept_all(self) -> 'TurkishTokenizer.Builder':
self.accepted_type_bits = -1
return self
def ignore_types(self, types: List[Token.Type]) -> 'TurkishTokenizer.Builder':
for i in types:
ordinal = i.value - 1
self.accepted_type_bits &= ~(1 << ordinal)
return self
def build(self) -> 'TurkishTokenizer':
return TurkishTokenizer(self.accepted_type_bits)
TurkishTokenizer.DEFAULT = TurkishTokenizer.builder().accept_all().ignore_types([Token.Type.NewLine,
Token.Type.SpaceTab]).build() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/tokenization/turkish_tokenizer.py | turkish_tokenizer.py |
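# Illustrative usage sketch (not part of the original module): TurkishTokenizer.DEFAULT, built on
# the line above, ignores NewLine and SpaceTab tokens. The sample sentence is an assumption chosen
# for demonstration.
if __name__ == "__main__":
    for token in TurkishTokenizer.DEFAULT.tokenize("Saat 12:00'de Ankara'ya vardık."):
        print(token)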
import csv
import os
import re
from pkg_resources import resource_filename
from typing import Dict, Set
class PerceptronSegmenter:
r"""
    Class that loads the binary averaged perceptron model weights and provides the rule-based
    helpers used for sentence boundary detection by the TurkishSentenceExtractor class.
    Attributes:
        web_words Tuple[str]:
            Strings that potentially indicate a web address
lowercase_vowels str:
Lower cased vowels in Turkish alphabet
uppercase_vowels str:
Capital vowels in Turkish alphabet
"""
web_words = ("http:", ".html", "www", ".tr", ".edu", "._zem.com", ".net", ".gov", "._zem.org", "@")
lowercase_vowels = set("aeıioöuüâîû")
uppercase_vowels = set("AEIİOÖUÜÂÎÛ")
def __init__(self):
self.turkish_abbreviation_set = self.load_abbreviations()
@staticmethod
def load_weights_from_csv(path: str = None) -> \
Dict[str, float]:
"""
function that loads model weights from csv file on given path
csv file should be in this format:
feature:str \t value:float
:param path: path to csv file
        :return: a dictionary mapping feature strings to their float weights
"""
if not path:
path = resource_filename("zemberek", os.path.join("resources", "sentence_boundary_model_weights.csv"))
weights = dict()
csv.field_size_limit(100000000)
with open(path, 'r', encoding="utf-8") as f:
lines = list(csv.reader(f, delimiter="\t"))
for line in lines:
weights[line[0]] = float(line[1])
return weights
@staticmethod
def load_abbreviations(path: str = None) -> Set[str]:
"""
        function that loads Turkish abbreviations from a text file on the given path. It stores
        both the original and the lower cased version of each abbreviation in a set
:param path: text file that contains abbreviations as one abbreviation per line
:return: set of strings storing both original and lower cased abbreviations
"""
lower_map = {
ord(u'I'): u'ı',
ord(u'İ'): u'i',
}
if not path:
path = resource_filename("zemberek", os.path.join("resources", "abbreviations.txt"))
abbr_set = set()
with open(path, 'r', encoding="utf-8") as f:
lines = list(f.readlines())
for line in lines:
if len(line.strip()) > 0:
abbr = re.sub(r'\s+', "", line.strip())
abbr_set.add(re.sub(r'\.$', "", abbr))
abbr = abbr.translate(lower_map)
abbr_set.add(re.sub(r'\.$', "", abbr.lower()))
return abbr_set
@classmethod
def potential_website(cls, s: str) -> bool:
for word in cls.web_words:
if word in s:
return True
return False
@classmethod
def get_meta_char(cls, letter: str) -> str:
"""
get meta char of a letter which will be used to get a specific weight value. Each return
value is a special name the owners of the repo used in naming the features of perceptron
:param letter: a letter to be checked
:return: a specific character depending on the letter
"""
if letter.isupper():
c = 86 if letter in cls.uppercase_vowels else 67 # 86 -> 'V', 67 -> 'C'
elif letter.islower():
c = 118 if letter in cls.lowercase_vowels else 99 # 118 -> 'v', 99 -> 'c'
elif letter.isdigit():
c = 100 # 100 -> 'd'
elif letter.isspace():
c = 32 # 32 -> ' '
elif letter == '.' or letter == '!' or letter == '?':
return letter
else:
c = 45 # 45 -> '-'
return chr(c) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/tokenization/perceptron_segmenter.py | perceptron_segmenter.py |
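# Illustrative sketch (not part of the original module): get_meta_char maps each character to the
# coarse class used in perceptron feature names ('V'/'C' for upper case vowel/consonant, 'v'/'c'
# for lower case, 'd' for digits, the character itself for spaces and .!?, '-' otherwise).
if __name__ == "__main__":
    print([PerceptronSegmenter.get_meta_char(ch) for ch in "Ali 3."])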
import os
import re
from pkg_resources import resource_filename
from typing import Set, List, TYPE_CHECKING
if TYPE_CHECKING:
from antlr4.atn.ATN import ATN
from antlr4.atn import ATNDeserializer
from antlr4.PredictionContext import PredictionContextCache
from antlr4.Lexer import Lexer
from antlr4.dfa.DFA import DFA
from antlr4.InputStream import InputStream
from antlr4.Token import Token, CommonToken
from queue import Queue
from zemberek.core.turkish import TurkishAlphabet
from zemberek.tokenization.antlr.custom_lexer_ATN_simulator import CustomLexerATNSimulator
class TurkishLexer(Lexer):
_ATN: 'ATN'
abbreviations: Set[str] = set()
fi = resource_filename("zemberek", os.path.join("resources", "abbreviations.txt"))
with open(fi, "r", encoding="utf-8") as f:
for line in f:
if len(line.strip()) > 0:
abbr = re.sub("\\s+", "", line.strip())
if abbr.endswith("."):
abbreviations.add(abbr)
abbreviations.add(abbr.lower())
abbreviations.add(abbr.translate(TurkishAlphabet.lower_map).lower())
del f
_decision_to_DFA: List[DFA]
_shared_context_cache = PredictionContextCache()
def __init__(self, inp: InputStream):
super().__init__(inp)
self._interp = CustomLexerATNSimulator(self, self._ATN, self._decision_to_DFA, self._shared_context_cache)
self.queue = Queue()
def nextToken(self) -> Token:
if not self.queue.empty():
return self.queue.get(block=False)
else:
next_: Token = super(TurkishLexer, self).nextToken()
if next_.type != 16:
return next_
else:
next2: Token = super(TurkishLexer, self).nextToken()
if next2.type == 19 and next2.text == ".":
abbrev = next_.text + "."
if self.abbreviations and abbrev in self.abbreviations:
common_token: CommonToken = CommonToken(type=1)
common_token.text = abbrev
common_token.start = next_.start
common_token.stop = next2.stop
common_token.tokenIndex = next_.tokenIndex
common_token.column = next_.column
common_token.line = next_.line
return common_token
self.queue.put_nowait(next2)
return next_
TurkishLexer._ATN = ATNDeserializer.ATNDeserializer().deserialize(
"\u0003悋Ꜫ脳맭䅼㯧瞆奤\u0002\u0017ȩ\b\u0001\u0004\u0002\t\u0002\u0004\u0003\t\u0003\u0004\u0004\t\u0004\u0004\u0005\t"
"\u0005\u0004\u0006\t\u0006\u0004\u0007\t\u0007\u0004\b\t\b\u0004\t\t\t\u0004\n\t\n\u0004\u000b\t\u000b\u0004\f\t"
"\f\u0004\r\t\r\u0004\u000e\t\u000e\u0004\u000f\t\u000f\u0004\u0010\t\u0010\u0004\u0011\t\u0011\u0004\u0012\t"
"\u0012\u0004\u0013\t\u0013\u0004\u0014\t\u0014\u0004\u0015\t\u0015\u0004\u0016\t\u0016\u0004\u0017\t\u0017\u0004"
"\u0018\t\u0018\u0004\u0019\t\u0019\u0004\u001a\t\u001a\u0004\u001b\t\u001b\u0004\u001c\t\u001c\u0004\u001d\t"
"\u001d\u0004\u001e\t\u001e\u0004\u001f\t\u001f\u0004 \t "
"\u0004!\t!\u0004\"\t\"\u0003\u0002\u0003\u0002\u0003\u0003\u0003\u0003\u0003\u0004\u0003\u0004\u0003\u0005\u0003"
"\u0005\u0003\u0006\u0003\u0006\u0003\u0007\u0003\u0007\u0003\b\u0003\b\u0003\t\u0003\t\u0003\n\u0003\n\u0006\nX"
"\n\n\r\n\u000e\nY\u0003\u000b\u0006\u000b]\n\u000b\r\u000b\u000e\u000b^\u0003\f\u0003\f\u0003\r\u0003\r\u0003\r"
"\u0003\r\u0003\r\u0003\r\u0003\r\u0003\r\u0005\rk\n\r\u0003\r\u0005\rn\n\r\u0003\u000e\u0005\u000eq\n\u000e"
"\u0003\u000e\u0003\u000e\u0003\u000e\u0005\u000ev\n\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003"
"\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0005\u000e\u0084\n"
"\u000e\u0003\u000e\u0005\u000e\u0087\n\u000e\u0003\u000e\u0005\u000e\u008a\n\u000e\u0003\u000e\u0003\u000e\u0003"
"\u000e\u0005\u000e\u008f\n\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003"
"\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0003\u000e\u0005\u000e\u009d\n\u000e\u0003\u000e\u0005"
"\u000e \n\u000e\u0005\u000e¢\n\u000e\u0003\u000f\u0003\u000f\u0003\u000f\u0003\u0010\u0005\u0010¨\n\u0010\u0003"
"\u0010\u0003\u0010\u0003\u0010\u0003\u0010\u0005\u0010®\n\u0010\u0003\u0010\u0005\u0010±\n\u0010\u0003\u0010"
"\u0005\u0010´\n\u0010\u0003\u0010\u0003\u0010\u0003\u0010\u0005\u0010¹\n\u0010\u0003\u0010\u0005\u0010¼\n\u0010"
"\u0003\u0010\u0003\u0010\u0005\u0010À\n\u0010\u0003\u0010\u0005\u0010Ã\n\u0010\u0003\u0010\u0003\u0010\u0003"
"\u0010\u0003\u0010\u0005\u0010É\n\u0010\u0003\u0010\u0003\u0010\u0003\u0010\u0006\u0010Î\n\u0010\r\u0010\u000e"
"\u0010Ï\u0003\u0010\u0003\u0010\u0005\u0010Ô\n\u0010\u0003\u0010\u0003\u0010\u0003\u0010\u0006\u0010Ù\n\u0010\r"
"\u0010\u000e\u0010Ú\u0003\u0010\u0003\u0010\u0005\u0010ß\n\u0010\u0003\u0010\u0003\u0010\u0005\u0010ã\n\u0010"
"\u0003\u0010\u0005\u0010æ\n\u0010\u0005\u0010è\n\u0010\u0003\u0011\u0006\u0011ë\n\u0011\r\u0011\u000e\u0011"
"ì\u0003\u0012\u0003\u0012\u0005\u0012ñ\n\u0012\u0003\u0012\u0003\u0012\u0003\u0013\u0006\u0013ö\n\u0013\r\u0013"
"\u000e\u0013÷\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014"
"\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0005\u0014ĉ\n\u0014\u0003"
"\u0014\u0003\u0014\u0005\u0014č\n\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014"
"\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014"
"\u0005\u0014Ğ\n\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0005"
"\u0014ħ\n\u0014\u0003\u0014\u0006\u0014Ī\n\u0014\r\u0014\u000e\u0014ī\u0003\u0014\u0003\u0014\u0003\u0014\u0003"
"\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003"
"\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0003"
"\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0005\u0014Ň\n\u0014\u0003\u0014\u0003\u0014\u0003\u0014\u0005\u0014"
"Ō\n\u0014\u0003\u0014\u0003\u0014\u0005\u0014Ő\n\u0014\u0003\u0014\u0005\u0014œ\n\u0014\u0005\u0014ŕ\n\u0014"
"\u0003\u0015\u0006\u0015Ř\n\u0015\r\u0015\u000e\u0015ř\u0003\u0015\u0005\u0015ŝ\n\u0015\u0003\u0015\u0006\u0015"
"Š\n\u0015\r\u0015\u000e\u0015š\u0003\u0015\u0003\u0015\u0006\u0015Ŧ\n\u0015\r\u0015\u000e\u0015ŧ\u0003\u0015"
"\u0003\u0015\u0006\u0015Ŭ\n\u0015\r\u0015\u000e\u0015ŭ\u0006\u0015Ű\n\u0015\r\u0015\u000e\u0015ű\u0003\u0015"
"\u0005\u0015ŵ\n\u0015\u0003\u0016\u0003\u0016\u0006\u0016Ź\n\u0016\r\u0016\u000e\u0016ź\u0003\u0016\u0005\u0016"
"ž\n\u0016\u0003\u0017\u0003\u0017\u0006\u0017Ƃ\n\u0017\r\u0017\u000e\u0017ƃ\u0003\u0017\u0005\u0017Ƈ\n\u0017"
"\u0003\u0018\u0003\u0018\u0006\u0018Ƌ\n\u0018\r\u0018\u000e\u0018ƌ\u0003\u0018\u0003\u0018\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003\u0019\u0003"
"\u0019\u0003\u0019\u0005\u0019ǜ\n\u0019\u0003\u001a\u0006\u001aǟ\n\u001a\r\u001a\u000e\u001aǠ\u0003\u001a\u0005"
"\u001aǤ\n\u001a\u0003\u001a\u0005\u001aǧ\n\u001a\u0003\u001b\u0003\u001b\u0003\u001b\u0006\u001bǬ\n\u001b\r"
"\u001b\u000e\u001bǭ\u0003\u001b\u0005\u001bDZ\n\u001b\u0003\u001b\u0005\u001bǴ\n\u001b\u0003\u001c\u0006\u001cǷ\n"
"\u001c\r\u001c\u000e\u001cǸ\u0003\u001d\u0006\u001dǼ\n\u001d\r\u001d\u000e\u001dǽ\u0003\u001e\u0006\u001eȁ\n"
"\u001e\r\u001e\u000e\u001eȂ\u0003\u001e\u0005\u001eȆ\n\u001e\u0003\u001e\u0006\u001eȉ\n\u001e\r\u001e\u000e"
"\u001eȊ\u0003\u001e\u0005\u001eȎ\n\u001e\u0003\u001f\u0003\u001f\u0003\u001f\u0003\u001f\u0003\u001f\u0003\u001f"
"\u0003\u001f\u0003\u001f\u0003\u001f\u0003\u001f\u0003\u001f\u0003\u001f\u0005\u001fȜ\n\u001f\u0003 \u0003 "
"\u0003!\u0006!ȡ\n!\r!\u000e!Ȣ\u0003\"\u0006\"Ȧ\n\"\r\"\u000e\"ȧ\u0003ȧ\u0002#\u0003\u0002\u0005\u0002\u0007"
"\u0002\t\u0002\u000b\u0002\r\u0002\u000f\u0002\u0011\u0002\u0013\u0002\u0015\u0004\u0017\u0005\u0019\u0006\u001b"
"\u0007\u001d\b\u001f\t!\u0002#\u0002%\u0002'\n)\u000b+\f-\r/\u000e1\u000f3\u00105\u00117\u00129\u0013;\u0014"
"=\u0002?\u0015A\u0016C\u0017\u0003\u0002\u001d\u0003\u00022;\u000b\u0002c|ääééððøøýþġġijijšš\u000b\u0002C"
"\\ÄÄÉÉÐÐØØÝÞĠĠIJIJŠŠ\u0011\u0002C\\c|ÄÄÉÉÐÐØØÝÞääééððøøýþĠġIJijŠš\u0012\u00022;C\\c|ÄÄÉÉÐÐØØÝÞääééððøøýþĠġIJijŠš\u0013"
"\u00022;C\\aac|ÄÄÉÉÐÐØØÝÞääééððøøýþĠġIJijŠš\u0004\u0002))‛‛\u0006\u0002$$\u00ad\u00ad½½„‟\u0004\u0002\u000b\u000b"
"\"\"\u0004\u0002\f\f\u000f\u000f\u0003\u000224\u0004\u000200<<\u0003\u000227\u0003\u000225\u0003\u000223\u0003"
"\u000233\u0003\u00029;\u0003\u000244\u0003\u000222\u0004\u0002--//\u0004\u0002..00\u0004\u0002GGgg\u0019\u0002(("
"--/;==??AAC]__aac|ÄÄÉÉÐÐØØÝÞääééððøøýþĠġIJijŠš\u0006\u00022;C\\aac|\u0007\u0002EFKKNOXXZZ\u0010\u0002##&("
"*1<=?B]`}}\u007f\u007f««°°‚‚\u2028\u2028™™ℤℤ\u0014\u0002\u000b\f\u000f\u000f\"$&1<=?B]`}}\u007f\u007f««\u00ad"
"\u00ad°°½½‚‛„‟\u2028\u2028™™ℤℤ\u0002ʋ\u0002\u0015\u0003\u0002\u0002\u0002\u0002\u0017\u0003\u0002\u0002\u0002"
"\u0002\u0019\u0003\u0002\u0002\u0002\u0002\u001b\u0003\u0002\u0002\u0002\u0002\u001d\u0003\u0002\u0002\u0002"
"\u0002\u001f\u0003\u0002\u0002\u0002\u0002'\u0003\u0002\u0002\u0002\u0002)\u0003\u0002\u0002\u0002\u0002+\u0003"
"\u0002\u0002\u0002\u0002-\u0003\u0002\u0002\u0002\u0002/\u0003\u0002\u0002\u0002\u00021\u0003\u0002\u0002\u0002"
"\u00023\u0003\u0002\u0002\u0002\u00025\u0003\u0002\u0002\u0002\u00027\u0003\u0002\u0002\u0002\u00029\u0003\u0002"
"\u0002\u0002\u0002;\u0003\u0002\u0002\u0002\u0002?\u0003\u0002\u0002\u0002\u0002A\u0003\u0002\u0002\u0002\u0002C"
"\u0003\u0002\u0002\u0002\u0003E\u0003\u0002\u0002\u0002\u0005G\u0003\u0002\u0002\u0002\u0007I\u0003\u0002\u0002"
"\u0002\tK\u0003\u0002\u0002\u0002\u000bM\u0003\u0002\u0002\u0002\rO\u0003\u0002\u0002\u0002\u000fQ\u0003\u0002"
"\u0002\u0002\u0011S\u0003\u0002\u0002\u0002\u0013U\u0003\u0002\u0002\u0002\u0015\\\u0003\u0002\u0002\u0002\u0017"
"`\u0003\u0002\u0002\u0002\u0019b\u0003\u0002\u0002\u0002\u001b¡\u0003\u0002\u0002\u0002\u001d£\u0003\u0002\u0002"
"\u0002\u001fç\u0003\u0002\u0002\u0002!ê\u0003\u0002\u0002\u0002#î\u0003\u0002\u0002\u0002%õ\u0003\u0002\u0002"
"\u0002'Ŕ\u0003\u0002\u0002\u0002)ŗ\u0003\u0002\u0002\u0002+Ŷ\u0003\u0002\u0002\u0002-ſ\u0003\u0002\u0002\u0002"
"/ƈ\u0003\u0002\u0002\u00021Ǜ\u0003\u0002\u0002\u00023Ǟ\u0003\u0002\u0002\u00025ǫ\u0003\u0002\u0002\u00027Ƕ\u0003"
"\u0002\u0002\u00029ǻ\u0003\u0002\u0002\u0002;Ȁ\u0003\u0002\u0002\u0002=ț\u0003\u0002\u0002\u0002?ȝ\u0003\u0002"
"\u0002\u0002AȠ\u0003\u0002\u0002\u0002Cȥ\u0003\u0002\u0002\u0002EF\t\u0002\u0002\u0002F\u0004\u0003\u0002\u0002"
"\u0002GH\t\u0003\u0002\u0002H\u0006\u0003\u0002\u0002\u0002IJ\t\u0004\u0002\u0002J\b\u0003\u0002\u0002\u0002KL\t"
"\u0005\u0002\u0002L\n\u0003\u0002\u0002\u0002MN\t\u0006\u0002\u0002N\f\u0003\u0002\u0002\u0002OP\t\u0007\u0002"
"\u0002P\u000e\u0003\u0002\u0002\u0002QR\t\b\u0002\u0002R\u0010\u0003\u0002\u0002\u0002ST\t\t\u0002\u0002T\u0012"
"\u0003\u0002\u0002\u0002UW\u0005\u000f\b\u0002VX\u0005\t\u0005\u0002WV\u0003\u0002\u0002\u0002XY\u0003\u0002"
"\u0002\u0002YW\u0003\u0002\u0002\u0002YZ\u0003\u0002\u0002\u0002Z\u0014\u0003\u0002\u0002\u0002["
"]\t\n\u0002\u0002\\[\u0003\u0002\u0002\u0002]^\u0003\u0002\u0002\u0002^\\\u0003\u0002\u0002\u0002^_\u0003\u0002"
"\u0002\u0002_\u0016\u0003\u0002\u0002\u0002`a\t\u000b\u0002\u0002a\u0018\u0003\u0002\u0002\u0002bc\t\f\u0002"
"\u0002cd\t\u0002\u0002\u0002de\t\r\u0002\u0002ef\t\u000e\u0002\u0002fj\t\u0002\u0002\u0002gh\t\r\u0002\u0002hi\t"
"\u000e\u0002\u0002ik\t\u0002\u0002\u0002jg\u0003\u0002\u0002\u0002jk\u0003\u0002\u0002\u0002km\u0003\u0002\u0002"
"\u0002ln\u0005\u0013\n\u0002ml\u0003\u0002\u0002\u0002mn\u0003\u0002\u0002\u0002n\u001a\u0003\u0002\u0002"
"\u0002oq\t\u000f\u0002\u0002po\u0003\u0002\u0002\u0002pq\u0003\u0002\u0002\u0002qr\u0003\u0002\u0002\u0002rs\t"
"\u0002\u0002\u0002su\u00070\u0002\u0002tv\t\u0010\u0002\u0002ut\u0003\u0002\u0002\u0002uv\u0003\u0002\u0002"
"\u0002vw\u0003\u0002\u0002\u0002wx\t\u0002\u0002\u0002x\u0083\u00070\u0002\u0002yz\t\u0011\u0002\u0002z{"
"\t\u0012\u0002\u0002{|\t\u0002\u0002\u0002|\u0084\t\u0002\u0002\u0002}~\t\u0013\u0002\u0002~\u007f\t\u0014\u0002"
"\u0002\u007f\u0080\t\u0002\u0002\u0002\u0080\u0084\t\u0002\u0002\u0002\u0081\u0082\t\u0002\u0002\u0002\u0082"
"\u0084\t\u0002\u0002\u0002\u0083y\u0003\u0002\u0002\u0002\u0083}\u0003\u0002\u0002\u0002\u0083\u0081\u0003\u0002"
"\u0002\u0002\u0084\u0086\u0003\u0002\u0002\u0002\u0085\u0087\u0005\u0013\n\u0002\u0086\u0085\u0003\u0002\u0002"
"\u0002\u0086\u0087\u0003\u0002\u0002\u0002\u0087¢\u0003\u0002\u0002\u0002\u0088\u008a\t\u000f\u0002\u0002\u0089"
"\u0088\u0003\u0002\u0002\u0002\u0089\u008a\u0003\u0002\u0002\u0002\u008a\u008b\u0003\u0002\u0002\u0002\u008b"
"\u008c\t\u0002\u0002\u0002\u008c\u008e\u00071\u0002\u0002\u008d\u008f\t\u0010\u0002\u0002\u008e\u008d\u0003"
"\u0002\u0002\u0002\u008e\u008f\u0003\u0002\u0002\u0002\u008f\u0090\u0003\u0002\u0002\u0002\u0090\u0091\t\u0002"
"\u0002\u0002\u0091\u009c\u00071\u0002\u0002\u0092\u0093\t\u0011\u0002\u0002\u0093\u0094\t\u0012\u0002\u0002"
"\u0094\u0095\t\u0002\u0002\u0002\u0095\u009d\t\u0002\u0002\u0002\u0096\u0097\t\u0013\u0002\u0002\u0097\u0098\t"
"\u0014\u0002\u0002\u0098\u0099\t\u0002\u0002\u0002\u0099\u009d\t\u0002\u0002\u0002\u009a\u009b\t\u0002\u0002"
"\u0002\u009b\u009d\t\u0002\u0002\u0002\u009c\u0092\u0003\u0002\u0002\u0002\u009c\u0096\u0003\u0002\u0002\u0002"
"\u009c\u009a\u0003\u0002\u0002\u0002\u009d\u009f\u0003\u0002\u0002\u0002\u009e \u0005\u0013\n\u0002\u009f\u009e"
"\u0003\u0002\u0002\u0002\u009f \u0003\u0002\u0002\u0002 ¢\u0003\u0002\u0002\u0002¡p\u0003\u0002\u0002\u0002"
"¡\u0089\u0003\u0002\u0002\u0002¢\u001c\u0003\u0002\u0002\u0002£¤\u0007'\u0002\u0002¤¥\u0005\u001f\u0010\u0002"
"¥\u001e\u0003\u0002\u0002\u0002¦¨\t\u0015\u0002\u0002§¦\u0003\u0002\u0002\u0002§¨\u0003\u0002\u0002\u0002"
"¨©\u0003\u0002\u0002\u0002©ª\u0005!\u0011\u0002ª«\t\u0016\u0002\u0002«\u00ad\u0005!\u0011\u0002¬®\u0005#\u0012"
"\u0002\u00ad¬\u0003\u0002\u0002\u0002\u00ad®\u0003\u0002\u0002\u0002®°\u0003\u0002\u0002\u0002¯±\u0005\u0013\n"
"\u0002°¯\u0003\u0002\u0002\u0002°±\u0003\u0002\u0002\u0002±è\u0003\u0002\u0002\u0002²´\t\u0015\u0002\u0002"
"³²\u0003\u0002\u0002\u0002³´\u0003\u0002\u0002\u0002´µ\u0003\u0002\u0002\u0002µ¶\u0005!\u0011\u0002¶¸\u0005"
"#\u0012\u0002·¹\u0005\u0013\n\u0002¸·\u0003\u0002\u0002\u0002¸¹\u0003\u0002\u0002\u0002¹è\u0003\u0002\u0002"
"\u0002º¼\t\u0015\u0002\u0002»º\u0003\u0002\u0002\u0002»¼\u0003\u0002\u0002\u0002¼½\u0003\u0002\u0002\u0002"
"½¿\u0005!\u0011\u0002¾À\u0005\u0013\n\u0002¿¾\u0003\u0002\u0002\u0002¿À\u0003\u0002\u0002\u0002Àè\u0003\u0002"
"\u0002\u0002ÁÃ\t\u0015\u0002\u0002ÂÁ\u0003\u0002\u0002\u0002ÂÃ\u0003\u0002\u0002\u0002ÃÄ\u0003\u0002\u0002\u0002"
"ÄÅ\u0005!\u0011\u0002ÅÆ\u00071\u0002\u0002ÆÈ\u0005!\u0011\u0002ÇÉ\u0005\u0013\n\u0002ÈÇ\u0003\u0002\u0002\u0002"
"ÈÉ\u0003\u0002\u0002\u0002Éè\u0003\u0002\u0002\u0002ÊË\u0005!\u0011\u0002ËÌ\u00070\u0002\u0002ÌÎ\u0003\u0002"
"\u0002\u0002ÍÊ\u0003\u0002\u0002\u0002ÎÏ\u0003\u0002\u0002\u0002ÏÍ\u0003\u0002\u0002\u0002ÏÐ\u0003\u0002\u0002"
"\u0002ÐÑ\u0003\u0002\u0002\u0002ÑÓ\u0005!\u0011\u0002ÒÔ\u0005\u0013\n\u0002ÓÒ\u0003\u0002\u0002\u0002ÓÔ\u0003"
"\u0002\u0002\u0002Ôè\u0003\u0002\u0002\u0002ÕÖ\u0005!\u0011\u0002Ö×\u0007.\u0002\u0002×Ù\u0003\u0002\u0002\u0002"
"ØÕ\u0003\u0002\u0002\u0002ÙÚ\u0003\u0002\u0002\u0002ÚØ\u0003\u0002\u0002\u0002ÚÛ\u0003\u0002\u0002\u0002ÛÜ\u0003"
"\u0002\u0002\u0002ÜÞ\u0005!\u0011\u0002Ýß\u0005\u0013\n\u0002ÞÝ\u0003\u0002\u0002\u0002Þß\u0003\u0002\u0002"
"\u0002ßè\u0003\u0002\u0002\u0002àâ\u0005!\u0011\u0002áã\u00070\u0002\u0002âá\u0003\u0002\u0002\u0002âã\u0003"
"\u0002\u0002\u0002ãå\u0003\u0002\u0002\u0002äæ\u0005\u0013\n\u0002åä\u0003\u0002\u0002\u0002åæ\u0003\u0002\u0002"
"\u0002æè\u0003\u0002\u0002\u0002ç§\u0003\u0002\u0002\u0002ç³\u0003\u0002\u0002\u0002ç»\u0003\u0002\u0002\u0002"
"çÂ\u0003\u0002\u0002\u0002çÍ\u0003\u0002\u0002\u0002çØ\u0003\u0002\u0002\u0002çà\u0003\u0002\u0002\u0002è "
"\u0003\u0002\u0002\u0002éë\u0005\u0003\u0002\u0002êé\u0003\u0002\u0002\u0002ëì\u0003\u0002\u0002\u0002ìê\u0003"
"\u0002\u0002\u0002ìí\u0003\u0002\u0002\u0002í\"\u0003\u0002\u0002\u0002îð\t\u0017\u0002\u0002ïñ\t\u0015\u0002"
"\u0002ðï\u0003\u0002\u0002\u0002ðñ\u0003\u0002\u0002\u0002ñò\u0003\u0002\u0002\u0002òó\u0005!\u0011\u0002ó$"
"\u0003\u0002\u0002\u0002ôö\t\u0018\u0002\u0002õô\u0003\u0002\u0002\u0002ö÷\u0003\u0002\u0002\u0002÷õ\u0003\u0002"
"\u0002\u0002÷ø\u0003\u0002\u0002\u0002ø&\u0003\u0002\u0002\u0002ùú\u0007j\u0002\u0002úû\u0007v\u0002\u0002"
"ûü\u0007v\u0002\u0002üý\u0007r\u0002\u0002ýþ\u0007<\u0002\u0002þÿ\u00071\u0002\u0002ÿĉ\u00071\u0002\u0002"
"Āā\u0007j\u0002\u0002āĂ\u0007v\u0002\u0002Ăă\u0007v\u0002\u0002ăĄ\u0007r\u0002\u0002Ąą\u0007u\u0002\u0002"
"ąĆ\u0007<\u0002\u0002Ćć\u00071\u0002\u0002ćĉ\u00071\u0002\u0002Ĉù\u0003\u0002\u0002\u0002ĈĀ\u0003\u0002\u0002"
"\u0002ĉĊ\u0003\u0002\u0002\u0002ĊČ\u0005%\u0013\u0002ċč\u0005\u0013\n\u0002Čċ\u0003\u0002\u0002\u0002Čč\u0003"
"\u0002\u0002\u0002čŕ\u0003\u0002\u0002\u0002Ďď\u0007j\u0002\u0002ďĐ\u0007v\u0002\u0002Đđ\u0007v\u0002\u0002"
"đĒ\u0007r\u0002\u0002Ēē\u0007<\u0002\u0002ēĔ\u00071\u0002\u0002ĔĞ\u00071\u0002\u0002ĕĖ\u0007j\u0002\u0002"
"Ėė\u0007v\u0002\u0002ėĘ\u0007v\u0002\u0002Ęę\u0007r\u0002\u0002ęĚ\u0007u\u0002\u0002Ěě\u0007<\u0002\u0002"
"ěĜ\u00071\u0002\u0002ĜĞ\u00071\u0002\u0002ĝĎ\u0003\u0002\u0002\u0002ĝĕ\u0003\u0002\u0002\u0002ĝĞ\u0003\u0002"
"\u0002\u0002Ğğ\u0003\u0002\u0002\u0002ğĠ\u0007y\u0002\u0002Ġġ\u0007y\u0002\u0002ġĢ\u0007y\u0002\u0002Ģģ\u00070"
"\u0002\u0002ģĤ\u0003\u0002\u0002\u0002ĤĦ\u0005%\u0013\u0002ĥħ\u0005\u0013\n\u0002Ħĥ\u0003\u0002\u0002\u0002"
"Ħħ\u0003\u0002\u0002\u0002ħŕ\u0003\u0002\u0002\u0002ĨĪ\t\u0019\u0002\u0002ĩĨ\u0003\u0002\u0002\u0002Īī\u0003"
"\u0002\u0002\u0002īĩ\u0003\u0002\u0002\u0002īĬ\u0003\u0002\u0002\u0002Ĭņ\u0003\u0002\u0002\u0002ĭĮ\u00070\u0002"
"\u0002Įį\u0007e\u0002\u0002įİ\u0007q\u0002\u0002İŇ\u0007o\u0002\u0002ıIJ\u00070\u0002\u0002IJij\u0007q\u0002\u0002"
"ijĴ\u0007t\u0002\u0002ĴŇ\u0007i\u0002\u0002ĵĶ\u00070\u0002\u0002Ķķ\u0007g\u0002\u0002ķĸ\u0007f\u0002\u0002"
"ĸŇ\u0007w\u0002\u0002Ĺĺ\u00070\u0002\u0002ĺĻ\u0007i\u0002\u0002Ļļ\u0007q\u0002\u0002ļŇ\u0007x\u0002\u0002"
"Ľľ\u00070\u0002\u0002ľĿ\u0007p\u0002\u0002Ŀŀ\u0007g\u0002\u0002ŀŇ\u0007v\u0002\u0002Łł\u00070\u0002\u0002"
"łŃ\u0007k\u0002\u0002Ńń\u0007p\u0002\u0002ńŅ\u0007h\u0002\u0002ŅŇ\u0007q\u0002\u0002ņĭ\u0003\u0002\u0002\u0002"
"ņı\u0003\u0002\u0002\u0002ņĵ\u0003\u0002\u0002\u0002ņĹ\u0003\u0002\u0002\u0002ņĽ\u0003\u0002\u0002\u0002ņŁ\u0003"
"\u0002\u0002\u0002Ňŋ\u0003\u0002\u0002\u0002ňʼn\u00070\u0002\u0002ʼnŊ\u0007v\u0002\u0002ŊŌ\u0007t\u0002\u0002"
"ŋň\u0003\u0002\u0002\u0002ŋŌ\u0003\u0002\u0002\u0002Ōŏ\u0003\u0002\u0002\u0002ōŎ\u00071\u0002\u0002ŎŐ\u0005"
"%\u0013\u0002ŏō\u0003\u0002\u0002\u0002ŏŐ\u0003\u0002\u0002\u0002ŐŒ\u0003\u0002\u0002\u0002őœ\u0005\u0013\n"
"\u0002Œő\u0003\u0002\u0002\u0002Œœ\u0003\u0002\u0002\u0002œŕ\u0003\u0002\u0002\u0002ŔĈ\u0003\u0002\u0002\u0002"
"Ŕĝ\u0003\u0002\u0002\u0002Ŕĩ\u0003\u0002\u0002\u0002ŕ("
"\u0003\u0002\u0002\u0002ŖŘ\u0005\r\u0007\u0002ŗŖ\u0003\u0002\u0002\u0002Řř\u0003\u0002\u0002\u0002řŗ\u0003\u0002"
"\u0002\u0002řŚ\u0003\u0002\u0002\u0002ŚŜ\u0003\u0002\u0002\u0002śŝ\u00070\u0002\u0002Ŝś\u0003\u0002\u0002\u0002"
"Ŝŝ\u0003\u0002\u0002\u0002ŝş\u0003\u0002\u0002\u0002ŞŠ\u0005\r\u0007\u0002şŞ\u0003\u0002\u0002\u0002Šš\u0003"
"\u0002\u0002\u0002šş\u0003\u0002\u0002\u0002šŢ\u0003\u0002\u0002\u0002Ţţ\u0003\u0002\u0002\u0002ţů\u0007B\u0002"
"\u0002ŤŦ\u0005\r\u0007\u0002ťŤ\u0003\u0002\u0002\u0002Ŧŧ\u0003\u0002\u0002\u0002ŧť\u0003\u0002\u0002\u0002"
"ŧŨ\u0003\u0002\u0002\u0002Ũũ\u0003\u0002\u0002\u0002ũū\u00070\u0002\u0002ŪŬ\u0005\r\u0007\u0002ūŪ\u0003\u0002"
"\u0002\u0002Ŭŭ\u0003\u0002\u0002\u0002ŭū\u0003\u0002\u0002\u0002ŭŮ\u0003\u0002\u0002\u0002ŮŰ\u0003\u0002\u0002"
"\u0002ůť\u0003\u0002\u0002\u0002Űű\u0003\u0002\u0002\u0002űů\u0003\u0002\u0002\u0002űŲ\u0003\u0002\u0002\u0002"
"ŲŴ\u0003\u0002\u0002\u0002ųŵ\u0005\u0013\n\u0002Ŵų\u0003\u0002\u0002\u0002Ŵŵ\u0003\u0002\u0002\u0002ŵ*\u0003"
"\u0002\u0002\u0002ŶŸ\u0007%\u0002\u0002ŷŹ\u0005\r\u0007\u0002Ÿŷ\u0003\u0002\u0002\u0002Źź\u0003\u0002\u0002"
"\u0002źŸ\u0003\u0002\u0002\u0002źŻ\u0003\u0002\u0002\u0002ŻŽ\u0003\u0002\u0002\u0002żž\u0005\u0013\n\u0002"
"Žż\u0003\u0002\u0002\u0002Žž\u0003\u0002\u0002\u0002ž,"
"\u0003\u0002\u0002\u0002ſƁ\u0007B\u0002\u0002ƀƂ\u0005\r\u0007\u0002Ɓƀ\u0003\u0002\u0002\u0002Ƃƃ\u0003\u0002"
"\u0002\u0002ƃƁ\u0003\u0002\u0002\u0002ƃƄ\u0003\u0002\u0002\u0002ƄƆ\u0003\u0002\u0002\u0002ƅƇ\u0005\u0013\n\u0002"
"Ɔƅ\u0003\u0002\u0002\u0002ƆƇ\u0003\u0002\u0002\u0002Ƈ.\u0003\u0002\u0002\u0002ƈƊ\u0007>\u0002\u0002ƉƋ\u0005\r"
"\u0007\u0002ƊƉ\u0003\u0002\u0002\u0002Ƌƌ\u0003\u0002\u0002\u0002ƌƊ\u0003\u0002\u0002\u0002ƌƍ\u0003\u0002\u0002"
"\u0002ƍƎ\u0003\u0002\u0002\u0002ƎƏ\u0007@\u0002\u0002Ə0\u0003\u0002\u0002\u0002ƐƑ\u0007<\u0002\u0002Ƒǜ\u0007"
"+\u0002\u0002ƒƓ\u0007<\u0002\u0002ƓƔ\u0007/\u0002\u0002Ɣǜ\u0007+\u0002\u0002ƕƖ\u0007<\u0002\u0002ƖƗ\u0007/\u0002"
"\u0002Ɨǜ\u0007_\u0002\u0002Ƙƙ\u0007<\u0002\u0002ƙǜ\u0007F\u0002\u0002ƚƛ\u0007<\u0002\u0002ƛƜ\u0007/\u0002\u0002"
"Ɯǜ\u0007F\u0002\u0002Ɲƞ\u0007:\u0002\u0002ƞƟ\u0007/\u0002\u0002Ɵǜ\u0007+\u0002\u0002Ơơ\u0007=\u0002\u0002"
"ơǜ\u0007+\u0002\u0002Ƣƣ\u0007=\u0002\u0002ƣƤ\u0007–\u0002\u0002Ƥǜ\u0007+\u0002\u0002ƥƦ\u0007<\u0002\u0002"
"Ʀǜ\u0007*\u0002\u0002Ƨƨ\u0007<\u0002\u0002ƨƩ\u0007/\u0002\u0002Ʃǜ\u0007*\u0002\u0002ƪƫ\u0007<\u0002\u0002"
"ƫƬ\u0007)\u0002\u0002Ƭǜ\u0007*\u0002\u0002ƭƮ\u0007<\u0002\u0002ƮƯ\u0007)\u0002\u0002Ưǜ\u0007+\u0002\u0002"
"ưƱ\u0007<\u0002\u0002Ʊǜ\u0007R\u0002\u0002ƲƳ\u0007<\u0002\u0002Ƴǜ\u0007r\u0002\u0002ƴƵ\u0007<\u0002\u0002"
"Ƶǜ\u0007~\u0002\u0002ƶƷ\u0007?\u0002\u0002Ʒǜ\u0007~\u0002\u0002Ƹƹ\u0007?\u0002\u0002ƹǜ\u0007+\u0002\u0002"
"ƺƻ\u0007?\u0002\u0002ƻǜ\u0007*\u0002\u0002Ƽƽ\u0007<\u0002\u0002ƽƾ\u0007–\u0002\u0002ƾǜ\u00071\u0002\u0002"
"ƿǀ\u0007<\u0002\u0002ǀǜ\u00071\u0002\u0002ǁǂ\u0007<\u0002\u0002ǂǃ\u0007`\u0002\u0002ǃǜ\u0007+\u0002\u0002"
"DŽDž\u0007±\u0002\u0002Dždž\u0007^\u0002\u0002džLJ\u0007a\u0002\u0002LJLj\u0007*\u0002\u0002Ljlj\u0007テ\u0002\u0002"
"ljNJ\u0007+\u0002\u0002NJNj\u0007a\u0002\u0002Njnj\u00071\u0002\u0002njǜ\u0007±\u0002\u0002Ǎǎ\u0007Q\u0002\u0002"
"ǎǏ\u0007a\u0002\u0002Ǐǜ\u0007q\u0002\u0002ǐǑ\u0007q\u0002\u0002Ǒǒ\u0007a\u0002\u0002ǒǜ\u0007Q\u0002\u0002"
"Ǔǔ\u0007Q\u0002\u0002ǔǕ\u0007a\u0002\u0002Ǖǜ\u0007Q\u0002\u0002ǖǗ\u0007^\u0002\u0002Ǘǘ\u0007q\u0002\u0002"
"ǘǜ\u00071\u0002\u0002Ǚǚ\u0007>\u0002\u0002ǚǜ\u00075\u0002\u0002ǛƐ\u0003\u0002\u0002\u0002Ǜƒ\u0003\u0002\u0002"
"\u0002Ǜƕ\u0003\u0002\u0002\u0002ǛƘ\u0003\u0002\u0002\u0002Ǜƚ\u0003\u0002\u0002\u0002ǛƝ\u0003\u0002\u0002\u0002"
"ǛƠ\u0003\u0002\u0002\u0002ǛƢ\u0003\u0002\u0002\u0002Ǜƥ\u0003\u0002\u0002\u0002ǛƧ\u0003\u0002\u0002\u0002Ǜƪ\u0003"
"\u0002\u0002\u0002Ǜƭ\u0003\u0002\u0002\u0002Ǜư\u0003\u0002\u0002\u0002ǛƲ\u0003\u0002\u0002\u0002Ǜƴ\u0003\u0002"
"\u0002\u0002Ǜƶ\u0003\u0002\u0002\u0002ǛƸ\u0003\u0002\u0002\u0002Ǜƺ\u0003\u0002\u0002\u0002ǛƼ\u0003\u0002\u0002"
"\u0002Ǜƿ\u0003\u0002\u0002\u0002Ǜǁ\u0003\u0002\u0002\u0002ǛDŽ\u0003\u0002\u0002\u0002ǛǍ\u0003\u0002\u0002\u0002"
"Ǜǐ\u0003\u0002\u0002\u0002ǛǓ\u0003\u0002\u0002\u0002Ǜǖ\u0003\u0002\u0002\u0002ǛǙ\u0003\u0002\u0002\u0002ǜ2\u0003"
"\u0002\u0002\u0002ǝǟ\t\u001a\u0002\u0002Ǟǝ\u0003\u0002\u0002\u0002ǟǠ\u0003\u0002\u0002\u0002ǠǞ\u0003\u0002\u0002"
"\u0002Ǡǡ\u0003\u0002\u0002\u0002ǡǣ\u0003\u0002\u0002\u0002ǢǤ\u00070\u0002\u0002ǣǢ\u0003\u0002\u0002\u0002"
"ǣǤ\u0003\u0002\u0002\u0002ǤǦ\u0003\u0002\u0002\u0002ǥǧ\u0005\u0013\n\u0002Ǧǥ\u0003\u0002\u0002\u0002Ǧǧ\u0003"
"\u0002\u0002\u0002ǧ4\u0003\u0002\u0002\u0002Ǩǩ\u0005\u0007\u0004\u0002ǩǪ\u00070\u0002\u0002ǪǬ\u0003\u0002\u0002"
"\u0002ǫǨ\u0003\u0002\u0002\u0002Ǭǭ\u0003\u0002\u0002\u0002ǭǫ\u0003\u0002\u0002\u0002ǭǮ\u0003\u0002\u0002\u0002"
"Ǯǰ\u0003\u0002\u0002\u0002ǯDZ\u0005\u0007\u0004\u0002ǰǯ\u0003\u0002\u0002\u0002ǰDZ\u0003\u0002\u0002\u0002DZdz\u0003"
"\u0002\u0002\u0002DzǴ\u0005\u0013\n\u0002dzDz\u0003\u0002\u0002\u0002dzǴ\u0003\u0002\u0002\u0002Ǵ6\u0003\u0002\u0002"
"\u0002ǵǷ\u0005\t\u0005\u0002Ƕǵ\u0003\u0002\u0002\u0002ǷǸ\u0003\u0002\u0002\u0002ǸǶ\u0003\u0002\u0002\u0002"
"Ǹǹ\u0003\u0002\u0002\u0002ǹ8\u0003\u0002\u0002\u0002ǺǼ\u0005\u000b\u0006\u0002ǻǺ\u0003\u0002\u0002\u0002Ǽǽ\u0003"
"\u0002\u0002\u0002ǽǻ\u0003\u0002\u0002\u0002ǽǾ\u0003\u0002\u0002\u0002Ǿ:\u0003\u0002\u0002\u0002ǿȁ\u0005\u000b"
"\u0006\u0002Ȁǿ\u0003\u0002\u0002\u0002ȁȂ\u0003\u0002\u0002\u0002ȂȀ\u0003\u0002\u0002\u0002Ȃȃ\u0003\u0002\u0002"
"\u0002ȃȅ\u0003\u0002\u0002\u0002ȄȆ\u0007/\u0002\u0002ȅȄ\u0003\u0002\u0002\u0002ȅȆ\u0003\u0002\u0002\u0002"
"ȆȈ\u0003\u0002\u0002\u0002ȇȉ\u0005\u000b\u0006\u0002Ȉȇ\u0003\u0002\u0002\u0002ȉȊ\u0003\u0002\u0002\u0002ȊȈ\u0003"
"\u0002\u0002\u0002Ȋȋ\u0003\u0002\u0002\u0002ȋȍ\u0003\u0002\u0002\u0002ȌȎ\u0005\u0013\n\u0002ȍȌ\u0003\u0002\u0002"
"\u0002ȍȎ\u0003\u0002\u0002\u0002Ȏ<\u0003\u0002\u0002\u0002ȏȜ\u0005\u000f\b\u0002ȐȜ\u0005\u0011\t\u0002ȑȒ\u00070"
"\u0002\u0002Ȓȓ\u00070\u0002\u0002ȓȜ\u00070\u0002\u0002Ȕȕ\u0007*\u0002\u0002ȕȖ\u0007#\u0002\u0002ȖȜ\u0007+\u0002"
"\u0002ȗȘ\u0007*\u0002\u0002Șș\u0007A\u0002\u0002șȜ\u0007+\u0002\u0002ȚȜ\t\u001b\u0002\u0002țȏ\u0003\u0002\u0002"
"\u0002țȐ\u0003\u0002\u0002\u0002țȑ\u0003\u0002\u0002\u0002țȔ\u0003\u0002\u0002\u0002țȗ\u0003\u0002\u0002\u0002"
"țȚ\u0003\u0002\u0002\u0002Ȝ>\u0003\u0002\u0002\u0002ȝȞ\u0005=\u001f\u0002Ȟ@\u0003\u0002\u0002\u0002ȟȡ\n\u001c"
"\u0002\u0002Ƞȟ\u0003\u0002\u0002\u0002ȡȢ\u0003\u0002\u0002\u0002ȢȠ\u0003\u0002\u0002\u0002Ȣȣ\u0003\u0002\u0002"
"\u0002ȣB\u0003\u0002\u0002\u0002ȤȦ\u000b\u0002\u0002\u0002ȥȤ\u0003\u0002\u0002\u0002Ȧȧ\u0003\u0002\u0002\u0002"
"ȧȨ\u0003\u0002\u0002\u0002ȧȥ\u0003\u0002\u0002\u0002ȨD\u0003\u0002\u0002\u0002I\u0002Y^jmpu\u0083\u0086\u0089"
"\u008e\u009c\u009f¡§\u00ad°³¸»¿ÂÈÏÓÚÞâåçìð÷ĈČĝĦīņŋŏŒŔřŜšŧŭűŴźŽƃƆƌǛǠǣǦǭǰdzǸǽȂȅȊȍțȢȧ\u0002 "
)
TurkishLexer._decision_to_DFA = [DFA(TurkishLexer._ATN.getDecisionState(i), i) for i in
range(len(TurkishLexer._ATN.decisionToState))] | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/tokenization/antlr/turkish_lexer.py | turkish_lexer.py |
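# Illustrative sketch (not part of the original module): the lexer is normally driven through
# TurkishTokenizer, but it can also be used directly with an ANTLR InputStream as below.
if __name__ == "__main__":
    lexer = TurkishLexer(InputStream("Merhaba dünya!"))
    token = lexer.nextToken()
    while token.type != -1:
        print(token.text, token.type)
        token = lexer.nextToken()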
import logging
from struct import unpack
from typing import List, Dict, Tuple, BinaryIO
from zemberek.core.turkish import TurkishAlphabet
logger = logging.getLogger(__name__)
class LmVocabulary:
DEFAULT_SENTENCE_BEGIN_MARKER = "<s>"
DEFAULT_SENTENCE_END_MARKER = "</s>"
DEFAULT_UNKNOWN_WORD = "<unk>"
def __init__(self, f: BinaryIO):
vocabulary_length, = unpack(">i", f.read(4))
vocab: List[str] = []
for i in range(vocabulary_length):
utf_length, = unpack(">H", f.read(2))
vocab.append(f.read(utf_length).decode("utf-8"))
self.vocabulary_index_map: Dict[str, int] = {}
self.unknown_word = None
self.sentence_start = None
self.unknown_word_index = -1
self.sentence_start_index = -1
self.sentence_end_index = -1
self.sentence_end = None
self.vocabulary = ()
self.generate_map(vocab)
def index_of(self, word: str) -> int:
k = self.vocabulary_index_map.get(word)
return self.unknown_word_index if k is None else k
def size(self) -> int:
return len(self.vocabulary)
    def to_indexes(self, words: Tuple[str, ...]) -> Tuple[int, ...]:
indexes: List[int] = []
for word in words:
if word not in self.vocabulary_index_map:
indexes.append(self.unknown_word_index)
else:
indexes.append(self.vocabulary_index_map[word])
return tuple(indexes)
def generate_map(self, input_vocabulary: List[str]):
index_counter = 0
clean_vocab: List[str] = []
for word in input_vocabulary:
if word in self.vocabulary_index_map.keys():
logger.warning("Language model vocabulary has duplicate item: " + word)
else:
if word.translate(TurkishAlphabet.INSTANCE.lower_map).lower() == "<unk>":
if self.unknown_word_index != -1:
logger.warning('Unknown word was already defined as {} but another matching token exist in the '
'input vocabulary: {}'.format(self.unknown_word, word))
else:
self.unknown_word = word
self.unknown_word_index = index_counter
elif word.translate(TurkishAlphabet.INSTANCE.lower_map).lower() == "<s>":
if self.sentence_start_index != -1:
logger.warning(f"Sentence start index was already defined as {self.sentence_start} but another "
f"matching token exist in the input vocabulary: {word}")
else:
self.sentence_start = word
self.sentence_start_index = index_counter
elif word.translate(TurkishAlphabet.INSTANCE.lower_map).lower() == "</s>":
if self.sentence_end_index != -1:
logger.warning(f"Sentence end index was already defined as {self.sentence_end} but another "
f"matching token exist in the input vocabulary: {word})")
else:
self.sentence_end = word
self.sentence_end_index = index_counter
self.vocabulary_index_map[word] = index_counter
clean_vocab.append(word)
index_counter += 1
if self.unknown_word_index == -1:
self.unknown_word = "<unk>"
clean_vocab.append(self.unknown_word)
self.vocabulary_index_map[self.unknown_word] = index_counter
index_counter += 1
logger.debug("Necessary special token " + self.unknown_word + " was not found in the vocabulary, it is "
"added explicitly")
self.unknown_word_index = self.vocabulary_index_map[self.unknown_word]
if self.sentence_start_index == -1:
self.sentence_start = "<s>"
clean_vocab.append(self.sentence_start)
self.vocabulary_index_map[self.sentence_start] = index_counter
index_counter += 1
logger.debug("Vocabulary does not contain sentence start token, it is added explicitly.")
self.sentence_start_index = self.vocabulary_index_map[self.sentence_start]
if self.sentence_end_index == -1:
self.sentence_end = "</s>"
clean_vocab.append(self.sentence_end)
self.vocabulary_index_map[self.sentence_end] = index_counter
self.sentence_end_index = self.vocabulary_index_map[self.sentence_end]
self.vocabulary = tuple(clean_vocab)
@staticmethod
def load_from_data_input_stream(f: BinaryIO) -> 'LmVocabulary':
return LmVocabulary(f) | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/lm/lm_vocabulary.py | lm_vocabulary.py |
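# Illustrative sketch (not part of the original module): the constructor expects a big-endian int32
# word count followed by length-prefixed UTF-8 strings, so a vocabulary can also be built from an
# in-memory stream. The word list below is an assumption chosen for demonstration.
if __name__ == "__main__":
    import io
    from struct import pack
    words = ["<s>", "</s>", "<unk>", "merhaba", "dünya"]
    payload = pack(">i", len(words))
    for w in words:
        encoded = w.encode("utf-8")
        payload += pack(">H", len(encoded)) + encoded
    vocabulary = LmVocabulary.load_from_data_input_stream(io.BytesIO(payload))
    print(vocabulary.size(), vocabulary.index_of("merhaba"))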
import sys
import math
import numpy as np
from enum import Enum, auto
from struct import unpack
from math import log
from typing import List, Tuple, Optional
from zemberek.core.hash import Mphf, MultiLevelMphf, LargeNgramMphf
from zemberek.core.quantization import FloatLookup
from zemberek.lm import LmVocabulary
from zemberek.lm.compression.gram_data_array import GramDataArray
class SmoothLM:
"""
    SmoothLm is a compressed, optionally quantized, randomized back-off n-gram language model. It
    uses minimal perfect hash functions for compression, which means the n-grams themselves are not
    stored in the model.
    A detailed explanation can be found in the original zemberek (Java) source.
"""
LOG_ZERO_FLOAT = -math.log(sys.float_info.max)
def __init__(self, resource: str, log_base: float, unigram_weigth: float, unknown_backoff_penalty: float,
use_stupid_backoff: bool, stupid_backoff_alpha: float, ngram_key_file):
with open(resource, "rb") as f: # "zemberek/resources/lm-unigram.slm"
self.version, = unpack('>i', f.read(4))
self.type_int, = unpack('>i', f.read(4))
self.log_base, = unpack('>d', f.read(8))
self.order, = unpack('>i', f.read(4))
if self.type_int == 0:
self.type_ = SmoothLM.MphfType.SMALL
else:
self.type_ = SmoothLM.MphfType.LARGE
self.counts = [0]
for unigram_count in range(1, self.order + 1):
count, = unpack('>i', f.read(4))
self.counts.insert(unigram_count, count)
self.counts = tuple(self.counts)
self.probability_lookups = [FloatLookup(np.asarray([0.0], dtype=np.float32))]
for unigram_count in range(1, self.order + 1):
self.probability_lookups.insert(unigram_count, FloatLookup.get_lookup_from_double(f))
self.probability_lookups = tuple(self.probability_lookups)
self.backoff_lookups = [FloatLookup(np.asarray([0.0], dtype=np.float32))]
for unigram_count in range(1, self.order):
self.backoff_lookups.insert(unigram_count, FloatLookup.get_lookup_from_double(f))
self.backoff_lookups = tuple(self.backoff_lookups)
self.ngram_data = [None]
for unigram_count in range(1, self.order + 1):
self.ngram_data.insert(unigram_count, GramDataArray(f))
self.ngram_data = tuple(self.ngram_data)
unigram_count = self.ngram_data[1].count
self.unigram_probs = np.zeros((unigram_count,), dtype=np.float32)
if self.order > 1:
self.unigram_backoffs = np.zeros((unigram_count,), dtype=np.float32)
else:
self.unigram_backoffs = np.zeros((1,), dtype=np.float32)
            # the loop index here is a unigram id; the name "vocabulary_size" is kept for the value
            # assigned from the vocabulary further below, so a neutral index name is used instead
            for i in range(unigram_count):
                rank = self.ngram_data[1].get_probability_rank(i)
                self.unigram_probs[i] = self.probability_lookups[1].get(rank)
                if self.order > 1:
                    backoff_rank = self.ngram_data[1].get_back_off_rank(i)
                    self.unigram_backoffs[i] = self.backoff_lookups[1].get(backoff_rank)
if self.type_ == SmoothLM.MphfType.LARGE:
self.mphfs: List[Optional[Mphf]] = [None] * (self.order + 1)
for i in range(2, self.order + 1):
self.mphfs.insert(i, LargeNgramMphf.deserialize(f))
self.mphfs: Tuple[Optional[Mphf]] = tuple(self.mphfs)
else:
# this part doesn't work in default settings. It will be implemented if needed
raise NotImplementedError
self.vocabulary: LmVocabulary = LmVocabulary.load_from_data_input_stream(f)
vocabulary_size = self.vocabulary.size()
if vocabulary_size > unigram_count:
self.ngram_data[1].count = vocabulary_size
self.unigram_probs = self.unigram_probs[:vocabulary_size] if len(self.unigram_probs) >= vocabulary_size\
else np.pad(self.unigram_probs, (0, vocabulary_size - len(self.unigram_probs)))
self.unigram_backoffs = self.unigram_backoffs[:vocabulary_size] \
if len(self.unigram_backoffs) >= vocabulary_size \
else np.pad(self.unigram_backoffs, (0, vocabulary_size - len(self.unigram_backoffs)))
for i in range(unigram_count, vocabulary_size, 1):
self.unigram_probs[i] = -20.0
self.unigram_backoffs[i] = 0.0
self.unigram_weight = unigram_weigth
self.unknown_backoff_penalty = unknown_backoff_penalty
self.use_stupid_backoff = use_stupid_backoff
self.stupid_backoff_alpha = stupid_backoff_alpha
        if log_base != 10.0:
            self.change_log_base(log_base)
            self.stupid_backoff_log_alpha = math.log(stupid_backoff_alpha) / math.log(log_base)
        else:
            self.stupid_backoff_log_alpha = math.log(stupid_backoff_alpha) / math.log(10.0)
            # self.log_base = log_base
if unigram_weigth != 1.0:
raise NotImplementedError("Unigram smoothing is not implemented, it will be if needed")
self.ngram_ids = None
if ngram_key_file:
raise NotImplementedError("Loading n-gram id data is not implemented, it will be if needed")
def change_log_base(self, new_base: float):
FloatLookup.change_base(self.unigram_probs, self.log_base, new_base)
FloatLookup.change_base(self.unigram_backoffs, self.log_base, new_base)
for i in range(2, len(self.probability_lookups)):
self.probability_lookups[i].change_self_base(self.log_base, new_base)
if i < len(self.backoff_lookups) - 1:
self.backoff_lookups[i].change_self_base(self.log_base, new_base)
self.log_base = np.float32(new_base)
def ngram_exists(self, word_indexes: Tuple[int, ...]) -> bool:
if len(word_indexes) < 1 or len(word_indexes) > self.order:
            raise ValueError(f"Amount of tokens must be between 1 and {self.order}. But it is {len(word_indexes)}")
order = len(word_indexes)
if order == 1:
return 0 <= word_indexes[0] < len(self.unigram_probs)
quick_hash: int = MultiLevelMphf.hash_(word_indexes, -1)
index = self.mphfs[order].get_(word_indexes, quick_hash)
if self.ngram_ids is None:
return self.ngram_data[order].check_finger_print(quick_hash, index)
return self.ngram_ids.exists(word_indexes, index)
def get_unigram_probability(self, id_: int) -> float:
return self.get_probability((id_,))
def get_probability(self, word_indexes: Tuple[int, ...]) -> float:
n = len(word_indexes)
if n == 1:
return self.unigram_probs[word_indexes[0]]
elif n == 2:
return self.get_bigram_probability(word_indexes[0], word_indexes[1])
elif n == 3:
return self.get_tri_gram_probability(word_indexes)
else:
raise NotImplementedError()
def get_tri_gram_probability(self, w: Tuple[int, ...]) -> float:
finger_print = MultiLevelMphf.hash_(w, seed=-1)
n_gram_index = self.mphfs[3].get_(w, finger_print)
if not self.ngram_data[3].check_finger_print(finger_print, n_gram_index):
return self.get_bigram_probability_value(w[0], w[1]) + self.get_bigram_probability_value(w[1], w[2])
else:
return self.probability_lookups[3].get(self.ngram_data[3].get_probability_rank(n_gram_index))
def get_bigram_probability(self, w0: int, w1: int) -> float:
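        # if the bigram is not stored, back off to the unigram probability of w1, applying either
        # the fixed stupid-backoff penalty or the stored back-off weight of w0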
prob = self.get_bigram_probability_value(w0, w1)
if prob == self.LOG_ZERO_FLOAT:
if self.use_stupid_backoff:
return self.stupid_backoff_log_alpha + self.unigram_probs[w1]
else:
return self.unigram_backoffs[w0] + self.unigram_probs[w1]
else:
return prob
def get_bigram_probability_value(self, w0: int, w1: int) -> float:
quick_hash = MultiLevelMphf.hash_((w0, w1), -1)
index = self.mphfs[2].get_((w0, w1), quick_hash)
if self.ngram_data[2].check_finger_print(quick_hash, index):
return self.probability_lookups[2].get(self.ngram_data[2].get_probability_rank(index))
else:
return self.LOG_ZERO_FLOAT
@staticmethod
def builder(resource: str) -> 'SmoothLM.Builder':
return SmoothLM.Builder(resource)
class Builder:
def __init__(self, resource: str):
self._log_base = 10.0
self._unknown_backoff_penalty = 0.0
self._unigram_weight = 1.0
self._use_stupid_backoff = False
self._stupid_backoff_alpha = 0.4
self.resource = resource
self.ngram_ids = None
def log_base(self, log_base: float) -> 'SmoothLM.Builder':
self._log_base = log_base
return self
def build(self) -> 'SmoothLM':
return SmoothLM(self.resource, self._log_base, self._unigram_weight, self._unknown_backoff_penalty,
self._use_stupid_backoff, self._stupid_backoff_alpha, self.ngram_ids)
class MphfType(Enum):
SMALL = auto()
LARGE = auto() | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/lm/compression/smooth_lm.py | smooth_lm.py |
import numpy as np
from struct import unpack
class GramDataArray:
MAX_BUF: np.int32 = np.int32(0x3fffffff)
def __init__(self, file):
self.count, self.fp_size, self.prob_size, self.backoff_size = unpack('>4i', file.read(4 * 4))
if self.fp_size == 4:
self.fp_mask = -1
else:
self.fp_mask = (1 << self.fp_size * 8) - 1
self.block_size = self.fp_size + self.prob_size + self.backoff_size
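        # each n-gram record is packed as [fingerprint | probability rank | backoff rank] in block_size bytes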
        # The values below are calculated via helper functions in the Java version.
        # Here they are hardcoded for the block sizes used by the uni-gram and trigram models.
        if self.block_size == 2:
page_length = 268435456
self.page_shift = 28
self.index_mask = 134217727
elif self.block_size == 4:
page_length = 134217728
self.page_shift = 27
self.index_mask = 67108863
else:
page_length = 536870912
self.page_shift = 29
self.index_mask = 268435455
page_counter = 1
self.data = []
total = 0
dt = np.dtype('>b')
for i in range(page_counter):
if i < (page_counter - 1):
read_count = page_length * self.block_size
total += page_length * self.block_size
else:
read_count = (self.count * self.block_size) - total
self.data.append(np.fromfile(file, dtype=dt, count=read_count))
def get_probability_rank(self, index: int) -> int:
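        # decode the probability rank as a big-endian unsigned integer of prob_size bytes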
page_id = self.rshift(index, self.page_shift)
page_index = (index & self.index_mask) * self.block_size + self.fp_size
d = self.data[page_id]
if self.prob_size == 1:
return d[page_index] & 255
elif self.prob_size == 2:
return (d[page_index] & 255) << 8 | d[page_index + 1] & 255
elif self.prob_size == 3:
return (d[page_index] & 255) << 16 | (d[page_index + 1] & 255) << 8 | d[page_index + 2] & 255
else:
return -1
def get_back_off_rank(self, index: int) -> int:
page_id = self.rshift(index, self.page_shift)
page_index = (index & self.index_mask) * self.block_size + self.fp_size + self.prob_size
d = self.data[page_id]
if self.backoff_size == 1:
return d[page_index] & 255
elif self.backoff_size == 2:
return (d[page_index] & 255) << 8 | d[page_index + 1] & 255
elif self.backoff_size == 3:
return (d[page_index] & 255) << 16 | (d[page_index + 1] & 255) << 8 | d[page_index + 2] & 255
else:
return -1
def check_finger_print(self, fp_to_check_: int, global_index: int) -> bool:
fp_to_check = fp_to_check_ & self.fp_mask
page_index = (global_index & self.index_mask) * self.block_size
        d: np.ndarray = self.data[self.rshift(global_index, self.page_shift)]
if self.fp_size == 1:
return fp_to_check == (d[page_index] & 0xFF)
elif self.fp_size == 2:
return (self.rshift(fp_to_check, 8) == d[page_index] & 0xFF) \
and (fp_to_check & 0xFF == d[page_index + 1] & 0xFF)
elif self.fp_size == 3:
return (self.rshift(fp_to_check, 16) == (d[page_index] & 0xFF)) \
and ((self.rshift(fp_to_check, 8) & 0xFF) == (d[page_index + 1] & 0xFF)) \
and ((fp_to_check & 0xFF) == (d[page_index + 2] & 0xFF))
elif self.fp_size == 4:
return (self.rshift(fp_to_check, 24) == (d[page_index] & 0xFF)) \
and ((self.rshift(fp_to_check, 16) & 0xFF) == (d[page_index + 1] & 0xFF)) \
and ((self.rshift(fp_to_check, 8) & 0xFF) == (d[page_index + 2] & 0xFF)) \
and ((fp_to_check & 0xFF) == (d[page_index + 3] & 0xFF))
else:
            raise ValueError("fp_size must be between 1 and 4")
@staticmethod
def rshift(val: int, n: int) -> int:
"""Unsigned right shift operator
:param int val: integer to be shifted to right
:param int n: number of shifts
:return: shifted value
"""
return (val % 0x100000000) >> n | zemberek-python | /zemberek-python-0.2.3.tar.gz/zemberek-python-0.2.3/zemberek/lm/compression/gram_data_array.py | gram_data_array.py |
# zemfrog-auth
Authentication for the zemfrog framework
Currently only supports JWT (JSON Web Token) authentication.
# Features
* JWT Authentication Blueprint
* Event signal support for user events (login, register, etc.)
Usage
=====
Install the module
```sh
pip install zemfrog-auth
```
Add the JWT blueprint to your zemfrog application
```python
BLUEPRINTS = ["zemfrog_auth.jwt"]
```
Using event signals
-------------------
In this section I will give an example of subscribing to an event signal with blinker.
```python
# Add this to wsgi.py
from zemfrog_auth.signals import on_user_logged_in
@on_user_logged_in.connect
def on_logged_in(user):
print("Signal user logged in:", user)
```
For a list of available signals, you can see it [here](https://github.com/zemfrog/zemfrog-auth/blob/main/zemfrog_auth/signals.py).
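The signals used by this blueprint include `on_user_logged_in`, `on_user_registration`, `on_confirmed_user`, `on_forgot_password` and `on_reset_password`.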
For signal documentation you can visit [here](https://pythonhosted.org/blinker/).
| zemfrog-auth | /zemfrog-auth-1.0.3.tar.gz/zemfrog-auth-1.0.3/README.md | README.md |
from datetime import datetime, timedelta
from flask_jwt_extended import create_access_token, decode_token, get_raw_jwt
from jwt import DecodeError, ExpiredSignatureError
from marshmallow import fields
from werkzeug.security import generate_password_hash, check_password_hash
from zemfrog.decorators import http_code, authenticate, use_kwargs, marshal_with
from zemfrog.helper import db_add, db_update, db_commit, get_mail_template, get_user_roles
from zemfrog.models import (
DefaultResponseSchema,
LoginSchema,
LoginSuccessSchema,
PasswordResetSchema,
RegisterSchema,
RequestPasswordResetSchema,
)
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from zemfrog.tasks import send_email
from ..models import User, Log, Role, Permission
from ..signals import *
class PermissionSchema(SQLAlchemyAutoSchema):
class Meta:
ordered = True
model = Permission
class RoleSchema(SQLAlchemyAutoSchema):
class Meta:
ordered = True
model = Role
permissions = fields.List(fields.Nested(PermissionSchema()))
class UserDetailSchema(SQLAlchemyAutoSchema):
class Meta:
ordered = True
model = User
exclude = ("password",)
roles = fields.List(fields.Nested(RoleSchema()))
@authenticate()
@marshal_with(200, UserDetailSchema)
def user_detail():
"""
User detail info.
"""
email = get_raw_jwt().get("identity")
user = User.query.filter_by(email=email).first()
return user
@use_kwargs(LoginSchema(), location="form")
@marshal_with(404, DefaultResponseSchema)
@marshal_with(200, LoginSuccessSchema)
@http_code
def login(kwds):
"""
Login and get access token.
"""
email = kwds.get("username")
passw = kwds.get("password")
user = User.query.filter_by(email=email).first()
if user and user.confirmed and check_password_hash(user.password, passw):
login_date = datetime.utcnow()
log = Log(login_date=login_date)
user.logs.append(log)
db_commit()
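        # embed the user's roles in the JWT claims so they can be used for role-based access checks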
roles = get_user_roles(user)
claims = {"roles": roles}
access_token = create_access_token(email, user_claims=claims)
on_user_logged_in.send(user)
return {"access_token": access_token}
return {"message": "Incorrect email or password.", "code": 404}
@use_kwargs(RegisterSchema(), location="form")
@marshal_with(200, DefaultResponseSchema)
@marshal_with(403, DefaultResponseSchema)
@http_code
def register(kwds):
"""
Register an account.
"""
email = kwds.get("username")
passw = kwds.get("password")
first_name = kwds.get("first_name")
last_name = kwds.get("last_name")
username = first_name + " " + last_name
if email:
user = User.query.filter_by(email=email).first()
if not user:
if username and passw:
passw = generate_password_hash(passw)
user = User(
first_name=first_name,
last_name=last_name,
name=username,
email=email,
password=passw,
registration_date=datetime.utcnow(),
)
db_add(user)
token = create_access_token(
user.id,
expires_delta=False,
user_claims={"token_registration": True},
)
msg = get_mail_template("register.html", token=token)
send_email.delay("Registration", html=msg, recipients=[email])
on_user_registration.send(user)
message = "Successful registration."
status_code = 200
else:
message = "Username and password are required."
status_code = 403
else:
message = "Email already exists."
status_code = 403
else:
message = "Email required."
status_code = 403
return {"message": message, "code": status_code}
@marshal_with(200, DefaultResponseSchema)
@marshal_with(403, DefaultResponseSchema)
@http_code
def confirm_account(token):
"""
Confirm account.
"""
try:
data = decode_token(token)
if not data["user_claims"].get("token_registration"):
raise DecodeError
uid = data["identity"]
user = User.query.filter_by(id=uid).first()
if user and not user.confirmed:
message = "Confirmed."
status_code = 200
db_update(user, confirmed=True, date_confirmed=datetime.utcnow())
on_confirmed_user.send(user)
else:
raise DecodeError
except DecodeError:
message = "Invalid token."
status_code = 403
return {"message": message, "code": status_code}
@use_kwargs(RequestPasswordResetSchema(), location="form")
@marshal_with(200, DefaultResponseSchema)
@marshal_with(404, DefaultResponseSchema)
@marshal_with(403, DefaultResponseSchema)
@http_code
def request_password_reset(kwds):
"""
Request a password reset.
"""
email = kwds.get("username")
if email:
user = User.query.filter_by(email=email).first()
if not user:
message = "User not found."
status_code = 404
else:
message = "A password reset request has been sent."
status_code = 200
token = create_access_token(
user.id,
expires_delta=timedelta(hours=2),
user_claims={"token_password_reset": True},
)
msg = get_mail_template(
"forgot_password.html", token=token
)
send_email.delay("Forgot password", html=msg, recipients=[email])
log = Log(date_requested_password_reset=datetime.utcnow())
user.logs.append(log)
db_commit()
on_forgot_password.send(user)
else:
message = "Email required."
status_code = 403
return {"message": message, "code": status_code}
@marshal_with(200, DefaultResponseSchema)
@marshal_with(401, DefaultResponseSchema)
@marshal_with(403, DefaultResponseSchema)
@http_code
def confirm_password_reset_token(token):
"""
Validate password reset token.
"""
try:
data = decode_token(token)
if not data["user_claims"].get("token_password_reset"):
raise DecodeError
uid = data["identity"]
user = User.query.filter_by(id=uid).first()
if user:
message = "Valid token."
status_code = 200
else:
raise DecodeError
except DecodeError:
message = "Invalid token."
status_code = 401
except ExpiredSignatureError:
message = "Token expired."
status_code = 403
return {"message": message, "code": status_code}
@use_kwargs(PasswordResetSchema(), location="form")
@marshal_with(200, DefaultResponseSchema)
@marshal_with(403, DefaultResponseSchema)
@marshal_with(401, DefaultResponseSchema)
@marshal_with(404, DefaultResponseSchema)
@http_code
def password_reset(kwds, token):
"""
Reset user password.
"""
try:
data = decode_token(token)
if not data["user_claims"].get("token_password_reset"):
raise DecodeError
uid = data["identity"]
user = User.query.filter_by(id=uid).first()
passw = kwds.get("password")
if user and passw:
passw = generate_password_hash(passw)
log = Log(date_set_new_password=datetime.utcnow())
user.logs.append(log)
db_update(user, password=passw)
on_reset_password.send(user)
message = "Successfully change password."
status_code = 200
else:
message = "User not found."
status_code = 404
except DecodeError:
message = "Invalid token."
status_code = 401
except ExpiredSignatureError:
message = "Token expired."
status_code = 403
return {"message": message, "code": status_code} | zemfrog-auth | /zemfrog-auth-1.0.3.tar.gz/zemfrog-auth-1.0.3/zemfrog_auth/jwt/views.py | views.py |
# zemfrog-test
Zemfrog unit testing tools
# Features
* Supports automatically creating unit tests for APIs / blueprints
* Available fixtures:
- client
> This is to access the Client class to interact with the API
- app_ctx
> This is to enable the flask application context
- req_ctx
> This is to activate the flask request context
- user
> This is to generate confirmed random users
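For illustration, here is a minimal sketch of a test that uses these fixtures. It assumes the `user` fixture yields a zemfrog `User` model instance with `confirmed` and `email` attributes; adjust the assertions to your own models.
```python
def test_user_fixture(app_ctx, user):
    # "user" is a confirmed, randomly generated user provided by the fixture
    assert user.confirmed
    assert user.email
```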
# Warning
zemfrog-test registers a finalizer that deletes all users when the test session ends, so you need to create a special database for testing.
# Usage
Install this
```sh
pip install zemfrog-test
```
And add it to the `COMMANDS` configuration in the zemfrog application.
```python
COMMANDS = ["zemfrog_test"]
```
Now that you have the `test` command, here is a list of supported commands:
* `init` - Initialize the tests directory in the project directory.
* `new` - Create unit tests for the API or blueprint. (The names entered must match `APIS` and `BLUEPRINTS` configurations. For example `zemfrog_auth.jwt`)
* `run` - To run unit tests. **It doesn't work with the `pytest` command, don't know why. :/**
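For example, assuming the command group is registered as `test` (as shown above), a typical workflow might look like this:
```sh
flask test init                  # create the tests/ directory
flask test new zemfrog_auth.jwt  # generate unit tests for a blueprint listed in BLUEPRINTS
flask test run                   # run the generated tests
```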
| zemfrog-test | /zemfrog-test-1.0.3.tar.gz/zemfrog-test-1.0.3/README.md | README.md |
from string import ascii_letters
from distutils.dir_util import copy_tree
from random import choice
import os
from marshmallow import Schema
from typing import Callable, Dict, List
from zemfrog.exception import ZemfrogTemplateNotFound
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "templates")
def get_template(*paths) -> str:
"""
Function to get template base directory.
:param paths: template directory or file name.
:raises: ZemfrogTemplateNotFound
"""
t = os.path.join(TEMPLATE_DIR, *paths)
if not (os.path.isdir(t) or os.path.isfile(t)):
raise ZemfrogTemplateNotFound("unknown template %r" % os.sep.join(paths))
return t
def copy_template(name: str, dst: str):
"""
Function for copying templates.
:param name: template directory name.
:param dst: Destination output.
"""
t = get_template(name)
copy_tree(t, dst)
def generate_random_string(length):
"""Make random upper / lower case depending on length.
Args:
length (int): letter length
"""
rv = ""
while len(rv) != length:
c = choice(ascii_letters)
rv += c
return rv
def parse_args_to_spec(func: Callable):
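    """Build a request-data skeleton, keyed by parameter location, from the view's ``_apidoc`` metadata."""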
apispec = getattr(func, "_apidoc", {})
args: List[dict] = apispec.get("arguments", {}).get("parameters", [])
data = {}
for a in args:
schema = a["schema"]
if isinstance(schema, dict):
keys = list(schema.keys())
elif isinstance(schema, Schema):
keys = list(schema.fields.keys())
loc = a.get("in", "json")
if loc in ("json", "form", "files", "query"):
param = data.get(loc, {})
for k in keys:
param[k] = None
data[loc] = param
else:
raise ValueError("parameter location is unknown: %r" % loc)
return data
def parse_paths(url: str) -> Dict[str, None]:
    """Extract path parameter names from the url rule.
Args:
url (str): url endpoint.
"""
paths = {}
for p in url.split("/"):
p = p.strip()
if p.startswith("<") and p.endswith(">"):
path = p.strip("<>")
if ":" in path:
path = path.split(":", 1)[1]
paths[path] = None
return paths | zemfrog-test | /zemfrog-test-1.0.3.tar.gz/zemfrog-test-1.0.3/zemfrog_test/helper.py | helper.py |
from importlib import import_module
from flask import current_app
from .helper import copy_template, get_template, parse_args_to_spec, parse_paths
from zemfrog.helper import get_import_name, import_attr
from jinja2 import Template
import os
def g_init_test():
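    """Copy the unit-test boilerplate into the project's 'tests' directory and render its templates."""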
import_name = current_app.import_name
root_path = current_app.root_path
tests_path = os.path.join(root_path, "tests")
if os.path.isdir(tests_path):
print("the 'tests' directory already exists")
return
print("Creating unittest... ", end="")
copy_template("tests", tests_path)
    main_app = import_name == "wsgi"
for root, _, files in os.walk(tests_path):
for f in files:
if not f.endswith(".py"):
continue
f = os.path.join(root, f)
with open(f) as fp:
data = fp.read()
t = Template(data)
new = t.render(import_name=import_name, main_app=main_app)
with open(f, "w") as fp:
fp.write(new)
print("(done)")
def g_unit_test(name):
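    """Generate a test_<name>.py module for the given API or blueprint resource."""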
specs = []
output_dir = os.path.join(current_app.root_path, "tests")
if not os.path.isdir(output_dir):
print("Error: You must run 'flask test init' first")
exit(1)
import_name = get_import_name(current_app)
if name in current_app.config["APIS"]:
try:
res = import_module(import_name + f"apis.{name}")
except (ImportError, AttributeError):
res = import_module(name)
tag = res.tag
routes = res.routes
for detail in routes:
url, view, methods = detail
e = view.__name__
spec = {
"data": parse_args_to_spec(view),
"method": methods[0],
"name": tag,
"func": e,
"endpoint": f"{tag}.{e}",
"paths": parse_paths(url),
}
specs.append(spec)
output_dir = os.path.join(output_dir, "apis")
elif name in current_app.config["BLUEPRINTS"]:
try:
bp = import_attr(import_name + f"{name}.routes.init_blueprint")()
urls = import_module(import_name + f"{name}.urls")
except (ImportError, AttributeError):
bp = import_attr(f"{name}.routes.init_blueprint")()
urls = import_module(f"{name}.urls")
name = bp.name
routes = urls.routes
for detail in routes:
url, view, methods = detail
e = view.__name__
spec = {
"data": parse_args_to_spec(view),
"method": methods[0],
"name": name,
"func": e,
"endpoint": f"{name}.{e}",
"paths": parse_paths(url),
}
specs.append(spec)
output_dir = os.path.join(output_dir, "blueprints")
else:
print("Error: unknown resource %r" % name)
exit(1)
output_file = os.path.join(output_dir, f"test_{name}.py")
if os.path.isfile(output_file):
print("Error: The file already exists %r" % output_file)
exit(1)
tpl = get_template("unittest.py")
with open(tpl) as fp:
data = fp.read()
print("Creating unit testing %r... " % name, end="")
with open(output_file, "w") as fp:
data = Template(data).render(name=name, specs=specs)
fp.write(data)
print("(done)") | zemfrog-test | /zemfrog-test-1.0.3.tar.gz/zemfrog-test-1.0.3/zemfrog_test/generator.py | generator.py |
=======
History
=======
1.0.0 (2020-09-03)
------------------
* First release on PyPI.
1.0.1 (2020-09-07)
------------------
* Automation create (CRUD) API
* Update template API
* Update zemfrog release information.
1.0.2 (2020-09-08)
------------------
* Update API structure
1.0.3 (2020-09-08)
------------------
* re-upload
1.0.4 (2020-09-09)
------------------
* fix manifest file
1.0.5 (2020-09-10)
------------------
* add command boilerplate
* add schema command
1.0.6 (2020-09-15)
------------------
* add jwt authentication
* refactor blueprint boilerplate
* add send async email
* fix celery
1.0.7 (2020-09-19)
------------------
* Fix: `#8 <https://github.com/zemfrog/zemfrog/issues/8>`_
* flask-apispec integration.
* improve authentication.
* add default schema models.
* Fix: rest api boilerplate
* IMPROVE: Added a prompt if a schema model exists.
* IMPROVE: add zemfrog to requirements
* DOC: add README to project boilerplate
1.0.8 (2020-10-03)
------------------
* Fix: `#12 <https://github.com/zemfrog/zemfrog/issues/12>`_, `#13 <https://github.com/zemfrog/zemfrog/issues/13>`_, `#14 <https://github.com/zemfrog/zemfrog/issues/14>`_
* IMPROVE: import the orm model in the schema generator.
* General Update: update development status
1.0.9 (2020-10-05)
------------------
* Fix: `#16 <https://github.com/zemfrog/zemfrog/issues/16>`_, `#14 <https://github.com/zemfrog/zemfrog/issues/14>`_, `#17 <https://github.com/zemfrog/zemfrog/issues/17>`_
* NEW: add version option
1.2.0 (2020-10-19)
------------------
* NEW: add load urls
* NEW: add load middlewares
* NEW: middleware boilerplate.
* NEW: multiple apps support
* Fix minor bugs
1.2.1 (2020-10-27)
------------------
* New Feature: added prompt to manage the app.
* moved mail dir to templates/emails
* add ``api_doc`` & ``authenticate`` decorator.
* NEW: add swagger oauth2.
* NEW: add first_name & last_name column.
* IMPROVE: Support creating REST API descriptions via function documents.
* Refactor Code: Rename and add field validation.
* Code Change: update REST API structure.
1.2.2 (2020-10-28)
------------------
* Refactor generator
* New Feature: add error handler
1.2.3 (2020-11-13)
------------------
* Adding: current_db local proxy
* rename services directory to tasks
1.2.4 (2020-11-14)
------------------
* support multiple static files
* Add an endpoint to validate the password reset token
* fix `#37 <https://github.com/zemfrog/zemfrog/issues/37>`_
1.2.5 (2020-11-18)
------------------
* NEW: add extension, model, task generator
* Refactor Code: add model mixin
* add command user, role & permission
* FIX: auth logs
* New Feature: supports role-based access control
1.2.6 (2020-11-21)
------------------
* IMPROVE: commands to manage nested applications
* Added endpoint for checking token jwt
* Add an endpoint to retrieve a single record from the model
* Add schema to limit results
* Added a handler for handling API errors
1.2.7 (2020-11-24)
------------------
* FIX: user checks in the test token endpoint
* NEW: support for creating your own app loader
* FIX: Make user roles optional
* FIX: `#49 <https://github.com/zemfrog/zemfrog/issues/49>`_
2.0.1 (2020-12-20)
------------------
* Refactoring app loaders
* IMPROVE: REST API, models & validators
* IMPROVE: added template checks
* IMPROVE: add password validator
* IMPROVE: Compatible with frontend nuxtjs
* NEW: add flask-cors extension
2.0.2 (2020-12-20)
------------------
* fix: missing flask-cors dependency
2.0.3 (2020-12-20)
------------------
* IMPROVE: clean up dependencies
3.0.1 (2020-12-20)
------------------
* add command secretkey
* Fix: varchar length
* Added db migration based on environment
* Stable release
4.0.1 (2021-03-04)
------------------
* IMPROVE: Move extensions to global
* NEW: add pre-commit tool
* IMPROVE: refactor json response
* Refactor Code: run pre-commit
* IMPROVE: Change 'SystemExit' to 'ValidationError'
* IMPROVE: Rename the api directory to apis
* NEW: add autoflake hook
* Changed the stable version status to BETA
4.0.2 (2021-03-05)
------------------
* FIX: response message in jwt & error handler boilerplate
* FIX: update zemfrog version in requirements.txt
4.0.3 (2021-03-17)
------------------
* Fix https://github.com/zemfrog/zemfrog/issues/87
* Add pre-commit to requirements-dev.txt
4.0.4 (2021-03-31)
------------------
* FIX: role & permission relation
* FIX: typo column name
* IMPROVE: split blueprint and task to global
* IMPROVE: split error handlers to global
* IMPROVE: set default blueprint to blank
* IMPROVE: Use schema from source rather than local proxy
* IMPROVE: Using the model name corresponding user input
4.0.5 (2021-04-01)
------------------
* FIX: Load the blueprint name
* FIX: unknown column
* NEW: added codecov workflow (testing)
* NEW: add default value to 'confirmed' column
5.0.1 (2021-04-10)
------------------
* Flask-smorest integration. Based on `#63 <https://github.com/zemfrog/zemfrog/issues/63>`_
* Refactor Code: added scaffolding
* FIX: use 'alt_response' instead of 'response' to wrap multiple responses.
* FIX: configuration to enable / disable OpenAPI
* IMPROVE: no longer supports to load main urls
* IMPROVE: Add a command description to the sub application
* IMPROVE: use 'subprocess.call' instead of 'os.system'
* Change the name of the password reset request template
* create pyup.io config file
| zemfrog | /zemfrog-5.0.1.tar.gz/zemfrog-5.0.1/HISTORY.rst | HISTORY.rst |
=======
zemfrog
=======
.. image:: https://raw.githubusercontent.com/zemfrog/zemfrog/master/docs/_static/logo.png
:target: https://zemfrog.readthedocs.io
:alt: zemfrog logo
.. image:: https://img.shields.io/pypi/v/zemfrog.svg?style=for-the-badge
:target: https://pypi.python.org/pypi/zemfrog
.. image:: https://img.shields.io/pypi/status/zemfrog.svg?style=for-the-badge
:target: https://pypi.python.org/pypi/zemfrog/
.. image:: https://img.shields.io/pypi/dm/zemfrog?logo=python&style=for-the-badge
:target: https://pypi.python.org/pypi/zemfrog/
.. image:: https://img.shields.io/travis/zemfrog/zemfrog.svg?style=for-the-badge
:target: https://travis-ci.com/zemfrog/zemfrog
.. image:: https://readthedocs.org/projects/zemfrog/badge/?version=latest&style=for-the-badge
:target: https://zemfrog.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Zemfrog is a simple framework based on Flask for building a REST API quickly.
It focuses on building a customizable, flexible and manageable REST API!
This project is heavily inspired by `FastAPI <https://fastapi.tiangolo.com/>`_ and `Django <https://www.djangoproject.com/>`_ Framework.
Notes
-----
The project is still in ``BETA`` version, **which means that all the APIs in it are still unstable**.
Please be careful if you want to use it in a production environment! thanks.
Why zemfrog?
------------
Zemfrog is equipped with advanced features including:
* Solid application structure.
* Automatically generate REST API.
* Built-in JWT authentication.
* RBAC support.
* Automatically generate API documentation (swagger-ui).
* Background jobs support.
* Database migration based on application environment.
* And much more...
Donate & Support
----------------
Keep in mind that donations are very important to me, because currently I am working alone to develop this project.
It takes a lot of time and energy. If this project is useful, please give me any support. I really appreciate it.
And also you can donate your money via:
.. image:: https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png
:target: https://www.buymeacoffee.com/aprilahijriyan
.. image:: https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif
:target: https://www.paypal.me/aprilahijriyan
.. image:: https://c5.patreon.com/external/logo/become_a_patron_button.png
:target: https://www.patreon.com/bePatron?u=20603237
.. image:: https://ko-fi.com/img/githubbutton_sm.svg
:target: https://ko-fi.com/E1E746746
Links
-----
* Homepage: https://github.com/zemfrog/zemfrog
* Documentation: https://zemfrog.readthedocs.io
* License: `MIT <https://github.com/zemfrog/zemfrog/blob/master/LICENSE>`_
Credits
-------
* `Flask <https://github.com/pallets/flask>`_
* `Cookie Cutter <https://github.com/cookiecutter/cookiecutter>`_
| zemfrog | /zemfrog-5.0.1.tar.gz/zemfrog-5.0.1/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/zemfrog/zemfrog/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zemfrog could always use more documentation, whether as part of the
official zemfrog docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/zemfrog/zemfrog/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zemfrog` for local development.
1. Fork the `zemfrog` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zemfrog.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zemfrog
$ cd zemfrog/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zemfrog tests
$ python setup.py test or pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8, and for PyPy. Check
https://travis-ci.com/zemfrog/zemfrog/pull_requests
and make sure that the tests pass for all supported Python versions.
| zemfrog | /zemfrog-5.0.1.tar.gz/zemfrog-5.0.1/CONTRIBUTING.rst | CONTRIBUTING.rst |