import pandas as pd
import os
import datetime
def get_data(data_name, columns=None, start_time='2016-01-01', end_time='2021-01-01', frequency=1):
base_data_names = ['adj_close_price', 'adj_open_price', 'adj_high_price', 'adj_low_price',
'volume', 'value', 'openint']
base_columns = ['IF', 'IC', 'IH']
if columns is None:
columns = base_columns
if data_name not in base_data_names:
return
if not isinstance(columns, list):
return
for c in columns:
if c not in base_columns:
print(f'No {c} Data')
return
    if 240 % frequency != 0:
        print('frequency must divide 240 evenly and be at most 240')
        return
dirname, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(dirname, 'dataset', f'_{data_name}.csv')
output = pd.read_csv(path, index_col=0, header=0, converters={'date': pd.to_datetime})
output = output.loc[pd.to_datetime(start_time):pd.to_datetime(end_time), columns]
if frequency <= 240:
if data_name in ['adj_close_price', 'openint']:
output = output.iloc[frequency-1::frequency]
elif data_name in ['adj_open_price']:
output = output.rolling(frequency).apply(lambda x: x.iloc[0]).iloc[frequency-1::frequency]
elif data_name in ['adj_high_price']:
output = output.rolling(frequency).max().iloc[frequency - 1::frequency]
elif data_name in ['adj_low_price']:
output = output.rolling(frequency).min().iloc[frequency - 1::frequency]
elif data_name in ['volume', 'value']:
output = output.rolling(frequency).sum().iloc[frequency - 1::frequency]
if frequency == 240:
output.index = pd.to_datetime(output.index.date)
return output
def index_futures_adj(frequency=1):
dirname, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(dirname, 'dataset', '_adj_close_price.csv')
output = pd.read_csv(path, index_col=0, header=0)
output.index = pd.to_datetime(output.index)
    if 240 % frequency != 0:
        print('frequency must divide 240 evenly')
        return
    output = output[frequency - 1::frequency]
    if frequency == 240:
        # Daily data: drop the intraday time component from the index.
        output.index = output.index.to_series().apply(lambda x: datetime.datetime(x.year, x.month, x.day))
return output
def index_futures_volume(frequency=1):
dirname, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(dirname, 'dataset', '_volume.csv')
output = pd.read_csv(path, index_col=0, header=0)
output.index = pd.to_datetime(output.index)
    if 240 % frequency != 0:
        print('frequency must divide 240 evenly')
        return
    if frequency == 240:
        # Daily totals: sum the intraday bars for each calendar date.
        output = output.groupby(output.index.date).sum()
        output.index = pd.to_datetime(output.index)
    else:
        output = output.rolling(frequency).sum().iloc[frequency - 1::frequency]
return output
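# Usage sketch, assuming the bundled CSVs in ./dataset/ exist and hold
# 1-minute bars with 240 bars per trading day, as the functions above imply.
if __name__ == "__main__":
    # 30-minute close prices for the IF contract over one year.
    close_30m = get_data('adj_close_price', columns=['IF'],
                         start_time='2018-01-01', end_time='2019-01-01',
                         frequency=30)
    # frequency=240 collapses each 240-bar day into one row with a date index.
    daily_volume = get_data('volume', frequency=240)
    print(close_30m.head())
    print(daily_volume.head())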
# ---- end of file: /sc-backtest-0.1.14.tar.gz/sc-backtest-0.1.14/sc_backtest/data_set.py (pypi) ----
import asyncio
import aiohttp
from dataclasses import dataclass, field
from requests import post
from sc_cc_ng_models_python import ContextFilter, BitVal
from typing import List, Optional, Any
from enum import Enum
from itertools import chain
from more_itertools import batched
from functools import reduce
@dataclass
class Result:
"""
A result object, containing data if everything was ok else an error.
"""
data: Optional[Any] = None
error: Optional[str] = None
@dataclass
class SCNG:
url: str
@staticmethod
def _bit_list_json_query(tokens_list: List[List[str]], context_filter: Optional[ContextFilter] = None) -> dict:
if context_filter is None:
context_filter = ContextFilter.empty()
return {
"query": """
query TokenListListQuery(
$tokenList: [[String!]!]!,
$contextFilter: ContextFilter,
) {
tokenListBasedContent(
tokenList: $tokenList
contextFilter: $contextFilter
) {
context
value
reason
}
}
""",
"variables": {
"tokenList": tokens_list,
"contextFilter": context_filter.to_dict(),
}
}
@staticmethod
def _dict_list_json_query(tokens_list: List[List[str]], context_filter: Optional[ContextFilter] = None) -> dict:
if context_filter is None:
context_filter = ContextFilter.empty()
return {
"query": """
query TokenListDictQuery(
$tokenList: [[String!]!]!,
$contextFilter: ContextFilter,
) {
tokensListBasedContentAsDict(
tokenList: $tokenList
contextFilter: $contextFilter
)
}
""",
"variables": {
"tokenList": tokens_list,
"contextFilter": context_filter.to_dict(),
}
}
@staticmethod
    def _bit_list_to_string_list(bit_lists: List[List[BitVal]], ignore_context: bool = False) -> List[List[str]]:
"""
Converts a list of lists of BitVals to a list of lists of strings.
"""
return list(
map(
lambda xs: list(
map(
lambda x: x.to_string(
simple=ignore_context,
),
xs
)
),
bit_lists
)
)
def to_dict_list(
self,
tokens_list: List[List[str]],
context_filter: Optional[ContextFilter] = None,
) -> Result:
"""
Converts lists of tokens to a list of dictionaries, containing
all matching meta data to those tokens. If no meta data was found
for a combination of tokens, the dictionary will be empty.
:param tokens_list: A list of lists of tokens.
:param context_filter: A context filter object.
:return: A result object.
"""
try:
response = post(
self.url,
json=self._dict_list_json_query(
tokens_list,
context_filter,
),
)
if response.status_code == 200:
json = response.json()
if "errors" in json:
return Result(error=json["errors"])
else:
return Result(data=response.json()['data']['tokensListBasedContentAsDict'])
else:
return Result(error=response.text)
except Exception as e:
return Result(error=str(e))
def to_bit_list(
self,
tokens_list: List[List[str]],
context_filter: Optional[ContextFilter] = None,
) -> Result:
"""
Converts lists of tokens to a list of bit values, containing
all matching meta data to those tokens. If no meta data was found
for a combination of tokens, the list will be empty.
:param tokens_list: A list of lists of tokens.
:param context_filter: A context filter object.
:return: A result object.
"""
try:
response = post(
self.url,
json=self._bit_list_json_query(
tokens_list,
context_filter,
)
)
if response.status_code == 200:
json = response.json()
if "errors" in json:
return Result(error=json["errors"])
else:
return Result(
data=list(
map(
lambda xs: list(
map(
lambda x: BitVal(
context=x['context'],
value=x['value'],
reason=x['reason'],
),
xs
)
),
response.json()['data']['tokenListBasedContent']
)
)
)
else:
return Result(error=response.text)
except Exception as e:
return Result(error=str(e))
def to_string_list(
self,
tokens_list: list,
context_filter: Optional[ContextFilter] = None,
ignore_context: bool = False,
) -> Result:
"""
Converts lists of tokens to a list of strings, containing
all matching meta data to those tokens. If no meta data was found
for a combination of tokens, the list will be empty.
:param tokens_list: A list of lists of tokens.
        :param context_filter: A context filter object.
        :param ignore_context: If True, values are rendered without their context.
        :return: A result object.
"""
try:
internal_result = self.to_bit_list(
tokens_list=tokens_list,
context_filter=context_filter,
)
if internal_result.error is None:
return Result(
data=self._bit_list_to_string_list(
internal_result.data,
ignore_context=ignore_context,
)
)
else:
return internal_result
except Exception as e:
return Result(error=str(e))
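# Usage sketch for the synchronous client above. The endpoint URL is a
# placeholder, not a real service address.
if __name__ == "__main__":
    sdk = SCNG(url="https://example.com/graphql")  # hypothetical endpoint
    result = sdk.to_string_list(
        tokens_list=[["token_a", "token_b"], ["token_c"]],
        ignore_context=True,
    )
    if result.error is None:
        print(result.data)  # a list of lists of strings
    else:
        print("query failed:", result.error)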
@dataclass
class ResultAsync:
"""
A result object, containing data if everything was ok else an error.
"""
data: List[Optional[str]] = field(default_factory=lambda: [])
error: List[Optional[str]] = field(default_factory=lambda: [])
@dataclass
class SCNGAsync:
url: str
@staticmethod
def _compile_batched_result(results: List[Result]) -> ResultAsync:
""""""
return ResultAsync(
data=list(
chain(
*map(
lambda x: x.data,
results
)
)
),
error=list(
filter(
lambda x: x is not None,
map(
lambda x: x.error,
results
)
)
),
)
    async def _fetch_batch(self, session, data, query_fn, response_keys: list, context_filter: Optional[ContextFilter] = None) -> Result:
"""
Fetch single batch of data from the server.
"""
try:
async with session.post(
self.url,
json=query_fn(
data,
context_filter,
)
) as response:
result = await response.json()
return Result(
data=reduce(
lambda x,y: x.get(y),
response_keys,
result
)
)
except Exception as e:
return Result(error=str(e))
async def to_dict_list(self, tokens_list, context_filter: Optional[ContextFilter] = None, batch_size: int = 5, seq_size: int = 4) -> ResultAsync:
"""
Converts lists of tokens to a list of dicts, containing
all matching meta data to those tokens. If no meta data was found
for a combination of tokens, the list will be empty.
:param tokens_list: A list of lists of tokens.
:param context_filter: A context filter object.
        :param batch_size: Number of token lists sent per request.
        :param seq_size: Number of request batches awaited concurrently.
:return: A result object.
"""
try:
final = []
async with aiohttp.ClientSession() as session:
for super_batch in map(
lambda super_batch: map(
lambda batch: asyncio.ensure_future(
self._fetch_batch(
session,
batch,
SCNG._dict_list_json_query,
[
'data',
'tokensListBasedContentAsDict',
],
context_filter,
)
),
super_batch,
),
batched(
batched(
tokens_list,
batch_size,
),
seq_size,
)
):
final.append(
await asyncio.gather(*super_batch)
)
result = self._compile_batched_result(
list(
map(
self._compile_batched_result,
final
)
)
)
return ResultAsync(
data=result.data,
error=list(
chain(*result.error),
)
)
except Exception as e:
            return ResultAsync(error=[str(e)])
async def to_bit_list(self, tokens_list, context_filter: Optional[ContextFilter] = None, batch_size: int = 10, seq_size: int = 4) -> ResultAsync:
"""
Converts lists of tokens to a list of bits, containing
all matching meta data to those tokens. If no meta data was found
for a combination of tokens, the list will be empty.
:param tokens_list: A list of lists of tokens.
:param context_filter: A context filter object.
        :param batch_size: Number of token lists sent per request.
        :param seq_size: Number of request batches awaited concurrently.
:return: A result object.
"""
try:
final = []
async with aiohttp.ClientSession() as session:
for super_batch in map(
lambda super_batch: map(
lambda batch: asyncio.ensure_future(
self._fetch_batch(
session,
batch,
SCNG._bit_list_json_query,
[
'data',
'tokenListBasedContent',
],
context_filter,
)
),
super_batch,
),
batched(
batched(
tokens_list,
batch_size,
),
seq_size,
)
):
final.append(
await asyncio.gather(*super_batch)
)
result = self._compile_batched_result(
list(
map(
self._compile_batched_result,
final
)
)
)
return ResultAsync(
data=list(
map(
lambda xs: list(
map(
lambda x: BitVal(
context=x['context'],
value=x['value'],
reason=x['reason'],
),
xs
)
),
result.data,
)
),
error=list(
chain(*result.error),
)
)
except Exception as e:
            return ResultAsync(error=[str(e)])
async def to_string_list(self, tokens_list, context_filter: Optional[ContextFilter] = None, batch_size: int = 10, seq_size: int = 4, ignore_context: bool = False) -> ResultAsync:
"""
Converts lists of tokens to a list of strings, containing
all matching meta data to those tokens. If no meta data was found
for a combination of tokens, the list will be empty.
:param tokens_list: A list of lists of tokens.
:param context_filter: A context filter object.
        :param batch_size: Number of token lists sent per request.
        :param seq_size: Number of request batches awaited concurrently.
        :param ignore_context: If True, values are rendered without their context.
:return: A result object.
"""
try:
final = []
async with aiohttp.ClientSession() as session:
for super_batch in map(
lambda super_batch: map(
lambda batch: asyncio.ensure_future(
self._fetch_batch(
session,
batch,
SCNG._bit_list_json_query,
[
'data',
'tokenListBasedContent',
],
context_filter,
)
),
super_batch,
),
batched(
batched(
tokens_list,
batch_size,
),
seq_size,
)
):
final.append(
await asyncio.gather(*super_batch)
)
result = self._compile_batched_result(
list(
map(
self._compile_batched_result,
final
)
)
)
return ResultAsync(
data=SCNG._bit_list_to_string_list(
map(
lambda xs: list(
map(
lambda x: BitVal(
context=x['context'],
value=x['value'],
reason=x['reason'],
),
xs
)
),
result.data,
),
ignore_context=ignore_context,
),
error=list(
chain(*result.error),
)
)
except Exception as e:
            return ResultAsync(error=[str(e)])
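# Usage sketch for the asynchronous client. Same caveat: the URL is a
# placeholder; batch_size/seq_size are simply the defaults documented above.
if __name__ == "__main__":
    async def _demo():
        sdk = SCNGAsync(url="https://example.com/graphql")  # hypothetical endpoint
        result = await sdk.to_dict_list(
            tokens_list=[["token_a", "token_b"], ["token_c"]],
            batch_size=5,
            seq_size=4,
        )
        print(result.data, result.error)
    asyncio.run(_demo())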
# ---- end of file: /sc_cc_ng_sdk_python-0.2.1.tar.gz/sc_cc_ng_sdk_python-0.2.1/sc_cc_ng_sdk_python/__init__.py (pypi) ----
import lzma
from sc_compression.signatures import Signatures, get_signature
from sc_compression.utils.reader import Reader
try:
import lzham
except ImportError:
from platform import system as get_system_name
lzham = None
if get_system_name() == 'Windows':
from sc_compression.support.lzham import LZHAM
lzham = LZHAM
try:
import zstandard
except ImportError:
zstandard = None
class Decompressor(Reader):
def __init__(self):
super().__init__(b'')
self.signature = Signatures.NONE
self.file_version = -1
self.hash = None
def decompress(self, buffer: bytes) -> bytes:
super().__init__(buffer, 'little')
decompressed = buffer
self.signature = get_signature(self.buffer, self.file_version)
if self.signature == Signatures.NONE:
return decompressed
if self.signature == Signatures.SC:
super().__init__(buffer, 'big')
self.read(2)
self.file_version = self.readInt32()
if self.file_version >= 4:
self.file_version = self.readInt32()
self.hash = self.read(self.readInt32())
decompressed = self.decompress(buffer[self.i:])
elif self.signature == Signatures.SIG:
buffer = buffer[68:]
decompressed = self.decompress(buffer)
elif self.signature == Signatures.SCLZ:
self.read(4)
dict_size_log2 = self.readUByte()
uncompressed_size = self.readInt32()
if lzham:
filters = {
'dict_size_log2': dict_size_log2
}
decompressed = lzham.decompress(self.buffer[self.tell():], uncompressed_size, filters)
        elif self.signature == Signatures.LZMA:
            decompressor = lzma.LZMADecompressor()
            # The stored header holds 5 LZMA property bytes plus a 4-byte size;
            # rebuild a standard header with an "unknown size" (0xFF * 8) field.
            compressed = self.buffer[:5] + b'\xff' * 8 + self.buffer[9:]
            decompressed = decompressor.decompress(compressed)
elif self.signature == Signatures.ZSTD:
if zstandard:
decompressor = zstandard.ZstdDecompressor()
decompressed = decompressor.decompress(self.buffer)
else:
raise TypeError(self.signature)
return decompressed
# ---- end of file: /sc_compression-0.6.1-py3-none-any.whl/sc_compression/decompressor.py (pypi) ----
import lzma
from hashlib import md5
from sc_compression.signatures import Signatures
from sc_compression.utils.writer import Writer
try:
import lzham
except ImportError:
from platform import system as get_system_name
lzham = None
if get_system_name() == 'Windows':
from sc_compression.support.lzham import LZHAM
lzham = LZHAM
try:
import zstandard
except ImportError:
zstandard = None
class Compressor(Writer):
lzham_filters = {
'dict_size_log2': 18
}
lzma_filters = [
{
"id": lzma.FILTER_LZMA1,
"dict_size": 256 * 1024,
"lc": 3,
"lp": 0,
"pb": 2,
"mode": lzma.MODE_NORMAL
},
]
def __init__(self):
super().__init__('little')
def compress(self, data, signature: Signatures, file_version: int = None) -> bytes:
uncompressed_size = len(data)
if file_version is None:
file_version = 3 if zstandard and signature != Signatures.SCLZ else 1
if signature == Signatures.ZSTD and not zstandard or \
signature == Signatures.SCLZ and not lzham:
signature = Signatures.SC
super().__init__('little')
if signature is Signatures.NONE:
return data
elif signature in (Signatures.LZMA, Signatures.SIG) or (signature == Signatures.SC and file_version != 3):
compressed = lzma.compress(data, format=lzma.FORMAT_ALONE, filters=self.lzma_filters)
self.write(compressed[:5])
self.writeInt32(uncompressed_size)
self.write(compressed[13:])
compressed = self.buffer
elif signature == Signatures.SCLZ and lzham:
compressed = lzham.compress(data, filters=self.lzham_filters)
self.write(b'SCLZ')
self.writeUByte(self.lzham_filters['dict_size_log2'])
self.writeInt32(uncompressed_size)
self.write(compressed)
compressed = self.buffer
elif signature in (Signatures.SC, Signatures.ZSTD) and file_version == 3:
compressor = zstandard.ZstdCompressor()
compressed = compressor.compress(data)
else:
raise TypeError('Unknown Signature!')
super().__init__('big')
if signature in (Signatures.SC, Signatures.SCLZ):
data_hash = md5(data).digest()
self.write(b'SC')
self.writeInt32(file_version)
if file_version == 4:
self.writeInt32(1)
self.writeInt32(len(data_hash))
self.write(data_hash)
compressed = self.buffer + compressed
elif signature == Signatures.SIG:
self.write(b'Sig:')
self.write(b'\x00' * 64) # sha64
compressed = self.buffer + compressed
return compressed
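# Round-trip sketch pairing this Compressor with the Decompressor defined in
# sc_compression/decompressor.py above; the SC signature and file_version=1
# are just one illustrative choice.
if __name__ == "__main__":
    from sc_compression.decompressor import Decompressor
    payload = b"example payload " * 100
    compressed = Compressor().compress(payload, Signatures.SC, file_version=1)
    restored = Decompressor().decompress(compressed)
    print(len(payload), len(compressed), restored == payload)  # expect True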
# ---- end of file: /sc_compression-0.6.1-py3-none-any.whl/sc_compression/compressor.py (pypi) ----
class Writer:
def __init__(self, endian: str = 'big'):
super(Writer, self).__init__()
self.endian = endian
self.buffer = b''
def write(self, data: bytes):
self.buffer += data
def writeUInteger(self, integer: int, length: int = 1):
self.buffer += integer.to_bytes(length, self.endian, signed=False)
def writeInteger(self, integer: int, length: int = 1):
self.buffer += integer.to_bytes(length, self.endian, signed=True)
def writeUInt64(self, integer: int):
self.writeUInteger(integer, 8)
def writeInt64(self, integer: int):
self.writeInteger(integer, 8)
    def writeFloat(self, floating: float):
        # 0.0 needs an explicit early return: the exponent bias applied below
        # would otherwise encode it as 1.0.
        if floating == 0:
            self.writeUInt32(0)
            return
        exponent = 0
        sign = 1
        if floating < 0:
            sign = -1
            floating = -floating
        if floating >= 2 ** -1022:
value = floating
while value < 1:
exponent -= 1
value *= 2
while value >= 2:
exponent += 1
value /= 2
mantissa = floating / 2 ** exponent
exponent += 127
as_integer_bin = '0'
if sign == -1:
as_integer_bin = '1'
as_integer_bin += bin(exponent)[2:].zfill(8)
mantissa_bin = ''
for x in range(24):
bit = '0'
if mantissa >= 1/2**x:
mantissa -= 1/2**x
bit = '1'
mantissa_bin += bit
mantissa_bin = mantissa_bin[1:]
as_integer_bin += mantissa_bin
as_integer = int(as_integer_bin, 2)
self.writeUInt32(as_integer)
def writeUInt32(self, integer: int):
self.writeUInteger(integer, 4)
def writeInt32(self, integer: int):
self.writeInteger(integer, 4)
def writeNUInt16(self, integer: int):
self.writeUInt16(integer * 65535)
def writeUInt16(self, integer: int):
self.writeUInteger(integer, 2)
def writeNInt16(self, integer: int):
self.writeInt16(integer * 32512)
def writeInt16(self, integer: int):
self.writeInteger(integer, 2)
def writeUInt8(self, integer: int):
self.writeUInteger(integer)
def writeInt8(self, integer: int):
self.writeInteger(integer)
def writeBool(self, boolean: bool):
if boolean:
self.writeUInt8(1)
else:
self.writeUInt8(0)
writeUInt = writeUInteger
writeInt = writeInteger
writeULong = writeUInt64
writeLong = writeInt64
writeNUShort = writeNUInt16
writeNShort = writeNInt16
writeUShort = writeUInt16
writeShort = writeInt16
writeUByte = writeUInt8
writeByte = writeInt8
def writeChar(self, string: str):
for char in list(string):
self.buffer += char.encode('utf-8')
def writeString(self, string: str):
encoded = string.encode('utf-8')
self.writeUShort(len(encoded))
self.buffer += encoded
# ---- end of file: /sc_compression-0.6.1-py3-none-any.whl/sc_compression/utils/writer.py (pypi) ----
class Reader:
def __init__(self, buffer: bytes, endian: str = 'big'):
self.buffer = buffer
self.endian = endian
self.i = 0
def read(self, length: int = 1):
result = self.buffer[self.i:self.i + length]
self.i += length
return result
def readUInteger(self, length: int = 1) -> int:
result = 0
for x in range(length):
byte = self.buffer[self.i]
bit_padding = x * 8
if self.endian == 'big':
bit_padding = (8 * (length - 1)) - bit_padding
result |= byte << bit_padding
self.i += 1
return result
    def readInteger(self, length: int = 1) -> int:
        integer = self.readUInteger(length)
        result = integer
        # Two's complement: values at or above 2**(bits - 1) are negative.
        if integer >= 2 ** (length * 8 - 1):
            result -= 2 ** (length * 8)
        return result
def readUInt64(self) -> int:
return self.readUInteger(8)
def readInt64(self) -> int:
return self.readInteger(8)
    def readFloat(self) -> float:
        asInt = self.readUInt32()
        binary = bin(asInt)
        binary = binary[2:].zfill(32)
        # The bit string holds characters, so compare against '1', not 1.
        sign = -1 if binary[0] == '1' else 1
        exponent = int(binary[1:9], 2) - 127
        mantissa_base = binary[9:]
        mantissa_bin = '1' + mantissa_base
        mantissa = 0
        val = 1
        if exponent == -127:
            # An all-zero exponent field means zero or a subnormal number.
            if int(mantissa_base, 2) == 0:
                return 0.0
            exponent = -126
            mantissa_bin = '0' + mantissa_base
        for char in mantissa_bin:
            mantissa += val * int(char)
            val = val / 2
        result = sign * 2 ** exponent * mantissa
        return result
def readUInt32(self) -> int:
return self.readUInteger(4)
def readInt32(self) -> int:
return self.readInteger(4)
def readNUInt16(self) -> float:
return self.readUInt16() / 65535
def readUInt16(self) -> int:
return self.readUInteger(2)
def readNInt16(self) -> float:
return self.readInt16() / 32512
def readInt16(self) -> int:
return self.readInteger(2)
def readUInt8(self) -> int:
return self.readUInteger()
def readInt8(self) -> int:
return self.readInteger()
def readBool(self) -> bool:
if self.readUInt8() >= 1:
return True
else:
return False
readUInt = readUInteger
readInt = readInteger
readULong = readUInt64
readLong = readInt64
readNUShort = readNUInt16
readNShort = readNInt16
readUShort = readUInt16
readShort = readInt16
readUByte = readUInt8
readByte = readInt8
def readChar(self, length: int = 1) -> str:
return self.read(length).decode('utf-8')
def readString(self) -> str:
length = self.readUShort()
return self.readChar(length)
def tell(self) -> int:
return self.i
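# Round-trip sketch pairing this Reader with the Writer from
# sc_compression/utils/writer.py; endianness must match on both sides.
if __name__ == "__main__":
    from sc_compression.utils.writer import Writer
    w = Writer('little')
    w.writeInt32(-42)
    w.writeString("hello")
    r = Reader(w.buffer, 'little')
    print(r.readInt32(), r.readString())  # expected: -42 hello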
# ---- end of file: /sc_compression-0.6.1-py3-none-any.whl/sc_compression/utils/reader.py (pypi) ----
from copy import deepcopy
from functools import partial, update_wrapper
from typing import Callable, Dict, Hashable, Tuple, Optional, Union, List
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from dandelion.external.nxviz import encodings, lines
from dandelion.external.nxviz.utils import node_table, edge_table
default_edge_kwargs = dict(facecolor="none", zorder=1)
def line_width(et: pd.DataFrame, lw_by: Hashable):
"""Default edge line width function."""
if lw_by is not None:
return encodings.data_linewidth(et[lw_by], et[lw_by])
return pd.Series([1] * len(et), name="lw")
def transparency(
et: pd.DataFrame, alpha_by: Hashable, alpha_bounds: Optional[Tuple] = None
) -> pd.Series:
"""Default edge line transparency function."""
if alpha_by is not None:
ref_data = et[alpha_by]
if isinstance(alpha_bounds, tuple):
ref_data = pd.Series(alpha_bounds)
return encodings.data_transparency(et[alpha_by], ref_data)
return pd.Series([0.1] * len(et), name="alpha")
def edge_colors(
et: pd.DataFrame,
nt: pd.DataFrame,
color_by: Hashable,
node_color_by: Hashable,
palette: Optional[Union[Dict, List]] = None,
):
"""Default edge line color function."""
if color_by in ("source_node_color", "target_node_color"):
edge_select_by = color_by.split("_")[0]
return encodings.data_color(
et[edge_select_by].apply(nt[node_color_by].get),
nt[node_color_by],
palette,
)
elif color_by:
return encodings.data_color(et[color_by], et[color_by], palette)
return pd.Series(["black"] * len(et), name="color_by")
def validate_color_by(
G: nx.Graph,
color_by: Hashable,
node_color_by: Hashable,
) -> None:
"""Validate `node_color_by` and `G` when `color_by` has a special value."""
if color_by in ("source_node_color", "target_node_color"):
if not isinstance(G, nx.DiGraph):
            raise ValueError(
                "Special values of `color_by` can only be used with directed graphs."
            )
elif not node_color_by:
raise ValueError(
"When setting `color_by` to special values,"
" `node_color_by` also needs to be set."
)
def draw(
G: nx.Graph,
pos: Dict[Hashable, np.ndarray],
lines_func: Callable,
color_by: Hashable = None,
node_color_by: Hashable = None,
lw_by: Hashable = None,
alpha_by: Hashable = None,
ax=None,
encodings_kwargs: Dict = {},
palette: Optional[Union[Dict, List]] = None,
**linefunc_kwargs,
):
"""Draw edges to matplotlib axes.
## Parameters
- `G`: A NetworkX graph.
- `pos`: A dictionary mapping for x,y coordinates of a node.
- `lines_func`: One of the line drawing functions from `nxviz.lines`
    - `color_by`: Categorical or quantitative edge attribute key to color edges by.
    There are two special values for this parameter
    when using directed graphs:
    "source_node_color" and "target_node_color".
    If either value is set, then `node_color_by` also needs to be set.
    - `node_color_by`: Node metadata attribute key
    that has been used to color nodes.
- `lw_by`: Quantitative edge attribute key to determine line width.
- `alpha_by`: Quantitative edge attribute key to determine transparency.
- `ax`: Matplotlib axes object to plot onto.
- `encodings_kwargs`: A dictionary of kwargs
to determine the visual properties of the edge.
    - `palette`: Optional custom palette of colours for plotting categorical groupings
    in a list/dictionary. Colours must be values `matplotlib.colors.ListedColormap`
    can interpret. If a dictionary is provided, keys and values correspond to
    categories and colours respectively.
- `linefunc_kwargs`: All other keyword arguments passed in
will be passed onto the appropriate linefunc.
Special keyword arguments for `encodings_kwargs` include:
- `lw_scale`: A scaling factor for all edges' line widths.
Equivalent to multiplying all line widths by this number.
- `alpha_scale`: A scaling factor for all edges' line transparencies.
Equivalent to multiplying all alphas by this number.
The default transparency is 0.1,
so an alpha_scale of any number greater than or equal to 10
will result in 100% opaque lines.
- `alpha_bounds`: The bounds for transparency.
Should be a tuple of `(lower, upper)` numbers.
This keyword argument lets us manually set the bounds
that we wish to have for 0 opacity (i.e. transparent)
    to 1.0 opacity (i.e. opaque).
Everything else passed in here will be passed
to the matplotlib Patch constructor;
see `nxviz.lines` for more information.
"""
nt = node_table(G)
et = edge_table(G)
if ax is None:
ax = plt.gca()
validate_color_by(G, color_by, node_color_by)
edge_color = edge_colors(et, nt, color_by, node_color_by, palette)
encodings_kwargs = deepcopy(encodings_kwargs)
lw = line_width(et, lw_by) * encodings_kwargs.pop("lw_scale", 1.0)
alpha_bounds = encodings_kwargs.pop("alpha_bounds", None)
alpha = transparency(et, alpha_by, alpha_bounds) * encodings_kwargs.pop(
"alpha_scale", 1.0
)
aes_kw = {"facecolor": "none"}
aes_kw.update(encodings_kwargs)
patches = lines_func(
et,
pos,
edge_color=edge_color,
alpha=alpha,
lw=lw,
aes_kw=aes_kw,
**linefunc_kwargs,
)
for patch in patches:
ax.add_patch(patch)
circos = partial(draw, lines_func=lines.circos)
line = partial(draw, lines_func=lines.line)
arc = partial(draw, lines_func=lines.arc)
hive = partial(draw, lines_func=lines.hive)
matrix = partial(draw, lines_func=lines.matrix)
update_wrapper(circos, draw)
circos.__name__ = "edges.circos"
update_wrapper(line, draw)
line.__name__ = "edges.line"
update_wrapper(arc, draw)
arc.__name__ = "edges.arc"
update_wrapper(hive, draw)
hive.__name__ = "edges.hive"
update_wrapper(matrix, draw)
matrix.__name__ = "edges.matrix"
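# Usage sketch: the partials above need a graph plus a precomputed node-position
# dict, which normally comes from the sibling layouts module. Drawn here without
# nodes, so the axis limits are set by hand.
if __name__ == "__main__":
    from dandelion.external.nxviz import layouts
    G = nx.erdos_renyi_graph(10, 0.3, seed=0)
    pos = layouts.circos(node_table(G), group_by=None, sort_by=None)
    ax = plt.gca()
    circos(G, pos)  # black edges with default width and transparency
    limit = 1.1 * max(abs(xy).max() for xy in pos.values())
    ax.set_xlim(-limit, limit)
    ax.set_ylim(-limit, limit)
    ax.set_aspect("equal")
    plt.show()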
# ---- end of file: /sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/edges.py (pypi) ----
from functools import partial, update_wrapper
from typing import Dict, Hashable, Union, Optional, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import networkx as nx
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize
from matplotlib.patches import Patch, Rectangle
from dandelion.external.nxviz import encodings, layouts, utils
from dandelion.external.nxviz.geometry import circos_radius, item_theta
from dandelion.external.nxviz.polcart import to_cartesian, to_degrees
def text_alignment(x: float, y: float):
"""
Align text labels based on the x- and y-axis coordinate values.
This function is used for computing the appropriate alignment of the text
label.
For example, if the text is on the "right" side of the plot, we want it to
be left-aligned. If the text is on the "top" side of the plot, we want it
to be bottom-aligned.
:param x, y: (`int` or `float`) x- and y-axis coordinate respectively.
:returns: A 2-tuple of strings, the horizontal and vertical alignments
respectively.
"""
if x == 0:
ha = "center"
elif x > 0:
ha = "left"
else:
ha = "right"
if y == 0:
va = "center"
elif y > 0:
va = "bottom"
else:
va = "top"
return ha, va
def validate_fontdict(fontdict: Dict):
"""Validate `fontdict` keys."""
valid_keys = {"family", "size", "stretch", "style", "variant", "weight"}
assert set(fontdict) <= valid_keys
def circos_group(
G: nx.Graph,
group_by: Hashable,
radius: float = None,
radius_offset: float = 1,
midpoint: bool = True,
fontdict: Dict = {},
ax=None,
):
"""Text annotation of node grouping variable on a circos plot."""
validate_fontdict(fontdict)
nt = utils.node_table(G)
groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()
proportions = groups / groups.sum()
starting_points = proportions.cumsum() - proportions
if midpoint:
starting_points += proportions / 2
angles = starting_points * 360
radians = angles.apply(lambda x: x / 360 * 2 * np.pi)
if ax is None:
ax = plt.gca()
if radius is None:
radius = circos_radius(len(G)) + radius_offset
for label, theta in radians.to_dict().items():
x, y = to_cartesian(radius, theta)
ha, va = text_alignment(x, y)
ax.annotate(label, xy=(x, y), ha=ha, va=va, **fontdict)
def hive_group(
G: nx.Graph,
group_by: Hashable,
offset: float = np.pi / 12,
fontdict: Dict = {},
ax=None,
):
"""Text annotation of hive plot groups."""
validate_fontdict(fontdict)
nt = utils.node_table(G)
groups = sorted(nt[group_by].unique())
if ax is None:
ax = plt.gca()
for grp in groups:
theta = item_theta(groups, grp) + offset
radius = 2 * (8 + len(nt[nt[group_by] == grp]) + 1)
x, y = to_cartesian(radius, theta)
ha, va = text_alignment(x, y)
ax.annotate(grp, xy=(x, y), ha=ha, va=va, **fontdict)
def arc_group(
G: nx.Graph,
group_by: Hashable,
midpoint: bool = True,
y_offset: float = -1,
rotation: float = 45,
ha: str = "right",
va: str = "top",
fontdict: Dict = {},
ax=None,
):
"""Annotate arc group."""
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G)
groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()
proportions = groups / groups.sum()
starting_points = proportions.cumsum() - proportions
if midpoint:
starting_points += proportions / 2
starting_points *= len(G) * 2
for label, starting_point in starting_points.to_dict().items():
x = starting_point
y = y_offset
ax.annotate(
label, xy=(x, y), ha=ha, va=va, rotation=rotation, **fontdict
)
def parallel_group(
G: nx.Graph,
group_by: Hashable,
y_offset: float = -0.3,
rotation: float = 45,
ha: str = "right",
va: str = "top",
fontdict: Dict = {},
ax=None,
):
"""Annotate parallel plot groups."""
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G)
# groups = nt.groupby(group_by).apply(lambda df: len(df)).sort_index()
groups = sorted(nt[group_by].unique())
for i, label in enumerate(groups):
x = i * 4
y = y_offset
ax.annotate(
label, xy=(x, y), ha=ha, va=va, rotation=rotation, **fontdict
)
ax.relim()
def matrix_group(
G: nx.Graph,
group_by: Hashable,
offset: float = -3,
xrotation: float = 0,
yrotation: float = 90,
fontdict: Dict = {},
ax=None,
):
"""Annotate matrix plot groups."""
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G)
group_sizes = nt.groupby(group_by).apply(lambda df: len(df))
proportions = group_sizes / group_sizes.sum()
midpoint = proportions / 2
starting_positions = proportions.cumsum() - proportions
label_positions = (starting_positions + midpoint) * len(G) * 2
label_positions += 1
for label, position in label_positions.to_dict().items():
# Plot the x-axis labels
y = offset
x = position
ax.annotate(
label,
xy=(x, y),
ha="center",
va="center",
rotation=xrotation,
**fontdict,
)
# Plot the y-axis labels
x = offset
y = position
ax.annotate(
label,
xy=(x, y),
ha="center",
va="center",
rotation=yrotation,
**fontdict,
)
def matrix_block(
G: nx.Graph,
group_by: Hashable,
color_by: Hashable = None,
alpha: float = 0.1,
ax=None,
):
"""Annotate group blocks on a matrix plot.
Most useful for highlighting the within- vs between-group edges.
"""
nt = utils.node_table(G)
group_sizes = nt.groupby(group_by).apply(lambda df: len(df)) * 2
starting_positions = group_sizes.cumsum() + 1 - group_sizes
colors = pd.Series(["black"] * len(group_sizes), index=group_sizes.index)
if color_by:
color_data = pd.Series(group_sizes.index, index=group_sizes.index)
colors = encodings.data_color(color_data, color_data)
# Generate patches first
patches = []
for label, position in starting_positions.to_dict().items():
xy = (position, position)
width = height = group_sizes[label]
patch = Rectangle(
xy, width, height, zorder=20, alpha=alpha, facecolor=colors[label]
)
patches.append(patch)
if ax is None:
ax = plt.gca()
# Then add patches in.
for patch in patches:
ax.add_patch(patch)
def colormapping(
data: pd.Series,
legend_kwargs: Dict = {},
ax=None,
palette: Optional[Union[Dict, List]] = None,
):
"""Annotate node color mapping.
If the color attribute is continuous, a colorbar will be added to the matplotlib figure.
Otherwise, a legend will be added.
"""
cmap, data_family = encodings.data_cmap(data, palette)
if ax is None:
ax = plt.gca()
if data_family == "continuous":
norm = Normalize(vmin=data.min(), vmax=data.max())
scalarmap = ScalarMappable(
cmap=cmap,
norm=norm,
)
fig = plt.gcf()
fig.colorbar(scalarmap)
else:
if (palette is not None) and (isinstance(palette, dict)):
labels = pd.Series(list(palette.keys()))
else:
labels = pd.Series(data.unique())
cmap, _ = encodings.data_cmap(labels, palette)
cfunc = encodings.color_func(labels, palette)
colors = labels.apply(cfunc)
patchlist = []
for color, label in zip(colors, labels):
data_key = Patch(color=color, label=label)
patchlist.append(data_key)
kwargs = dict(
loc="best",
ncol=int(len(labels) / 2),
# bbox_to_anchor=(0.5, -0.05),
)
kwargs.update(legend_kwargs)
legend = plt.legend(handles=patchlist, **kwargs)
ax.add_artist(legend)
def node_colormapping(
G: nx.Graph,
color_by: Hashable,
legend_kwargs: Dict = {"loc": "upper right", "bbox_to_anchor": (0.0, 1.0)},
ax=None,
palette: Optional[Union[Dict, List]] = None,
):
"""Annotate node color mapping."""
nt = utils.node_table(G)
data = nt[color_by]
colormapping(data, legend_kwargs, ax, palette)
def edge_colormapping(
G: nx.Graph,
color_by: Hashable,
legend_kwargs: Dict = {"loc": "lower right", "bbox_to_anchor": (0.0, 0.0)},
ax=None,
palette: Optional[Union[Dict, List]] = None,
):
"""Annotate edge color mapping."""
if ax is None:
ax = plt.gca()
et = utils.edge_table(G)
data = et[color_by]
colormapping(data, legend_kwargs, ax, palette)
def node_labels(G, layout_func, group_by, sort_by, fontdict={}, ax=None):
"""Annotate node labels."""
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G)
pos = layout_func(nt, group_by, sort_by)
for node in G.nodes():
ax.annotate(
text=node, xy=pos[node], ha="center", va="center", **fontdict
)
def circos_labels(
G: nx.Graph,
group_by: Hashable = None,
sort_by: Hashable = None,
layout: str = "node_center",
radius: float = None,
radius_offset: float = 1,
fontdict: Dict = {},
ax=None,
):
"""Annotate node labels for circos plot."""
assert layout in ("node_center", "standard", "rotate", "numbers")
if layout == "node_center":
return node_labels(G, layouts.circos, group_by, sort_by, fontdict)
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G, group_by, sort_by)
nodes = list(nt.index)
if radius is None:
radius = circos_radius(len(nodes))
if layout == "numbers":
radius_adjustment = radius / (radius + radius_offset)
else:
radius_adjustment = 1.02
radius += radius_offset
for i, (node, data) in enumerate(nt.iterrows()):
theta = item_theta(nodes, node)
x, y = to_cartesian(r=radius * radius_adjustment, theta=theta)
ha, va = text_alignment(x, y)
if layout == "numbers":
tx, _ = to_cartesian(r=radius, theta=theta)
tx *= 1 - np.log(np.cos(theta) * utils.nonzero_sign(np.cos(theta)))
tx += utils.nonzero_sign(x)
ty_numerator = (
2
* radius
* (
theta
% (utils.nonzero_sign(y) * utils.nonzero_sign(x) * np.pi)
)
)
ty_denominator = utils.nonzero_sign(x) * np.pi
ty = ty_numerator / ty_denominator
ax.annotate(
text="{} - {}".format(*((i, node) if (x > 0) else (node, i))),
xy=(tx, ty),
ha=ha,
va=va,
**fontdict,
)
ax.annotate(text=i, xy=(x, y), ha="center", va="center")
elif layout == "rotate":
theta_deg = to_degrees(theta)
if -90 <= theta_deg <= 90:
rot = theta_deg
else:
rot = theta_deg - 180
ax.annotate(
text=node,
xy=(x, y),
ha=ha,
va="center",
rotation=rot,
rotation_mode="anchor",
**fontdict,
)
# Standard layout
else:
ax.annotate(text=node, xy=(x, y), ha=ha, va=va, **fontdict)
def arc_labels(
G: nx.Graph,
group_by: Hashable = None,
sort_by: Hashable = None,
layout: str = "node_center",
y_offset: float = -1,
ha: str = "right",
va: str = "top",
rotation: float = 45,
fontdict: Dict = {},
ax=None,
):
"""Annotate node labels for arc plot."""
assert layout in ("node_center", "standard")
if layout == "node_center":
return node_labels(G, layouts.arc, group_by, sort_by, fontdict)
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G, group_by, sort_by)
for x, (node, data) in enumerate(nt.iterrows()):
ax.annotate(
node,
xy=(x * 2, y_offset),
ha=ha,
va=va,
rotation=rotation,
**fontdict,
)
def matrix_labels(
G: nx.Graph,
group_by: Hashable = None,
sort_by: Hashable = None,
layout: str = "node_center",
offset: float = -1.5,
x_ha: str = "right",
x_va: str = "top",
y_ha: str = "right",
y_va: str = "center",
x_rotation: float = 45,
y_rotation: float = 0,
fontdict: Dict = {},
ax=None,
):
"""Annotate node labels for matrix plot."""
assert layout in ("node_center", "standard")
validate_fontdict(fontdict)
if ax is None:
ax = plt.gca()
nt = utils.node_table(G, group_by, sort_by)
if layout == "node_center":
x_ha = "center"
x_va = "center"
y_ha = "center"
y_va = "center"
offset = 0
x_rotation = 0
y_rotation = 0
for i, (node, data) in enumerate(nt.iterrows()):
position = (i + 1) * 2
# Plot the x-axis labels
ax.annotate(
node,
xy=(position, offset),
ha=x_ha,
va=x_va,
rotation=x_rotation,
**fontdict,
)
# Plot the y-axis labels
ax.annotate(
node,
xy=(offset, position),
ha=y_ha,
va=y_va,
rotation=y_rotation,
**fontdict,
)
parallel_labels = partial(
node_labels, layout_func=layouts.parallel, sort_by=None
)
update_wrapper(parallel_labels, node_labels)
parallel_labels.__name__ = "annotate.parallel_labels"
hive_labels = partial(node_labels, layout_func=layouts.hive, sort_by=None)
update_wrapper(hive_labels, node_labels)
hive_labels.__name__ = "annotate.hive_labels"
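# Usage sketch layering the group and label annotations above on a circos plot;
# the "group" node attribute here is invented for illustration.
if __name__ == "__main__":
    from dandelion.external.nxviz import edges, nodes
    G = nx.barbell_graph(5, 2)
    for n in G.nodes:
        G.nodes[n]["group"] = "left" if n < 6 else "right"
    pos = nodes.circos(G, group_by="group", color_by="group")
    edges.circos(G, pos)
    circos_group(G, group_by="group")
    circos_labels(G, group_by="group", layout="standard")
    plt.gca().set_aspect("equal")
    plt.show()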
# ---- end of file: /sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/annotate.py (pypi) ----
from typing import Dict, Hashable
import numpy as np
import pandas as pd
from dandelion.external.nxviz.geometry import circos_radius, item_theta
from dandelion.external.nxviz.polcart import to_cartesian
from dandelion.external.nxviz.utils import group_and_sort
def parallel(
nt: pd.DataFrame, group_by: Hashable, sort_by: Hashable = None
) -> Dict[Hashable, np.ndarray]:
"""Parallel coordinates node layout."""
pos = dict()
for x, (grp, data) in enumerate(nt.groupby(group_by)):
if sort_by is not None:
data = data.sort_values(sort_by)
for y, (node, d) in enumerate(data.iterrows()):
pos[node] = np.array([x * 4, y])
return pos
def circos(
nt: pd.DataFrame,
group_by: Hashable = None,
sort_by: Hashable = None,
radius: float = None,
) -> Dict[Hashable, np.ndarray]:
"""Circos plot node layout."""
pos = dict()
nt = group_and_sort(nt, group_by, sort_by)
nodes = list(nt.index)
if radius is None:
radius = circos_radius(len(nodes))
if group_by:
for grp, df in nt.groupby(group_by):
for node, data in df.iterrows():
x, y = to_cartesian(r=radius, theta=item_theta(nodes, node))
pos[node] = np.array([x, y])
else:
for node, data in nt.iterrows():
x, y = to_cartesian(r=radius, theta=item_theta(nodes, node))
pos[node] = np.array([x, y])
return pos
def hive(
nt: pd.DataFrame,
group_by,
sort_by: Hashable = None,
inner_radius: float = 8,
rotation: float = 0,
):
"""Hive plot node layout.
## Parameters
- `inner_radius`: The inner
"""
nt = group_and_sort(nt, group_by=group_by, sort_by=sort_by)
groups = sorted(nt[group_by].unique())
if len(groups) > 3:
raise ValueError(
f"group_by {group_by} is associated with more than 3 groups. "
f"The groups are {groups}. "
"Hive plots can only handle at most 3 groups at a time."
)
pos = dict()
for grp, df in nt.groupby(group_by):
for i, (node, data) in enumerate(df.iterrows()):
radius = inner_radius + i
theta = item_theta(groups, grp) + rotation
x, y = to_cartesian(r=radius * 2, theta=theta)
pos[node] = np.array([x, y])
return pos
def arc(nt, group_by: Hashable = None, sort_by: Hashable = None):
"""Arc plot node layout."""
nt = group_and_sort(nt, group_by=group_by, sort_by=sort_by)
pos = dict()
for x, (node, data) in enumerate(nt.iterrows()):
pos[node] = np.array([x * 2, 0])
return pos
def geo(
nt, group_by=None, sort_by=None, longitude="longitude", latitude="latitude"
):
"""Geographical node layout."""
pos = dict()
for node, data in nt.iterrows():
pos[node] = data[[longitude, latitude]]
return pos
def matrix(nt, group_by: Hashable = None, sort_by: Hashable = None, axis="x"):
"""Matrix plot layout."""
# Nodes should be grouped and sorted before we begin assigning coordinates.
nt = group_and_sort(node_table=nt, group_by=group_by, sort_by=sort_by)
# We are eventually going to return this pos dictionary.
pos = dict()
# Loop over each of the rows, and assign x, y coordinates in order of them being grouped and sorted.
for i, (node, data) in enumerate(nt.iterrows()):
x = (i + 1) * 2
y = 0
if axis == "y":
x, y = y, x
pos[node] = np.array([x, y])
return pos
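# Usage sketch: each layout takes a node table and returns a node -> (x, y)
# mapping; the "group" attribute below is invented for illustration.
if __name__ == "__main__":
    import networkx as nx
    from dandelion.external.nxviz.utils import node_table
    G = nx.path_graph(6)
    for n in G.nodes:
        G.nodes[n]["group"] = "even" if n % 2 == 0 else "odd"
    nt = node_table(G)
    print(circos(nt))                      # nodes spaced evenly on a circle
    print(parallel(nt, group_by="group"))  # one vertical column per group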
# ---- end of file: /sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/layouts.py (pypi) ----
from itertools import product
from typing import Dict, Iterable, List
import numpy as np
import pandas as pd
from matplotlib.patches import Arc, Circle, Patch, Path, PathPatch
from dandelion.external.nxviz.geometry import correct_hive_angles
from dandelion.external.nxviz.polcart import to_cartesian, to_polar, to_radians
def circos(
et: pd.DataFrame,
pos: Dict,
edge_color: Iterable,
alpha: Iterable,
lw: Iterable,
aes_kw: Dict,
) -> List[Patch]:
"""Circos plot line drawing."""
patches = []
for r, d in et.iterrows():
verts = [pos[d["source"]], (0, 0), pos[d["target"]]]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
path = Path(verts, codes)
patch = PathPatch(
path, edgecolor=edge_color[r], alpha=alpha[r], lw=lw[r], **aes_kw
)
patches.append(patch)
return patches
def line(
et: pd.DataFrame,
pos: Dict,
edge_color: Iterable,
alpha: Iterable,
lw: Iterable,
aes_kw: Dict,
):
"""Straight line drawing function."""
patches = []
for r, d in et.iterrows():
start = d["source"]
end = d["target"]
verts = [pos[start], pos[end]]
codes = [Path.MOVETO, Path.LINETO]
path = Path(verts, codes)
patch = PathPatch(
path, edgecolor=edge_color[r], alpha=alpha[r], lw=lw[r], **aes_kw
)
patches.append(patch)
return patches
def arc(
et: pd.DataFrame,
pos: Dict,
edge_color: Iterable,
alpha: Iterable,
lw: Iterable,
aes_kw: Dict,
):
"""Arc plot edge drawing function."""
patches = []
for r, d in et.iterrows():
start = d["source"]
end = d["target"]
start_x, start_y = pos[start]
end_x, end_y = pos[end]
middle_x = np.mean([start_x, end_x])
middle_y = np.mean([start_y, end_y])
width = abs(end_x - start_x)
height = width
r1, theta1 = to_polar(start_x - middle_x, start_y - middle_y)
r2, theta2 = to_polar(end_x - middle_x, end_y - middle_y)
theta1 = np.rad2deg(theta1)
theta2 = np.rad2deg(theta2)
theta1, theta2 = min([theta1, theta2]), max([theta1, theta2])
patch = Arc(
xy=(middle_x, middle_y),
width=width,
height=height,
theta1=theta1,
theta2=theta2,
edgecolor=edge_color[r],
alpha=alpha[r],
lw=lw[r],
**aes_kw,
)
patches.append(patch)
return patches
def hive(
et: pd.DataFrame,
pos: Dict,
pos_cloned: Dict,
edge_color: Iterable,
alpha: Iterable,
lw: Iterable,
aes_kw: Dict,
curves: bool = True,
):
"""Hive plot line drawing function."""
rad = pd.Series(pos).apply(lambda val: to_polar(*val)).to_dict()
if pos_cloned is None:
pos_cloned = pos
rad_cloned = (
pd.Series(pos_cloned).apply(lambda val: to_polar(*val)).to_dict()
)
patches = []
for r, d in et.iterrows():
start = d["source"]
end = d["target"]
start_radius, start_theta = rad[start]
end_radius, end_theta = rad[end]
_, start_theta_cloned = rad_cloned[start]
_, end_theta_cloned = rad_cloned[end]
# Find the pair of start and end thetas that give the smallest acute angle
smallest_pair = None
smallest_nonzero_angle = np.inf
starts = [start_theta, start_theta_cloned]
ends = [end_theta, end_theta_cloned]
for start, end in product(starts, ends):
start, end = correct_hive_angles(start, end)
if not np.allclose(end - start, 0):
angle = to_radians(abs(min([end - start, start - end])))
if angle < smallest_nonzero_angle:
smallest_nonzero_angle = abs(angle)
smallest_pair = ((start_radius, start), (end_radius, end))
if smallest_pair is None:
continue
(start_radius, start_theta), (end_radius, end_theta) = smallest_pair
if np.allclose(end_theta, 0):
end_theta = 2 * np.pi
startx, starty = to_cartesian(start_radius, start_theta)
endx, endy = to_cartesian(end_radius, end_theta)
middle_theta = np.mean([start_theta, end_theta])
middlex1, middley1 = to_cartesian(start_radius, middle_theta)
middlex2, middley2 = to_cartesian(end_radius, middle_theta)
endx, endy = to_cartesian(end_radius, end_theta)
verts = [(startx, starty), (endx, endy)]
codes = [Path.MOVETO, Path.LINETO]
if curves:
verts = [
(startx, starty),
(middlex1, middley1),
(middlex2, middley2),
(endx, endy),
]
codes = [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
path = Path(verts, codes)
patch = PathPatch(
path, lw=lw[r], alpha=alpha[r], edgecolor=edge_color[r], **aes_kw
)
patches.append(patch)
return patches
def matrix(
et,
pos,
pos_cloned,
edge_color: Iterable,
alpha: Iterable,
lw: Iterable,
aes_kw: Dict,
):
"""Matrix plot edge drawing function."""
patches = []
for r, d in et.iterrows():
start = d["source"]
end = d["target"]
x_start, y_start = pos_cloned[start]
x_end, y_end = pos[end]
x, y = (max(x_start, y_start), max(x_end, y_end))
kw = {
"fc": edge_color[r],
"alpha": alpha[r],
"radius": lw[r],
"zorder": 1,
}
kw.update(aes_kw)
patch = Circle(xy=(x, y), **kw)
patches.append(patch)
return patches
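# Usage sketch: these functions are low-level primitives that consume an edge
# table, a position dict and per-edge colour/alpha/width Series, returning
# matplotlib patches that the caller adds to an axes.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import networkx as nx
    from dandelion.external.nxviz.layouts import circos as circos_layout
    from dandelion.external.nxviz.utils import edge_table, node_table
    G = nx.cycle_graph(5)
    et = edge_table(G)
    pos = circos_layout(node_table(G))
    n = len(et)
    patches = line(
        et, pos,
        edge_color=pd.Series(["black"] * n),
        alpha=pd.Series([0.8] * n),
        lw=pd.Series([1.0] * n),
        aes_kw={"facecolor": "none"},
    )
    ax = plt.gca()
    for patch in patches:
        ax.add_patch(patch)
    limit = 1.1 * max(abs(xy).max() for xy in pos.values())
    ax.set_xlim(-limit, limit)
    ax.set_ylim(-limit, limit)
    ax.set_aspect("equal")
    plt.show()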
# ---- end of file: /sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/lines.py (pypi) ----
from copy import deepcopy
from functools import partial, update_wrapper
from typing import Callable, Dict, Hashable, Optional, Tuple, Union, List
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from matplotlib.patches import Circle
from dandelion.external.nxviz import encodings, layouts
from dandelion.external.nxviz.utils import node_table
from dandelion.external.nxviz.plots import rescale, rescale_arc, rescale_square
def node_colors(
nt: pd.DataFrame,
color_by: Hashable,
palette: Optional[Union[Dict, List]] = None,
):
"""Return pandas Series of node colors."""
if color_by:
return encodings.data_color(nt[color_by], nt[color_by], palette)
return pd.Series(["blue"] * len(nt), name="color_by", index=nt.index)
def transparency(
nt: pd.DataFrame, alpha_by: Hashable, alpha_bounds: Optional[Tuple] = None
):
"""Return pandas Series of transparency (alpha) values.
Transparency must always be normalized to (0, 1)."""
if alpha_by is not None:
ref_data = nt[alpha_by]
if isinstance(alpha_bounds, tuple):
ref_data = pd.Series(alpha_bounds)
return encodings.data_transparency(nt[alpha_by], ref_data)
return pd.Series([1.0] * len(nt), name="transparency", index=nt.index)
def node_size(nt: pd.DataFrame, size_by: Hashable):
"""Return pandas Series of node sizes."""
if size_by:
return encodings.data_size(nt[size_by], nt[size_by])
return pd.Series([1.0] * len(nt), name="size", index=nt.index)
def node_glyphs(nt, pos, node_color, alpha, size, **encodings_kwargs):
"""Draw circos glyphs to the matplotlib axes object."""
patches = dict()
for r, d in nt.iterrows():
kw = {
"fc": node_color[r],
"alpha": alpha[r],
"radius": size[r],
"zorder": 2,
}
kw.update(encodings_kwargs)
c = Circle(xy=pos[r], **kw)
patches[r] = c
return pd.Series(patches)
def draw(
G: nx.Graph,
layout_func: Callable,
group_by: Hashable,
sort_by: Hashable,
color_by: Hashable = None,
alpha_by: Hashable = None,
size_by: Hashable = None,
layout_kwargs: Dict = {},
encodings_kwargs: Dict = {},
rescale_func=rescale,
ax=None,
palette: Optional[Union[Dict, List]] = None,
):
"""Draw nodes to matplotlib axes.
## Parameters
- `G`: The graph to plot.
- `layout_func`: One of the node layout functions from `nxviz.layout`.
- `group_by`: Categorical attribute key to group nodes by.
- `sort_by`: Quantitative or ordinal attribute key to sort nodes.
- `color_by`: Node attribute key to color nodes by.
- `alpha_by`: Quantitative node attribute key to set transparency.
- `size_by`: Quantitative node attribute key to set node size.
- `layout_kwargs`: Keyword arguments to pass
to the appropriate layout function.
- `encodings_kwargs`: A dictionary of kwargs
to determine the visual properties of the node.
    - `palette`: Optional custom palette of colours for plotting categorical groupings
    in a list/dictionary. Colours must be values `matplotlib.colors.ListedColormap`
    can interpret. If a dictionary is provided, keys and values correspond to
    categories and colours respectively.
Special keyword arguments for `encodings_kwargs` include:
- `size_scale`: A scaling factor for all node radii.
Equivalent to multiplying all node radii by this number.
- `alpha_scale`: A scaling factor for all nodes' transparencies.
Equivalent to multiplying all alphas by this number.
The default transparency is 1.0.
If you need to make the nodes transparent,
use a value smaller than one.
- `alpha_bounds`: The bounds for transparency.
Should be a tuple of `(lower, upper)` numbers.
This keyword argument lets us manually set the bounds
that we wish to have for 0 opacity (i.e. transparent)
    to 1.0 opacity (i.e. opaque).
Everything else passed in here will be passed
to the matplotlib Patch constructor;
see `nxviz.lines` for more information.
"""
if ax is None:
ax = plt.gca()
nt = node_table(G)
pos = layout_func(nt, group_by, sort_by, **layout_kwargs)
node_color = node_colors(nt, color_by, palette)
encodings_kwargs = deepcopy(encodings_kwargs)
alpha_bounds = encodings_kwargs.pop("alpha_bounds", None)
alpha = transparency(nt, alpha_by, alpha_bounds) * encodings_kwargs.pop(
"alpha_scale", 1
)
size = node_size(nt, size_by) * encodings_kwargs.pop("size_scale", 1)
patches = node_glyphs(nt, pos, node_color, alpha, size, **encodings_kwargs)
for patch in patches:
ax.add_patch(patch)
rescale_func(G)
return pos
hive = partial(
draw,
layout_func=layouts.hive,
sort_by=None,
layout_kwargs={"inner_radius": 8},
encodings_kwargs={"size_scale": 0.5},
rescale_func=rescale_square,
)
update_wrapper(hive, draw)
hive.__name__ = "nodes.hive"
circos = partial(
draw,
layout_func=layouts.circos,
group_by=None,
sort_by=None,
)
update_wrapper(circos, draw)
circos.__name__ = "nodes.circos"
arc = partial(
draw,
layout_func=layouts.arc,
group_by=None,
sort_by=None,
rescale_func=rescale_arc,
)
update_wrapper(arc, draw)
arc.__name__ = "nodes.arc"
parallel = partial(
draw,
layout_func=layouts.parallel,
sort_by=None,
encodings_kwargs={"size_scale": 0.5},
)
update_wrapper(parallel, draw)
parallel.__name__ = "nodes.parallel"
matrix = partial(draw, layout_func=layouts.matrix, group_by=None, sort_by=None)
update_wrapper(matrix, draw)
matrix.__name__ = "nodes.matrix"
geo = partial(
draw,
layout_func=layouts.geo,
group_by=None,
sort_by=None,
encodings_kwargs={"size_scale": 0.0015},
)
update_wrapper(geo, draw)
geo.__name__ = "nodes.geo"
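# Usage sketch: drawing circos nodes for the karate club graph; the "degree"
# attribute is computed here purely to demonstrate size_by.
if __name__ == "__main__":
    G = nx.karate_club_graph()
    for n, deg in G.degree():
        G.nodes[n]["degree"] = deg
    pos = circos(G, color_by="club", size_by="degree")
    plt.gca().set_aspect("equal")
    plt.show()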
# ---- end of file: /sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/nodes.py (pypi) ----
import warnings
from functools import partial, update_wrapper
from itertools import combinations
from typing import Callable, Hashable
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from dandelion.external.nxviz import annotate, api, utils
### Iterators to generate subgraphs.
def hive_triplets(G: nx.Graph, group_by: Hashable):
"""Yield subgraphs containing triplets of node categories.
Intended for hive plotting.
"""
nt = utils.node_table(G)
groups = sorted(nt[group_by].unique())
if len(groups) > 6:
warnings.warn(
"You have more than 6 groups of nodes, "
"which means you might end up having a lot of subplots made. "
"User beware! "
"We recommend using hive plots only when you have 6 or fewer "
"groups of nodes."
)
triplets = combinations(groups, 3)
for groups in triplets:
wanted_nodes = (n for n in G.nodes() if G.nodes[n][group_by] in groups)
yield G.subgraph(wanted_nodes), groups
def edge_group(G: nx.Graph, group_by: Hashable):
"""Yield graphs containing only certain categories of edges."""
et = utils.edge_table(G)
groups = sorted(et[group_by].unique())
for group in groups:
G_sub = G.copy()
G_sub.remove_edges_from(G_sub.edges())
for u, v, d in G.edges(data=True):
if d[group_by] == group:
G_sub.add_edge(u, v, **d)
yield G_sub, group
def node_group_edges(G: nx.Graph, group_by: Hashable):
"""Return a subgraph containing edges connected to a particular category of nodes."""
nt = utils.node_table(G)
groups = sorted(nt[group_by].unique())
for group in groups:
G_sub = G.copy()
G_sub.remove_edges_from(G_sub.edges())
wanted_nodes = (n for n in G.nodes() if G.nodes[n][group_by] == group)
for node in wanted_nodes:
for u, v, d in G.edges(node, data=True):
G_sub.add_edge(u, v, **d)
yield G_sub, group
def n_rows_cols(groups):
"""Return squarest n_rows and n_cols combination."""
nrows = ncols = int(np.ceil(np.sqrt(len(groups))))
return nrows, ncols
def null(*args, **kwargs):
"""A passthrough function that does nothing."""
pass
grouping_annotations = {
"api.circos": annotate.circos_group,
"api.arc": annotate.arc_group,
"api.matrix": annotate.matrix_block,
"api.hive": annotate.hive_group,
"api.parallel": annotate.parallel_group,
}
node_color_annotations = {
"api.hive": null,
"api.circos": annotate.node_colormapping,
"api.matrix": annotate.node_colormapping,
"api.arc": annotate.node_colormapping,
"api.parallel": annotate.node_colormapping,
}
def facet_plot(
G: nx.Graph,
plotting_func: Callable,
node_facet_func: Callable,
node_group_by: Hashable,
node_sort_by: Hashable,
node_color_by: Hashable,
edge_facet_func: Callable,
edge_group_by: Hashable,
edge_color_by: Hashable,
):
"""Generic facet plotting function.
All faceting funcs should take G and group_by and yield graphs.
Underneath the hood, how they work shouldn't be of concern.
Edge facet func takes priority if both node and edge facet func are specified.
Just keep this in mind.
## Parameters
- `G`: The graph to facet.
- `plotting_func`: One of the high level API functions to use for plotting.
- `node_facet_func`: A function to facet the nodes by.
- `node_group_by`: Node metadata attribute to group nodes by.
- `node_sort_by`: Node metadata attribute to sort nodes by.
- `node_color_by`: Node metadata attribute to color nodes by.
- `edge_facet_func`: A function to facet the edges by.
- `edge_group_by`: Edge metadata attribute to group edges by.
- `edge_color_by`: Edge metadata attribute to color edges by.
"""
group_by = node_group_by
facet_func = node_facet_func
if edge_facet_func and edge_group_by:
group_by = edge_group_by
facet_func = edge_facet_func
graphs, groups = zip(*facet_func(G, group_by))
nrows, ncols = n_rows_cols(groups)
fig, axes = plt.subplots(
figsize=(3 * nrows, 3 * ncols), nrows=nrows, ncols=ncols
)
axes = list(axes.flatten())
for G_sub, group, ax in zip(graphs, groups, axes):
plt.sca(ax)
plotting_func(
G_sub,
group_by=node_group_by,
sort_by=node_sort_by,
node_color_by=node_color_by,
edge_color_by=edge_color_by,
)
if node_group_by:
grouping_annotations.get(plotting_func.__name__)(
G_sub, group_by=node_group_by
)
if node_color_by:
# Annotate only on the left most axes
idx = axes.index(ax)
if not idx % nrows:
node_color_annotations[plotting_func.__name__](G, node_color_by)
ax.set_title(f"{group_by} = {group}")
last_idx = axes.index(ax)
for ax in axes[last_idx + 1 :]:
fig.delaxes(ax)
plt.tight_layout()
#### Function begins below. This belongs to the "edge faceting" category.
hive_panel = partial(
facet_plot,
plotting_func=api.hive,
node_facet_func=hive_triplets,
node_sort_by=None,
node_color_by=None,
edge_facet_func=None,
edge_group_by=None,
edge_color_by=None,
)
update_wrapper(hive_panel, facet_plot)
hive_panel.__name__ = "facet.hive_panel"
matrix_panel = partial(
facet_plot,
plotting_func=api.matrix,
node_facet_func=None,
node_group_by=None,
node_sort_by=None,
node_color_by=None,
edge_facet_func=edge_group,
edge_color_by=None,
)
update_wrapper(matrix_panel, facet_plot)
matrix_panel.__name__ = "facet.matrix_panel"
arc_panel = partial(
facet_plot,
plotting_func=api.arc,
node_facet_func=None,
node_group_by=None,
node_sort_by=None,
node_color_by=None,
edge_facet_func=edge_group,
edge_color_by=None,
)
update_wrapper(arc_panel, facet_plot)
arc_panel.__name__ = "facet.arc_panel"
circos_panel = partial(
facet_plot,
plotting_func=api.circos,
node_facet_func=None,
node_group_by=None,
node_sort_by=None,
node_color_by=None,
edge_facet_func=edge_group,
edge_color_by=None,
)
update_wrapper(circos_panel, facet_plot)
circos_panel.__name__ = "facet.circos_panel"
parallel_panel = partial(
facet_plot,
plotting_func=api.parallel,
node_facet_func=None,
node_group_by=None,
node_sort_by=None,
node_color_by=None,
edge_facet_func=edge_group,
edge_color_by=None,
)
update_wrapper(parallel_panel, facet_plot)
parallel_panel.__name__ = "facet.parallel_panel"
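# Illustrative usage sketch (not part of the module): the panel functions
# above facet one graph into a grid of sub-plots. The random graph and the
# "origin"/"weight" metadata keys below are hypothetical placeholders.
def _example_arc_panel():
    import random

    import matplotlib.pyplot as plt
    import networkx as nx

    G = nx.erdos_renyi_graph(n=20, p=0.2, seed=0)
    for _, d in G.nodes(data=True):
        d["weight"] = random.random()
    for _, _, d in G.edges(data=True):
        d["origin"] = random.choice(["a", "b", "c"])
    # One arc plot per value of the edge "origin" attribute.
    arc_panel(
        G, edge_group_by="origin", node_color_by="weight", edge_color_by="origin"
    )
    plt.show()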
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/facet.py | 0.862757 | 0.518973 | facet.py | pypi
from collections import Counter
import pandas as pd
import warnings
from typing import Iterable
def is_data_homogenous(data_container: Iterable):
"""
Check that all of the data in the container are of the same Python data
type. This function is called by the other functions below, so callers
rarely need to invoke it directly.
:param data_container: A generic container of data points.
"""
data_types = set([type(i) for i in data_container])
return len(data_types) == 1
def infer_data_type(data_container: Iterable):
"""
For a given container of data, infer the type of data as one of
continuous, categorical, or ordinal.
For now, it is a one-to-one mapping as such:
- str: categorical
- int: ordinal
- float: continuous
There may be better heuristics that are not currently implemented. For
example, a list of 10000+ numbers with at most 12 unique entries would be a
good candidate for treating floats as categorical.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
warnings.warn(
"`infer_data_type` is deprecated! "
"Please use `infer_data_family` instead!"
)
# Defensive programming checks.
# 0. Ensure that we are dealing with lists or tuples, and nothing else.
assert isinstance(data_container, list) or isinstance(
data_container, tuple
), "data_container should be a list or tuple."
# 1. Don't want to deal with only single values.
assert (
len(set(data_container)) > 1
), "There should be more than one value in the data container."
# 2. Don't want to deal with mixed data.
assert is_data_homogenous(
data_container
), "Data are not of a homogenous type!"
# Once we check that the data type of the container is homogenous, we only
# need to check the first element in the data container for its type.
datum = data_container[0]
# Return statements below
# treat binomial data as categorical
# TODO: make tests for this.
if len(set(data_container)) == 2:
return "categorical"
elif isinstance(datum, str):
return "categorical"
elif isinstance(datum, int):
return "ordinal"
elif isinstance(datum, float):
return "continuous"
else:
raise ValueError("Not possible to tell what the data type is.")
def infer_data_family(data: pd.Series):
"""Infer data family from a column of data.
The three families are "continuous", "ordinal", and "categorical".
The rules:
- dtype = float:
- min < 0 and max > 0: divergent
- otherwise: continuous
- dtype = integer:
- more than 9 distinct integers: continuous
- otherwise: ordinal
- dtype = object: categorical
"""
if data.dtype == float:
if data.min() < 0 and data.max() > 0:
return "divergent"
return "continuous"
if data.dtype == int:
if len(set(data)) > 9:
return "continuous"
return "ordinal"
return "categorical"
def is_data_diverging(data_container: Iterable):
"""
We want to use this to check whether the data are diverging or not.
This is a simple check, can be made much more sophisticated.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
assert infer_data_type(data_container) in [
"ordinal",
"continuous",
], "Data type should be ordinal or continuous"
# Check whether the data contains negative and positive values.
has_negative = False
has_positive = False
for i in data_container:
if i < 0:
has_negative = True
elif i > 0:
has_positive = True
if has_negative and has_positive:
return True
else:
return False
def is_groupable(data_container: Iterable):
"""
Returns whether the data container is a "groupable" container or not.
By "groupable", we mean it is a 'categorical' or 'ordinal' variable.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
is_groupable = False
if infer_data_type(data_container) in ["categorical", "ordinal"]:
is_groupable = True
return is_groupable
def num_discrete_groups(data_container: Iterable):
"""
Returns the number of discrete groups present in a data container.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
return len(set(data_container))
def items_in_groups(data_container: Iterable):
"""
Returns discrete groups present in a data container and the number items
per group.
:param data_container: A generic container of data points.
:type data_container: `iterable`
"""
return Counter(data_container)
def node_table(G, group_by=None, sort_by=None):
"""Return the node table of a graph G.
## Parameters
- `G`: A NetworkX graph.
- `group_by`: A key in the node attribute dictionary.
- `sort_by`: A key in the node attribute dictionary.
## Returns
A pandas DataFrame, such that the index is the node
and the columns are node attributes.
"""
node_table = []
node_index = []
for n, d in G.nodes(data=True):
node_table.append(d)
node_index.append(n)
df = pd.DataFrame(data=node_table, index=node_index)
df = group_and_sort(df, group_by, sort_by)
return df
import networkx as nx
def edge_table(G) -> pd.DataFrame:
"""Return the edge table of a graph.
The nodes involved in the edge are keyed
under the `source` and `target` keys.
This is a requirement for use with the hammer_bundle module
in datashader's bundler.
The remaining edge attributes are returned as columns.
"""
data = []
for u, v, d in G.edges(data=True):
row = dict()
row.update(d)
row["source"] = u
row["target"] = v
data.append(row)
if not G.is_directed():
u, v = v, u
row = dict()
row.update(d)
row["source"] = u
row["target"] = v
data.append(row)
return pd.DataFrame(data)
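# Illustrative sketch (assumed toy graph): node_table and edge_table flatten
# a graph's metadata into DataFrames, which the layout and encoding machinery
# consumes downstream.
def _example_tables():
    G = nx.Graph()
    G.add_node("a", group="x")
    G.add_node("b", group="y")
    G.add_edge("a", "b", weight=2.0)
    nt = node_table(G, group_by="group")  # index: node, columns: node attributes
    et = edge_table(G)  # columns: edge attributes plus source/target
    return nt, et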
from typing import Hashable, Iterable
def group_and_sort(
node_table: pd.DataFrame,
group_by: Hashable = None,
sort_by: Hashable = None,
) -> pd.DataFrame:
"""Group and sort a node table."""
sort_criteria = []
if group_by:
sort_criteria.append(group_by)
if sort_by:
sort_criteria.append(sort_by)
if sort_criteria:
node_table = node_table.sort_values(sort_criteria)
return node_table
def nonzero_sign(xy):
"""
A sign function that won't return 0
"""
return -1 if xy < 0 else 1
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/utils.py | 0.860896 | 0.705214 | utils.py | pypi
from functools import partial
from typing import Callable, Tuple, Optional, Union, Dict, List
from itertools import cycle
import numpy as np
import pandas as pd
from matplotlib.cm import get_cmap
from matplotlib.colors import ListedColormap, Normalize, BoundaryNorm
from palettable.colorbrewer import qualitative, sequential
from dandelion.external.nxviz.utils import infer_data_family
def data_cmap(
data: pd.Series, palette: Optional[Union[Dict, List]] = None
) -> Tuple:
"""Return a colormap for data attribute.
Returns both the cmap and data family.
"""
data_family = infer_data_family(data)
if data_family == "categorical":
if palette is None:
base_cmap = qualitative
num_categories = max(len(data.unique()), 3)
if num_categories > 12:
raise ValueError(
f"It appears you have >12 categories for the key {data.name}. "
"Because it's difficult to discern >12 categories, "
"and because colorbrewer doesn't have a qualitative colormap "
"with greater than 12 categories, "
"nxviz does not support plotting with >12 categories. "
"Please provide your own palette."
)
cmap = ListedColormap(
base_cmap.__dict__[f"Set3_{num_categories}"].mpl_colors
)
else:
cmap = palette
elif data_family == "ordinal":
cmap = get_cmap("viridis")
elif data_family == "continuous":
cmap = get_cmap("viridis")
elif data_family == "divergent":
cmap = get_cmap("bwr")
return cmap, data_family
def continuous_color_func(val, cmap, data: pd.Series):
"""Return RGBA of a value.
## Parameters
- `val`: Value to convert to RGBA
- `cmap`: A Matplotlib cmap
- `data`: Pandas series.
"""
norm = Normalize(vmin=data.min(), vmax=data.max())
return cmap(norm(val))
def divergent_color_func(val, cmap, data: pd.Series):
"""Return RGBA for divergent color func.
Divergent colormaps are best made symmetric.
Hence, vmin and vmax are set appropriately here.
"""
vmin = min(data.min(), -data.max())
vmax = max(data.max(), -data.min())
norm = Normalize(vmin=vmin, vmax=vmax)
return cmap(norm(val))
def discrete_color_func(
val, cmap, data: pd.Series, palette: Optional[Union[Dict, List]] = None
):
"""Return RGB corresponding to a value.
## Parameters
- `val`: Value to convert to RGBA
- `cmap`: A Matplotlib cmap
- `data`: Pandas series.
"""
if palette is not None:
if isinstance(palette, dict):
return palette[val]
else:
pal = dict(zip(data.unique(), cycle(palette)))
return pal[val]
else:
colors = sorted(data.unique())
return cmap.colors[colors.index(val)]
def ordinal_color_func(val, cmap, data):
"""Return RGB corresponding to an ordinal value.
## Parameters
- `val`: Value to convert to RGBA
- `cmap`: A Matplotlib cmap
- `data`: Pandas series.
"""
bounds = np.arange(data.min(), data.max())
norm = BoundaryNorm(bounds, cmap.N)
return cmap(norm(val))
def color_func(
data: pd.Series, palette: Optional[Union[Dict, List]] = None
) -> Callable:
"""Return a color function that takes in a value and returns an RGB(A) tuple.
This will do the mapping to the continuous and discrete color functions.
"""
cmap, data_family = data_cmap(data, palette)
func = discrete_color_func
if data_family in ["continuous", "ordinal"]:
func = continuous_color_func
return partial(func, cmap=cmap, data=data)
else:
return partial(func, cmap=cmap, data=data, palette=palette)
def data_color(
data: pd.Series,
ref_data: pd.Series,
palette: Optional[Union[Dict, List]] = None,
) -> pd.Series:
"""Return iterable of colors for a given data.
`cfunc` gives users the ability to customize the color mapping of a node.
The only thing that we expect is that it takes in a value
and returns a matplotlib-compatible RGB(A) tuple or hexadecimal value.
The function takes in `ref_data`
which is used to determine important colormap values (such as boundaries).
That colormap is then applied to the actual `data`.
## Parameters
- `data`: The data on which to map colors.
- `ref_data`: The data on which the colormap is constructed.
- `palette`: Optional custom palette of colours for plotting categorical groupings
in a list/dictionary. Colours must be values `matplotlib.colors.ListedColormap`
can interpret. If a dictionary is provided, keys and values correspond to
categories and colours, respectively.
"""
cfunc = color_func(ref_data, palette)
return data.apply(cfunc)
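# A minimal sketch of `data_color`, assuming a small categorical Series and a
# hypothetical palette; the category names and colours are placeholders.
def _example_data_color():
    s = pd.Series(["liver", "spleen", "liver", "blood"])
    palette = {"liver": "tab:red", "spleen": "tab:blue", "blood": "tab:green"}
    # With an explicit mapping, every value is looked up in the palette.
    return data_color(s, ref_data=s, palette=palette)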
def data_transparency(data: pd.Series, ref_data: pd.Series) -> pd.Series:
"""Transparency based on value."""
norm = Normalize(vmin=ref_data.min(), vmax=ref_data.max())
return data.apply(norm)
def data_size(data: pd.Series, ref_data: pd.Series) -> pd.Series:
"""Square root node size."""
return data.apply(np.sqrt)
def data_linewidth(data: pd.Series, ref_data: pd.Series) -> pd.Series:
"""Line width scales linearly with property (by default)."""
return data
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/encodings.py | 0.949153 | 0.586967 | encodings.py | pypi
from functools import partial, update_wrapper
from typing import Callable, Dict, Hashable, Optional, Union, List
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from dandelion.external.nxviz import edges, nodes
from dandelion.external.nxviz.plots import aspect_equal, despine
# The docstring of `base` below applies to all plotting functions in this module.
def base(
G: nx.Graph,
node_layout_func: Callable,
edge_line_func: Callable,
group_by: Hashable,
sort_by: Hashable,
node_color_by: Hashable = None,
node_alpha_by: Hashable = None,
node_size_by: Hashable = None,
node_enc_kwargs: Dict = {},
edge_color_by: Hashable = None,
edge_lw_by: Hashable = None,
edge_alpha_by: Hashable = None,
edge_enc_kwargs: Dict = {},
node_layout_kwargs: Dict = {},
edge_line_kwargs: Dict = {},
node_palette: Optional[Union[Dict, List]] = None,
edge_palette: Optional[Union[Dict, List]] = None,
):
"""High-level graph plotting function.
## Parameters
### Basic
- `G`: A NetworkX Graph.
### Nodes
- `group_by`: Node metadata attribute key to group nodes.
- `sort_by`: Node metadata attribute key to sort nodes.
- `node_color_by`: Node metadata attribute key to color nodes.
- `node_alpha_by`: Node metadata attribute key to set node transparency.
- `node_size_by`: Node metadata attribute key to set node size.
- `node_enc_kwargs`: Keyword arguments to set node visual encodings.
TODO: Elaborate on what these arguments are.
- `node_palette`: Optional custom palette of colours for plotting categorical groupings
in a list/dictionary. Colours must be values `matplotlib.colors.ListedColormap`
can interpret. If a dictionary is provided, keys and values correspond to
categories and colours, respectively.
### Edges
- `edge_color_by`: Edge metadata attribute key to color edges.
There are two special values for this parameter
when using directed graphs:
"source_node_color" and "target_node_color".
If these values are set, then `node_color_by` also needs to be set.
- `edge_lw_by`: Edge metadata attribute key to set edge line width.
- `edge_alpha_by`: Edge metadata attribute key to set edge transparency.
- `edge_enc_kwargs`: Keyword arguments to set edge visual encodings.
- `edge_palette`: Same as node_palette but for edges.
TODO: Elaborate on what these arguments are.
"""
pos = node_layout_func(
G,
group_by=group_by,
sort_by=sort_by,
color_by=node_color_by,
size_by=node_size_by,
alpha_by=node_alpha_by,
encodings_kwargs=node_enc_kwargs,
layout_kwargs=node_layout_kwargs,
palette=node_palette,
)
edge_line_func(
G,
pos,
color_by=edge_color_by,
node_color_by=node_color_by,
lw_by=edge_lw_by,
alpha_by=edge_alpha_by,
encodings_kwargs=edge_enc_kwargs,
palette=edge_palette,
)
despine()
aspect_equal()
return plt.gca()
arc = partial(
base,
node_layout_func=nodes.arc,
edge_line_func=edges.arc,
group_by=None,
sort_by=None,
)
update_wrapper(arc, base)
arc.__name__ = "api.arc"
circos = partial(
base,
node_layout_func=nodes.circos,
edge_line_func=edges.circos,
group_by=None,
sort_by=None,
)
update_wrapper(circos, base)
circos.__name__ = "api.circos"
parallel = partial(
base,
node_layout_func=nodes.parallel,
edge_line_func=edges.line,
sort_by=None,
node_enc_kwargs={"size_scale": 0.5},
)
update_wrapper(parallel, base)
parallel.__name__ = "api.parallel"
geo = partial(
base,
node_layout_func=nodes.geo,
edge_line_func=edges.line,
group_by=None,
sort_by=None,
node_enc_kwargs={"size_scale": 0.0015},
)
update_wrapper(geo, base)
geo.__name__ = "api.geo"
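# Illustrative usage sketch (not part of the module): the partials above are
# the high-level entry points. The random graph and the "group"/"weight"
# metadata keys below are hypothetical placeholders.
def _example_functional_api():
    import random

    G = nx.erdos_renyi_graph(n=15, p=0.3, seed=0)
    for _, d in G.nodes(data=True):
        d["group"] = random.choice(["a", "b", "c"])
    for _, _, d in G.edges(data=True):
        d["weight"] = random.random()
    return circos(G, group_by="group", node_color_by="group", edge_alpha_by="weight")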
def base_cloned(
G,
node_layout_func,
edge_line_func,
group_by,
sort_by=None,
node_color_by=None,
node_alpha_by=None,
node_size_by=None,
node_enc_kwargs={},
edge_color_by=None,
edge_lw_by=None,
edge_alpha_by=None,
edge_enc_kwargs={},
node_layout_kwargs: Dict = {},
edge_line_kwargs: Dict = {},
cloned_node_layout_kwargs: Dict = {},
node_palette: Optional[Union[Dict, List]] = None,
edge_palette: Optional[Union[Dict, List]] = None,
):
"""High-level graph plotting function.
## Parameters
### Basic
- `G`: A NetworkX Graph.
### Nodes
- `group_by`: Node metadata attribute key to group nodes.
- `sort_by`: Node metadata attribute key to sort nodes.
- `node_color_by`: Node metadata attribute key to color nodes.
- `node_alpha_by`: Node metadata attribute key to set node transparency.
- `node_size_by`: Node metadata attribute key to set node size.
- `node_enc_kwargs`: Keyword arguments to set node visual encodings.
TODO: Elaborate on what these arguments are.
- `node_palette`: Optional custom palette of colours for plotting categorical groupings
in a list/dictionary. Colours must be values `matplotlib.colors.ListedColormap`
can interpret. If a dictionary is provided, keys and values correspond to
categories and colours, respectively.
### Edges
- `edge_color_by`: Edge metadata attribute key to color edges.
There are two special values for this parameter
when using directed graphs:
"source_node_color" and "target_node_color".
If these values are set, then `node_color_by` also needs to be set.
- `edge_lw_by`: Edge metadata attribute key to set edge line width.
- `edge_alpha_by`: Edge metadata attribute key to set edge transparency.
- `edge_enc_kwargs`: Keyword arguments to set edge visual encodings.
- `edge_palette`: Same as node_palette but for edges.
"""
pos = node_layout_func(
G,
group_by=group_by,
sort_by=sort_by,
color_by=node_color_by,
size_by=node_size_by,
alpha_by=node_alpha_by,
encodings_kwargs=node_enc_kwargs,
layout_kwargs=node_layout_kwargs,
palette=node_palette,
)
pos_cloned = node_layout_func(
G,
group_by=group_by,
sort_by=sort_by,
color_by=node_color_by,
size_by=node_size_by,
alpha_by=node_alpha_by,
encodings_kwargs=node_enc_kwargs,
layout_kwargs=cloned_node_layout_kwargs,
palette=node_palette,
)
edge_line_func(
G,
pos,
pos_cloned=pos_cloned,
color_by=edge_color_by,
node_color_by=node_color_by,
lw_by=edge_lw_by,
alpha_by=edge_alpha_by,
encodings_kwargs=edge_enc_kwargs,
palette=edge_palette,
**edge_line_kwargs,
)
despine()
aspect_equal()
return plt.gca()
hive = partial(
base_cloned,
node_layout_func=nodes.hive,
edge_line_func=edges.hive,
cloned_node_layout_kwargs={"rotation": np.pi / 6},
)
update_wrapper(hive, base_cloned)
hive.__name__ = "api.hive"
matrix = partial(
base_cloned,
group_by=None,
node_layout_func=nodes.matrix,
edge_line_func=edges.matrix,
cloned_node_layout_kwargs={"axis": "y"},
)
update_wrapper(matrix, base_cloned)
matrix.__name__ = "api.matrix"
# Object-oriented API below, placed for compatibility.
class BasePlot:
"""Base Plot class."""
def __init__(
self,
G: nx.Graph = None,
node_grouping: Hashable = None,
node_order: Hashable = None,
node_color: Hashable = None,
node_alpha: Hashable = None,
node_size: Hashable = None,
nodeprops: Dict = None,
edge_color: Hashable = None,
edge_alpha: Hashable = None,
edge_width: Hashable = None,
edgeprops: Dict = None,
node_palette: Optional[Union[Dict, List]] = None,
edge_palette: Optional[Union[Dict, List]] = None,
):
"""Instantiate a plot.
## Parameters:
- `G`: NetworkX graph to plot.
- `node_grouping`: The node attribute on which to specify the grouping position of nodes.
- `node_order`: The node attribute on which to specify the coloring of nodes.
- `node_color`: The node attribute on which to specify the colour of nodes.
- `node_alpha`: The node attribute on which to specify the transparency of nodes.
- `node_size`: The node attribute on which to specify the size of nodes.
- `nodeprops`: A `matplotlib`-compatible `props` dictionary.
- `edge_color`: The edge attribute on which to specify the colour of edges.
- `edge_alpha`: The edge attribute on which to specify the transparency of edges.
- `edge_width`: The edge attribute on which to specify the width of edges.
- `edgeprops`: A `matplotlib`-compatible `props` dictionary.
- `node_palette`: Optional custom palette of colours for plotting categorical groupings
in a list/dictionary. Colours must be values `matplotlib.colors.ListedColormap`
can interpret. If a dictionary is provided, keys and values correspond to
categories and colours, respectively.
- `edge_palette`: Same as node_palette but for edges.
"""
import warnings
warnings.warn(
"As of nxviz 0.7, the object-oriented API is being deprecated "
"in favour of a functional API. "
"Please consider switching your plotting code! "
"The object-oriented API wrappers remains in place "
"to help you transition over. "
"A few changes between the old and new API exist; "
"please consult the nxviz documentation for more information. "
"When the 1.0 release of nxviz happens, "
"the object-oriented API will be dropped entirely."
)
def draw():
"""No longer implemented!"""
pass
functional_api_names = [
"group_by",
"sort_by",
"node_color_by",
"node_alpha_by",
"node_size_by",
"node_enc_kwargs",
"edge_color_by",
"edge_alpha_by",
"edge_lw_by",
"edge_enc_kwargs",
"node_palette",
"edge_palette",
]
object_api_names = [
"node_grouping",
"node_order",
"node_color",
"node_alpha",
"node_size",
"nodeprops",
"edge_color",
"edge_alpha",
"edge_width",
"edgeprops",
"node_palette",
"edge_palette",
]
functional_to_object = dict(zip(functional_api_names, object_api_names))
object_to_functional = dict(zip(object_api_names, functional_api_names))
class ArcPlot(BasePlot):
"""Arc Plot."""
def __init__(self, G, **kwargs):
super().__init__()
func_kwargs = {object_to_functional[k]: v for k, v in kwargs.items()}
self.fig = plt.figure()
self.ax = arc(G, **func_kwargs)
class CircosPlot(BasePlot):
"""Circos Plot."""
def __init__(self, G, **kwargs):
super().__init__()
func_kwargs = {object_to_functional[k]: v for k, v in kwargs.items()}
self.fig = plt.figure()
self.ax = circos(G, **func_kwargs)
class HivePlot(BasePlot):
"""Hive Plot."""
def __init__(self, G, **kwargs):
super().__init__()
func_kwargs = {object_to_functional[k]: v for k, v in kwargs.items()}
self.fig = plt.figure()
self.ax = hive(G, **func_kwargs)
class MatrixPlot(BasePlot):
"""Matrix Plot."""
def __init__(self, G, **kwargs):
super().__init__()
func_kwargs = {object_to_functional[k]: v for k, v in kwargs.items()}
self.fig = plt.figure()
self.ax = matrix(G, **func_kwargs)
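# Illustrative sketch of the compatibility wrappers above: an object-oriented
# call is translated, argument by argument, into the functional API. The graph
# and the "group" attribute are hypothetical placeholders.
def _example_oo_vs_functional():
    import random

    G = nx.erdos_renyi_graph(n=10, p=0.3, seed=1)
    for _, d in G.nodes(data=True):
        d["group"] = random.choice(["a", "b"])
    CircosPlot(G, node_grouping="group", node_color="group")  # old style ...
    circos(G, group_by="group", node_color_by="group")  # ... equivalent functional call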
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/api.py | 0.852736 | 0.438304 | api.py | pypi
from typing import Callable, Hashable
from networkx.drawing import layout
from dandelion.external.nxviz import layouts, lines, utils
from matplotlib.patches import Circle, Rectangle
import matplotlib.pyplot as plt
from functools import partial, update_wrapper
import numpy as np
import pandas as pd
from copy import deepcopy
import networkx as nx
def node(
G,
node,
layout_func,
group_by,
sort_by=None,
# visual properties
color="red", # color
radius=1, # size
alpha=1.0, # transparency
clone=False,
cloned_node_layout_kwargs={},
):
"""Highlight one particular node."""
nt = utils.node_table(G, group_by=group_by, sort_by=sort_by)
pos = layout_func(nt, group_by=group_by, sort_by=sort_by)
ax = plt.gca()
zorder = max([_.zorder for _ in ax.get_children()])
c = Circle(xy=pos[node], fc=color, radius=radius, zorder=zorder)
ax.add_patch(c)
if clone:
pos_cloned = layout_func(
nt, group_by=group_by, sort_by=sort_by, **cloned_node_layout_kwargs
)
c = Circle(
xy=pos_cloned[node], fc=color, radius=radius, zorder=zorder + 1
)
ax.add_patch(c)
circos_node = partial(node, layout_func=layouts.circos, group_by=None)
update_wrapper(circos_node, node)
circos_node.__name__ = "highlights.circos_node"
parallel_node = partial(node, layout_func=layouts.parallel, group_by=None)
update_wrapper(parallel_node, node)
parallel_node.__name__ = "highlights.parallel_node"
arc_node = partial(node, layout_func=layouts.arc, group_by=None)
update_wrapper(arc_node, node)
arc_node.__name__ = "highlights.arc_node"
hive_node = partial(
node,
layout_func=layouts.hive,
clone=True,
cloned_node_layout_kwargs={"rotation": np.pi / 6},
)
update_wrapper(hive_node, node)
hive_node.__name__ = "highlights.hive_node"
matrix_node = partial(
node,
layout_func=layouts.matrix,
group_by=None,
clone=True,
cloned_node_layout_kwargs={"axis": "y"},
)
update_wrapper(matrix_node, node)
matrix_node.__name__ = "highlights.matrix_node"
def edge(
G,
source,
target,
layout_func,
line_func,
group_by,
sort_by,
color="red", # color
lw=1.0, # size
alpha=1.0, # transparency
clone=False,
cloned_node_layout_kwargs={},
line_func_aes_kw={"zorder": 30, "fc": "none"},
line_func_kwargs={},
):
"""Highlight one particular edge."""
nt = utils.node_table(G, group_by=group_by, sort_by=sort_by)
et = (
utils.edge_table(G)
.query("source == @source")
.query("target == @target")
)
pos = layout_func(nt, group_by=group_by, sort_by=sort_by)
line_func_kwargs = deepcopy(line_func_kwargs)
line_func_kwargs.update(
et=et,
pos=pos,
edge_color=pd.Series([color], index=et.index),
alpha=pd.Series([alpha], index=et.index),
lw=pd.Series([lw], index=et.index),
aes_kw=line_func_aes_kw,
)
if clone:
pos_cloned = layout_func(
nt, group_by=group_by, sort_by=sort_by, **cloned_node_layout_kwargs
)
line_func_kwargs["pos_cloned"] = pos_cloned
patches = line_func(**line_func_kwargs)
ax = plt.gca()
for patch in patches:
ax.add_patch(patch)
circos_edge = partial(
edge,
layout_func=layouts.circos,
line_func=lines.circos,
group_by=None,
sort_by=None,
)
update_wrapper(circos_edge, edge)
circos_edge.__name__ = "highlights.circos_edge"
arc_edge = partial(
edge,
layout_func=layouts.arc,
line_func=lines.arc,
group_by=None,
sort_by=None,
line_func_aes_kw={"zorder": 1},
)
update_wrapper(arc_edge, edge)
arc_edge.__name__ = "highlights.arc_edge"
hive_edge = partial(
edge,
layout_func=layouts.hive,
line_func=lines.hive,
sort_by=None,
clone=True,
cloned_node_layout_kwargs={"rotation": np.pi / 6},
)
update_wrapper(hive_edge, edge)
hive_edge.__name__ = "highlights.hive_edge"
matrix_edge = partial(
edge,
layout_func=layouts.matrix,
line_func=lines.matrix,
group_by=None,
sort_by=None,
clone=True,
cloned_node_layout_kwargs={"axis": "y"},
line_func_aes_kw={},
)
update_wrapper(matrix_edge, edge)
matrix_edge.__name__ = "highlights.matrix_edge"
parallel_edge = partial(
edge,
layout_func=layouts.parallel,
line_func=lines.line,
sort_by=None,
line_func_aes_kw={"zorder": 1},
)
update_wrapper(parallel_edge, edge)
parallel_edge.__name__ = "highlights.parallel_edge"
def matrix_row(
G: nx.Graph,
node: Hashable,
group_by: Hashable = None,
sort_by: Hashable = None,
axis="x",
color="red",
):
"""Highlight one row (or column) in the matrix plot."""
nt = utils.node_table(G)
pos = layouts.matrix(nt, group_by=group_by, sort_by=sort_by, axis=axis)
x, y = pos[node]
width = 2
height = 2 * len(G)
xy = x - 1, y + 1
if axis == "y":
width, height = height, width
xy = x + 1, y - 1
rectangle = Rectangle(
xy=xy, width=width, height=height, fc=color, ec="none", alpha=0.3
)
ax = plt.gca()
ax.add_patch(rectangle)
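# Illustrative sketch (assumed toy graph): highlights are layered on top of a
# plot drawn with the high-level API, so the same layout arguments must be
# used for both the plot call and the highlight call.
def _example_highlight():
    import networkx as nx

    from dandelion.external.nxviz import api

    G = nx.erdos_renyi_graph(n=12, p=0.3, seed=2)
    api.arc(G)  # draw the base arc plot
    arc_node(G, node=0)  # then emphasise one node on the same axes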
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/highlights.py | 0.829803 | 0.251303 | highlights.py | pypi
import numpy as np
from .polcart import to_cartesian
from typing import List, Hashable
def item_theta(itemlist: List[Hashable], item: Hashable):
"""
Maps node to an angle in radians.
:param itemlist: Item list from the graph.
:param item: The item of interest. Must be in the itemlist.
:returns: theta -- the angle of the item in radians.
"""
assert len(itemlist) > 0, "itemlist must not be empty."
assert item in itemlist, "item must be inside itemlist."
i = itemlist.index(item)
theta = i * 2 * np.pi / len(itemlist)
return theta
def get_cartesian(r: float, theta: float):
"""
Returns the cartesian (x,y) coordinates of (r, theta).
:param r: Real-valued radius.
:param theta: Angle in radians.
:returns: to_cartesian(r, theta)
"""
return to_cartesian(r, theta)
def correct_negative_angle(angle):
"""
Corrects a negative angle to a positive one.
:param angle: The angle in radians.
:returns: `angle`, corrected to be positively-valued.
"""
angle = angle % (2 * np.pi)
if angle < 0:
angle += 2 * np.pi
return angle
def circos_radius(n_nodes: int, node_radius: float = 1.0):
"""
Automatically computes the origin-to-node centre radius of the Circos plot
using the triangle sine rule (law of sines).
a / sin(A) = b / sin(B) = c / sin(C)
:param n_nodes: the number of nodes in the plot.
:param node_radius: the radius of each node.
:returns: Origin-to-node centre radius.
"""
A = 2 * np.pi / n_nodes # noqa
B = (np.pi - A) / 2 # noqa
a = 2 * node_radius
return a * np.sin(B) / np.sin(A)
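# A quick worked check of `circos_radius`, assuming unit node radius: with
# n_nodes = 6 both A and B equal 60 degrees, so the plot radius equals the
# chord length a = 2 * node_radius = 2.
def _example_circos_radius():
    assert np.isclose(circos_radius(n_nodes=6, node_radius=1.0), 2.0)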
def correct_hive_angles(start, end):
"""Perform correction of hive plot angles for edge drawing."""
if start > np.pi and end == 0.0:
end = 2 * np.pi
if start < np.pi and end == 0.0:
start, end = end, start
if end < np.pi and start == 2 * np.pi:
start = 0
if end > np.pi and start == 0:
start = 2 * np.pi
return start, end
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/dandelion/external/nxviz/geometry.py | 0.954287 | 0.737276 | geometry.py | pypi
"""changeo clonotypes script"""
import argparse
import dandelion as ddl
import os
import scanpy
from scanpy import logging as logg
scanpy.settings.verbosity = 3
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--h5ddl",
required=True,
help=("Dandelion object to call changeo clonotypes on."),
)
parser.add_argument(
"--manual_threshold",
help=("Optional manual override of SHazaM threshold."),
)
parser.add_argument(
"--plot_file",
help=(
"File name to save PDF of SHazaM plot to. Defaults to the input object name "
+ "with _shazam.pdf appended."
),
)
parser.add_argument(
"--key_added",
default="changeo_clone_id",
help=(
"The column name to store the identified clone ID under in the object. "
+ "Defaults to changeo_clone_id."
),
)
parser.add_argument(
"--h5ddl_out",
help=(
"Path to save Dandelion object with changeo clonotypes to. Defaults to the "
+ "input object name with _changeo.h5ddl appended."
),
)
args = parser.parse_args()
# set up the default names of files if need be. needs the base name of the file
basename = os.path.splitext(args.h5ddl)[0]
if args.plot_file is None:
args.plot_file = basename + "_shazam.pdf"
if args.h5ddl_out is None:
args.h5ddl_out = basename + "_changeo.h5ddl"
if args.manual_threshold is not None:
args.manual_threshold = float(args.manual_threshold)
return args
def main():
"""Main changeo-clonotypes."""
logg.info("Software versions:\n")
ddl.logging.print_header()
start = logg.info("\nBeginning assigning change-o clonotypes\n")
# parse arguments
args = parse_args()
logg.info(
"command line parameters:\n",
deep=(
f"\n"
f"--------------------------------------------------------------\n"
f" --h5ddl = {args.h5ddl}\n"
f" --manual_threshold = {str(args.manual_threshold)}\n"
f" --plot_file = {args.plot_file}\n"
f" --key_added = {args.key_added}\n"
f" --h5ddl_out = {args.h5ddl_out}\n"
f"--------------------------------------------------------------\n"
),
)
# the actual process is easy. the dependencies quite a bit less so
vdj = ddl.read_h5ddl(args.h5ddl)
ddl.pp.calculate_threshold(
vdj,
manual_threshold=args.manual_threshold,
save_plot=args.plot_file,
)
ddl.tl.define_clones(vdj, key_added=args.key_added)
vdj.write_h5ddl(args.h5ddl_out)
logg.info("Assigning Change-o clonotypes finished.\n", time=start)
if __name__ == "__main__":
main()
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/container/changeo_clonotypes.py | 0.636692 | 0.164886 | changeo_clonotypes.py | pypi
# Interoperability with `scirpy`

It is now possible to convert the file formats between `dandelion>=0.1.1` and `scirpy>=0.6.2` [[Sturm2020]](https://academic.oup.com/bioinformatics/article/36/18/4817/5866543) to enhance the collaboration between the analysis toolkits.
We will download the *airr_rearrangement.tsv* file from here:
```bash
# bash
wget https://cf.10xgenomics.com/samples/cell-vdj/4.0.0/sc5p_v2_hs_PBMC_10k/sc5p_v2_hs_PBMC_10k_b_airr_rearrangement.tsv
```
**Import *dandelion* module**
```
import os
import dandelion as ddl
# change directory to somewhere more workable
os.chdir(os.path.expanduser('/Users/kt16/Downloads/dandelion_tutorial/'))
ddl.logging.print_versions()
import scirpy as ir
ir.__version__
```
## `dandelion`
```
# read in the airr_rearrangement.tsv file
file_location = 'sc5p_v2_hs_PBMC_10k/sc5p_v2_hs_PBMC_10k_b_airr_rearrangement.tsv'
vdj = ddl.read_10x_airr(file_location)
vdj
```
The test file contains a blank `clone_id` column so we run `find_clones` to populate it first.
```
ddl.tl.find_clones(vdj)
```
### `ddl.to_scirpy` : Converting `dandelion` to `scirpy`
```
irdata = ddl.to_scirpy(vdj)
irdata
```
To transfer every column found in a dandelion airr object (including things like `germline_alignment_d_mask`), do:
```
irdata = ddl.to_scirpy(vdj, include_fields = vdj.data.columns)
irdata
```
The `clone_id` is mapped to `IR_VJ_1_clone_id` column.
`transfer = True` will perform dandelion's `tl.transfer`.
```
irdatax = ddl.to_scirpy(vdj, transfer = True)
irdatax
```
### `ddl.from_scirpy` : Converting `scirpy` to `dandelion`
```
vdjx = ddl.from_scirpy(irdata)
vdjx
vdjx.metadata
```
## `scirpy`
### `ir.io.from_dandelion` : Converting `dandelion` to `scirpy`
```
irdata2 = ir.io.from_dandelion(vdj, include_fields = vdj.data.columns)
irdata2
```
likewise, `transfer = True` will perform dandelion's `tl.transfer`.
```
irdata2x = ir.io.from_dandelion(vdj, transfer = True, include_fields = vdj.data.columns)
irdata2x
```
### `ir.io.to_dandelion` : Converting `scirpy` to `dandelion`
```
vdj3 = ir.io.to_dandelion(irdata2)
vdj3
```
### Example of reading with `scirpy` followed by conversion to `dandelion`
```
# read in the airr_rearrangement.tsv file
file_location = 'sc5p_v2_hs_PBMC_10k/sc5p_v2_hs_PBMC_10k_b_airr_rearrangement.tsv'
irdata_s = ir.io.read_airr(file_location)
irdata_s
```
This time, find clones with `scirpy`'s method.
```
ir.tl.chain_qc(irdata_s)
ir.pp.ir_dist(irdata_s, metric = 'hamming', sequence="aa")
ir.tl.define_clonotypes(irdata_s)
irdata_s
vdj4 = ir.io.to_dandelion(irdata_s)
vdj4
```
### Visualising with `scirpy`'s plotting tools
You can now also plot `dandelion` networks using `scirpy`'s functions.
```
ddl.tl.generate_network(vdj, key = 'junction')
irdata_s.obs['scirpy_clone_id'] = irdata_s.obs['clone_id'] # stash it
ddl.tl.transfer(irdata_s, vdj, overwrite = True) # overwrite scirpy's clone_id definition
ir.tl.clonotype_network(irdata_s, min_cells = 2)
ir.pl.clonotype_network(irdata_s, color = 'clone_id', panel_size=(7,7))
```
To swap to a shorter `clone_id` name (ordered by size):
```
ddl.tl.transfer(irdata_s, vdj, clone_key = 'clone_id_by_size')
ir.tl.clonotype_network(irdata_s, clonotype_key= 'clone_id_by_size', min_cells = 2)
ir.pl.clonotype_network(irdata_s, color = 'clone_id_by_size', panel_size=(7,7))
```
You can also collapse the networks to a single node and plot by size:
```
ddl.tl.transfer(irdata_s, vdj, clone_key = 'clone_id_by_size', collapse_nodes = True)
ir.tl.clonotype_network(irdata_s, clonotype_key = 'clone_id_by_size', min_cells = 2)
ir.pl.clonotype_network(irdata_s, color = 'scirpy_clone_id', panel_size=(7,7))
```
/sc-dandelion-0.3.2.tar.gz/sc-dandelion-0.3.2/docs/notebooks/1c_dandelion_scirpy.ipynb | 0.424054 | 0.937555 | 1c_dandelion_scirpy.ipynb | pypi
import pandas as pd
from config42 import ConfigManager
from sc_analyzer_base import BaseSummaryDiffAnalyzer
class BranchSummaryGroupByBranchDiffAnalyzer(BaseSummaryDiffAnalyzer):
"""
Branch summary (grouped by branch) difference analyzer.
"""
def __init__(self, *, config: ConfigManager, is_first_analyzer=False):
self._branch_order = dict()
super().__init__(config=config, is_first_analyzer=is_first_analyzer)
self._key_enabled = "diff.branch_summary_group_by_branch.enabled"
def _read_config(self, *, config: ConfigManager):
super()._read_config(config=config)
# List of branches selected for processing
self._branch_selected_list = config.get("branch.selected_list")
self._init_branch_orders()
# Name of the target sheet in the generated Excel file
self._target_sheet_name = config.get("diff.branch_summary_group_by_branch.target_sheet_name")
# Sheet name
self._sheet_name = config.get("diff.branch_summary_group_by_branch.sheet_name")
# Header row index
self._header_row = config.get("diff.branch_summary_group_by_branch.header_row")
# Branch column name (column names in the Excel file must be unique)
index_column_names = config.get("diff.branch_summary_group_by_branch.index_column_names")
if index_column_names is not None and type(index_column_names) is list:
self._index_column_names.extend(index_column_names)
# List of column names to analyze for differences (column names in the Excel file must be unique)
diff_column_dict: dict = config.get("diff.branch_summary_group_by_branch.diff_column_list")
if diff_column_dict is not None and type(diff_column_dict) is dict:
self._diff_column_dict.update(diff_column_dict)
def _filter_origin_data(self, *, data):
column_name = self._index_column_names[0]
# Keep only the selected branches and drop the total row
data = data[data[column_name].isin(self._branch_selected_list)]
return data
def _init_branch_orders(self):
index = 1
for branch in self._branch_selected_list:
self._branch_order[branch] = index
index = index + 1
def _sort_branch_rows(self, branches):
return branches.map(self._branch_order)
def _after_calculated_difference(self, result: pd.DataFrame) -> pd.DataFrame:
result = super()._after_calculated_difference(result=result)
# Drop the total row first
result = result.drop("合计")
# Sort branch names in the specified order
result.sort_index(
axis=0,
inplace=True,
key=self._sort_branch_rows,
)
# Append the total row
result.loc["合计"] = result.apply(lambda x: x.sum())
return result
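# A hedged sketch of the configuration shape this analyzer reads in
# _read_config above. All values are hypothetical placeholders; how the dict
# is loaded into config42's ConfigManager is left to the surrounding project.
_EXAMPLE_CONFIG = {
    "branch": {"selected_list": ["Branch A", "Branch B"]},
    "diff": {
        "branch_summary_group_by_branch": {
            "enabled": True,
            "target_sheet_name": "branch_summary_diff",
            "sheet_name": "branch_summary",
            "header_row": 0,
            "index_column_names": ["branch_name"],
            "diff_column_list": {"balance": "balance_diff"},
        }
    },
}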
/sc_diff_analysis-0.0.28-py3-none-any.whl/sc_diff_analysis/analyzer/branch_summary_group_by_branch_diff_analyzer.py | 0.420719 | 0.164248 | branch_summary_group_by_branch_diff_analyzer.py | pypi
# Copyright (c) 2018, Cabral, Juan; Luczywo, Nadia; Zanazi Jose Luis
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""DRV method implementation
"""
__all__ = ["DRVProcess"]
# =============================================================================
# IMPORTS
# =============================================================================
import itertools as it
import numpy as np
from scipy import stats
import attr
import joblib
import jinja2
from skcriteria import norm, rank
from skcriteria.madm import simple
from . import normtests, plot
# =============================================================================
# CONSTANTS
# =============================================================================
NORMAL_TESTS = {"shapiro": normtests.shapiro,
"ks": normtests.kstest}
# =============================================================================
# STRUCTURATION FUNCTIONS
# =============================================================================
def nproduct_indexes(nproducts, climit):
"""Calculate the indexs of the products"""
sst = np.sum((nproducts - np.mean(nproducts)) ** 2)
ssw = np.sum((nproducts - np.mean(nproducts, axis=0)) ** 2)
ssb = sst - ssw
ssu = (nproducts.shape[0] - 1) / float(nproducts.shape[1] * 3)
ivr = ssw / ssu
inc = ivr <= climit
resume = np.mean(nproducts, axis=0)
return sst, ssw, ssb, ssu, ivr, inc, resume
def solve_nproducts(mtx):
"""Create the product (normalized) matrix"""
rmtx = np.flip(mtx, axis=1)
rcumprod = np.cumprod(rmtx, axis=1)
wproducts = np.flip(rcumprod, axis=1)
return norm.sum(wproducts, axis=1)
def subproblem(mtx, climit, ntest, ntest_kwargs, alpha_norm):
"""Create and evaluate the product (normalized) matrix"""
nproducts = solve_nproducts(mtx)
sst, ssw, ssb, ssu, ivr, inc, resume = nproduct_indexes(nproducts, climit)
n_sts, pvals = ntest(nproducts, axis=1, **ntest_kwargs)
n_reject_h0 = pvals <= alpha_norm
return {
"nproducts": nproducts,
"sst": sst,
"ssw": ssw,
"ssb": ssb,
"ssu": ssu,
"ivr": ivr,
"in_consensus": inc,
"ntest_sts": n_sts,
"ntest_pvals": pvals,
"ntest_reject_h0": n_reject_h0,
"resume": resume}
# =============================================================================
# AGGREGATION STAGE
# =============================================================================
def run_aggregator(idx, mtxs, criteria, weights, aggregator):
"""Helper to run the aggregator with joblib"""
mtx = np.vstack([m[idx] for m in mtxs]).T
weight = 1 if weights is None else weights[idx]
return aggregator.decide(mtx, criteria=criteria, weights=weight)
def rank_ttest_rel(agg_p, aidx, bidx):
"""Helper to run the t-test with joblib"""
a_vals = np.array([r.e_.points[aidx] for r in agg_p])
b_vals = np.array([r.e_.points[bidx] for r in agg_p])
return stats.ttest_rel(a_vals, b_vals)
def fdr_by(alpha, pvals, I):
"""False discovery rate of Benjamini-Yekutieli"""
L = I * (I - 1) / 2.
pvals_rank = rank.rankdata(pvals, reverse=True)
denom = 1 / np.arange(1, L + 1)
fdr = pvals_rank * alpha / denom
return fdr
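# A small worked example of the Benjamini-Yekutieli cut-offs computed by
# `fdr_by`, assuming I = 3 alternatives (hence L = 3 pairwise comparisons)
# and made-up p-values.
def _example_fdr_by():
    pvals = np.array([0.001, 0.020, 0.300])
    cutoffs = fdr_by(alpha=0.05, pvals=pvals, I=3)
    return pvals < cutoffs  # which pairwise differences pass the rank check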
# =============================================================================
# DRV as FUNCTION
# =============================================================================
def drv(
weights, abc, climit, ntest, ntest_kwargs,
alpha_norm, alpha_rank, njobs, agg_only_consensus
):
# PREPROCESS
# determine numbers of parallel jobs
njobs = joblib.cpu_count() if njobs is None else njobs
# determine the normal test
ntest = NORMAL_TESTS.get(ntest, ntest)
ntest_kwargs = {} if ntest_kwargs is None else ntest_kwargs
# number of participants & alternatives
N, I = np.shape(abc[0])
# number of criteria
J = len(abc)
# placeholder to store the results
results = {"N_": N, "I_": I, "J_": J}
# WEIGHTS
if np.ndim(weights) > 1:
wresults = subproblem(
mtx=weights,
climit=climit,
alpha_norm=alpha_norm,
ntest=ntest,
ntest_kwargs=ntest_kwargs)
else:
wresults = {}
# copy weights results to the global results
results.update({
"wmtx_": wresults.get("nproducts"),
"wsst_": wresults.get("sst"),
"wssw_": wresults.get("ssw"),
"wssb_": wresults.get("ssb"),
"wssu_": wresults.get("ssu"),
"wivr_": wresults.get("ivr"),
"wntest_sts_": wresults.get("ntest_sts"),
"wntest_pvals_": wresults.get("ntest_pvals"),
"wntest_reject_h0_": wresults.get("ntest_reject_h0"),
"win_consensus_": wresults.get("in_consensus"),
"weights_mean_": wresults.get("resume")})
# ALTERNATIVES
with joblib.Parallel(n_jobs=njobs) as jobs:
wresults = jobs(
joblib.delayed(subproblem)(
amtx,
climit=climit,
alpha_norm=alpha_norm,
ntest=ntest,
ntest_kwargs=ntest_kwargs)
for amtx in abc)
# copy alt results to the global results
results.update({
"amtx_criteria_": tuple([r["nproducts"] for r in wresults]),
"asst_": np.hstack([r["sst"] for r in wresults]),
"assw_": np.hstack([r["ssw"] for r in wresults]),
"assb_": np.hstack([r["ssb"] for r in wresults]),
"assu_": np.hstack([r["ssu"] for r in wresults]),
"aivr_": np.hstack([r["ivr"] for r in wresults]),
"ain_consensus_": np.hstack([r["in_consensus"] for r in wresults]),
"antest_sts_": np.vstack([r["ntest_sts"] for r in wresults]),
"antest_pvals_": np.vstack([r["ntest_pvals"] for r in wresults]),
"antest_reject_h0_": np.vstack([
r["ntest_reject_h0"] for r in wresults]),
"amtx_mean_": np.vstack([r["resume"] for r in wresults])})
# CONSENSUS
consensus = np.all(results["ain_consensus_"])
if consensus and results["weights_mean_"] is not None:
consensus = consensus and results["win_consensus_"]
results["consensus_"] = consensus # to global results
# GLOBAL REJECT H0
reject_h0 = np.any(results["antest_reject_h0_"])
if not reject_h0 and results["wntest_reject_h0_"] is not None:
reject_h0 = reject_h0 or np.any(results["wntest_reject_h0_"])
results["ntest_reject_h0_"] = reject_h0
# AGGREGATION
if consensus or not agg_only_consensus:
aggregator = simple.WeightedSum(mnorm="none", wnorm="none")
criteria = [max] * J
weights_mean = (
1 if results["weights_mean_"] is None else
results["weights_mean_"])
agg_m = aggregator.decide(
results["amtx_mean_"].T, criteria=criteria, weights=weights_mean)
with joblib.Parallel(n_jobs=1) as jobs:
agg_p = jobs(
joblib.delayed(run_aggregator)(
idx=idx,
mtxs=results["amtx_criteria_"],
criteria=criteria,
weights=results["wmtx_"],
aggregator=aggregator)
for idx in range(N))
agg_p = tuple(agg_p)
with joblib.Parallel(n_jobs=1) as jobs:
# rank verification
ttest_results = jobs(
joblib.delayed(rank_ttest_rel)(
agg_p=agg_p, aidx=aidx, bidx=bidx)
for aidx, bidx in it.combinations(range(I), 2))
ttest_size = len(ttest_results)
rank_t, rank_p = np.empty(ttest_size), np.empty(ttest_size)
for idx, r in enumerate(ttest_results):
rank_t[idx] = r.statistic
rank_p[idx] = r.pvalue
rank_fdr = fdr_by(alpha=alpha_rank, pvals=rank_p, I=I)
rank_results = rank_p < rank_fdr
rank_results_resume = np.all(rank_results)
else:
agg_p, agg_m = None, None
rank_t, rank_p, rank_fdr, rank_results = None, None, None, None
rank_results_resume = False
# to global results
results["aggregation_criteria_"] = agg_p
results["aggregation_mean_"] = agg_m
results["rank_check_t_"] = rank_t
results["rank_check_pval_"] = rank_p
results["rank_check_fdr_"] = rank_fdr
results["rank_check_results_"] = rank_results
results["rank_check_results_resume_"] = rank_results_resume
results["strict_preference_"] = (
consensus and not reject_h0 and rank_results_resume)
return results
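# An end-to-end usage sketch of `drv`, assuming five participants, four
# alternatives and three criteria; all preference values are randomly
# generated placeholders, so the result is only illustrative.
def _example_drv():
    rng = np.random.RandomState(0)
    N, I, J = 5, 4, 3  # participants, alternatives, criteria
    weights = 1.0 + rng.rand(N, J)  # per-participant criteria weights
    abc = [1.0 + rng.rand(N, I) for _ in range(J)]  # one matrix per criterion
    return drv(
        weights=weights, abc=abc, climit=0.25, ntest="shapiro",
        ntest_kwargs=None, alpha_norm=0.01, alpha_rank=0.05,
        njobs=1, agg_only_consensus=False,
    )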
# =============================================================================
# RESULT CLASS
# =============================================================================
@attr.s(frozen=True)
class DRVResult(object):
"""Result set of the DRV method.
Parameters
----------
ntest : str
Normality test. Test to check whether the priorities established by group
members behave randomly, as represented by a Normal
distribution.
ntest_kwargs : dict or None
Parameters for the normal test function.
alpha_norm : float
significance level. If any p-value of the normality test is less than ``alpha_norm``,
we reject the null hypothesis of the normality tests.
alpha_rank : float
significance level. If any ``rank_check_pval_`` value is below its
Benjamini-Yekutieli FDR cut-off (computed with ``alpha_rank``), we reject the null
hypothesis (that two alternatives are not different enough).
climit : float
Consensus limit. Maximum value of the IVR to assume that the solution
is stable.
The Stability is verified using the normality analysis of priorities
for each element of a sub-problem, or by using the IVR
(Índice de Variabilidad Remanente, Remaining Variability Index)
``IVR <= climit`` are indicative of stability.
N_ : int
Number of participants
I_ : int
Number of alternatives
J_ : int
Number of criteria.
strict_preference_ : bool
True if consensus_ and rank_check_results_resume_ are True and also
ntest_reject_h0_ is False.
consensus_ : bool
True if all the sub-problems are in consensus, in other words if
every sub-problem has their IVR <= climit.
ntest_reject_h0_ : bool
True if any sub-problem rejects the H0 of one of its normality tests.
rank_check_results_resume_ : bool
True only if all values of rank_check_results_ are True. If
rank_check_results_ is None then rank_check_results_resume_ is False.
weights_mean_ : array or None
If the weight preference is provided, this attribute contains
an array where every j-nth element is the mean of the weights assigned by
the participants to the j-nth criteria.
wmtx_ : array or None
If the weight preference is provided, this attribute contains
a 2D array where every row is a weight assigned by a single
participant.
wsst_ : float or None
Weights sub-problem Square Sum Total.
If the weight preference is provided, this attribute contains
the total sum of squares of the weight sub-problem. This value
is calculated as
``sum((wmtx_ - mean(wmtx_))**2))``.
wssw_ : float or None
Weights sub-problem Square-Sum Within.
If the weight preference is provided, this attribute contains
the sum of squares within criteria of the weight sub-problem, and
represents the residual variability after a stage of analysis.
This value is calculated as
``sum((wmtx_ - mean_by_row(wmtx_))**2))``
wssb_ : float or None
Weights sub-problem Square-Sum Between.
If the weight preference is provided, this attribute contains
the sum of squares between criteria of the weight sub-problem,
This value is calculated as ``wsst_ - wssw_``.
wssu_ : float or None
Weights sub-problem Square-Sum of Uniform distribution.
Corresponds to the uniform distribution and reflects a situation of
complete disagreement within the group.
wivr_ : float or None
Weights sub-problem Índice de Variabilidad Remanente
(Translation: Remaining Variability Index).
If the weight preference is provided, this attribute contains
a ratio of agreement calculated as ``wssw_ / wssu_``.
win_consensus_ : bool or None
Weights sub-problem In Consensus.
If the weight preference is provided, this attribute indicates whether
the weights sub-problem is in consensus, in other words whether
the weight sub-problem has ``wivr_ <= climit``.
wntest_sts_ : ndarray or None
Weights Normal Test Statistics.
If the weight preference is provided, this attribute contains an array
with the normality test statistic by criteria.
wntest_pvals_ : array or None
Weights Normal Test P-value.
If the weight preference is provided, this attribute contains an array
with the normality test p-value by criteria. These
values are useful if you have a small number of criteria, to reinforce
the normality assumption.
wntest_reject_h0_ : array or None
If the weight preference is provided, this attribute contains an array
where the j-nth element is True if the normality test fails for
the values of the j-nth criteria.
amtx_criteria_ : tuple of arrays
Alternatives matrix by criteria.
A tuple where the j-nth element is a 2D array of preference of
the ``I_`` alternatives by the criteria j.
asst_ : array
Alternatives by criteria sub-problems Square-Sum Total.
Array where the j-nth element is the total sum of squares of the
evaluation of the alternatives by the criteria j.
Every element on this array is calculated as
``sum((amtx_criteria_[j] - mean(amtx_criteria_[j]))**2))``.
assw_ : array
Alternatives by criteria sub-problems Square-Sum Within.
Array where the j-nth element is the total sum of squares within
of the evaluation of the alternatives by the criteria j, and
represents the residual variability after a stage of analysis.
Every element on this array is calculated as
``sum((amtx_criteria_[j] - mean_by_row(amtx_criteria_[j]))**2))``
assb_ : array
Alternatives by criteria sub-problems Square-Sum Between.
Array where the j-nth element is the total sum of squares between
of the evaluation of the alternatives by the criteria j.
Every element on this array is calculated as ``asst_ - assw_``.
assu_ : array
Alternatives by criteria sub-problems Square-Sum of Uniform
distribution. Corresponds to the uniform distribution and reflects a
situation of complete disagreement within the group.
aivr_ : array
Alternatives by criteria sub-problems Índice de Variabilidad Remanente
(Translation: Remaining Variability Index).
Array where the j-nth element a ratio of agreement of the alternatives
by the criteria j. Is calculated as follows: ``assw_ / assu_``.
ain_consensus_ : array
Alternatives by criteria sub-problems In Consensus.
Array where the j-nth element is True if the alternatives
by the criteria j are in consensus. In other words if
``aivr_[j] <= climit``.
amtx_mean_ : 2D array
Alternative matrix.
Created as a mean of all alternatives matrix by criteria.
antest_sts_ : 2D array
Alternatives by criteria sub-problems Normal Test Statistics.
Array where the A_ij element contains the statistic of the normality
test for the alternative i under the criteria j.
antest_pvals_ : 2D array
Alternatives by criteria sub-problems Normal Test P-Values.
Array where the A_ij element contains the p-value of the normality
test for the alternative i under the criteria j.
antest_reject_h0_ : 2D array
Alternatives by criteria sub-problems status of the null hypothesis.
Array where the A_ij element is True if the null hypothesis of the
normality test for the alternative i under the criteria j must be
rejected.
aggregation_criteria_ : tuple
Tuple where the j-nth element is the decision using the data of the
alternatives by the criteria j. (The matrix is ``amtx_criteria_[j]``,
the weight is ``wmtx_[j]``, with maximize criteria.)
aggregation_mean_ : skcriteria.madm.Decision
Decision using the data of the
alternatives by the means. (The matrix is ``amtx_mean_``,
the weight is ``weights_mean_``, with maximize criteria.).
This is the real **result**, but it is important to check:
- ``consensus_`` must be, True.
- ``ntest_reject_h0_`` must be False.
- ``rank_check_results_resume_`` must be True.
rank_check_t_ : array or None
T-Test statistic of independence of the the points of the aggregation
function. In other words if some alternative A is different enough
to an alternative B.
rank_check_pval_ : array or None
T-Test P-Value statistic of independence of the the points of the
aggregation function.
rank_check_fdr_ : array or None
Value Benjamini-Yekutieli FDR using the ``alpha_rank`` value.
rank_check_results_ : array or None
``rank_check_pval_ < rank_check_fdr_``. True if the rank is affected
by the sampling.
"""
ntest = attr.ib()
ntest_kwargs = attr.ib()
alpha_norm = attr.ib()
alpha_rank = attr.ib()
climit = attr.ib()
N_ = attr.ib()
I_ = attr.ib()
J_ = attr.ib()
strict_preference_ = attr.ib()
consensus_ = attr.ib(repr=False)
ntest_reject_h0_ = attr.ib(repr=False)
rank_check_results_resume_ = attr.ib(repr=False)
wmtx_ = attr.ib(repr=False)
wsst_ = attr.ib(repr=False)
wssw_ = attr.ib(repr=False)
wssb_ = attr.ib(repr=False)
wssu_ = attr.ib(repr=False)
wivr_ = attr.ib(repr=False)
win_consensus_ = attr.ib(repr=False)
weights_mean_ = attr.ib(repr=False)
wntest_sts_ = attr.ib(repr=False)
wntest_pvals_ = attr.ib(repr=False)
wntest_reject_h0_ = attr.ib(repr=False)
amtx_criteria_ = attr.ib(repr=False)
asst_ = attr.ib(repr=False)
assw_ = attr.ib(repr=False)
assb_ = attr.ib(repr=False)
assu_ = attr.ib(repr=False)
aivr_ = attr.ib(repr=False)
ain_consensus_ = attr.ib(repr=False)
amtx_mean_ = attr.ib(repr=False)
antest_sts_ = attr.ib(repr=False)
antest_pvals_ = attr.ib(repr=False)
antest_reject_h0_ = attr.ib(repr=False)
aggregation_criteria_ = attr.ib(repr=False)
aggregation_mean_ = attr.ib(repr=False)
rank_check_t_ = attr.ib(repr=False)
rank_check_pval_ = attr.ib(repr=False)
rank_check_fdr_ = attr.ib(repr=False)
rank_check_results_ = attr.ib(repr=False)
plot = attr.ib(repr=False, init=False)
_template = jinja2.Template("""
<div class="drv-result" id="drv_result_{{ id }}">
<h5>DRV Results Resume</h5>
<style>
.red, .red code {color: #A52A2A}
.green, .green code {color: #008000}
</style>
<table>
<thead>
<th>Attribute</th>
<th>Value</th>
<tbody>
{% for n, v, cls in rows %}
<tr class={{ cls }}><th>{{ n|safe }}</th><td>{{ v }}</td><tr>
{% endfor %}
<tbody>
</table>
</div>
""")
def __attrs_post_init__(self):
object.__setattr__(self, "plot", plot.PlotProxy(self))
def _repr_html_(self):
if not hasattr(self, "_repr_html"):
cls = {True: "green", False: "red"}
rows = [
("Normal Test (<code>ntest<code>)", self.ntest, ""),
(
"Alpha for Normal Test (<code>alpha_norm</code>)",
self.alpha_norm, ""),
(
"Alpha for T-Test (<code>alpha_rank</code>)",
self.alpha_rank, ""),
("Consensus Limit (<code>climit</code>)", self.climit, ""),
("Number of Participants (<code>N_</code>)", self.N_, ""),
("Number of Alternatives (<code>I_</code>)", self.I_, ""),
("Number of Criteria (<code>J_</code>)", self.J_, ""),
(
"Strict Preference (<code>strict_preference_</code>)",
self.strict_preference_, cls[self.strict_preference_]),
(
"Consensus (<code>consensus</code>)",
self.consensus_, cls[self.consensus_]),
(
"N-Test Reject H0 (<code>ntest_reject_h0_</code>)",
self.ntest_reject_h0_, cls[not self.ntest_reject_h0_]),
(
"Rank Check (<code>rank_check_results_resume_</code>)",
self.rank_check_results_resume_,
cls[self.rank_check_results_resume_])
]
repr_html = self._template.render(rows=rows, id=id(self))
object.__setattr__(self, "_repr_html", repr_html)
return self._repr_html
@property
def has_weights_(self):
"""True if the weight preference if provided so all the
weight sub-problem attributes are available; otherwise the attributes
are setted to None
"""
return self.weights_mean_ is not None
@property
def data_(self):
"""Data object used in the aggregation mean or None"""
if self.aggregation_mean_ is not None:
return self.aggregation_mean_.data
# =============================================================================
# API
# =============================================================================
@attr.s(frozen=True)
class DRVProcess(object):
"""DRV Processes (Decision with Reduction of Variability).
DRV processes have been developed to support Group Decision
Making. They are applicable to the cases in which
all members of the group operate in the same organization and, therefore,
they must share organizational values, knowledge and preferences.
The method assumes that it is necessary to generate agreement on the
preferences of the group members [1]_.
Parameters
----------
climit : float, optional (default=.25)
Consensus limit. Maximum value of the IVR to assume that the solution
is stable.
Stability is verified through a normality analysis of the priorities
of each element of a sub-problem, or by using the IVR
(Índice de Variabilidad Remanente, Remaining Variability Index);
values with ``IVR <= climit`` are indicative of stability.
ntest : 'shapiro' or 'ks' (default='shapiro')
Normality test. Test to check whether the priorities established by the
group members behave randomly, following a Normal distribution.
The value must be 'shapiro' for the Shapiro-Wilk test
[2]_ or 'ks' for the Kolmogorov-Smirnov test for goodness of fit [3]_.
ntest_kwargs : dict or None, optional (default=None)
Parameters to the normal test function.
alpha_norm : float, optional (default=0.01)
Significance level. If any p-value of the normality test is less than
``alpha_norm``, we reject the null hypothesis.
alpha_rank : float, optional (default=0.05)
Significance level. If any Benjamini-Yekutieli FDR value of the
``rank_check_pval_`` is less than ``alpha_rank``, we reject the null
hypothesis (two alternatives are not different enough).
njobs : int, optional (default=-1)
The number of jobs to run in parallel.
If -1, then the number of jobs is set to the number of cores.
For more information check
``joblib <https://pythonhosted.org/joblib/>``_ documentation.
agg_only_consensus : bool, optional (default=True)
Calculate the aggregation only when a consensus is achieved.
References
----------
.. [1] Zanazzi, J. L., Gomes, L. F. A. M., & Dimitroff, M. (2014).
Group decision making applied to preventive maintenance systems.
Pesquisa Operacional, 34(1), 91-105.
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Daniel, Wayne W. (1990). "Kolmogorov–Smirnov one-sample test".
Applied Nonparametric Statistics (2nd ed.). Boston: PWS-Kent.
pp. 319–330. ISBN 0-534-91976-6.
"""
climit: float = attr.ib(default=.25)
ntest: str = attr.ib(default="shapiro")
ntest_kwargs: dict = attr.ib(default=None)
alpha_norm: float = attr.ib(default=0.01)
alpha_rank: float = attr.ib(default=0.05)
njobs: int = attr.ib(default=-1)
agg_only_consensus: bool = attr.ib(default=True)
@climit.validator
def climit_check(self, attribute, value):
if not isinstance(value, float):
raise ValueError("'climit' value must be an instance of float")
elif value < 0 or value > 1:
raise ValueError("'climit' has to be >= 0 and <= 1")
@alpha_norm.validator
def alpha_norm_check(self, attribute, value):
if not isinstance(value, float):
raise ValueError("'alpha_norm' value must be an instance of float")
elif value < 0 or value > 1:
raise ValueError("'alpha_norm' has to be >= 0 and <= 1")
@alpha_rank.validator
def alpha_rank_check(self, attribute, value):
if not isinstance(value, float):
raise ValueError("'alpha_rank' value must be an instance of float")
elif value < 0 or value > 1:
raise ValueError("'alpha_rank' has to be >= 0 and <= 1")
@njobs.validator
def njobs_check(self, attribute, value):
if not isinstance(value, int):
raise ValueError("'njobs' must be an integer")
@ntest.validator
def ntest_check(self, attribute, value):
if value not in NORMAL_TESTS and not callable(value):
ntests = tuple(NORMAL_TESTS)
raise ValueError(f"'ntests' must be a callable or str in {ntests}")
@ntest_kwargs.validator
def ntest_kwargs_check(self, attribute, value):
if value is not None and not isinstance(value, dict):
raise ValueError("'ntest_kwargs' must be a dict or None")
def decide(self, abc: list, weights: np.ndarray = None) -> DRVResult:
"""Execute the DRV Processes.
Parameters
----------
abc : list of 2D array-like
Alternatives-by-criteria list. Every element of the list
is a 2D array where the element $A_{ij}$ of the matrix $k$
represents the valuation given by participant $i$ to
alternative $j$ under criterion $k$.
weights : 2D array-like or None (default=None)
Weight valuation matrix, where the element $W_{ik}$ represents
the valuation given by participant $i$ to the weight of
criterion $k$. If None, all criteria have the same weight.
Returns
-------
result : DRVResult
Resume of the entire DRV process. If the problem does not achieve a
consensus (``result.consensus == False``) the aggregation phase
is not executed.
"""
# run the drv process
drv_result = drv(
weights,
abc,
ntest=self.ntest,
ntest_kwargs=self.ntest_kwargs,
climit=self.climit,
njobs=self.njobs,
alpha_norm=self.alpha_norm,
alpha_rank=self.alpha_rank,
agg_only_consensus=self.agg_only_consensus)
return DRVResult(
climit=self.climit,
ntest=self.ntest,
alpha_norm=self.alpha_norm,
alpha_rank=self.alpha_rank,
ntest_kwargs=self.ntest_kwargs,
**drv_result)
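# Usage sketch (not part of the original module): the matrices below are
# made-up example data for three participants, two alternatives and two
# criteria; with so few participants the statistical checks are only
# illustrative.
#
#   >>> import numpy as np
#   >>> from sc_drv.method import DRVProcess
#   >>> abc = [np.array([[1., 2.], [2., 1.], [1., 1.]]),   # criterion 0
#   ...        np.array([[2., 1.], [1., 2.], [2., 2.]])]   # criterion 1
#   >>> weights = np.array([[1., 2.], [2., 1.], [1., 1.]])
#   >>> dec = DRVProcess(climit=0.25, njobs=1)
#   >>> result = dec.decide(abc, weights=weights)
#   >>> result.consensus_, result.strict_preference_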
# =============================================================================
# MAIN
# =============================================================================
if __name__ == "__main__":
print(__doc__)
/sc-drv-0.2.1.tar.gz/sc-drv-0.2.1/sc_drv/method.py
# flake8: noqa
# =============================================================================
# DOCS
# =============================================================================
"""From Matplotlib documentation
- URL: https://matplotlib.org/gallery/images_contours_and_fields/image_annotated_heatmap.html
- License: https://matplotlib.org/users/license.html
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# =============================================================================
# FUNCTIONS
# =============================================================================
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
textcolors=["black", "white"],
threshold=None, **textkw):
"""
A function to annotate a heatmap.
Arguments:
im : The AxesImage to be labeled.
Optional arguments:
data : Data used to annotate. If None, the image's data is used.
valfmt : The format of the annotations inside the heatmap.
This should either use the string format method, e.g.
"$ {x:.2f}", or be a :class:`matplotlib.ticker.Formatter`.
textcolors : A list or array of two color specifications. The first is
used for values below a threshold, the second for those
above.
threshold : Value in data units according to which the colors from
textcolors are applied. If None (the default) uses the
middle of the colormap as separation.
Further arguments are passed on to the created text labels.
"""
if len(textcolors) != 2:
raise ValueError("'textcolors' must contain exactly 2 colors")
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max())/2.
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center")
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
text_color = textcolors[int(im.norm(data[i, j]) > threshold)]
kw.update(color=text_color)
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
return texts
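# Usage sketch (not part of the original module): shows how the two helpers
# compose; the data, labels and colormap below are made-up example values.
#
#   >>> data = np.random.rand(4, 3)
#   >>> fig, ax = plt.subplots()
#   >>> im, cbar = heatmap(data, ["r0", "r1", "r2", "r3"], ["c0", "c1", "c2"],
#   ...                    ax=ax, cmap="YlGn", cbarlabel="value")
#   >>> texts = annotate_heatmap(im, valfmt="{x:.2f}")
#   >>> fig.tight_layout()
#   >>> plt.show()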
/sc-drv-0.2.1.tar.gz/sc-drv-0.2.1/sc_drv/libs/heatmap.py
# Battle Cats Save File Editor
[](https://ko-fi.com/M4M53M4MN)
A python save editor for the mobile game The Battle Cats
Join the [discord server](https://discord.gg/DvmMgvn5ZB) if you want to suggest new features, report bugs or get help on how to use the editor (please read the below tutorials / watch the latest [tutorial video](https://www.youtube.com/watch?v=Kr6VaLTXOSY) first before asking for help).
## Thanks to
Lethal's editor for giving me inspiration to start the project and it helped me work out how to patch the save data and edit cf/xp: <https://www.reddit.com/r/BattleCatsCheats/comments/djehhn/editoren/>
Beeven and csehydrogen's open source code, which helped me figure out how to patch save data: [beeven/battlecats](https://github.com/beeven/battlecats), [csehydrogen/BattleCatsHacker](https://github.com/csehydrogen/BattleCatsHacker)
Everyone who's given me saves, which helped to test save parsing/serialising and to test/develop new features
## How to use
If you have a pc: watch a [Tutorial video](https://www.youtube.com/watch?v=Kr6VaLTXOSY), or scroll down for a text tutorial
If you only have an android device: read the [Android text tutorial](https://github.com/fieryhenry/BCSFE-Python#android-tutorial)
If you only have an ios device: watch the [IOS tutorial video](https://www.youtube.com/watch?v=xw-uOqQRYJ8) (Made by Viarules)
## Main tutorial
You no longer need a rooted device or a rooted android emulator.
Although if you want to get unbanned / fix the elsewhere error you will still need one. I recommend LDPlayer, Nox, or MEmu if needed. Bluestacks is also an option but is more difficult to root as it doesn't have a built-in root option.
---
1. Install python (You'll need version 3.9 and up) <https://www.python.org/downloads/>
2. Enter the command: `py -m pip install -U battle-cats-save-editor` into command prompt or another terminal to install the editor (**NOT the Windows Python app**). If that doesn't work then use `python3` or `python` instead of `py` in the command
3. Enter the command: `py -m BCSFE_Python` to run the editor. If that doesn't work then use `python3` or `python` instead of `py` in the command
4. Look below for the tutorial that you need, or watch [here](https://www.youtube.com/watch?v=Kr6VaLTXOSY) for a video
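For reference, steps 2 and 3 boil down to these two commands (assuming the `py` launcher is available; substitute `python` or `python3` if it isn't):

```batch
py -m pip install -U battle-cats-save-editor
py -m BCSFE_Python
```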
#### Using Transfer Codes
If you don't have a rooted device or an emulator setup then do this:
5. Go into the game and look in the top right of the screen and record / remember the game version
6. Go into the in-game transfer system in `Settings-> Data Transfer` and click `Begin Data Transfer`
7. In the editor use the option called `Download save data from the game using transfer and confirmation codes` (enter the corresponding number, not the name itself)
8. Enter the game version that you are using, `en`=english, `kr`=korean, `ja`=japanese, `tw`=taiwan.
9. Enter your transfer code
10. Enter your confirmation code
11. Enter the game version that you recorded earlier in step 5. If you entered everything correctly it should work and you should be able to select a place to put the save
12. If you get a parsing error please join the [discord server](https://discord.gg/DvmMgvn5ZB) and report it in #bug-reports and / or dm me your save file (preferably <b>not</b> transfer codes)
13. Edit what you want
14. Go into the `Save Management` option and select `Save changes and upload to game servers (get transfer and confirmation codes)`. It may take some time
15. Enter those codes into the game's transfer system (click on `Resume Data Transfer`) (You may need to `Cancel Data Transfer` in-game before doing so)
16. If you press play you may get a `The current Save Data is in violation` message, if so press ok and try again and it should go away, if it doesn't look at the tutorial below
#### Using a rooted device
If you can't upload your save data using the in-game system because you are banned or the `This save data is currently active elsewhere` message appears, you will need direct access to the save data:
If you don't have a rooted device:
5. You will need to get one of the emulators listed earlier, I recommend LD Player because I know that it works with this method. If you change the default install location, make sure to keep a note of it for later
1. Enable `root permission` in the settings and under `ADB Debugging` select `Open local connection`. You will need to restart LD Player for the changes to work
2. Open the editor and select the option named `Use adb to pull the save from a rooted device` and enter your game version
6. If you get the option to add adb to your path, enter `y`.
7. The editor will look for adb in default install directories of common emulators and add it automatically
8. If it fails, then you will need to either
1. Enter the path to your emulator's install directory, it might look like `C:\LDPlayer\LDPlayer4.0`
2. Download adb from [here](https://dl.google.com/android/repository/platform-tools-latest-windows.zip). Extract the zip and copy the folder path (not adb.exe itself) into the editor
9. Now rerun the editor and try the option again. If it still doesn't work you'll need to do it manually, using the tutorial below.
10. If you get a parsing issue please join the [discord server](https://discord.gg/DvmMgvn5ZB) and report it in #bug-reports and / or dm me your save file (preferably not transfer codes)
11. Edit what you want
12. Go into save management and select an option to push save data to the game
13. Enter the game and you should see changes
### Put adb in path
To use the options in the editor to get and push your save data to the game, you will need to have adb in your path system environment variable. The editor will try to do this automatically, but it may not work. So do this if it doesn't (If you're not using windows look up how to do this):
1. If you are using an emulator: Go to your emulator's install directory, if you're
using LDPlayer it will most likely be in `C:/LDPlayer/LDPlayer4.0`.
Then find `adb` in that folder (other emulators might have it in the `bin` directory)
2. If you aren't using an emulator [Download the Android SDK Platform Tools ZIP file for Windows](https://dl.google.com/android/repository/platform-tools-latest-windows.zip), and unzip it.
3. Copy the path to the folder that you are in (not adb.exe itself)
4. Then open the windows start menu and search: `edit the system environment variables` and press enter.
5. Then click on the `Environment Variables` button.
6. Then in the `System variables` box find the variable named `Path`, then
click on the `edit` button.
7. Then click `New` and paste the path into it.
8. Click `Ok` then `Ok` again then `Ok` again.
9. Relaunch powershell and maybe restart your whole pc, and try the command
again.
If this method is too difficult, just use a root file explorer instead
and manually get the files that you want. The path that you will need is: `/data/data/jp.co.ponos.battlecatsen/files/SAVE_DATA`
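If you go the manual route with adb instead, a typical pull and push looks like this (an illustrative example, not an editor feature; the package id below is the EN version from the path above, and depending on your emulator you may need to run `adb root` first):

```batch
adb pull /data/data/jp.co.ponos.battlecatsen/files/SAVE_DATA SAVE_DATA
adb push SAVE_DATA /data/data/jp.co.ponos.battlecatsen/files/SAVE_DATA
```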
### How to fix "This save data is currently active elsewhere" or "The current Save Data is in violation"
1. You will need to get access to save data so you will need a rooted device / emulator, so look at the first part of the `Using a rooted device` tutorial.
2. Select the option in `Inquiry Code / Token` to `Fix elsewhere error / Unban account`
3. It may take some time, but afterwards you should be able to choose one of the options in save management to push the save data to the game.
4. If you press play you may get a `The current Save Data is in violation` message, if so press ok and try again and it should go away, if it doesn't then either you've done something wrong or the process didn't work. You may need to follow the tutorial in the second part of the old help video [here](https://www.youtube.com/watch?v=xBnGR1A3A-U) (3:40) and use the `Old Fix elsewhere error / Unban account (needs 2 save files)` feature instead
### How to unban an account
You can get banned for editing in any amount of cat food, rare tickets, platinum tickets or legend tickets.
The way you fix it is the same method as the elsewhere fix, so just follow that.
#### How to prevent a ban in the future
- Instead of editing in platinum tickets use the `Platinum Shards` feature
- Instead of editing in rare tickets use the `Normal Ticket Max Trade Progress (allows for unbannable rare tickets)` feature
- Instead of hacking in cat food, just edit everything in that you can buy with cat food, e.g battle items, catamins, xp, energy refills (leaderships), etc. If you really want catfood then you can clear and unclear catnip missions with the feature `Catnip Challenges / Missions` then entering 1 when asked. You'll need to collect the catfood in-game after each clear though
- Instead of hacking in tickets, just hack in the cats/upgrades you want directly
## Android Tutorial
If you don't have a pc to install and run the editor you can use Termux.
1. Download [F-Droid](https://f-droid.org/F-Droid.apk) - You can download the Termux apk directly but then it won't automatically update
2. Install F-Droid
3. Open it and wait for it to finish `Updating repositories`
4. Tap the green search button in the bottom right and search for `Termux`
5. Tap `Termux Terminal emulator with packages`
6. Tap `INSTALL` and then `OPEN` once installed
7. Once opened enter the command `pkg install python`
8. If that doesn't work then read this: <https://stackoverflow.com/a/71097459>
9. Then run `python -m pip install -U battle-cats-save-editor`
10. If that doesn't work then run `pkg upgrade` and try again
11. Then run `python -m BCSFE_Python`
12. You can then use the editor like normal (If asked to enter the path to a save file, then just enter `SAVE_DATA`)
### Install from source
If you want the latest features and don't mind bugs then you can install the editor from GitHub.
1. Download [Git](https://git-scm.com/downloads)
2. Run the following commands: (You may have to replace `py` with `python` or `python3`)
```batch
git clone https://github.com/fieryhenry/BCSFE-Python.git
py -m pip install -e BCSFE-Python/
py -m BCSFE_Python
```
If you want to use the editor again all you need to do is run the `py -m BCSFE_Python` command
Then if you want the latest changes you only need to run `git pull` in the downloaded `BCSFE-Python` folder. (use `cd` to change the folder)
/sc-editor-1.93.tar.gz/sc-editor-1.93/README.md
import struct
from typing import Any, Union
import dateutil.parser
from . import helper, parse_save
def write(
save_data: list[int],
number: Union[dict[str, int], int],
length: Union[int, None] = None,
) -> list[int]:
"""Writes a little endian number to the save data"""
if length is None and isinstance(number, dict):
length = number["Length"]
if isinstance(number, dict):
number = number["Value"]
if length is None:
raise ValueError("Length is None")
number = int(number)
data = list(helper.num_to_bytes(number, length))
save_data += data
return save_data
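# Example (added for clarity, not in the original source): assuming
# helper.num_to_bytes produces little endian bytes as documented above,
#   write([], 1000, 4)                      -> [0xE8, 0x03, 0x00, 0x00]
#   write([], {"Value": 1000, "Length": 4}) -> [0xE8, 0x03, 0x00, 0x00]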
def create_list_separated(data: list[int], length: int) -> list[int]:
"""Creates a list of bytes from a list of numbers"""
lst: list[int] = []
for item in data:
byte_data = list(helper.num_to_bytes(item, length))
lst += byte_data
return lst
def create_list_double(data: list[float]) -> list[int]:
"""Creates a list of bytes from a list of doubles"""
lst: list[int] = []
for item in data:
byte_data = list(struct.pack("d", item))
lst += byte_data
return lst
def write_length_data(
save_data: list[int],
data: Union[list[int], dict[str, list[int]]],
length_bytes: int = 4,
bytes_per_val: int = 4,
write_length: bool = True,
length: Union[int, None] = None,
) -> list[int]:
"""Writes a list of ints to the save data"""
if write_length is False and length is None:
length = len(data)
if isinstance(data, dict):
data = data["Value"]
if write_length:
if length is None:
length = len(data)
length_data = list(helper.num_to_bytes(length, length_bytes))
save_data += length_data
save_data += create_list_separated(data, bytes_per_val)
return save_data
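# Example (added for clarity, not in the original source): with the defaults a
# 4 byte little endian count is written first, then each value on 4 bytes:
#   write_length_data([], [1, 2]) -> [2, 0, 0, 0,  1, 0, 0, 0,  2, 0, 0, 0]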
def write_length_doubles(
save_data: list[int],
data: Union[list[float], dict[str, list[float]]],
length_bytes: int = 4,
write_length: bool = True,
length: Union[None, int] = None,
) -> list[int]:
"""Writes a list of doubles to the save data"""
if write_length is False and length is None:
length = len(data)
if isinstance(data, dict):
data = data["Value"]
if write_length:
if length is None:
length = len(data)
length_data = list(helper.num_to_bytes(length, length_bytes))
save_data += length_data
save_data += create_list_double(data)
return save_data
def serialise_time_data_skip(
save_data: list[int],
time_data: str,
time_stamp: float,
dst_flag: bool,
duplicate: dict[str, Any],
dst: int = 0,
) -> list[int]:
time = dateutil.parser.parse(time_data)
save_data = write(save_data, time.year, 4)
save_data = write(save_data, duplicate["yy"], 4)
save_data = write(save_data, time.month, 4)
save_data = write(save_data, duplicate["mm"], 4)
save_data = write(save_data, time.day, 4)
save_data = write(save_data, duplicate["dd"], 4)
save_data = write_double(save_data, time_stamp)
save_data = write(save_data, time.hour, 4)
save_data = write(save_data, time.minute, 4)
save_data = write(save_data, time.second, 4)
if dst_flag:
save_data = write(save_data, dst, 1)
return save_data
def serialise_time_data(
save_data: list[int], time: str, dst_flag: bool, dst: int = 0
) -> list[int]:
time_d = dateutil.parser.parse(time)
if dst_flag:
save_data = write(save_data, dst, 1)
save_data = write(save_data, time_d.year, 4)
save_data = write(save_data, time_d.month, 4)
save_data = write(save_data, time_d.day, 4)
save_data = write(save_data, time_d.hour, 4)
save_data = write(save_data, time_d.minute, 4)
save_data = write(save_data, time_d.second, 4)
return save_data
def serialise_equip_slots(
save_data: list[int], equip_slots: list[list[int]]
) -> list[int]:
save_data = write(save_data, len(equip_slots), 1)
for slot in equip_slots:
save_data = write_length_data(save_data, slot, 0, 4, False)
return save_data
def serialise_main_story(
save_data: list[int], story_chapters: dict[str, list[Any]]
) -> list[int]:
save_data = write_length_data(
save_data, story_chapters["Chapter Progress"], write_length=False
)
for chapter in story_chapters["Times Cleared"]:
save_data = write_length_data(save_data, chapter, write_length=False)
return save_data
def serialise_treasures(save_data: list[int], treasures: list[list[int]]) -> list[int]:
for chapter in treasures:
save_data = write_length_data(save_data, chapter, write_length=False)
return save_data
def serialise_cat_upgrades(
save_data: list[int], cat_upgrades: dict[str, list[int]]
) -> list[int]:
data: list[int] = []
length = len(cat_upgrades["Base"])
for cat_id in range(length):
data.append(cat_upgrades["Plus"][cat_id])
data.append(cat_upgrades["Base"][cat_id])
write_length_data(save_data, data, 4, 2, True, length)
return save_data
def serialise_blue_upgrades(
save_data: list[int], blue_upgrades: dict[str, list[int]]
) -> list[int]:
data: list[int] = []
length = len(blue_upgrades["Base"])
for blue_id in range(length):
data.append(blue_upgrades["Plus"][blue_id])
data.append(blue_upgrades["Base"][blue_id])
write_length_data(save_data, data, 4, 2, False)
return save_data
def serialise_utf8_string(
save_data: list[int],
string: Union[dict[str, str], str],
length_bytes: int = 4,
write_length: bool = True,
length: Union[int, None] = None,
) -> list[int]:
"""Writes a string to the save data"""
if isinstance(string, dict):
string = string["Value"]
data = list(string.encode("utf-8"))
save_data = write_length_data(
save_data, data, length_bytes, 1, write_length, length
)
return save_data
def serialise_event_stages_current(
save_data: list[int], event_current: dict[str, Any]
) -> list[int]:
unknown_val = event_current["unknown"]
total_sub_chapters = event_current["total"] // unknown_val
stars_per_sub_chapter = event_current["stars"]
stages_per_sub_chapter = event_current["stages"]
save_data = write(save_data, unknown_val, 1)
save_data = write(save_data, total_sub_chapters, 2)
save_data = write(save_data, stars_per_sub_chapter, 1)
save_data = write(save_data, stages_per_sub_chapter, 1)
for i in range(len(event_current["Clear"])):
save_data = write_length_data(save_data, event_current["Clear"][i], 1, 1, False)
return save_data
def flatten_list(_2d_list: Union[list[list[Any]], list[Any]]) -> list[Any]:
flat_list: list[Any] = []
# Iterate through the outer list
for element in _2d_list:
if isinstance(element, list):
# If the element is of type list, iterate through the sublist
for item in element:
flat_list.append(item)
else:
flat_list.append(element)
return flat_list
def serialise_event_stages(
save_data: list[int], event_stages: dict[str, Any]
) -> list[int]:
lengths = event_stages["Lengths"]
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
for chapter in event_stages["Value"]["clear_progress"]:
save_data = write_length_data(save_data, chapter, 1, 1, False)
clear_amount = [0] * total * stars * stages
clear_amount_data = event_stages["Value"]["clear_amount"]
for i in range(total):
for j in range(stages):
for k in range(stars):
clear_amount[i * stages * stars + j * stars + k] = clear_amount_data[i][
k
][j]
save_data = write_length_data(save_data, clear_amount, 4, 2, False)
for chapter in event_stages["Value"]["unlock_next"]:
save_data = write_length_data(save_data, chapter, 1, 1, False)
return save_data
def serialse_purchase_receipts(save_data: list[int], data: dict[Any, Any]) -> list[int]:
save_data = write(save_data, len(data), 4)
for item in data:
save_data = write(save_data, item["unknown_4"], 4)
save_data = write(save_data, len(item["item_packs"]), 4)
for string_dict in item["item_packs"]:
save_data = serialise_utf8_string(save_data, string_dict["Value"])
save_data = write(save_data, string_dict["unknown_1"], 1)
return save_data
def serialise_dumped_data(
save_data: list[int], data: list[dict[str, int]]
) -> list[int]:
for item in data:
save_data = write(save_data, item)
return save_data
def serialise_outbreaks(save_data: list[int], outbreaks: dict[Any, Any]) -> list[int]:
save_data = write(save_data, len(outbreaks), 4)
for chapter_id in outbreaks:
save_data = write(save_data, int(chapter_id), 4)
save_data = write(save_data, len(outbreaks[chapter_id]), 4)
for level_id in outbreaks[chapter_id]:
save_data = write(save_data, level_id, 4)
save_data = write(save_data, outbreaks[chapter_id][level_id], 1)
return save_data
def serialise_ototo_cat_cannon(
save_data: list[int], ototo_cannon: dict[int, Any]
) -> list[int]:
save_data = write(save_data, len(ototo_cannon), 4)
for cannon_id in ototo_cannon:
cannon = ototo_cannon[cannon_id]
save_data = write(save_data, int(cannon_id), 4)
save_data = write(save_data, cannon["len_val"], 4)
save_data = write(save_data, cannon["unlock_flag"], 4)
levels = cannon["levels"]
save_data = write(save_data, levels["effect"], 4)
if cannon["len_val"] == 4:
save_data = write(save_data, levels["foundation"], 4)
save_data = write(save_data, levels["style"], 4)
return save_data
def serialise_uncanny_current(
save_data: list[int], uncanny_current: dict[str, Any]
) -> list[int]:
total_sub_chapters = uncanny_current["total"]
stars_per_sub_chapter = uncanny_current["stars"]
stages_per_sub_chapter = uncanny_current["stages"]
save_data = write(save_data, total_sub_chapters, 4)
save_data = write(save_data, stages_per_sub_chapter, 4)
save_data = write(save_data, stars_per_sub_chapter, 4)
for i in range(len(uncanny_current["Clear"])):
save_data = write_length_data(
save_data, uncanny_current["Clear"][i], 4, 4, False
)
return save_data
def serialise_event_timed_scores(
save_data: list[int], timed_scores: dict[str, Any]
) -> list[int]:
total_sub_chapters = timed_scores["total"]
stars_per_sub_chapter = timed_scores["stars"]
stages_per_sub_chapter = timed_scores["stages"]
save_data = write(save_data, total_sub_chapters, 4)
save_data = write(save_data, stages_per_sub_chapter, 4)
save_data = write(save_data, stars_per_sub_chapter, 4)
for i in range(len(timed_scores["Score"])):
save_data = write_length_data(save_data, timed_scores["Score"][i], 4, 4, False)
return save_data
def serialise_uncanny_progress(
save_data: list[int], uncanny: dict[str, Any]
) -> list[int]:
lengths = uncanny["Lengths"]
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
for chapter in uncanny["Value"]["clear_progress"]:
save_data = write_length_data(save_data, chapter, 4, 4, False)
clear_amount = [0] * total * stars * stages
clear_amount_data = uncanny["Value"]["clear_amount"]
for i in range(total):
for j in range(stages):
for k in range(stars):
clear_amount[i * stages * stars + j * stars + k] = clear_amount_data[i][
k
][j]
save_data = write_length_data(save_data, clear_amount, 4, 4, False)
for chapter in uncanny["Value"]["unlock_next"]:
save_data = write_length_data(save_data, chapter, 4, 4, False)
return save_data
def serialise_talent_data(save_data: list[int], talents: dict[str, Any]) -> list[int]:
save_data = write(save_data, len(talents), 4)
for cat_id in talents:
cat_talent_data = talents[cat_id]
save_data = write(save_data, int(cat_id), 4)
save_data = write(save_data, len(cat_talent_data), 4)
for talent in cat_talent_data:
save_data = write(save_data, talent["id"], 4)
save_data = write(save_data, talent["level"], 4)
return save_data
def serialise_gauntlet_current(
save_data: list[int], gauntlet_current: dict[str, Any]
) -> list[int]:
save_data = write(save_data, gauntlet_current["total"], 2)
save_data = write(save_data, gauntlet_current["stages"], 1)
save_data = write(save_data, gauntlet_current["stars"], 1)
for i in range(len(gauntlet_current["Clear"])):
save_data = write_length_data(
save_data, gauntlet_current["Clear"][i], 1, 1, False
)
return save_data
def serialise_gauntlet_progress(
save_data: list[int], gauntlets: dict[str, Any]
) -> list[int]:
lengths = gauntlets["Lengths"]
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
for chapter in gauntlets["Value"]["clear_progress"]:
save_data = write_length_data(save_data, chapter, 1, 1, False)
clear_amount = [0] * total * stars * stages
clear_amount_data = gauntlets["Value"]["clear_amount"]
for i in range(total):
for j in range(stages):
for k in range(stars):
clear_amount[i * stages * stars + j * stars + k] = clear_amount_data[i][
k
][j]
save_data = write_length_data(save_data, clear_amount, 4, 2, False)
for chapter in gauntlets["Value"]["unlock_next"]:
save_data = write_length_data(save_data, chapter, 1, 1, False)
return save_data
def serialise_legend_quest_current(
save_data: list[int], legend_quest_current: dict[str, Any]
) -> list[int]:
save_data = write(save_data, legend_quest_current["total"], 1)
save_data = write(save_data, legend_quest_current["stages"], 1)
save_data = write(save_data, legend_quest_current["stars"], 1)
for i in range(len(legend_quest_current["Clear"])):
save_data = write_length_data(
save_data, legend_quest_current["Clear"][i], 1, 1, False
)
return save_data
def serialise_legend_quest_progress(
save_data: list[int], legend_quests: dict[str, Any]
) -> list[int]:
lengths = legend_quests["Lengths"]
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
for chapter in legend_quests["Value"]["clear_progress"]:
save_data = write_length_data(save_data, chapter, 1, 1, False)
clear_amount = [0] * total * stars * stages
clear_amount_data = legend_quests["Value"]["clear_amount"]
for i in range(total):
for j in range(stages):
for k in range(stars):
clear_amount[i * stages * stars + j * stars + k] = clear_amount_data[i][
k
][j]
tries = [0] * total * stars * stages
tries_data = legend_quests["Value"]["tries"]
for i in range(total):
for j in range(stages):
for k in range(stars):
tries[i * stages * stars + j * stars + k] = tries_data[i][k][j]
save_data = write_length_data(save_data, clear_amount, 4, 2, False)
save_data = write_length_data(save_data, tries, 4, 2, False)
for chapter in legend_quests["Value"]["unlock_next"]:
save_data = write_length_data(save_data, chapter, 1, 1, False)
return save_data
def serialise_talent_orbs(
save_data: list[int], talent_orbs: dict[str, int], game_verison: dict[str, int]
) -> list[int]:
save_data = write(save_data, len(talent_orbs), 2)
for orb_id in talent_orbs:
save_data = write(save_data, int(orb_id), 2)
if game_verison["Value"] < 110400:
save_data = write(save_data, talent_orbs[orb_id], 1)
else:
save_data = write(save_data, talent_orbs[orb_id], 2)
return save_data
def serialise_aku(save_data: list[int], aku: dict[str, Any]) -> list[int]:
lengths = aku["Lengths"]
save_data = write(save_data, lengths["total"], 2)
save_data = write(save_data, lengths["stages"], 1)
save_data = write(save_data, lengths["stars"], 1)
save_data = serialise_gauntlet_progress(save_data, aku)
return save_data
def serialise_tower(save_data: list[int], tower: dict[str, Any]) -> list[int]:
save_data = write(save_data, tower["current"]["total"], 4)
save_data = write(save_data, tower["current"]["stars"], 4)
for i in range(len(tower["current"]["selected"])):
save_data = write_length_data(
save_data, tower["current"]["selected"][i], 4, 4, False
)
save_data = write(save_data, tower["progress"]["total"], 4)
save_data = write(save_data, tower["progress"]["stars"], 4)
for i in range(len(tower["progress"]["clear_progress"])):
save_data = write_length_data(
save_data, tower["progress"]["clear_progress"][i], 4, 4, False
)
total = tower["progress"]["total"]
stages = tower["progress"]["stages"]
stars = tower["progress"]["stars"]
save_data = write(save_data, total, 4)
save_data = write(save_data, stages, 4)
save_data = write(save_data, stars, 4)
clear_amount = [0] * total * stars * stages
clear_amount_data = tower["progress"]["clear_amount"]
for i in range(total):
for j in range(stages):
for k in range(stars):
clear_amount[i * stages * stars + j * stars + k] = clear_amount_data[i][
k
][j]
save_data = write_length_data(save_data, clear_amount, 4, 4, False)
save_data = serialise_dumped_data(save_data, tower["data"])
return save_data
def exit_serialiser(save_data: list[int], save_stats: dict[str, Any]) -> list[int]:
return serialise_utf8_string(save_data, save_stats["hash"], write_length=False)
def check_gv(
save_data: list[int], save_stats: dict[str, Any], game_version: int
) -> dict[str, Any]:
if save_stats["game_version"]["Value"] < game_version:
save_data = exit_serialiser(save_data, save_stats)
return {"save_data": save_data, "exit": True}
else:
return {"save_data": save_data, "exit": False}
def serialise_medals(save_data: list[int], medals: dict[str, Any]) -> list[int]:
save_data = write_length_data(save_data, medals["medal_data_1"], 2, 2)
medal_data_2 = medals["medal_data_2"]
save_data = write(save_data, len(medal_data_2), 2)
for medal_id in medal_data_2:
save_data = write(save_data, medal_id, 2)
save_data = write(save_data, medal_data_2[medal_id], 1)
return save_data
def serialise_play_time(save_data: list[int], play_time: dict[str, Any]) -> list[int]:
frames = helper.time_to_frames(play_time)
save_data = write(save_data, frames, 4)
return save_data
def serialise_mission_segment(save_data: list[int], data: dict[int, Any]) -> list[int]:
save_data = write(save_data, len(data), 4)
for mission in data:
save_data = write(save_data, mission, 4)
save_data = write(save_data, data[mission], 4)
return save_data
def serialise_missions(
save_data: list[int], missions_data: dict[str, Any]
) -> list[int]:
save_data = serialise_mission_segment(save_data, missions_data["states"])
save_data = serialise_mission_segment(save_data, missions_data["requirements"])
save_data = serialise_mission_segment(save_data, missions_data["clear_types"])
save_data = serialise_mission_segment(save_data, missions_data["gamatoto"])
save_data = serialise_mission_segment(save_data, missions_data["nyancombo"])
save_data = serialise_mission_segment(save_data, missions_data["user_rank"])
save_data = serialise_mission_segment(save_data, missions_data["expiry"])
save_data = serialise_mission_segment(save_data, missions_data["preparing"])
return save_data
def serialise_dojo(save_data: list[int], dojo_data: dict[int, Any]) -> list[int]:
save_data = write(save_data, len(dojo_data), 4)
for subchapter_id in dojo_data:
subchapter_data = dojo_data[subchapter_id]
save_data = write(save_data, subchapter_id, 4)
save_data = write(save_data, len(subchapter_data), 4)
for stage_id in subchapter_data:
score = subchapter_data[stage_id]
save_data = write(save_data, stage_id, 4)
save_data = write(save_data, score, 4)
return save_data
def write_double(save_data: list[int], number: float) -> list[int]:
"""Writes a double to the save data"""
if isinstance(number, dict):
number = number["Value"]
number = float(number)
data = struct.pack("d", number)
save_data += data
return save_data
def start_serialize(save_stats: dict[str, Any]) -> bytes:
"""Starts the serialisation process"""
try:
save_data = serialize_save(save_stats)
except Exception as e: # pylint: disable=broad-except
helper.colored_text(
"\nError: An error has occurred while serializing your save data:",
base=helper.RED,
)
game_version = save_stats["game_version"]["Value"]
if game_version < 110000:
helper.colored_text(
f"\nThis save is from before &11.0.0& (current save version is &{helper.gv_to_str(game_version)}&), so this is likely the cause for the issue. &The save editor is not designed to work with saves from before 11.0.0&"
)
raise e
return save_data
def serialise_gold_pass(save_data: list[int], gold_pass: dict[str, Any]) -> list[int]:
"""Serialises the gold pass data"""
save_data = write(save_data, gold_pass["officer_id"])
save_data = write(save_data, gold_pass["renewal_times"])
save_data = write_double(save_data, gold_pass["start_date"])
save_data = write_double(save_data, gold_pass["expiry_date"])
save_data = write_length_doubles(
save_data, gold_pass["unknown_2"], write_length=False
)
save_data = write_double(save_data, gold_pass["start_date_2"])
save_data = write_double(save_data, gold_pass["expiry_date_2"])
save_data = write_double(save_data, gold_pass["unknown_3"])
save_data = write(save_data, gold_pass["flag_2"])
save_data = write_double(save_data, gold_pass["expiry_date_3"])
save_data = write(save_data, len(gold_pass["claimed_rewards"]), 4)
for item_id, amount in gold_pass["claimed_rewards"].items():
save_data = write(save_data, item_id, 4)
save_data = write(save_data, amount, 4)
save_data = write(save_data, gold_pass["unknown_4"])
save_data = write(save_data, gold_pass["unknown_5"])
save_data = write(save_data, gold_pass["unknown_6"])
return save_data
def serialise_unlock_popups(
save_data: list[int],
unlock_popups: list[tuple[int, int]],
unknown_118: dict[str, int],
):
"""Serialises the unlock popups"""
save_data = write(save_data, len(unlock_popups), 4)
save_data = write(save_data, unknown_118)
for popup_id in unlock_popups:
save_data = write(save_data, popup_id[1], 1)
save_data = write(save_data, popup_id[0], 4)
return save_data
def serialise_cleared_slots(
save_data: list[int], cleared_slots: dict[str, Any]
) -> list[int]:
"""
Serialises the cleared slots
Args:
save_data (list[int]): The save data
cleared_slots (dict[str, Any]): The cleared slots
Returns:
list[int]: The save data
"""
cleared_slot_data = parse_save.ClearedSlots.from_dict(cleared_slots)
save_data = write(save_data, len(cleared_slot_data.slots), 2)
for slot in cleared_slot_data.slots:
save_data = write(save_data, slot.slot_index, 2)
for cat in slot.cats:
save_data = write(save_data, cat.cat_id, 2)
save_data = write(save_data, cat.cat_form, 1)
save_data = write(save_data, slot.separator, 3)
save_data = write(save_data, cleared_slot_data.end_index, 2)
for stages_slot in cleared_slot_data.slot_stages:
save_data = write(save_data, stages_slot.slot_index, 2)
save_data = write(save_data, len(stages_slot.stages), 2)
for stage in stages_slot.stages:
save_data = write(save_data, stage.stage_id, 4)
return save_data
def serialise_enigma_data(save_data: list[int], enigma_data: dict[str, Any]):
"""
Serialises the enigma data
Args:
save_data (list[int]): The save data
enigma_data (dict[str, Any]): The enigma data
"""
save_data = write(save_data, enigma_data["energy_since_1"], 4)
save_data = write(save_data, enigma_data["energy_since_2"], 4)
save_data = write(save_data, enigma_data["enigma_level"], 1)
save_data = write(save_data, enigma_data["unknown_2"], 1)
save_data = write(save_data, enigma_data["unknown_3"], 1)
save_data = write(save_data, len(enigma_data["stages"]), 1)
for stage in enigma_data["stages"]:
save_data = write(save_data, stage["level"], 4)
save_data = write(save_data, stage["stage_id"], 4)
save_data = write(save_data, stage["decoding_status"], 1)
save_data = write_double(save_data, stage["start_time"])
return save_data
def serialise_cat_shrine(
save_data: list[int], shrine_data: dict[str, Any]
) -> list[int]:
"""
Serialises the cat shrine data
Args:
save_data (list[int]): The save data
shrine_data (dict[str, Any]): The shrine data
Returns:
list[int]: The save data
"""
save_data = write_double(save_data, shrine_data["stamp_1"])
save_data = write_double(save_data, shrine_data["stamp_2"])
save_data = write(save_data, shrine_data["shrine_gone"], 1)
save_data = write_length_data(save_data, shrine_data["flags"], 1, 1)
save_data = write(save_data, shrine_data["xp_offering"], 4)
return save_data
def write_variable_length_int(save_data: list[int], i: int) -> list[int]:
"""
Writes a variable-length integer to the save data (apparently a big-endian base-128 encoding: 7 data bits per byte, with the high bit set on every byte except the last)
Args:
save_data (list[int]): The save data
i (int): The integer to write
Returns:
list[int]: The save data
"""
i_2 = 0
i_3 = 0
i = int(i)
while i >= 128:
i_2 |= ((i & 127) | 32768) << (i_3 * 8)
i_3 += 1
i >>= 7
i_4 = i_2 | (i << (i_3 * 8))
i_5 = i_3 + 1
for i_6 in range(i_5):
i_7 = (i_4 >> (((i_5 - i_6) - 1) * 8)) & 255
save_data = write(save_data, i_7, 1)
return save_data
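# Worked example (added for clarity, not in the original source): 300 splits
# into the 7-bit groups 0b10 and 0b0101100, written most-significant group
# first with the continuation bit (0x80) set on every byte except the last:
#   write_variable_length_int([], 300) -> [0x82, 0x2C]
# Values below 128 are written as a single byte unchanged.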
def set_variable_data(
save_data: list[int], data: tuple[dict[int, int], dict[int, int]]
) -> list[int]:
"""
Sets the variable data
Args:
save_data (list[int]): The save data
data (tuple[dict[int, int], dict[int, int]]): The variable data
Returns:
list[int]: The save data
"""
save_data = write_variable_length_int(save_data, len(data[0]))
for key, value in data[0].items():
save_data = write_variable_length_int(save_data, key)
save_data = write_variable_length_int(save_data, value)
save_data = write_variable_length_int(save_data, len(data[1]))
for key, value in data[1].items():
save_data = write_variable_length_int(save_data, key)
save_data = write(save_data, value, 1)
return save_data
def serialise_login_bonuses(save_data: list[int], login_bonuses: dict[int, int]):
"""
Serialises the login bonuses
Args:
save_data (list[int]): The save data
login_bonuses (dict[int, int]): The login bonuses
"""
save_data = write(save_data, len(login_bonuses), 4)
for key, value in login_bonuses.items():
save_data = write(save_data, key, 4)
save_data = write(save_data, value, 4)
return save_data
def serialise_tower_item_obtained(save_data: list[int], data: list[list[bool]]):
"""
Serialises the tower item obtained data
Args:
save_data (list[int]): The save data
data (list[list[bool]]): The tower item obtained data
"""
save_data = write(save_data, len(data), 4)
save_data = write(save_data, len(data[0]), 4)
for row in data:
for item in row:
save_data = write(save_data, item, 1)
return save_data
def write_dict(save_data: list[int], data: dict[Any, Any]) -> list[int]:
"""
Writes a dictionary to the save data
Args:
save_data (list[int]): The save data
data (dict[Any, Any]): The dictionary
Returns:
list[int]: The save data
"""
save_data = write(save_data, len(data), 4)
for key, value in data.items():
save_data = write(save_data, key, 4)
if isinstance(value, str):
save_data = serialise_utf8_string(save_data, value)
elif isinstance(value, bool):
save_data = write(save_data, value, 1)
else:
save_data = write(save_data, value, 4)
return save_data
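# Example (added for clarity, not in the original source): values are sized by
# type, so {0: "ab", 1: True, 2: 7} is written as a 4 byte count followed, per
# entry, by a 4 byte key and then a length-prefixed utf-8 string, a 1 byte
# bool, or a 4 byte int respectively.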
def serialise_zero_legends(save_data: list[int], data: list[Any]):
"""
Serialises the zero legends data
Args:
save_data (list[int]): The save data
data (list[Any]): The zero legends data
"""
save_data = write(save_data, len(data), 2)
for chapter in data:
unknown_1 = chapter["unknown_1"]
save_data = write(save_data, unknown_1, 1)
save_data = write(save_data, len(chapter["stars"]), 1)
for star in chapter["stars"]:
selected_stage = star["selected_stage"]
stages_cleared = star["stages_cleared"]
unlock_next = star["unlock_next"]
save_data = write(save_data, selected_stage, 1)
save_data = write(save_data, stages_cleared, 1)
save_data = write(save_data, unlock_next, 1)
save_data = write(save_data, len(star["stages"]), 2)
for clear_amount in star["stages"]:
save_data = write(save_data, clear_amount, 2)
return save_data
def serialize_save(save_stats: dict[str, Any]) -> bytes:
"""Serialises the save stats"""
save_data: list[int] = []
save_data = write(save_data, save_stats["game_version"])
save_data = write(save_data, save_stats["unknown_1"])
save_data = write(save_data, save_stats["mute_music"])
save_data = write(save_data, save_stats["mute_sound_effects"])
save_data = write(save_data, save_stats["cat_food"])
save_data = write(save_data, save_stats["current_energy"])
if save_stats["extra_time_data"]:
if save_stats["extra_time_data"]["Value"] != 0:
save_data = write(save_data, save_stats["extra_time_data"])
save_data = serialise_time_data_skip(
save_data,
save_stats["time"],
save_stats["time_stamp"],
save_stats["dst"],
save_stats["duplicate_time"],
save_stats["dst_val"],
)
save_data = write_length_data(
save_data, save_stats["unknown_flags_1"], write_length=False
)
save_data = write(save_data, save_stats["upgrade_state"])
save_data = write(save_data, save_stats["xp"])
save_data = write(save_data, save_stats["tutorial_cleared"])
save_data = write_length_data(
save_data, save_stats["unknown_flags_2"], write_length=False
)
save_data = write(save_data, save_stats["unknown_flag_1"])
save_data = serialise_equip_slots(save_data, save_stats["slots"])
save_data = write(save_data, save_stats["cat_stamp_current"])
save_data = write_length_data(
save_data, save_stats["cat_stamp_collected"], write_length=False
)
save_data = write(save_data, save_stats["unknown_2"])
save_data = write(save_data, save_stats["daily_reward_flag"])
save_data = write_length_data(
save_data, save_stats["unknown_116"], write_length=False
)
save_data = serialise_main_story(save_data, save_stats["story_chapters"])
save_data = serialise_treasures(save_data, save_stats["treasures"])
save_data = write_length_data(save_data, save_stats["enemy_guide"])
save_data = write_length_data(save_data, save_stats["cats"])
save_data = serialise_cat_upgrades(save_data, save_stats["cat_upgrades"])
save_data = write_length_data(save_data, save_stats["current_forms"])
save_data = serialise_blue_upgrades(save_data, save_stats["blue_upgrades"])
save_data = write_length_data(save_data, save_stats["menu_unlocks"])
save_data = write_length_data(save_data, save_stats["new_dialogs_1"])
save_data = write_length_data(save_data, save_stats["battle_items"], 4, 4, False, 6)
save_data = write_length_data(save_data, save_stats["new_dialogs_2"])
save_data = write(save_data, save_stats["unknown_6"])
save_data = write_length_data(
save_data, save_stats["unknown_7"], write_length=False
)
save_data = write(save_data, save_stats["lock_item"])
save_data = write_length_data(save_data, save_stats["locked_items"], 1, 1, False, 6)
save_data = serialise_time_data(
save_data, save_stats["second_time"], save_stats["dst"], save_stats["dst_val"]
)
save_data = write_length_data(
save_data, save_stats["unknown_8"], write_length=False
)
save_data = serialise_time_data(
save_data, save_stats["third_time"], save_stats["dst"], save_stats["dst_val"]
)
save_data = write(save_data, save_stats["unknown_9"])
save_data = serialise_utf8_string(save_data, save_stats["thirty2_code"])
save_data = set_variable_data(save_data, save_stats["unknown_10"])
save_data = write_length_data(
save_data, save_stats["unknown_11"], write_length=False
)
save_data = write(save_data, save_stats["normal_tickets"])
save_data = write(save_data, save_stats["rare_tickets"])
save_data = write_length_data(save_data, save_stats["gatya_seen_cats"])
save_data = write_length_data(
save_data, save_stats["unknown_12"], write_length=False
)
if save_stats["cat_storage"]["len"]:
save_data = write(save_data, len(save_stats["cat_storage"]["ids"]), 2)
save_data = write_length_data(
save_data, save_stats["cat_storage"]["ids"], 2, 4, False
)
save_data = write_length_data(
save_data, save_stats["cat_storage"]["types"], 2, 4, False
)
save_data = serialise_event_stages_current(save_data, save_stats["event_current"])
save_data = serialise_event_stages(save_data, save_stats["event_stages"])
save_data = write_length_data(
save_data, save_stats["unknown_15"], write_length=False
)
save_data = write_length_data(save_data, save_stats["unit_drops"])
save_data = write(save_data, save_stats["rare_gacha_seed"])
save_data = write(save_data, save_stats["unknown_17"])
save_data = write(save_data, save_stats["unknown_18"])
save_data = serialise_time_data(
save_data, save_stats["fourth_time"], save_stats["dst"], save_stats["dst_val"]
)
save_data = write_length_data(save_data, save_stats["unknown_105"], 4, 4, False)
save_data = write_length_data(
save_data, save_stats["unknown_107"], write_length=False, bytes_per_val=1
)
if save_stats["dst"]:
save_data = serialise_utf8_string(save_data, save_stats["unknown_110"])
unknown_108 = helper.format_text(save_stats["unknown_108"])
save_data = write(save_data, len(unknown_108), 4)
for i in range(len(unknown_108)):
save_data = serialise_utf8_string(save_data, unknown_108[i])
if save_stats["dst"]:
save_data = write_length_doubles(
save_data, save_stats["time_stamps"], write_length=False
)
save_data = write(save_data, len(save_stats["unknown_112"]), 4)
for string in save_stats["unknown_112"]:
save_data = serialise_utf8_string(save_data, string)
save_data = write(save_data, save_stats["energy_notice"])
save_data = write(save_data, save_stats["game_version_2"])
save_data = write(save_data, save_stats["unknown_111"])
save_data = write(save_data, save_stats["unlocked_slots"])
save_data = write(save_data, save_stats["unknown_20"]["Length_1"], 4)
save_data = write(save_data, save_stats["unknown_20"]["Length_2"], 4)
save_data = write_length_data(
save_data, save_stats["unknown_20"], write_length=False
)
save_data = write_length_doubles(
save_data, save_stats["time_stamps_2"][:-1], write_length=False
)
save_data = write(save_data, save_stats["trade_progress"])
if save_stats["dst"]:
save_data = write_double(save_data, save_stats["time_stamps_2"][-1])
else:
save_data = write(save_data, save_stats["unknown_24"])
save_data = serialise_cat_upgrades(save_data, save_stats["catseye_related_data"])
save_data = write_length_data(
save_data, save_stats["unknown_22"], write_length=False
)
save_data = write_length_data(save_data, save_stats["user_rank_rewards"], 4, 1)
if not save_stats["dst"]:
save_data = write_double(save_data, save_stats["time_stamps_2"][-1])
save_data = write_length_data(save_data, save_stats["unlocked_forms"])
save_data = serialise_utf8_string(save_data, save_stats["transfer_code"])
save_data = serialise_utf8_string(save_data, save_stats["confirmation_code"])
save_data = write(save_data, save_stats["transfer_flag"])
lengths = save_stats["stage_data_related_1"]["Lengths"]
length = lengths[0] * lengths[1] * lengths[2]
save_data = write_length_data(save_data, lengths, write_length=False)
save_data = write_length_data(
save_data, save_stats["stage_data_related_1"], 4, 1, False, length
)
save_data = serialise_event_timed_scores(
save_data, save_stats["event_timed_scores"]
)
save_data = serialise_utf8_string(save_data, save_stats["inquiry_code"])
save_data = serialise_play_time(save_data, save_stats["play_time"])
save_data = write(save_data, save_stats["unknown_25"])
save_data = write(save_data, save_stats["backup_state"])
if save_stats["dst"]:
save_data = write(save_data, save_stats["unknown_119"])
save_data = write(save_data, save_stats["gv_44"])
save_data = write(save_data, save_stats["unknown_120"])
save_data = write_length_data(
save_data,
flatten_list(save_stats["itf_timed_scores"]),
4,
4,
write_length=False,
)
save_data = write(save_data, save_stats["unknown_27"])
save_data = write_length_data(save_data, save_stats["cat_related_data_1"])
save_data = write(save_data, save_stats["unknown_28"])
save_data = write(save_data, save_stats["gv_45"])
save_data = write(save_data, save_stats["gv_46"])
save_data = write(save_data, save_stats["unknown_29"])
save_data = write_length_data(save_data, save_stats["lucky_tickets_1"])
save_data = write_length_data(save_data, save_stats["unknown_32"])
save_data = write(save_data, save_stats["gv_47"])
save_data = write(save_data, save_stats["gv_48"])
if not save_stats["dst"]:
save_data = write(save_data, save_stats["energy_notice"])
save_data = write_double(save_data, save_stats["account_created_time_stamp"])
save_data = write_length_data(save_data, save_stats["unknown_35"])
save_data = write(save_data, save_stats["unknown_36"])
save_data = write(save_data, save_stats["user_rank_popups"])
save_data = write(save_data, save_stats["unknown_37"])
save_data = write(save_data, save_stats["gv_49"])
save_data = write(save_data, save_stats["gv_50"])
save_data = write(save_data, save_stats["gv_51"])
save_data = write_length_data(
save_data, save_stats["cat_guide_collected"], bytes_per_val=1
)
save_data = write(save_data, save_stats["gv_52"])
save_data = write_length_doubles(
save_data, save_stats["time_stamps_3"], write_length=False
)
save_data = write_length_data(save_data, save_stats["cat_fruit"])
save_data = write_length_data(save_data, save_stats["cat_related_data_3"])
save_data = write_length_data(save_data, save_stats["catseye_cat_data"])
save_data = write_length_data(save_data, save_stats["catseyes"])
save_data = write_length_data(save_data, save_stats["catamins"])
seconds = helper.time_to_seconds(save_stats["gamatoto_time_left"])
save_data = write_double(save_data, float(seconds))
save_data = write(save_data, save_stats["gamatoto_exclamation"])
save_data = write(save_data, save_stats["gamatoto_xp"])
save_data = write(save_data, save_stats["gamamtoto_destination"])
save_data = write(save_data, save_stats["gamatoto_recon_length"])
save_data = write(save_data, save_stats["unknown_43"])
save_data = write(save_data, save_stats["gamatoto_complete_notification"])
save_data = write_length_data(save_data, save_stats["unknown_44"], bytes_per_val=1)
save_data = write_length_data(
save_data, save_stats["unknown_45"], bytes_per_val=12 * 4
)
save_data = write(save_data, save_stats["gv_53"])
save_data = write_length_data(save_data, save_stats["helpers"])
save_data = write(save_data, save_stats["unknown_47"])
save_data = write(save_data, save_stats["gv_54"])
save_data = serialse_purchase_receipts(save_data, save_stats["purchases"])
save_data = write(save_data, save_stats["gv_54"])
save_data = write(save_data, save_stats["gamatoto_skin"])
save_data = write(save_data, save_stats["platinum_tickets"])
save_data = serialise_login_bonuses(save_data, save_stats["login_bonuses"])
save_data = write(save_data, save_stats["unknown_49"])
save_data = write_length_data(
save_data, save_stats["announcment"], write_length=False
)
save_data = write(save_data, save_stats["backup_counter"])
save_data = write_length_data(
save_data, save_stats["unknown_131"], write_length=False
)
save_data = write(save_data, save_stats["gv_55"])
save_data = write(save_data, save_stats["unknown_51"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_113"])
save_data = serialise_dojo(save_data, save_stats["dojo_data"])
save_data = write(save_data, save_stats["dojo_item_lock"])
save_data = write_length_data(
save_data, save_stats["dojo_locks"], write_length=False, bytes_per_val=1
)
save_data = write(save_data, save_stats["unknown_114"])
save_data = write(save_data, save_stats["gv_58"])
save_data = write(save_data, save_stats["unknown_115"])
save_data = serialise_outbreaks(save_data, save_stats["outbreaks"])
save_data = write_double(save_data, save_stats["unknown_52"])
save_data = write_length_data(
save_data, save_stats["item_schemes"]["to_obtain_ids"]
)
save_data = write_length_data(save_data, save_stats["item_schemes"]["received_ids"])
save_data = serialise_outbreaks(save_data, save_stats["current_outbreaks"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_55"])
save_data = write_double(save_data, save_stats["time_stamp_4"])
save_data = write(save_data, save_stats["gv_60"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_117"])
save_data = write(save_data, save_stats["gv_61"])
save_data = serialise_unlock_popups(
save_data, save_stats["unlock_popups"], save_stats["unknown_118"]
)
save_data = write_length_data(save_data, save_stats["base_materials"])
save_data = write(save_data, save_stats["unknown_56"])
save_data = write(save_data, save_stats["unknown_57"])
save_data = write(save_data, save_stats["unknown_58"])
save_data = write(save_data, save_stats["engineers"])
save_data = serialise_ototo_cat_cannon(save_data, save_stats["ototo_cannon"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_59"])
save_data = serialise_tower(save_data, save_stats["tower"])
save_data = serialise_missions(save_data, save_stats["missions"])
save_data = serialise_tower_item_obtained(
save_data, save_stats["tower_item_obtained"]
)
save_data = serialise_dumped_data(save_data, save_stats["unknown_61"])
save_data = write(save_data, save_stats["challenge"]["Score"])
save_data = write(save_data, save_stats["challenge"]["Cleared"])
save_data = write(save_data, save_stats["gv_67"])
save_data = write_dict(save_data, save_stats["weekly_event_missions"])
save_data = write(save_data, save_stats["won_dojo_reward"])
save_data = write(save_data, save_stats["event_flag_update_flag"])
save_data = write(save_data, save_stats["gv_68"])
save_data = write_dict(save_data, save_stats["completed_one_level_in_chapter"])
save_data = write_dict(save_data, save_stats["displayed_cleared_limit_text"])
save_data = write_dict(save_data, save_stats["event_start_dates"])
save_data = write_length_data(save_data, save_stats["stages_beaten_twice"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_102"])
save_data = serialise_uncanny_current(save_data, save_stats["uncanny_current"])
save_data = serialise_uncanny_progress(save_data, save_stats["uncanny"])
save_data = write(save_data, save_stats["unknown_62"])
save_data = write_length_data(
save_data, save_stats["unknown_63"], write_length=False
)
save_data = serialise_uncanny_current(
save_data, save_stats["unknown_64"]["current"]
)
save_data = serialise_uncanny_progress(
save_data, save_stats["unknown_64"]["progress"]
)
save_data = write(save_data, save_stats["unknown_65"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_66"])
save_data = write_length_data(
save_data, save_stats["lucky_tickets_2"], write_length=False
)
save_data = write_length_data(
save_data, save_stats["unknown_67"], write_length=False
)
save_data = write(save_data, save_stats["unknown_68"])
save_data = write(save_data, save_stats["gv_77"])
save_data = serialise_gold_pass(save_data, save_stats["gold_pass"])
save_data = serialise_talent_data(save_data, save_stats["talents"])
save_data = write(save_data, save_stats["np"])
save_data = write(save_data, save_stats["unknown_70"])
save_data = write(save_data, save_stats["gv_80000"])
save_data = write(save_data, save_stats["unknown_71"])
save_data = write(save_data, save_stats["leadership"])
save_data = write(save_data, save_stats["officer_pass_cat_id"])
save_data = write(save_data, save_stats["officer_pass_cat_form"])
save_data = write(save_data, save_stats["gv_80200"])
save_data = write(save_data, save_stats["filibuster_stage_id"])
save_data = write(save_data, save_stats["filibuster_stage_enabled"])
save_data = write(save_data, save_stats["gv_80300"])
save_data = write_length_data(save_data, save_stats["unknown_74"])
save_data = write(save_data, save_stats["gv_80500"])
save_data = write_length_data(save_data, save_stats["unknown_75"], 2)
save_data = serialise_legend_quest_current(
save_data, save_stats["legend_quest_current"]
)
save_data = serialise_legend_quest_progress(save_data, save_stats["legend_quest"])
save_data = write_length_data(
save_data, save_stats["unknown_133"], bytes_per_val=1, write_length=False
)
save_data = write_length_data(
save_data, save_stats["legend_quest_ids"], write_length=False
)
save_data = serialise_dumped_data(save_data, save_stats["unknown_76"])
save_data = write(save_data, save_stats["gv_80700"])
if save_stats["dst"]:
if save_stats["gv_100600"]["Value"] == 100600:
save_data = write(save_data, save_stats["unknown_104"])
save_data = write(save_data, save_stats["gv_100600"])
save_data = write(save_data, save_stats["restart_pack"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_101"])
save_data = serialise_medals(save_data, save_stats["medals"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_103"])
save_data = serialise_gauntlet_current(save_data, save_stats["gauntlet_current"])
save_data = serialise_gauntlet_progress(save_data, save_stats["gauntlets"])
save_data = write_length_data(
save_data, save_stats["unknown_77"], bytes_per_val=1, write_length=False
)
save_data = write(save_data, save_stats["gv_90300"])
save_data = serialise_gauntlet_current(save_data, save_stats["unknown_78"])
save_data = serialise_gauntlet_progress(save_data, save_stats["unknown_79"])
save_data = write_length_data(
save_data, save_stats["unknown_80"], bytes_per_val=1, write_length=False
)
save_data = serialise_enigma_data(save_data, save_stats["enigma_data"])
save_data = serialise_cleared_slots(save_data, save_stats["cleared_slot_data"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_121"])
save_data = serialise_gauntlet_current(
save_data, save_stats["collab_gauntlets_current"]
)
save_data = serialise_gauntlet_progress(save_data, save_stats["collab_gauntlets"])
save_data = write_length_data(
save_data, save_stats["unknown_84"], bytes_per_val=1, write_length=False
)
save_data = serialise_dumped_data(save_data, save_stats["unknown_85"])
save_data = serialise_talent_orbs(
save_data, save_stats["talent_orbs"], save_stats["game_version"]
)
save_data = serialise_dumped_data(save_data, save_stats["unknown_86"])
save_data = serialise_cat_shrine(save_data, save_stats["cat_shrine"])
save_data = write(save_data, save_stats["unknown_130"])
save_data = write(save_data, save_stats["gv_90900"])
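    # Editor slot names are only present from game version 11.06.00 onwards; a 1-byte
    # count is written before the UTF-8 slot name strings.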
if save_stats["game_version"]["Value"] >= 110600:
save_data = write(save_data, len(save_stats["slot_names"]), 1)
for slot_name in save_stats["slot_names"]:
save_data = serialise_utf8_string(save_data, slot_name)
save_data = write(save_data, save_stats["gv_91000"])
save_data = write(save_data, save_stats["legend_tickets"])
save_data = write_length_data(
save_data, save_stats["unknown_87"], bytes_per_val=5, length_bytes=1
)
save_data = write(save_data, save_stats["unknown_88"])
save_data = serialise_utf8_string(save_data, save_stats["token"])
save_data = write(save_data, save_stats["unknown_89"])
save_data = write(save_data, save_stats["unknown_90"])
save_data = write(save_data, save_stats["unknown_91"])
save_data = write(save_data, save_stats["gv_100000"])
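    # From here on the remaining fields are written in blocks gated by game-version
    # checkpoints: check_gv presumably flags an early exit when the save's game version
    # is below the given value, in which case the bytes built so far are returned.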
data = check_gv(save_data, save_stats, 100100)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = write(save_data, save_stats["date_int"])
save_data = write(save_data, save_stats["gv_100100"])
data = check_gv(save_data, save_stats, 100300)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = write_length_data(
save_data, save_stats["unknown_93"], bytes_per_val=19, write_length=False
)
save_data = write(save_data, save_stats["gv_100300"])
data = check_gv(save_data, save_stats, 100700)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_dumped_data(save_data, save_stats["unknown_94"])
save_data = write(save_data, save_stats["platinum_shards"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_100"])
save_data = write(save_data, save_stats["gv_100700"])
data = check_gv(save_data, save_stats, 100900)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_aku(save_data, save_stats["aku"])
save_data = write(save_data, save_stats["unknown_95"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_96"])
save_data = write(save_data, save_stats["gv_100900"])
data = check_gv(save_data, save_stats, 101000)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = write(save_data, save_stats["unknown_97"])
save_data = write(save_data, save_stats["gv_101000"])
data = check_gv(save_data, save_stats, 110000)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_dumped_data(save_data, save_stats["unknown_98"])
save_data = write(save_data, save_stats["gv_110000"])
data = check_gv(save_data, save_stats, 110500)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_gauntlet_current(
save_data, save_stats["behemoth_culling_current"]
)
save_data = serialise_gauntlet_progress(save_data, save_stats["behemoth_culling"])
save_data = write_length_data(
save_data, save_stats["unknown_124"], bytes_per_val=1, write_length=False
)
save_data = write(save_data, save_stats["unknown_125"])
save_data = write(save_data, save_stats["gv_110500"])
data = check_gv(save_data, save_stats, 110600)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = write(save_data, save_stats["unknown_126"])
save_data = write(save_data, save_stats["gv_110600"])
data = check_gv(save_data, save_stats, 110700)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_dumped_data(save_data, save_stats["unknown_127"])
if save_stats["dst"]:
save_data = write(save_data, save_stats["unknown_128"])
save_data = write(save_data, save_stats["gv_110700"])
data = check_gv(save_data, save_stats, 110800)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = write(save_data, save_stats["shrine_dialogs"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_129"])
save_data = write(save_data, save_stats["dojo_3x_speed"])
save_data = serialise_dumped_data(save_data, save_stats["unknown_132"])
save_data = write(save_data, save_stats["gv_110800"])
data = check_gv(save_data, save_stats, 110900)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_dumped_data(save_data, save_stats["unknown_135"])
save_data = write(save_data, save_stats["gv_110900"])
data = check_gv(save_data, save_stats, 120000)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_zero_legends(save_data, save_stats["zero_legends"])
save_data = write(save_data, save_stats["unknown_136"])
save_data = write(save_data, save_stats["gv_120000"])
data = check_gv(save_data, save_stats, 120100)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_dumped_data(save_data, save_stats["unknown_137"])
save_data = write(save_data, save_stats["gv_120100"])
data = check_gv(save_data, save_stats, 120200)
save_data = data["save_data"]
if data["exit"]:
return bytes(save_data)
save_data = serialise_dumped_data(save_data, save_stats["unknown_138"])
save_data = write(save_data, save_stats["gv_120200"])
save_data = write(save_data, save_stats["extra_data"])
save_data = exit_serialiser(save_data, save_stats)
return bytes(save_data)
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/serialise_save.py
| 0.727879 | 0.3333 |
serialise_save.py
|
pypi
|
import filecmp
import json
from multiprocessing import Process
import os
import shutil
import sys
import time
from typing import Any, Callable, Generator, Optional, Union
import colored # type: ignore
from . import (
user_input_handler,
server_handler,
patcher,
serialise_save,
parse_save,
config_manager,
user_info,
)
GREEN = "#008000"
RED = "#FF0000"
DARK_YELLOW = "#D7C32A"
BLACK = "#000000"
WHITE = "#FFFFFF"
CYAN = "#00FFFF"
def get_time() -> int:
"""Get current time in seconds"""
return int(time.time())
def get_iso_time() -> str:
"""Get the current time in iso format"""
return time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
def print_line_seperator(base: str, char: str = "-", length: int = 80):
"""Print a line of a char"""
width = shutil.get_terminal_size().columns
if width < length:
length = width
colored_text(char * length, base)
def get_dirs(path: str) -> list[str]:
"""Get all directories in a path"""
if not os.path.exists(path):
return []
return [dir for dir in os.listdir(path) if os.path.isdir(os.path.join(path, dir))]
def delete_dir(path: str) -> None:
"""Delete a directory and all of its contents"""
if not os.path.exists(path):
return
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(path)
def create_dirs(path: str) -> None:
"""Create directories if they don't exist"""
if not os.path.exists(path):
os.makedirs(path)
def offset_list(lst: list[int], offset: int) -> list[int]:
    """Offset each value in a list by a certain amount"""
new_list: list[int] = []
for item in lst:
new_list.append(item + offset)
return new_list
def copy_first_n(lst: list[Any], number: int) -> list[Any]:
    """Get the item at the given index from each sub-list in a list of lists"""
new_list: list[Any] = []
for item in lst:
new_list.append(item[number])
return new_list
def get_file(file_name: str) -> str:
"""Get file in files folder"""
file_path = os.path.join(get_local_files_path(), file_name)
return file_path
def get_files_in_dir(dir_path: str) -> list[str]:
"""Get all files in a directory"""
files: list[str] = []
for file in os.listdir(dir_path):
if os.path.isfile(os.path.join(dir_path, file)):
file_path = os.path.join(dir_path, file)
files.append(file_path)
return files
def find_files_in_dir(dir_path: str, file_name: str) -> list[str]:
"""Find all files in a directory with a certain name"""
files: list[str] = []
for file in os.listdir(dir_path):
if file_name in file:
files.append(file)
return files
def get_local_files_path() -> str:
"""Get the local files path"""
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "files")
return dir_path
def read_file_string(file_path: str, create: bool = False) -> str:
"""Reads a file and returns its contents as a string"""
try:
with open(file_path, "r", encoding="utf-8") as file:
return file.read()
except FileNotFoundError as err:
if create:
os.makedirs(os.path.dirname(file_path), exist_ok=True)
write_file_string(file_path, "")
return ""
raise Exception("File not found: " + file_path) from err
except UnicodeDecodeError as err:
raise Exception("Error reading file: " + file_path + ": " + str(err)) from err
def chunks(lst: list[Any], chunk_len: int) -> Generator[Any, Any, Any]:
"""Split list into chunks of n"""
for i in range(0, len(lst), chunk_len):
yield lst[i : i + chunk_len]
def frames_to_time(frames: int) -> dict[str, Any]:
"""Turn frames into hours, minutes, seconds, frames"""
frames = clamp_int(frames)
seconds, frames = divmod(frames, 30)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return {"hh": hours, "mm": minutes, "ss": seconds, "frames": frames}
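# Worked example: frames_to_time(109845) -> {"hh": 1, "mm": 1, "ss": 1, "frames": 15}
# (timings are converted at 30 frames per second).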
def clamp_int(value: int) -> int:
"""
    Clamp an integer to the non-negative range of a signed 32-bit integer
Args:
value (int): The value to clamp
Returns:
int: The clamped value
"""
return clamp(value, 0, (2**31) - 1)
def num_to_bytes(num: int, length: int) -> bytes:
"""Turn number into little endian bytes"""
return num.to_bytes(length, byteorder="little")
def seconds_to_time(seconds: int) -> dict[str, Any]:
"""Turn seconds into hours, minutes, seconds"""
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return {"hh": hours, "mm": minutes, "ss": seconds}
def time_to_seconds(time: dict[str, Any]) -> int:
"""Turn hours, minutes, seconds into seconds"""
seconds = time["ss"]
seconds += time["mm"] * 60
seconds += time["hh"] * 60 * 60
return seconds
def time_to_frames(time: dict[str, Any]) -> int:
"""Turn hours, minutes, seconds, frames into frames"""
total_frames = time["frames"]
total_frames += time["ss"] * 30
total_frames += time["mm"] * 60 * 30
total_frames += time["hh"] * 60 * 60 * 30
total_frames = clamp_int(total_frames)
return total_frames
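# time_to_frames is the inverse conversion of frames_to_time, clamped to the signed
# 32-bit integer range.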
def check_int(value: str) -> Union[int, None]:
"""Check if a string is an integer"""
value = str(value).strip(" ")
try:
return int(value)
except ValueError:
return None
def check_int_max(value: str, max_value: Optional[int] = None) -> Optional[int]:
val = check_int(value)
if val is None:
return None
if max_value is not None:
return clamp(val, 0, max_value)
return clamp_int(val)
def int_to_str_ls(int_list: list[int]) -> list[str]:
"""Turn list of ints to list of strings"""
str_list: list[str] = []
for i in int_list:
str_list.append(str(i))
return str_list
def parse_int_list(lst: list[str], offset: int) -> list[int]:
"""Turn string list to int list"""
new_list: list[int] = []
for item in lst:
try:
new_list.append(int(item) + offset)
except ValueError:
pass
return new_list
def clamp(value: int, min_value: int, max_value: int) -> int:
"""Clamp a value between two values"""
return max(min(value, max_value), min_value)
def write_file_bytes(file_path: str, data: bytes) -> bytes:
"""Write file as bytes"""
try:
with open(file_path, "wb") as file:
file.write(data)
except PermissionError as err:
raise Exception("Permission denied: " + file_path) from err
return data
def get_save_path() -> str:
"""Get the save path from the env variable"""
save_path = os.environ.get("BC_SAVE_PATH")
if save_path is None:
raise Exception("BC_SAVE_PATH not set")
return save_path
def set_save_path(path: str) -> None:
"""Set the save path in the env variable"""
os.environ["BC_SAVE_PATH"] = os.path.abspath(path)
def get_text_splitter(isjp: bool):
    """Get the text splitter character to use (',' for JP saves, '|' otherwise)"""
if isjp:
return ","
return "|"
def get_save_file_filetype() -> list[tuple[str, str]]:
"""Get the file types for the save file"""
return [("Battle Cats Save Files", "*SAVE_DATA*"), ("All Files", "*.*")]
def read_file_bytes(file_path: str) -> bytes:
"""Read file as bytes"""
with open(file_path, "rb") as file:
return file.read()
def write_file_string(file_path: str, data: str):
"""Write file as string"""
with open(file_path, "w", encoding="utf-8") as file:
file.write(data)
def config_clamp(value: int, min: int, max: int, clamp_max_int: bool = True):
"""Clamp a value between 0 and a max value"""
disable = config_manager.get_config_value_category("EDITOR", "DISABLE_MAXES")
if disable:
if clamp_max_int:
return clamp_int(value)
return value
return clamp(value, min, max)
def check_clamp(
values: Any, max_value: int, min_value: int = 0, offset: int = -1
) -> list[int]:
    """Turn a list of strings into a list of ints, clamp them between a min and max, and apply an offset"""
if isinstance(values, str):
values = [values]
int_values: list[int] = []
for value in values:
value = str(value).strip(" ")
value = check_int(value)
if value is None:
continue
value = clamp(value, min_value, max_value)
value += offset
int_values.append(value)
return int_values
def encode_ls(lst: list[int]) -> dict[int, Any]:
"""Encode a list of integers into a dictionary"""
return {i: lst[i] for i in range(len(lst))}
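# e.g. encode_ls([7, 2]) -> {0: 7, 1: 2}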
def parse_int_list_list(list_of_lists: list[list[str]]) -> list[list[Any]]:
"""Turn list of list of strings into list of list of ints"""
new_lists: list[list[Any]] = []
for lst in list_of_lists:
new_list: list[Any] = []
for item in lst:
try:
new_list.append(int(item))
except ValueError:
new_list.append(item)
new_lists.append(new_list)
new_lists = [line for line in new_lists if line != []]
return new_lists
def check_hex(value: str) -> Union[str, None]:
"""Check if a string is a hex number"""
value = str(value).strip(" ")
try:
int(value, 16)
return value
except ValueError:
return None
def check_dec(value: str) -> Union[str, None]:
"""Check if a string is a decimal number"""
value = str(value).strip(" ")
try:
int(value)
return value
except ValueError:
return None
def str_to_gv(game_version: str) -> str:
    """Turn a game version in semantic versioning form into its integer representation (returned as a string)"""
split_gv = game_version.split(".")
if len(split_gv) == 2:
split_gv.append("0")
final = ""
for split in split_gv:
final += split.zfill(2)
return final.lstrip("0")
def gv_to_str(game_version: int) -> str:
    """Turn a game version in integer representation into a semantic versioning string"""
split_gv = str(game_version).zfill(6)
split_gv = [str(int(split_gv[i : i + 2])) for i in range(0, len(split_gv), 2)]
return ".".join(split_gv)
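# Round-trip example: str_to_gv("11.7.1") -> "110701" and gv_to_str(110701) -> "11.7.1";
# two-part versions gain a trailing ".0", e.g. str_to_gv("6.10") -> "61000".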
def load_json(json_path: str) -> Any:
"""Load a json file"""
return json.loads(read_file_string(json_path))
def is_jp(save_stats: dict[str, Any]) -> bool:
"""Check if the save is a Japanese save"""
return save_stats["version"] == "jp"
def check_data_is_jp(save_stats: dict[str, Any]) -> bool:
"""Check if the save data is a Japanese save, checking the config file"""
if config_manager.get_config_value_category("EDITOR", "ONLY_GET_EN_DATA"):
return False
return is_jp(save_stats)
def check_managed_items(save_stats: dict[str, Any], path: str) -> None:
"""Check if the user has untracked bannable items"""
info = user_info.UserInfo(save_stats["inquiry_code"])
if info.has_managed_items():
upload = (
user_input_handler.colored_input(
"You have untracked bannable items that need to be uploaded. Do you want to upload them now? (&y&/&n&) (I recommend saying &y& to avoid bans):"
)
== "y"
)
if upload:
server_handler.meta_data_upload_handler(save_stats, path)
colored_text("&Uploaded meta data", new=GREEN)
else:
delete = (
user_input_handler.colored_input(
"Do you want to remove your item logs? (&y&/&n&):"
)
== "y"
)
if delete:
info.clear_managed_items()
colored_text("&Removed item logs", new=GREEN)
def exit_editor():
"""Exit the editor"""
sys.exit(0)
def check_changes(_: Any):
"""
Check if the user wants to exit the editor
Args:
_ (Any): Unused
"""
try:
save_path = get_save_path()
except Exception:
return
temp_file_path = os.path.join(
config_manager.get_app_data_folder(), "SAVE_DATA_temp"
)
if not os.path.exists(temp_file_path):
return
is_identical = are_identical_files(save_path, temp_file_path)
if is_identical:
return
ask_save_changes()
def exit_check_changes(_: Any = None):
check_changes(None)
exit_editor()
def ask_save_changes():
"""
Ask if the user wants to save the changes
"""
save = (
user_input_handler.colored_input(
"You have unsaved changes. Would you like to save them? (&y&/&n&):"
)
== "y"
)
if save:
current_path = get_save_path()
temp_file_path = os.path.join(
config_manager.get_app_data_folder(), "SAVE_DATA_temp"
)
if os.path.exists(temp_file_path):
data = read_file_bytes(temp_file_path)
save_stats = parse_save.start_parse(data, get_country_code(data))
check_managed_items(save_stats, temp_file_path)
write_file_bytes(current_path, read_file_bytes(temp_file_path))
colored_text(
f"Save data saved to &{current_path}&",
base=GREEN,
new=WHITE,
)
def are_identical_files(file1: str, file2: str) -> bool:
"""Check if two files are identical"""
return filecmp.cmp(file1, file2)
def check_cat_ids(cat_ids: list[int], save_stats: dict[str, Any]) -> list[int]:
"""Check if a list of cat ids is valid"""
new_cat_ids: list[int] = []
for cat_id in cat_ids:
if cat_id > len(save_stats["cats"]) - 1:
colored_text(f"Invalid cat id {cat_id}", base=RED)
continue
new_cat_ids.append(cat_id)
return new_cat_ids
def error_text(text: str):
"""Print error text"""
colored_text(text, base=RED)
def is_android() -> bool:
"""Check if the user is on android"""
return "ANDROID_ROOT" in os.environ
def colored_text(
text: str,
base: str = WHITE,
new: str = DARK_YELLOW,
split_char: str = "&",
end: str = "\n",
):
"""Print text with colors"""
color_new = colored.fg(new) # type: ignore
color_base = colored.fg(base) # type: ignore
color_reset = colored.fg(WHITE) # type: ignore
text_split: list[str] = split_text(text, split_char)
for i, text_section in enumerate(text_split):
if i % 2:
print(f"{color_new}{text_section}{color_base}", end="")
else:
print(f"{color_base}{text_section}{color_base}", end="")
print(color_reset, end=end)
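# Text between unescaped split chars (default "&") is printed in the `new` colour;
# a literal split char can be produced by escaping it with a backslash.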
def split_text(text: str, split_char: str = "&") -> list[str]:
"""Split text on split_char, allowing for escaped split chars"""
text_split: list[str] = []
current_string = ""
skip = 0
for i, char in enumerate(text):
if skip > 0:
skip -= 1
continue
        if char == "\\":
            # Only treat the backslash as an escape when a split char actually follows,
            # guarding against an IndexError on a trailing backslash
            if i + 1 < len(text) and text[i + 1] == split_char:
                current_string += split_char
                skip = 1
            continue
if char == split_char:
text_split.append(current_string)
current_string = ""
else:
current_string += char
text_split.append(current_string)
return text_split
def colored_list(
items: list[str],
extra_data: Any = None,
index: bool = True,
offset: Union[None, int] = None,
):
"""Print a list with colors and extra data if provided"""
final = ""
for i, item in enumerate(items):
if index:
final += f"{i+1}. "
final += f"&{item}&"
if extra_data:
if extra_data[i] is not None:
if isinstance(offset, int) and isinstance(extra_data[i], int):
final += f" &:& {extra_data[i]+offset}"
else:
final += f" &:& {extra_data[i]}"
final += "\n"
final = final.rstrip("\n")
colored_text(final)
def calculate_user_rank(save_stats: dict[str, Any]):
"""Calculate the user rank"""
user_rank = 0
for cat_id, cat_flag in enumerate(save_stats["cats"]):
if cat_flag == 0:
continue
user_rank += save_stats["cat_upgrades"]["Base"][cat_id] + 1
user_rank += save_stats["cat_upgrades"]["Plus"][cat_id]
for skill_id in range(len(save_stats["blue_upgrades"]["Base"])):
if skill_id == 1:
continue
user_rank += save_stats["blue_upgrades"]["Base"][skill_id] + 1
user_rank += save_stats["blue_upgrades"]["Plus"][skill_id]
return user_rank
def write_save_data(save_data: bytes, country_code: str, path: str, prompt: bool):
"""Patch the save data and write it"""
save_data = patcher.patch_save_data(save_data, country_code)
if prompt:
new_path = save_file("Save File", get_save_file_filetype(), path)
if new_path is None:
colored_text("Save cancelled", new=RED)
return
path = new_path
write_file_bytes(path, save_data)
colored_text(f"Saved to: &{os.path.abspath(path)}&", new=GREEN)
return save_data
def select_dir(title: str, default_dir: str) -> str:
"""
Select a directory from the user
Args:
title (str): Title of the dialog
default_dir (str): Default directory to select
Returns:
str: Selected directory
"""
if not has_tkinter():
return default_dir
from tkinter import filedialog # type: ignore
root = setup_tk()
dir_path = filedialog.askdirectory(title=title, initialdir=default_dir, parent=root)
return dir_path
def has_tkinter() -> bool:
"""Check if tkinter is installed"""
try:
import tkinter # type: ignore
except ImportError:
colored_text("tkinter is not installed", new=RED)
return False
try:
setup_tk()
except tkinter.TclError:
colored_text("error setting up tkinter", new=RED)
return False
return True
def setup_tk() -> Any:
"""
Setup the tkinter window
Returns:
Tk: Tkinter window
"""
from tkinter import Tk
root = Tk()
root.withdraw()
root.wm_attributes("-topmost", 1) # type: ignore
return root
def run_in_parallel(fns: list[Process]) -> None:
"""
Run a list of functions in parallel
Args:
fns (list[Process]): List of functions to run in parallel
"""
proc: list[Process] = []
for fn in fns:
fn.start()
proc.append(fn)
for p in proc:
p.join()
def run_in_background(func: Callable[..., Any]) -> None:
"""
Run a function in the background
Args:
        func (Callable[..., Any]): Function to run in the background
"""
Process(target=func).start()
def get_cc(save_stats: dict[str, Any]) -> str:
"""Get the country code"""
if is_jp(save_stats):
return "jp"
return "en"
def get_lang(jp: bool) -> str:
"""Get the language code"""
if jp:
return "ja"
return "en"
def get_save_path_home() -> str:
"""
Get the save path
Returns:
str: Save path
"""
save_name = get_default_save_name()
if config_manager.get_config_value("FIXED_SAVE_PATH"):
path = os.path.join(get_home_path(), "bc_saves", os.path.basename(save_name))
create_dirs(os.path.dirname(path))
return path
return save_name
def save_file(
title: str, file_types: list[tuple[str, str]], path: str
) -> Optional[str]:
"""Save a file with tkinter"""
if not has_tkinter():
return path
setup_tk()
from tkinter import filedialog
try:
path_d = filedialog.asksaveasfile(
mode="w",
confirmoverwrite=True,
initialfile=os.path.basename(path),
filetypes=file_types,
title=title,
)
if not path_d:
return None
except PermissionError:
        # colored_text prints directly, so it is not wrapped in print()
        # (which would only print its None return value)
        colored_text(
            "Permission denied. Make sure the file is not in use", base=RED
        )
exit_editor()
return
return path_d.name
def select_file(
title: str,
file_types: list[tuple[str, str]],
default_dir: str = "",
initial_file: str = "",
) -> str:
"""Select a file with tkinter"""
if not has_tkinter():
path = user_input_handler.colored_input(
f"Enter the path to the file ({title}): "
)
if not os.path.isfile(path):
colored_text("Invalid path", new=RED)
return ""
return path
setup_tk()
from tkinter import filedialog
file_path = filedialog.askopenfilename(
initialdir=default_dir,
title=title,
filetypes=file_types,
initialfile=initial_file,
)
return file_path
def get_home_path() -> str:
"""
Get the home path
Returns:
str: Home path
"""
return os.path.expanduser("~")
def get_default_save_name() -> str:
"""
Get the default save name
Returns:
str: Default save name
"""
save_name = config_manager.get_config_value("DEFAULT_SAVE_FILE_PATH")
if not save_name:
save_name = "SAVE_DATA"
return save_name
def load_save_file(path: str) -> dict[str, Any]:
"""Load a save file, get the country code, create a backup and parse the save data"""
save_data = read_file_bytes(path)
country_code = get_country_code(save_data)
colored_text(f"Game version: &{country_code}&")
save_stats = parse_save.start_parse(save_data, country_code)
if config_manager.get_config_value_category("START_UP", "CREATE_BACKUP"):
write_file_bytes(path + "_backup", save_data)
colored_text(
f"Backup created at: &{os.path.abspath(path + '_backup')}&", new=GREEN
)
return {
"save_data": save_data,
"country_code": country_code,
"save_stats": save_stats,
}
def get_country_code(save_data: bytes) -> str:
"""Ask the user for their country code if it cannot be detected"""
country_code = patcher.detect_game_version(save_data)
if country_code is None:
country_code = ask_cc()
return country_code
def ask_cc():
"""Ask the user for their country code"""
default_gv = config_manager.get_config_value("DEFAULT_COUNTRY_CODE")
if default_gv:
if len(default_gv) == 2:
colored_text(f"Using default country code: &{default_gv}&")
return default_gv
country_code = user_input_handler.colored_input(
"Enter your country code (&en&, &jp&, &kr&, &tw&):"
)
return country_code
def export_json(save_stats: dict[str, Any], path: str) -> None:
"""Export the save stats to a json file"""
ordered_data = parse_save.re_order(save_stats)
if os.path.isdir(path):
path = os.path.join(path, f"{get_save_path_home()}.json")
write_file_string(path, json.dumps(ordered_data, indent=4))
colored_text(f"Successfully wrote json to &{os.path.abspath(path)}&")
def load_json_handler(json_path: str) -> Union[None, str]:
"""Load a save_data json file and serialise it"""
save_stats = load_json(json_path)
save_data = serialise_save.start_serialize(save_stats)
save_data = patcher.patch_save_data(save_data, save_stats["version"])
path = save_file(
"Save file",
get_save_file_filetype(),
os.path.join(os.path.dirname(json_path), get_save_path_home()),
)
if path is None:
return None
write_file_bytes(path, save_data)
return path
def format_text(text: list[str]) -> list[str]:
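    """Increment the "bcsfe:<n>" counter entry in a list of text entries, appending "bcsfe:1" if absent"""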
for i, order in enumerate(text):
if order.startswith("bcsfe:"):
try:
counter = int(order.split(":")[1])
except ValueError:
counter = 0
text[i] = f"bcsfe:{counter + 1}"
break
else:
text.append("bcsfe:1")
return text
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/helper.py
| 0.599954 | 0.167968 |
helper.py
|
pypi
|
import collections
import datetime
import enum
import json
import struct
import traceback
from typing import Any, Optional, Union
from . import helper
from . import updater
address = 0
save_data_g = None
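# Module-level parser state: `address` is the current read offset into the raw save
# data and `save_data_g` holds the bytes being parsed; both are expected to be set
# before the helper functions below are used.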
def re_order(data: dict[str, Any]) -> collections.OrderedDict[str, Any]:
    """Move all unknown vals to the bottom of the json and priority keys to the top"""
priority: list[str] = json.loads(
helper.read_file_string(helper.get_file("order.json"))
)
ordered_data = collections.OrderedDict(data)
for item in ordered_data.copy():
if "unknown" in item:
ordered_data.move_to_end(item)
for i in range(len(priority)):
ordered_data.move_to_end(priority[len(priority) - 1 - i], False)
return ordered_data
def set_address(val: int):
"""Set the address to a specific value"""
global address
address = val
def next_int_len(number: int) -> dict[str, int]:
"""Get the next int of a specified byte length from the save file"""
if number < 0:
raise Exception("Invalid number")
if save_data_g is None:
raise Exception("Invalid save data")
if number > len(save_data_g):
raise Exception("Byte length is greater than the length of the save data")
val = convert_little(save_data_g[address : address + number])
data: dict[str, int] = {}
set_address(address + number)
data["Value"] = val
data["Length"] = number
return data
def generate_empty_len(length: int) -> dict[str, int]:
"""Generate an empty dict with a length and value of 0"""
data: dict[str, int] = {}
data["Length"] = length
data["Value"] = 0
return data
def next_int(number: int) -> int:
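    """Read the next int of the given byte length and return just its value"""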
return next_int_len(number)["Value"]
def skip(number: int):
"""Skip a number of bytes"""
set_address(address + number)
def convert_little(byte_data: bytes) -> int:
"""Convert a byte array to an int in little endian"""
return int.from_bytes(byte_data, byteorder="little", signed=False)
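# e.g. convert_little(b"\x39\x30") == 0x3039 == 12345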
def get_time_data_skip(dst_flag: bool) -> dict[str, Any]:
year = next_int(4)
year_2 = next_int(4)
month = next_int(4)
month_2 = next_int(4)
day = next_int(4)
day_2 = next_int(4)
time_stamp = get_double()
hour = next_int(4)
minute = next_int(4)
second = next_int(4)
dst = 0
if dst_flag:
dst = next_int(1)
time = datetime.datetime(year, month, day, hour, minute, second)
return {
"time": time.isoformat(),
"time_stamp": time_stamp,
"dst": dst,
"duplicate": {"yy": year_2, "mm": month_2, "dd": day_2},
}
def get_time_data(dst_flag: bool) -> str:
if dst_flag:
_ = next_int(1)
year = next_int(4)
month = next_int(4)
day = next_int(4)
hour = next_int(4)
minute = next_int(4)
second = next_int(4)
time = datetime.datetime(year, month, day, hour, minute, second)
return time.isoformat()
def get_length_data(
length_bytes: int = 4, separator: int = 4, length: Union[int, None] = None
) -> list[int]:
data: list[int] = []
if length is None:
length = next_int(length_bytes)
if save_data_g is None:
raise Exception("Invalid save data")
if length > len(save_data_g):
raise Exception("Length too large")
for _ in range(length):
data.append(next_int(separator))
return data
def get_length_doubles(
length_bytes: int = 4, length: Union[int, None] = None
) -> list[float]:
data: list[float] = []
if length is None:
length = next_int(length_bytes)
if save_data_g is None:
raise Exception("Invalid save data")
if length > len(save_data_g):
raise Exception("Length too large")
for _ in range(length):
data.append(get_double())
return data
def get_equip_slots() -> list[list[int]]:
length = next_int(1)
data = get_length_data(1, length=length * 10)
slots: list[list[int]] = []
for i in range(length):
start_pos = 10 * i
end_pos = 10 * (i + 1)
slots.append(data[start_pos:end_pos])
    return slots
def get_main_story_levels() -> dict[str, Any]:
chapter_progress: list[int] = []
for _ in range(10):
chapter_progress.append(next_int(4))
times_cleared: list[list[int]] = []
for _ in range(10):
chapter_times: list[int] = []
for _ in range(51):
chapter_times.append(next_int(4))
times_cleared.append(chapter_times)
times_cleared_dict = times_cleared
return {
"Chapter Progress": chapter_progress,
"Times Cleared": times_cleared_dict,
}
def get_treasures() -> list[list[int]]:
treasures: list[list[int]] = []
for _ in range(10):
chapter: list[int] = []
for _ in range(49):
chapter.append(next_int(4))
treasures.append(chapter)
return treasures
def get_cat_upgrades() -> dict[str, Any]:
length = next_int(4)
data = get_length_data(4, 2, length * 2)
base_levels = data[1::2]
plus_levels = data[0::2]
data_dict = {"Base": base_levels, "Plus": plus_levels}
return data_dict
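# Note: upgrade levels are stored interleaved two values at a time; even offsets are
# read as the "Plus" levels and odd offsets as the "Base" levels (the special-skill
# upgrades below use the same layout).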
def get_blue_upgrades() -> dict[str, Any]:
length = 11
data = get_length_data(4, 2, length * 2)
base_levels = data[1::2]
plus_levels = data[0::2]
data_dict = {"Base": base_levels, "Plus": plus_levels}
return data_dict
def get_utf8_string(length: Union[int, None] = None) -> str:
data = get_length_data(4, 1, length)
data = bytes(data).decode("utf-8")
return data
def read_variable_length_int() -> int:
"""
Read a variable length int from the save file
Returns:
int: The value of the variable length int
"""
i = 0
for _ in range(4):
i_3 = i << 7
read = next_int(1)
i = i_3 | (read & 127)
if (read & 128) == 0:
return i
return i
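# Encoding note: each byte contributes 7 bits (most-significant group first) and a set
# high bit signals that another byte follows; at most 4 bytes are consumed.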
def load_bonus_hash() -> tuple[dict[int, int], dict[int, int]]:
"""
Get the variable data from the save file
Returns:
tuple[dict[int, int], dict[int, int]]: The variable data
"""
length_1 = read_variable_length_int()
data_1: dict[int, int] = {}
for _ in range(length_1):
key = read_variable_length_int()
val = read_variable_length_int()
data_1[key] = val
length_2 = read_variable_length_int()
data_2: dict[int, int] = {}
for _ in range(length_2):
key = read_variable_length_int()
val = next_int(1)
data_2[key] = val
return (data_1, data_2)
def get_event_stages_current() -> dict[str, Any]:
unknown_val = next_int(1)
total_sub_chapters = next_int(2) * unknown_val
stars_per_sub_chapter = next_int(1)
stages_per_sub_chapter = next_int(1)
clear_progress = get_length_data(1, 1, total_sub_chapters * stars_per_sub_chapter)
clear_progress = list(helper.chunks(clear_progress, stars_per_sub_chapter))
return {
"Clear": clear_progress,
"unknown": unknown_val,
"total": total_sub_chapters,
"stages": stages_per_sub_chapter,
"stars": stars_per_sub_chapter,
}
def get_event_stages(lengths: dict[str, Any]) -> dict[str, Any]:
total_sub_chapters = lengths["total"]
stars_per_sub_chapter = lengths["stars"]
stages_per_sub_chapter = lengths["stages"]
clear_progress = get_length_data(1, 1, total_sub_chapters * stars_per_sub_chapter)
clear_amount = get_length_data(
1, 2, total_sub_chapters * stages_per_sub_chapter * stars_per_sub_chapter
)
unlock_next = get_length_data(1, 1, total_sub_chapters * stars_per_sub_chapter)
clear_progress = list(helper.chunks(clear_progress, stars_per_sub_chapter))
clear_amount = list(
helper.chunks(clear_amount, stages_per_sub_chapter * stars_per_sub_chapter)
)
unlock_next = list(helper.chunks(unlock_next, stars_per_sub_chapter))
clear_amount_sep: list[list[list[int]]] = []
for clear_amount_val in clear_amount:
sub_chapter_clears: list[list[int]] = []
for j in range(stars_per_sub_chapter):
sub_chapter_clears.append(clear_amount_val[j::stars_per_sub_chapter])
clear_amount_sep.append(sub_chapter_clears)
clear_amount = clear_amount_sep
return {
"Value": {
"clear_progress": clear_progress,
"clear_amount": clear_amount,
"unlock_next": unlock_next,
},
"Lengths": lengths,
}
def get_purchase_receipts() -> list[dict[str, Any]]:
total_strs = next_int(4)
data: list[dict[Any, Any]] = []
for _ in range(total_strs):
data_dict: dict[str, Any] = {}
data_dict["unknown_4"] = next_int(4)
strings = next_int(4)
item_packs: list[Any] = []
for _ in range(strings):
strings_dict = {}
strings_dict["Value"] = get_utf8_string()
strings_dict["unknown_1"] = next_int(1)
item_packs.append(strings_dict)
data_dict["item_packs"] = item_packs
data.append(data_dict)
return data
def get_dojo_data_maybe() -> dict[int, Any]:
# everything here is speculative and might not be correct
dojo_data: dict[int, Any] = {}
total_subchapters = next_int(4)
for _ in range(total_subchapters):
subchapter_id = next_int(4)
subchapter_data = {}
total_stages = next_int(4)
for _ in range(total_stages):
stage_id = next_int(4)
score = next_int(4)
subchapter_data[stage_id] = score
dojo_data[subchapter_id] = subchapter_data
return dojo_data
def get_data_before_outbreaks() -> list[dict[str, Any]]:
data: list[dict[str, Any]] = []
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
length_2 = next_int_len(4)
data.append(length_2)
length_3 = next_int_len(4)
data.append(length_3)
for _ in range(length_3["Value"]):
val_1 = next_int_len(4)
data.append(val_1)
val_2 = next_int_len(1)
data.append(val_2)
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
val_1 = next_int_len(4)
data.append(val_1)
val_2 = next_int_len(1)
data.append(val_2)
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
val_1 = next_int_len(4)
data.append(val_1)
val_2 = next_int_len(4)
data.append(val_2)
length = next_int_len(4)
data.append(length)
val_1 = next_int_len(4)
data.append(val_1)
for _ in range(length["Value"]):
val_2 = next_int_len(8)
data.append(val_2)
val_3 = next_int_len(4)
data.append(val_3)
gv_56 = next_int_len(4) # 0x38
data.append(gv_56)
val_1 = next_int_len(1)
data.append(val_1)
length = next_int_len(4)
data.append(length)
val_2 = next_int_len(4)
data.append(val_2)
for _ in range(length["Value"]):
val_3 = next_int_len(1)
data.append(val_3)
val_4 = next_int_len(4)
data.append(val_4)
return data
def get_outbreaks() -> dict[int, Any]:
chapters_count = next_int(4)
outbreaks: dict[int, Any] = {}
for _ in range(chapters_count):
chapter_id = next_int(4)
stages_count = next_int(4)
chapter = {}
for _ in range(stages_count):
stage_id = next_int(4)
outbreak_cleared_flag = next_int(1)
chapter[stage_id] = outbreak_cleared_flag
outbreaks[chapter_id] = chapter
return outbreaks
def get_mission_data_maybe() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
val_1 = next_int_len(4)
data.append(val_1)
val_2 = next_int_len(1)
data.append(val_2)
return data
def get_unlock_popups() -> tuple[list[tuple[int, int]], dict[str, int]]:
"""Get unlock popups and unlock flags"""
length = next_int_len(4)
val_1 = next_int_len(4)
data: list[tuple[int, int]] = []
for _ in range(length["Value"]):
flag = next_int(1)
popup_id = next_int(4)
data.append((popup_id, flag))
return data, val_1
def get_unknown_data():
data: list[dict[str, int]] = []
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
length_2 = next_int_len(4)
data.append(length_2)
val_1 = next_int_len(4)
data.append(val_1)
unknown_val_2 = next_int_len(1)
data.append(unknown_val_2)
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
val_1 = next_int_len(4)
data.append(val_1)
val_2 = next_int_len(1)
data.append(val_2)
return data
def get_cat_cannon_data() -> dict[int, dict[str, Any]]:
length = next_int(4)
cannon_data: dict[int, dict[str, Any]] = {}
for _ in range(length):
cannon: dict[str, Any] = {}
cannon_id = next_int(4)
len_val = next_int(4)
unlock_flag = next_int(4)
effect_level = next_int(4)
foundation_level = 0
style_level = 0
if len_val == 4:
foundation_level = next_int(4)
style_level = next_int(4)
cannon["levels"] = {
"effect": effect_level,
"foundation": foundation_level,
"style": style_level,
}
cannon["unlock_flag"] = unlock_flag
cannon["len_val"] = len_val
cannon_data[cannon_id] = cannon
return cannon_data
def get_data_near_ht() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
val = next_int_len(1)
data.append(val)
for _ in range(val["Value"]):
unknown_val_2 = next_int_len(3)
data.append(unknown_val_2)
unknown_val_3 = next_int_len(8)
data.append(unknown_val_3)
gv_64 = next_int_len(4) # 0x40
data.append(gv_64)
length = next_int_len(4)
data.append(length)
val_1 = next_int_len(4)
data.append(val_1)
val_3 = {"Value": 0}
for _ in range(length["Value"]):
val_2 = next_int_len(4)
data.append(val_2)
val_3 = next_int_len(4)
data.append(val_3)
val_1 = next_int_len(4)
data.append(val_1)
val_4 = {"Value": 0}
for _ in range(val_3["Value"]):
length = next_int_len(4)
data.append(length)
for _ in range(length["Value"]):
val_2 = next_int_len(4)
data.append(val_2)
val_4 = next_int_len(4)
data.append(val_4)
val_1 = next_int_len(4)
data.append(val_1)
for _ in range(val_4["Value"]):
val_2 = next_int_len(1)
data.append(val_2)
val_3 = next_int_len(4)
data.append(val_3)
return data
def get_ht_it_data() -> dict[str, Any]:
total = next_int(4)
stars = next_int(4)
current_data: dict[str, Any] = {}
current_data = {"total": total, "stars": stars, "selected": []}
for _ in range(total):
for _ in range(stars):
current_data["selected"].append(next_int(4))
current_data["selected"] = list(helper.chunks(current_data["selected"], 4))
total = next_int(4)
stars = next_int(4)
progress_data: dict[str, Any] = {}
progress_data = {
"total": total,
"stars": stars,
"clear_progress": [],
"clear_amount": [],
"unlock_next": [],
}
for _ in range(total):
for _ in range(stars):
progress_data["clear_progress"].append(next_int(4))
progress_data["clear_progress"] = list(
helper.chunks(progress_data["clear_progress"], 4)
)
total = next_int(4)
stages = next_int(4)
progress_data["stages"] = stages
stars = next_int(4)
clear_amount = get_length_data(4, 4, total * stages * stars)
clear_amount = list(helper.chunks(clear_amount, stages * stars))
clear_amount_sep: list[list[list[int]]] = []
for clear_amount_val in clear_amount:
sub_chapter_clears: list[list[int]] = []
for j in range(stars):
sub_chapter_clears.append(clear_amount_val[j::stars])
clear_amount_sep.append(sub_chapter_clears)
progress_data["clear_amount"] = clear_amount_sep
data: list[dict[str, int]] = []
length = next_int_len(4)
data.append(length)
length_2 = next_int_len(4)
data.append(length_2)
for _ in range(length["Value"]):
for _ in range(length_2["Value"]):
data.append(next_int_len(4))
return {"data": data, "current": current_data, "progress": progress_data}
def get_mission_segment() -> dict[int, int]:
missions: dict[int, int] = {}
length = next_int(4)
for _ in range(length):
mission_id = next_int(4)
mission_value = next_int(4)
missions[mission_id] = mission_value
return missions
def get_mission_data() -> dict[str, Any]:
missions: dict[str, dict[int, int]] = {}
missions["states"] = get_mission_segment()
missions["requirements"] = get_mission_segment()
missions["clear_types"] = get_mission_segment()
missions["gamatoto"] = get_mission_segment()
missions["nyancombo"] = get_mission_segment()
missions["user_rank"] = get_mission_segment()
missions["expiry"] = get_mission_segment()
missions["preparing"] = get_mission_segment()
return missions
def get_data_after_challenge() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
val_22 = next_int_len(4)
data.append(val_22)
gv_69 = next_int_len(4) # 0x45
data.append(gv_69)
val_54 = next_int_len(4)
data.append(val_54)
val_118 = next_int_len(4)
data.append(val_118)
for _ in range(val_54["Value"]):
val_15 = next_int_len(1)
data.append(val_15)
val_118 = next_int_len(4)
data.append(val_118)
val_54 = next_int_len(4)
data.append(val_54)
for _ in range(val_118["Value"]):
val_65 = next_int_len(8)
data.append(val_65)
val_54 = next_int_len(4)
data.append(val_54)
return data
def get_data_after_tower() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
gv_66 = next_int_len(4) # 0x42
data.append(gv_66)
data.append(next_int_len(4 * 2))
data.append(next_int_len(1 * 3))
data.append(next_int_len(4 * 3))
data.append(next_int_len(1 * 3))
data.append(next_int_len(1))
data.append(next_int_len(8))
val_54 = next_int_len(4)
data.append(val_54)
val_61 = next_int_len(4)
data.append(val_61)
for _ in range(val_54["Value"]):
for _ in range(val_61["Value"]):
val_22 = next_int_len(4)
data.append(val_22)
val_54 = next_int_len(4)
data.append(val_54)
val_61 = next_int_len(4)
data.append(val_61)
for _ in range(val_54["Value"]):
for _ in range(val_61["Value"]):
val_22 = next_int_len(4)
data.append(val_22)
val_54 = next_int_len(4)
data.append(val_54)
val_61 = next_int_len(4)
data.append(val_61)
val_57 = next_int_len(4)
data.append(val_57)
for _ in range(val_54["Value"]):
for _ in range(val_61["Value"]):
for _ in range(val_57["Value"]):
val_22 = next_int_len(4)
data.append(val_22)
val_54 = next_int_len(4)
data.append(val_54)
val_61 = next_int_len(4)
data.append(val_61)
for _ in range(val_54["Value"]):
for _ in range(val_61["Value"]):
val_22 = next_int_len(4)
data.append(val_22)
val_54 = next_int_len(4)
data.append(val_54)
for _ in range(val_54["Value"] - 1):
val_22 = next_int_len(4)
data.append(val_22)
return data
def get_uncanny_current() -> dict[str, Any]:
total_subchapters = next_int(4)
stages_per_subchapter = next_int(4)
stars = next_int(4)
if total_subchapters < 1:
next_int(4)
raise Exception("Invalid total subchapters")
else:
clear_progress = get_length_data(4, 4, total_subchapters * stars)
clear_progress = list(helper.chunks(clear_progress, stars))
return {
"Clear": clear_progress,
"total": total_subchapters,
"stages": stages_per_subchapter,
"stars": stars,
}
def get_event_timed_scores() -> dict[str, Any]:
total_subchapters = next_int(4)
stages_per_subchapter = next_int(4)
stars = next_int(4)
score = get_length_data(4, 4, total_subchapters * stars * stages_per_subchapter)
score = list(helper.chunks(score, stars * stages_per_subchapter))
return {
"Score": score,
"total": total_subchapters,
"stages": stages_per_subchapter,
"stars": stars,
}
def get_uncanny_progress(lengths: dict[str, Any]) -> dict[str, Any]:
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
clear_progress = get_length_data(4, 4, total * stars)
clear_progress = list(helper.chunks(clear_progress, stars))
clear_amount = get_length_data(4, 4, total * stages * stars)
unlock_next = get_length_data(4, 4, total * stars)
clear_amount = list(helper.chunks(clear_amount, stages * stars))
unlock_next = list(helper.chunks(unlock_next, stars))
clear_amount_sep: list[list[list[int]]] = []
for clear_amount_val in clear_amount:
sub_chapter_clears: list[list[int]] = []
for j in range(stars):
sub_chapter_clears.append(clear_amount_val[j::stars])
clear_amount_sep.append(sub_chapter_clears)
clear_amount = clear_amount_sep
return {
"Value": {
"clear_progress": clear_progress,
"clear_amount": clear_amount,
"unlock_next": unlock_next,
},
"Lengths": lengths,
}
def get_data_after_uncanny() -> dict[str, Any]:
lengths = get_uncanny_current()
return {"current": lengths, "progress": get_uncanny_progress(lengths)}
def get_gold_pass_data() -> dict[str, Any]:
"""Get gold pass related data"""
data: dict[str, Any] = {}
data["officer_id"] = next_int_len(4)
data["renewal_times"] = next_int_len(4)
data["start_date"] = get_double()
data["expiry_date"] = get_double()
data["unknown_2"] = get_length_doubles(length=2)
data["start_date_2"] = get_double()
data["expiry_date_2"] = get_double()
data["unknown_3"] = get_double()
data["flag_2"] = next_int_len(4)
data["expiry_date_3"] = get_double()
number_of_rewards = next_int(4)
claimed_rewards: dict[int, int] = {}
for _ in range(number_of_rewards):
item_id = next_int(4)
amount = next_int(4)
claimed_rewards[item_id] = amount
data["claimed_rewards"] = claimed_rewards
data["unknown_4"] = next_int_len(8)
data["unknown_5"] = next_int_len(1)
data["unknown_6"] = next_int_len(1)
return data
def get_talent_data() -> dict[int, list[dict[str, int]]]:
total_cats = next_int(4)
talents: dict[int, list[dict[str, int]]] = {}
for _ in range(total_cats):
cat_id = next_int(4)
cat_data: list[dict[str, int]] = []
number_of_talents = next_int(4)
for _ in range(number_of_talents):
talent_id = next_int(4)
talent_level = next_int(4)
talent = {"id": talent_id, "level": talent_level}
cat_data.append(talent)
talents[cat_id] = cat_data
return talents
def get_medals() -> dict[str, Any]:
medal_data_1 = get_length_data(2, 2)
total_medals = next_int(2)
medals = {}
for _ in range(total_medals):
medal_id = next_int(2)
medal_flag = next_int(1)
medals[medal_id] = medal_flag
return {"medal_data_1": medal_data_1, "medal_data_2": medals}
def get_data_after_medals() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(1))
val_2 = next_int_len(2)
data.append(val_2)
val_3 = next_int_len(2)
data.append(val_3)
for _ in range(val_2["Value"]):
val_1 = next_int_len(1)
data.append(val_1)
val_3 = next_int_len(2)
data.append(val_3)
val_2 = next_int_len(2)
data.append(val_2)
val_6c = val_3
for _ in range(val_6c["Value"]):
val_2 = next_int_len(2)
data.append(val_2)
for _ in range(val_2["Value"]):
val_3 = next_int_len(2)
data.append(val_3)
val_4 = next_int_len(2)
data.append(val_4)
val_2 = next_int_len(2)
data.append(val_2)
val_7c = val_2
for _ in range(val_7c["Value"]):
val_2 = next_int_len(2)
data.append(val_2)
val_12 = next_int_len(4)
data.append(val_12)
data.append(next_int_len(4)) # 90000
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(8))
data.append(next_int_len(4)) # 90100
val_18 = next_int_len(2)
data.append(val_18)
for _ in range(val_18["Value"]):
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(2))
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(2))
val_18 = next_int_len(2)
data.append(val_18)
for _ in range(val_18["Value"]):
val_32 = next_int_len(4)
data.append(val_32)
val_48 = next_int_len(8)
data.append(val_48)
return data
def get_data_after_after_leadership(dst: bool) -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(4))
if not dst:
data.append(next_int_len(5))
if dst:
data.append(next_int_len(12))
else:
data.append(next_int_len(7))
return data
def get_legend_quest_current() -> dict[str, Any]:
total_subchapters = next_int(1)
stages_per_subchapter = next_int(1)
stars = next_int(1)
clear_progress = get_length_data(4, 1, total_subchapters * stars)
clear_progress = list(helper.chunks(clear_progress, stars))
return {
"Clear": clear_progress,
"total": total_subchapters,
"stages": stages_per_subchapter,
"stars": stars,
}
def get_legend_quest_progress(lengths: dict[str, Any]):
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
clear_progress = get_length_data(4, 1, total * stars)
clear_progress = list(helper.chunks(clear_progress, stars))
clear_amount = get_length_data(4, 2, total * stars * stages)
tries = get_length_data(4, 2, total * stars * stages)
unlock_next = get_length_data(4, 1, total * stars)
unlock_next = list(helper.chunks(unlock_next, stars))
clear_amount = list(helper.chunks(clear_amount, stages * stars))
tries = list(helper.chunks(tries, stages * stars))
clear_amount_sep: list[list[list[int]]] = []
stage_ids_sep: list[list[list[int]]] = []
for clear_amount_val in clear_amount:
sub_chapter_clears: list[list[int]] = []
for j in range(stars):
sub_chapter_clears.append(clear_amount_val[j::stars])
clear_amount_sep.append(sub_chapter_clears)
clear_amount = clear_amount_sep
for stage_id_val in tries:
sub_chapter_ids: list[list[int]] = []
for j in range(stars):
sub_chapter_ids.append(stage_id_val[j::stars])
stage_ids_sep.append(sub_chapter_ids)
tries = stage_ids_sep
return {
"Value": {
"clear_progress": clear_progress,
"clear_amount": clear_amount,
"tries": tries,
"unlock_next": unlock_next,
},
"Lengths": lengths,
}
def get_data_after_leadership() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(2))
data.append(next_int_len(1))
data.append(next_int_len(4)) # 80600
val_54 = next_int_len(4)
data.append(val_54)
if val_54["Value"] > 0:
val_118 = next_int_len(4)
data.append(val_118)
val_55 = next_int_len(4)
data.append(val_55)
for _ in range(val_55["Value"]):
val_61 = next_int_len(4)
data.append(val_61)
for _ in range(val_54["Value"] - 1):
val_61 = next_int_len(4)
data.append(val_61)
val_61 = next_int_len(4)
data.append(val_61)
return data
def get_gauntlet_current() -> dict[str, Any]:
total_subchapters = next_int(2)
stages_per_subchapter = next_int(1)
stars = next_int(1)
clear_progress = get_length_data(4, 1, total_subchapters * stars)
clear_progress = list(helper.chunks(clear_progress, stars))
return {
"Clear": clear_progress,
"total": total_subchapters,
"stages": stages_per_subchapter,
"stars": stars,
}
def get_gauntlet_progress(
lengths: dict[str, Any], unlock: bool = True
) -> dict[str, Any]:
total = lengths["total"]
stars = lengths["stars"]
stages = lengths["stages"]
clear_progress = get_length_data(4, 1, total * stars)
clear_progress = list(helper.chunks(clear_progress, stars))
clear_amount = get_length_data(4, 2, total * stages * stars)
unlock_next = []
if unlock:
unlock_next = get_length_data(4, 1, total * stars)
unlock_next = list(helper.chunks(unlock_next, stars))
clear_amount = list(helper.chunks(clear_amount, stages * stars))
clear_amount_sep: list[list[list[int]]] = []
for clear_amount_val in clear_amount:
sub_chapter_clears: list[list[int]] = []
for j in range(stars):
sub_chapter_clears.append(clear_amount_val[j::stars])
clear_amount_sep.append(sub_chapter_clears)
clear_amount = clear_amount_sep
return {
"Value": {
"clear_progress": clear_progress,
"clear_amount": clear_amount,
"unlock_next": unlock_next,
},
"Lengths": lengths,
}
class ClearedSlots:
class Slot:
class Cat:
def __init__(self, cat_id: int, cat_form: int):
self.cat_id = cat_id
self.cat_form = cat_form
def to_dict(self) -> dict[str, Any]:
return {
"cat_id": self.cat_id,
"cat_form": self.cat_form,
}
@staticmethod
def from_dict(data: dict[str, Any]) -> "ClearedSlots.Slot.Cat":
return ClearedSlots.Slot.Cat(data["cat_id"], data["cat_form"])
def __init__(self, cats: list[Cat], slot_index: int, separator: int):
self.cats = cats
self.slot_index = slot_index
self.separator = separator
def to_dict(self) -> dict[str, Any]:
return {
"cats": [cat.to_dict() for cat in self.cats],
"slot_index": self.slot_index,
"separator": self.separator,
}
@staticmethod
def from_dict(data: dict[str, Any]):
return ClearedSlots.Slot(
[ClearedSlots.Slot.Cat.from_dict(cat) for cat in data["cats"]],
data["slot_index"],
data["separator"],
)
class StageSlot:
class Stage:
def __init__(self, stage_id: int):
self.stage_id = stage_id
def to_dict(self) -> dict[str, Any]:
return {
"stage_id": self.stage_id,
}
@staticmethod
def from_dict(data: dict[str, Any]) -> "ClearedSlots.StageSlot.Stage":
return ClearedSlots.StageSlot.Stage(data["stage_id"])
def __init__(self, slot_index: int, stages: list[Stage]):
self.slot_index = slot_index
self.stages = stages
def to_dict(self) -> dict[str, Any]:
return {
"slot_index": self.slot_index,
"stages": [stage.to_dict() for stage in self.stages],
}
@staticmethod
def from_dict(data: dict[str, Any]) -> "ClearedSlots.StageSlot":
return ClearedSlots.StageSlot(
data["slot_index"],
[
ClearedSlots.StageSlot.Stage.from_dict(stage)
for stage in data["stages"]
],
)
def __init__(self, slots: list[Slot], slot_stages: list[StageSlot], end_index: int):
self.slots = slots
self.slot_stages = slot_stages
self.end_index = end_index
def to_dict(self) -> dict[str, Any]:
return {
"slots": [slot.to_dict() for slot in self.slots],
"slot_stages": [stage.to_dict() for stage in self.slot_stages],
"end_index": self.end_index,
}
@staticmethod
def from_dict(data: dict[str, Any]) -> "ClearedSlots":
return ClearedSlots(
[ClearedSlots.Slot.from_dict(slot) for slot in data["slots"]],
[ClearedSlots.StageSlot.from_dict(stage) for stage in data["slot_stages"]],
data["end_index"],
)
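# Illustrative sketch (not part of the original editor): ClearedSlots and its
# nested classes round-trip through to_dict()/from_dict(), which is how the
# parsed line-ups end up in the JSON-friendly save_stats structure. All values
# below are made up.
def _example_cleared_slots_roundtrip() -> bool:
    cat = ClearedSlots.Slot.Cat(cat_id=0, cat_form=2)
    slot = ClearedSlots.Slot([cat], slot_index=0, separator=0)
    stage_slot = ClearedSlots.StageSlot(0, [ClearedSlots.StageSlot.Stage(3000000)])
    original = ClearedSlots([slot], [stage_slot], end_index=1)
    return ClearedSlots.from_dict(original.to_dict()).to_dict() == original.to_dict()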
def get_enigma_stages() -> dict[str, Any]:
"""
Gets the enigma stages
Returns:
dict[str, Any]: The enigma stages
"""
enigma_data: dict[str, Any] = {}
enigma_data["energy_since_1"] = next_int(4)
enigma_data["energy_since_2"] = next_int(4)
enigma_data["enigma_level"] = next_int(1)
enigma_data["unknown_2"] = next_int(1)
enigma_data["unknown_3"] = next_int(1)
total_stages = next_int(1)
stages: list[dict[str, Any]] = []
for _ in range(total_stages):
data = {}
data["level"] = next_int(4) # 0 = inferior, 1 = normal, 2 = superior
data["stage_id"] = next_int(4)
data["decoding_status"] = next_int(
1
) # 0 = not decoded, 1 = decoded, 2 = revealed
data["start_time"] = get_double()
stages.append(data)
enigma_data["stages"] = stages
return enigma_data
def get_cleared_slots() -> tuple[dict[str, Any], list[dict[str, int]]]:
"""
Returns the line ups of the cleared stages
Returns:
        tuple[dict[str, Any], list[dict[str, int]]]: The line ups of the cleared stages and the trailing unknown data
"""
total_slots = next_int(2)
index = next_int(2)
slots: list[ClearedSlots.Slot] = []
for _ in range(total_slots):
cats: list[ClearedSlots.Slot.Cat] = []
for _ in range(10):
cat_id = next_int(2)
cat_form = next_int(1)
cat_data = ClearedSlots.Slot.Cat(cat_id, cat_form)
cats.append(cat_data)
separator = next_int(3)
slot = ClearedSlots.Slot(cats, index, separator)
index = next_int(2)
slots.append(slot)
cleared_slot_data: list[ClearedSlots.StageSlot] = []
index_2 = next_int(2)
for _ in range(index):
total_stages = next_int(2)
stages: list[ClearedSlots.StageSlot.Stage] = []
for _ in range(total_stages):
stage_id = next_int(4)
stage = ClearedSlots.StageSlot.Stage(stage_id)
stages.append(stage)
stages_data = ClearedSlots.StageSlot(index_2, stages)
index_2 = next_int(2)
cleared_slot_data.append(stages_data)
data_2: list[dict[str, int]] = []
data_2.append({"Value": index_2, "Length": 2})
for _ in range(index_2):
val_18 = next_int_len(2)
data_2.append(val_18)
val_4 = next_int_len(1)
data_2.append(val_4)
data_2.append(next_int_len(4)) # 90400
cleared_slots = ClearedSlots(slots, cleared_slot_data, index)
return cleared_slots.to_dict(), data_2
def get_data_after_gauntlets() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(4 * 2))
data.append(next_int_len(1 * 3))
val_4 = next_int_len(1)
data.append(val_4)
for _ in range(val_4["Value"]):
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(1))
data.append(next_int_len(8))
return data
def get_data_after_orbs() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
val_31 = next_int_len(2)
data.append(val_31)
for _ in range(val_31["Value"]):
val_18 = next_int_len(2)
data.append(val_18)
val_5 = next_int_len(1)
data.append(val_5)
for _ in range(val_5["Value"]):
val_6 = next_int_len(1)
data.append(val_6)
val_18 = next_int_len(2)
data.append(val_18)
data.append(next_int_len(1))
data.append(next_int_len(4)) # 90700
length = next_int_len(2)
data.append(length)
for _ in range(length["Value"]):
data.append(next_int_len(4))
data.append(next_int_len(1 * 10))
data.append(next_int_len(4)) # 90800
data.append(next_int_len(1))
return data
def get_cat_shrine_data() -> dict[str, Any]:
"""
Gets the cat shrine data
Returns:
dict[str, Any]: The cat shrine data
"""
stamp_1 = get_double()
stamp_2 = get_double()
shrine_gone = next_int(1)
flags: list[int] = get_length_data(1, 1)
xp_offering = next_int(4)
return {
"flags": flags,
"xp_offering": xp_offering,
"shrine_gone": shrine_gone,
"stamp_1": stamp_1,
"stamp_2": stamp_2,
}
def get_slot_names(save_stats: dict[str, Any]) -> list[str]:
total_slots = len(save_stats["slots"])
if save_stats["game_version"]["Value"] >= 110600:
total_slots = next_int(1)
names: list[str] = []
for _ in range(total_slots):
names.append(get_utf8_string())
return names
def get_talent_orbs(game_version: dict[str, Any]) -> dict[int, int]:
talent_orb_data: dict[int, int] = {}
total_orbs = next_int(2)
for _ in range(total_orbs):
orb_id = next_int(2)
if game_version["Value"] < 110400:
amount = next_int(1)
else:
amount = next_int(2)
talent_orb_data[orb_id] = amount
return talent_orb_data
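# The orb amount field appears to have widened from 1 byte to 2 bytes in game
# version 11.4.0 (110400), which is why the read size depends on the save's
# game version.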
def data_after_after_gauntlets() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(1))
data.append(next_int_len(8 * 2))
data.append(next_int_len(4))
data.append(next_int_len(1 * 2))
data.append(next_int_len(8 * 2))
data.append(next_int_len(4)) # 90500
return data
def get_data_near_end_after_shards() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(1))
data.append(next_int_len(4)) # 100600
val_2 = next_int_len(2)
data.append(val_2)
val_3 = next_int_len(2)
data.append(val_3)
for _ in range(val_2["Value"]):
val_1 = next_int_len(1)
data.append(val_1)
val_3 = next_int_len(2)
data.append(val_3)
val_6c = val_3
val_2 = next_int_len(2)
data.append(val_2)
for _ in range(val_6c["Value"]):
val_2 = next_int_len(2)
data.append(val_2)
for _ in range(val_2["Value"]):
val_3 = next_int_len(2)
data.append(val_3)
val_4 = next_int_len(2)
data.append(val_4)
val_2 = next_int_len(2)
data.append(val_2)
val_7c = val_2
for _ in range(val_7c["Value"]):
val_2 = next_int_len(2)
data.append(val_2)
val_12 = next_int_len(4)
data.append(val_12)
return data
def get_data_near_end() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
val_5 = next_int_len(1)
data.append(val_5)
if 0 < val_5["Value"]:
val_33 = next_int_len(4)
data.append(val_33)
if val_5["Value"] != 1:
val_33 = next_int_len(4)
data.append(val_33)
if val_5["Value"] != 2:
val_32 = val_5["Value"] + 2
for _ in range(val_32):
data.append(next_int_len(4))
data.append(next_int_len(1))
data.append(next_int_len(4)) # 100400
data.append(next_int_len(8))
return data
def get_aku() -> dict[str, Any]:
total = next_int(2)
stages = next_int(1)
stars = next_int(1)
return get_gauntlet_progress(
{"total": total, "stages": stages, "stars": stars}, False
)
def get_data_after_aku() -> list[dict[str, int]]:
data_1: list[dict[str, int]] = []
val_6 = next_int_len(2)
data_1.append(val_6)
val_7 = next_int_len(2)
data_1.append(val_7)
for _ in range(val_6["Value"]):
val_7 = next_int_len(2)
data_1.append(val_7)
for _ in range(val_7["Value"]):
data_1.append(next_int_len(2))
val_7 = next_int_len(2)
data_1.append(val_7)
val_4c = val_7
for _ in range(val_4c["Value"]):
data_1.append(next_int_len(2))
data_1.append(next_int_len(8))
val_5 = next_int_len(2)
data_1.append(val_5)
for _ in range(val_5["Value"]):
data_1.append(next_int_len(2))
data_1.append(next_int_len(8))
data_1.append(next_int_len(1))
return data_1
def get_data_near_end_after_aku() -> list[dict[str, int]]:
data_2: list[dict[str, int]] = []
val_4 = next_int_len(2)
data_2.append(val_4)
for _ in range(val_4["Value"]):
data_2.append(next_int_len(4))
data_2.append(next_int_len(1))
data_2.append(next_int_len(1))
return data_2
def exit_parser(save_stats: dict[str, Any]) -> dict[str, Any]:
save_stats["hash"] = get_utf8_string(32)
return save_stats
def check_gv(save_stats: dict[str, Any], game_version: int) -> dict[str, Any]:
if save_stats["game_version"]["Value"] < game_version:
save_stats = exit_parser(save_stats)
save_stats["exit"] = True
save_stats["extra_data"] = next_int_len(0)
else:
save_stats["exit"] = False
return save_stats
def get_play_time() -> dict[str, Any]:
raw_val = next_int_len(4)
frames = raw_val["Value"]
play_time_data = helper.frames_to_time(frames)
return play_time_data
def start_parse(save_data: bytes, country_code: str) -> dict[str, Any]:
"""Start the parser and handle any exceptions."""
try:
save_stats = parse_save(save_data, country_code)
except Exception: # pylint: disable=broad-except
helper.colored_text(
f"\nError: An error has occurred while parsing your save data (address = {address}):",
base=helper.RED,
)
traceback.print_exc()
game_version = get_game_version(save_data)
if game_version < 110000:
helper.colored_text(
f"\nThis save is from before &11.0.0& (current save version is &{helper.gv_to_str(game_version)}&), so this is likely the cause for the issue. &The save editor is not designed to work with saves from before 11.0.0&"
)
else:
helper.colored_text(
"\nPlease report this to &#bug-reports&, and/or &dm me your save& on discord"
)
helper.exit_editor()
return {}
return save_stats
def get_game_version(save_data: bytes) -> int:
"""Get the game version from the save data."""
return convert_little(save_data[0:3])
def find_date() -> int:
"""Find the date of the save, used because for some reason in some saves there is like 40 zero bytes before the main save data"""
for _ in range(100):
val = next_int(4)
if val >= 2000 and val <= 3000:
return address - 4
raise Exception("Could not find date")
def get_dst(save_data: bytes, offset: int) -> bool:
"""Get if the save has daylight savings from the save data, this is used to handle jp differences."""
dst = False
if save_data[offset] >= 15 and save_data[offset] <= 20:
dst = True
elif save_data[offset - 1] >= 15 and save_data[offset - 1] <= 20:
dst = False # Offset in jp due to no dst
return dst
def get_double() -> float:
"""Get a double from the save data."""
if save_data_g is None:
raise Exception("No save data loaded")
data = save_data_g[address : address + 8]
val = struct.unpack("d", data)[0]
set_address(address + 8)
return val
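# struct.unpack("d", ...) uses native byte order; since the rest of the save is
# read little-endian (see convert_little), this implicitly assumes a
# little-endian host. "<d" would make the byte order explicit.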
def get_110800_data() -> list[dict[str, int]]:
"""
    Get the unknown data introduced in game version 11.8.0 (110800)
Returns:
list[dict[str, int]]: The data
"""
data: list[dict[str, int]] = []
u_var_38 = next_int_len(1)
data.append(u_var_38)
return data
def get_110800_data_2() -> list[dict[str, int]]:
"""
    Get the second block of unknown data introduced in game version 11.8.0 (110800)
Returns:
list[dict[str, int]]: The data
"""
data: list[dict[str, int]] = []
u_var_38 = next_int_len(1)
data.append(u_var_38)
u_var_38 = next_int_len(1)
data.append(u_var_38)
return data
def get_110700_data() -> list[dict[str, int]]:
"""
    Get the data introduced in game version 11.7.0 (110700)
Returns:
list[dict[str, int]]: The data
"""
data: list[dict[str, int]] = []
i_var_32 = next_int_len(4)
data.append(i_var_32)
for _ in range(i_var_32["Value"]):
pi_var_33 = next_int_len(4)
data.append(pi_var_33)
f_var_54 = next_int_len(8)
data.append(f_var_54)
f_var_54 = next_int_len(8)
data.append(f_var_54)
return data
def get_login_bonuses() -> dict[int, int]:
"""
Get the login bonuses
Returns:
dict[int, int]: The login bonuses
"""
length = next_int(4)
data: dict[int, int] = {}
for _ in range(length):
login_id = next_int(4)
data[login_id] = next_int(4)
return data
def get_tower_item_obtained() -> list[list[int]]:
total_stars = next_int(4)
total_stages = next_int(4)
data: list[list[int]] = []
for _ in range(total_stars):
star_data: list[int] = []
for _ in range(total_stages):
star_data.append(next_int(1))
data.append(star_data)
return data
def get_dict(
key_type: type, value_type: type, length: Optional[int] = None
) -> dict[Any, Any]:
if length is None:
length = next_int(4)
data: dict[Any, Any] = {}
for _ in range(length):
if key_type == int:
key = next_int(4)
else:
raise Exception("Invalid key type")
if value_type == int:
data[key] = next_int(4)
elif value_type == str:
data[key] = get_utf8_string()
elif value_type == bool:
data[key] = next_int(1) == 1
else:
raise Exception("Invalid value type")
return data
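# get_dict reads a 4-byte entry count (unless a length is supplied), then that
# many key/value pairs: keys are 4-byte ints and values are 4-byte ints,
# length-prefixed UTF-8 strings, or 1-byte booleans depending on value_type.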
class BackupState(enum.Enum):
IDLE = 0
GO_TO_CAN_BACKUP = 1
IN_CAN_BACKUP = 2
GO_TO_CHECK_BAN = 3
IN_CHECK_BAN = 4
GO_TO_BACKUP = 5
IN_BACKUP = 6
FINISHED = 7
def get_110900_data() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(4))
data.append(next_int_len(2))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
ivar_14 = next_int_len(1)
data.append(ivar_14)
for _ in range(ivar_14["Value"]):
data.append(next_int_len(2))
svar6 = next_int_len(2)
data.append(svar6)
for _ in range(svar6["Value"]):
data.append(next_int_len(2))
svar6 = next_int_len(2)
data.append(svar6)
for _ in range(svar6["Value"]):
data.append(next_int_len(2))
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(4))
data.append(next_int_len(2))
data.append(next_int_len(2))
data.append(next_int_len(2))
data.append(next_int_len(2))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
svar6 = next_int_len(2)
data.append(svar6)
for _ in range(svar6["Value"]):
data.append(next_int_len(2))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
data.append(next_int_len(1))
cvar4 = next_int_len(1)
data.append(cvar4)
if 0 < cvar4["Value"]:
data.append(next_int_len(2))
if cvar4["Value"] != 1:
data.append(next_int_len(2))
if cvar4["Value"] != 2:
data.append(next_int_len(2))
if cvar4["Value"] != 3:
data.append(next_int_len(2))
if cvar4["Value"] != 4:
ivar32 = cvar4["Value"] + 4
for _ in range(ivar32):
data.append(next_int_len(2))
return data
def get_zero_legends() -> list[Any]:
total_chapters = next_int(2)
chapters: list[dict[str, Any]] = []
for _ in range(total_chapters):
unknown_1 = next_int(1)
total_stars = next_int(1)
stars: list[dict[str, Any]] = []
for _ in range(total_stars):
selected_stage = next_int(1)
stages_cleared = next_int(1)
unlock_next = next_int(1)
total_stages = next_int(2)
stages: list[Any] = []
for _ in range(total_stages):
clear_amount = next_int(2)
stages.append(clear_amount)
stars.append(
{
"selected_stage": selected_stage,
"stages_cleared": stages_cleared,
"unlock_next": unlock_next,
"stages": stages,
}
)
chapters.append(
{
"unknown_1": unknown_1,
"stars": stars,
}
)
return chapters
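# Zero Legends layout as read above: a 2-byte chapter count, then per chapter a
# 1-byte unknown value and a 1-byte star count, then per star the selected
# stage, stages cleared and unlock-next flags (1 byte each) and a 2-byte stage
# count followed by a 2-byte clear amount per stage.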
def get_120100_data() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
svar19 = next_int_len(2)
data.append(svar19)
for _ in range(svar19["Value"]):
data.append(next_int_len(2))
return data
def get_120200_data() -> list[dict[str, int]]:
data: list[dict[str, int]] = []
data.append(next_int_len(1))
data.append(next_int_len(2))
cvar4 = next_int_len(1)
data.append(cvar4)
for _ in range(cvar4["Value"]):
data.append(next_int_len(2))
data.append(next_int_len(2))
return data
def parse_save(
save_data: bytes,
country_code: Union[str, None],
dst: Optional[bool] = None,
) -> dict[str, Any]:
"""Parse the save data."""
if country_code == "ja" or country_code == "":
country_code = "jp"
set_address(0)
global save_data_g
save_data_g = save_data
save_stats: dict[str, Any] = {}
save_stats["editor_version"] = updater.get_local_version()
save_stats["game_version"] = next_int_len(4)
save_stats["version"] = country_code
save_stats["unknown_1"] = next_int_len(1)
save_stats["mute_music"] = next_int_len(1)
save_stats["mute_sound_effects"] = next_int_len(1)
save_stats["cat_food"] = next_int_len(4)
save_stats["current_energy"] = next_int_len(4)
old_address = address
new_address = find_date()
set_address(old_address)
extra = new_address - old_address
save_stats["extra_time_data"] = next_int_len(extra)
if dst is None:
dst = get_dst(save_data, address + 118)
save_stats["dst"] = dst
if (
save_stats["version"] == "jp"
and dst
or save_stats["version"] != "jp"
and not dst
):
helper.colored_text(
"Warning: DST detected is not correct for this save version, this may cause issues with save parsing.",
helper.RED,
)
data = get_time_data_skip(save_stats["dst"])
save_stats["time"] = data["time"]
save_stats["dst_val"] = data["dst"]
save_stats["time_stamp"] = data["time_stamp"]
save_stats["duplicate_time"] = data["duplicate"]
save_stats["unknown_flags_1"] = get_length_data(length=3)
save_stats["upgrade_state"] = next_int_len(4)
save_stats["xp"] = next_int_len(4)
save_stats["tutorial_cleared"] = next_int_len(4)
save_stats["unknown_flags_2"] = get_length_data(length=12)
save_stats["unknown_flag_1"] = next_int_len(1)
save_stats["slots"] = get_equip_slots()
save_stats["cat_stamp_current"] = next_int_len(4)
save_stats["cat_stamp_collected"] = get_length_data(length=30)
save_stats["unknown_2"] = next_int_len(4)
save_stats["daily_reward_flag"] = next_int_len(4)
save_stats["unknown_116"] = get_length_data(length=10)
save_stats["story_chapters"] = get_main_story_levels()
save_stats["treasures"] = get_treasures()
try:
save_stats["enemy_guide"] = get_length_data()
except Exception:
return parse_save(save_data, country_code, not dst)
if len(save_stats["enemy_guide"]) == 0:
return parse_save(save_data, country_code, not dst)
save_stats["cats"] = get_length_data()
save_stats["cat_upgrades"] = get_cat_upgrades()
save_stats["current_forms"] = get_length_data()
save_stats["blue_upgrades"] = get_blue_upgrades()
save_stats["menu_unlocks"] = get_length_data()
save_stats["new_dialogs_1"] = get_length_data()
save_stats["battle_items"] = get_length_data(4, 4, 6)
save_stats["new_dialogs_2"] = get_length_data()
save_stats["unknown_6"] = next_int_len(4)
save_stats["unknown_7"] = get_length_data(length=21)
save_stats["lock_item"] = next_int_len(1)
save_stats["locked_items"] = get_length_data(1, 1, 6)
save_stats["second_time"] = get_time_data(save_stats["dst"])
save_stats["unknown_8"] = get_length_data(length=50)
save_stats["third_time"] = get_time_data(save_stats["dst"])
save_stats["unknown_9"] = next_int_len(6 * 4)
save_stats["thirty2_code"] = get_utf8_string()
save_stats["unknown_10"] = load_bonus_hash()
save_stats["unknown_11"] = get_length_data(length=4)
save_stats["normal_tickets"] = next_int_len(4)
save_stats["rare_tickets"] = next_int_len(4)
save_stats["gatya_seen_cats"] = get_length_data()
save_stats["unknown_12"] = get_length_data(length=10)
length = next_int(2)
cat_storage_len = True
if length != 128:
skip(-2)
cat_storage_len = False
length = 100
cat_storage_id = get_length_data(2, 4, length)
cat_storage_type = get_length_data(2, 4, length)
save_stats["cat_storage"] = {
"ids": cat_storage_id,
"types": cat_storage_type,
"len": cat_storage_len,
}
current_sel = get_event_stages_current()
save_stats["event_current"] = current_sel
save_stats["event_stages"] = get_event_stages(current_sel)
save_stats["unknown_15"] = get_length_data(length=38)
save_stats["unit_drops"] = get_length_data()
save_stats["rare_gacha_seed"] = next_int_len(4)
save_stats["unknown_17"] = next_int_len(12)
save_stats["unknown_18"] = next_int_len(4)
save_stats["fourth_time"] = get_time_data(save_stats["dst"])
save_stats["unknown_105"] = get_length_data(length=5)
save_stats["unknown_107"] = get_length_data(separator=1, length=3)
if save_stats["dst"]:
save_stats["unknown_110"] = get_utf8_string()
else:
save_stats["unknown_110"] = ""
total_strs = next_int(4)
unknown_108: list[str] = []
for _ in range(total_strs):
unknown_108.append(get_utf8_string())
save_stats["unknown_108"] = unknown_108
if save_stats["dst"]:
save_stats["time_stamps"] = get_length_doubles(length=3)
length = next_int(4)
strs: list[str] = []
for _ in range(length):
strs.append(get_utf8_string())
save_stats["unknown_112"] = strs
save_stats["energy_notice"] = next_int_len(1)
save_stats["game_version_2"] = next_int_len(4)
else:
save_stats["time_stamps"] = [0, 0, 0]
save_stats["unknown_112"] = []
save_stats["energy_notice"] = generate_empty_len(1)
save_stats["game_version_2"] = generate_empty_len(4)
save_stats["unknown_111"] = next_int_len(4)
save_stats["unlocked_slots"] = next_int_len(1)
length_1 = next_int(4)
length_2 = next_int(4)
unknown_20: dict[str, Any] = {}
unknown_20 = {"Value": get_length_data(4, 4, length_1 * length_2)}
unknown_20["Length_1"] = length_1
unknown_20["Length_2"] = length_2
save_stats["unknown_20"] = unknown_20
save_stats["time_stamps_2"] = get_length_doubles(length=4)
save_stats["trade_progress"] = next_int_len(4)
if save_stats["dst"]:
save_stats["time_stamps_2"].append(get_double())
save_stats["unknown_24"] = generate_empty_len(4)
else:
save_stats["unknown_24"] = next_int_len(4)
save_stats["catseye_related_data"] = get_cat_upgrades()
save_stats["unknown_22"] = get_length_data(length=11)
save_stats["user_rank_rewards"] = get_length_data(4, 1)
if not save_stats["dst"]:
save_stats["time_stamps_2"].append(get_double())
save_stats["unlocked_forms"] = get_length_data()
save_stats["transfer_code"] = get_utf8_string()
save_stats["confirmation_code"] = get_utf8_string()
save_stats["transfer_flag"] = next_int_len(1)
lengths = [next_int(4), next_int(4), next_int(4)]
length = lengths[0] * lengths[1] * lengths[2]
save_stats["stage_data_related_1"] = {
"Value": get_length_data(4, 1, length),
"Lengths": lengths,
}
save_stats["event_timed_scores"] = get_event_timed_scores()
save_stats["inquiry_code"] = get_utf8_string()
save_stats["play_time"] = get_play_time()
save_stats["unknown_25"] = next_int_len(1)
save_stats["backup_state"] = next_int_len(4)
if save_stats["dst"]:
save_stats["unknown_119"] = next_int_len(1)
else:
save_stats["unknown_119"] = generate_empty_len(1)
save_stats["gv_44"] = next_int_len(4)
save_stats["unknown_120"] = next_int_len(4)
save_stats["itf_timed_scores"] = list(
helper.chunks(get_length_data(4, 4, 51 * 3), 51)
)
save_stats["unknown_27"] = next_int_len(4)
save_stats["cat_related_data_1"] = get_length_data()
save_stats["unknown_28"] = next_int_len(1)
save_stats["gv_45"] = next_int_len(4)
save_stats["gv_46"] = next_int_len(4)
save_stats["unknown_29"] = next_int_len(4)
save_stats["lucky_tickets_1"] = get_length_data()
save_stats["unknown_32"] = get_length_data()
save_stats["gv_47"] = next_int_len(4)
save_stats["gv_48"] = next_int_len(4)
if not save_stats["dst"]:
save_stats["energy_notice"] = next_int_len(1)
save_stats["account_created_time_stamp"] = get_double()
save_stats["unknown_35"] = get_length_data()
save_stats["unknown_36"] = next_int_len(15)
save_stats["user_rank_popups"] = next_int_len(3)
save_stats["unknown_37"] = next_int_len(1)
save_stats["gv_49"] = next_int_len(4)
save_stats["gv_50"] = next_int_len(4)
save_stats["gv_51"] = next_int_len(4)
save_stats["cat_guide_collected"] = get_length_data(4, 1)
save_stats["gv_52"] = next_int_len(4)
save_stats["time_stamps_3"] = get_length_doubles(length=5)
save_stats["cat_fruit"] = get_length_data()
save_stats["cat_related_data_3"] = get_length_data()
save_stats["catseye_cat_data"] = get_length_data()
save_stats["catseyes"] = get_length_data()
save_stats["catamins"] = get_length_data()
save_stats["gamatoto_time_left"] = helper.seconds_to_time(int(get_double()))
save_stats["gamatoto_exclamation"] = next_int_len(1)
save_stats["gamatoto_xp"] = next_int_len(4)
save_stats["gamamtoto_destination"] = next_int_len(4)
save_stats["gamatoto_recon_length"] = next_int_len(4)
save_stats["unknown_43"] = next_int_len(4)
save_stats["gamatoto_complete_notification"] = next_int_len(4)
save_stats["unknown_44"] = get_length_data(4, 1)
save_stats["unknown_45"] = get_length_data(4, 12 * 4)
save_stats["gv_53"] = next_int_len(4)
save_stats["helpers"] = get_length_data()
save_stats["unknown_47"] = next_int_len(1)
save_stats["gv_54"] = next_int_len(4)
save_stats["purchases"] = get_purchase_receipts()
save_stats["gv_54"] = next_int_len(4)
save_stats["gamatoto_skin"] = next_int_len(4)
save_stats["platinum_tickets"] = next_int_len(4)
save_stats["login_bonuses"] = get_login_bonuses()
save_stats["unknown_49"] = next_int_len(16)
save_stats["announcment"] = get_length_data(length=32)
save_stats["backup_counter"] = next_int_len(4)
save_stats["unknown_131"] = get_length_data(length=3)
save_stats["gv_55"] = next_int_len(4)
save_stats["unknown_51"] = next_int_len(1)
save_stats["unknown_113"] = get_data_before_outbreaks()
save_stats["dojo_data"] = get_dojo_data_maybe()
save_stats["dojo_item_lock"] = next_int_len(1)
save_stats["dojo_locks"] = get_length_data(1, 1, 2)
save_stats["unknown_114"] = next_int_len(4)
save_stats["gv_58"] = next_int_len(4) # 0x3a
save_stats["unknown_115"] = next_int_len(8)
save_stats["outbreaks"] = get_outbreaks()
save_stats["unknown_52"] = get_double()
save_stats["item_schemes"] = {}
save_stats["item_schemes"]["to_obtain_ids"] = get_length_data()
save_stats["item_schemes"]["received_ids"] = get_length_data()
save_stats["current_outbreaks"] = get_outbreaks()
save_stats["unknown_55"] = get_mission_data_maybe()
save_stats["time_stamp_4"] = get_double()
save_stats["gv_60"] = next_int_len(4)
save_stats["unknown_117"] = get_unknown_data()
save_stats["gv_61"] = next_int_len(4)
data = get_unlock_popups()
save_stats["unlock_popups"] = data[0]
save_stats["unknown_118"] = data[1]
save_stats["base_materials"] = get_length_data()
save_stats["unknown_56"] = next_int_len(8)
save_stats["unknown_57"] = next_int_len(1)
save_stats["unknown_58"] = next_int_len(4)
save_stats["engineers"] = next_int_len(4)
save_stats["ototo_cannon"] = get_cat_cannon_data()
save_stats["unknown_59"] = get_data_near_ht()
save_stats["tower"] = get_ht_it_data()
save_stats["missions"] = get_mission_data()
save_stats["tower_item_obtained"] = get_tower_item_obtained()
save_stats["unknown_61"] = get_data_after_tower()
save_stats["challenge"] = {"Score": next_int_len(4), "Cleared": next_int_len(1)}
save_stats["gv_67"] = next_int_len(4) # 0x43
save_stats["weekly_event_missions"] = get_dict(int, bool)
save_stats["won_dojo_reward"] = next_int_len(1)
save_stats["event_flag_update_flag"] = next_int_len(1)
save_stats["gv_68"] = next_int_len(4) # 0x44
save_stats["completed_one_level_in_chapter"] = get_dict(int, int)
save_stats["displayed_cleared_limit_text"] = get_dict(int, bool)
save_stats["event_start_dates"] = get_dict(int, int)
save_stats["stages_beaten_twice"] = get_length_data()
save_stats["unknown_102"] = get_data_after_challenge()
lengths = get_uncanny_current()
save_stats["uncanny_current"] = lengths
save_stats["uncanny"] = get_uncanny_progress(lengths)
total = lengths["total"]
save_stats["unknown_62"] = next_int_len(4)
save_stats["unknown_63"] = get_length_data(length=total)
save_stats["unknown_64"] = get_data_after_uncanny()
total = save_stats["unknown_64"]["progress"]["Lengths"]["total"]
save_stats["unknown_65"] = next_int_len(4)
val_61 = save_stats["unknown_65"]
save_stats["unknown_66"] = []
unknown_66: list[Any] = []
for _ in range(total):
val_61 = next_int_len(4)
unknown_66.append(val_61)
save_stats["unknown_66"] = unknown_66
val_54 = 0x37
if val_61["Value"] < 0x38:
val_54 = val_61["Value"]
save_stats["lucky_tickets_2"] = get_length_data(length=val_54)
save_stats["unknown_67"] = []
if 0x37 < val_61["Value"]:
save_stats["unknown_67"] = get_length_data(4, 4, val_61["Value"])
save_stats["unknown_68"] = next_int_len(1)
save_stats["gv_77"] = next_int_len(4) # 0x4d
save_stats["gold_pass"] = get_gold_pass_data()
save_stats["talents"] = get_talent_data()
save_stats["np"] = next_int_len(4)
save_stats["unknown_70"] = next_int_len(1)
save_stats["gv_80000"] = next_int_len(4) # 80000
save_stats["unknown_71"] = next_int_len(1)
save_stats["leadership"] = next_int_len(2)
save_stats["officer_pass_cat_id"] = next_int_len(2)
save_stats["officer_pass_cat_form"] = next_int_len(2)
save_stats["gv_80200"] = next_int_len(4) # 80200
save_stats["filibuster_stage_id"] = next_int_len(1)
save_stats["filibuster_stage_enabled"] = next_int_len(1)
save_stats["gv_80300"] = next_int_len(4) # 80300
save_stats["unknown_74"] = get_length_data()
save_stats["gv_80500"] = next_int_len(4) # 80500
save_stats["unknown_75"] = get_length_data(2, 4)
lengths = get_legend_quest_current()
save_stats["legend_quest_current"] = lengths
save_stats["legend_quest"] = get_legend_quest_progress(lengths)
save_stats["unknown_133"] = get_length_data(4, 1, lengths["total"])
save_stats["legend_quest_ids"] = get_length_data(4, 4, lengths["stages"])
save_stats["unknown_76"] = get_data_after_leadership()
save_stats["gv_80700"] = next_int_len(4) # 80700
if save_stats["dst"]:
save_stats["unknown_104"] = next_int_len(1)
save_stats["gv_100600"] = next_int_len(4)
if save_stats["gv_100600"]["Value"] != 100600:
skip(-5)
else:
save_stats["unknown_104"] = generate_empty_len(1)
save_stats["gv_100600"] = generate_empty_len(4)
save_stats["restart_pack"] = next_int_len(1)
save_stats["unknown_101"] = get_data_after_after_leadership(save_stats["dst"])
save_stats["medals"] = get_medals()
save_stats["unknown_103"] = get_data_after_medals()
lengths = get_gauntlet_current()
save_stats["gauntlet_current"] = lengths
save_stats["gauntlets"] = get_gauntlet_progress(lengths)
save_stats["unknown_77"] = get_length_data(4, 1, lengths["total"])
save_stats["gv_90300"] = next_int_len(4) # 90300
lengths = get_gauntlet_current()
save_stats["unknown_78"] = lengths
save_stats["unknown_79"] = get_gauntlet_progress(lengths)
save_stats["unknown_80"] = get_length_data(4, 1, lengths["total"])
save_stats["enigma_data"] = get_enigma_stages()
data = get_cleared_slots()
save_stats["cleared_slot_data"] = data[0]
save_stats["unknown_121"] = data[1]
lengths = get_gauntlet_current()
save_stats["collab_gauntlets_current"] = lengths
save_stats["collab_gauntlets"] = get_gauntlet_progress(lengths)
save_stats["unknown_84"] = get_length_data(4, 1, lengths["total"])
save_stats["unknown_85"] = data_after_after_gauntlets()
save_stats["talent_orbs"] = get_talent_orbs(save_stats["game_version"])
save_stats["unknown_86"] = get_data_after_orbs()
save_stats["cat_shrine"] = get_cat_shrine_data()
save_stats["unknown_130"] = next_int_len(4 * 5)
save_stats["gv_90900"] = next_int_len(4) # 90900
save_stats["slot_names"] = get_slot_names(save_stats)
save_stats["gv_91000"] = next_int_len(4)
save_stats["legend_tickets"] = next_int_len(4)
save_stats["unknown_87"] = get_length_data(1, 5)
save_stats["unknown_88"] = next_int_len(2)
save_stats["token"] = get_utf8_string()
save_stats["unknown_89"] = next_int_len(1 * 3)
save_stats["unknown_90"] = next_int_len(8)
save_stats["unknown_91"] = next_int_len(8)
save_stats["gv_100000"] = next_int_len(4) # 100000
save_stats = check_gv(save_stats, 100100)
if save_stats["exit"]:
return save_stats
save_stats["date_int"] = next_int_len(4)
save_stats["gv_100100"] = next_int_len(4) # 100100
save_stats = check_gv(save_stats, 100300)
if save_stats["exit"]:
return save_stats
save_stats["unknown_93"] = get_length_data(4, 19, 6)
save_stats["gv_100300"] = next_int_len(4) # 100300
save_stats = check_gv(save_stats, 100700)
if save_stats["exit"]:
return save_stats
save_stats["unknown_94"] = get_data_near_end()
save_stats["platinum_shards"] = next_int_len(4)
save_stats["unknown_100"] = get_data_near_end_after_shards()
save_stats["gv_100700"] = next_int_len(4) # 100700
save_stats = check_gv(save_stats, 100900)
if save_stats["exit"]:
return save_stats
save_stats["aku"] = get_aku()
save_stats["unknown_95"] = next_int_len(1 * 2)
save_stats["unknown_96"] = get_data_after_aku()
save_stats["gv_100900"] = next_int_len(4) # 100900
save_stats = check_gv(save_stats, 101000)
if save_stats["exit"]:
return save_stats
save_stats["unknown_97"] = next_int_len(1)
save_stats["gv_101000"] = next_int_len(4) # 101000
save_stats = check_gv(save_stats, 110000)
if save_stats["exit"]:
return save_stats
save_stats["unknown_98"] = get_data_near_end_after_aku()
save_stats["gv_110000"] = next_int_len(4) # 110000
save_stats = check_gv(save_stats, 110500)
if save_stats["exit"]:
return save_stats
data = get_gauntlet_current()
save_stats["behemoth_culling_current"] = data
save_stats["behemoth_culling"] = get_gauntlet_progress(data)
save_stats["unknown_124"] = get_length_data(4, 1, data["total"])
save_stats["unknown_125"] = next_int_len(1)
save_stats["gv_110500"] = next_int_len(4) # 110500
save_stats = check_gv(save_stats, 110600)
if save_stats["exit"]:
return save_stats
save_stats["unknown_126"] = next_int_len(1)
save_stats["gv_110600"] = next_int_len(4) # 110600
save_stats = check_gv(save_stats, 110700)
if save_stats["exit"]:
return save_stats
save_stats["unknown_127"] = get_110700_data()
if save_stats["dst"]:
save_stats["unknown_128"] = next_int_len(1)
else:
save_stats["unknown_128"] = generate_empty_len(1)
save_stats["gv_110700"] = next_int_len(4) # 110700
save_stats = check_gv(save_stats, 110800)
if save_stats["exit"]:
return save_stats
save_stats["shrine_dialogs"] = next_int_len(4)
save_stats["unknown_129"] = get_110800_data()
save_stats["dojo_3x_speed"] = next_int_len(1)
save_stats["unknown_132"] = get_110800_data_2()
save_stats["gv_110800"] = next_int_len(4) # 110800
save_stats = check_gv(save_stats, 110900)
if save_stats["exit"]:
return save_stats
save_stats["unknown_135"] = get_110900_data()
save_stats["gv_110900"] = next_int_len(4) # 110900
save_stats = check_gv(save_stats, 120000)
if save_stats["exit"]:
return save_stats
save_stats["zero_legends"] = get_zero_legends()
save_stats["unknown_136"] = next_int_len(1)
save_stats["gv_120000"] = next_int_len(4) # 120000
save_stats = check_gv(save_stats, 120100)
if save_stats["exit"]:
return save_stats
save_stats["unknown_137"] = get_120100_data()
save_stats["gv_120100"] = next_int_len(4) # 120100
save_stats = check_gv(save_stats, 120200)
if save_stats["exit"]:
return save_stats
save_stats["unknown_138"] = get_120200_data()
save_stats["gv_120200"] = next_int_len(4) # 120200
save_stats = check_gv(save_stats, 120200)
if save_stats["exit"]:
return save_stats
length = len(save_data) - address - 32
save_stats["extra_data"] = next_int_len(length)
save_stats = exit_parser(save_stats)
return save_stats
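# Illustrative usage sketch (not part of the original module): parsing a save
# file read from disk. The path and country code below are placeholders; the
# real editor obtains both through its own helpers.
def _example_parse_from_file(path: str = "SAVE_DATA", country_code: str = "en") -> dict[str, Any]:
    with open(path, "rb") as handle:
        save_bytes = handle.read()
    return start_parse(save_bytes, country_code)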
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/parse_save.py
| 0.708213 | 0.365598 |
parse_save.py
|
pypi
|
from typing import Optional, Union
from . import (
managed_item,
user_input_handler,
helper,
locale_handler,
config_manager,
user_info,
)
class Bannable:
def __init__(
self,
type: "managed_item.ManagedItemType",
inquiry_code: str,
work_around: str = "",
):
self.type = type
self.inquiry_code = inquiry_code
self.work_around = work_around
class Int:
def __init__(self, value: Optional[int], byte_size: int = 4, signed: bool = True):
self.value = value
self.byte_size = byte_size
self.signed = signed
def get_max_value(self) -> int:
if self.signed:
return (2 ** (self.byte_size * 8 - 1)) - 1
return (2 ** (self.byte_size * 8)) - 1
class IntItem:
def __init__(
self,
name: str,
value: Int,
max_value: Optional[int],
bannable: Optional[Bannable] = None,
offset: int = 0,
):
self.name = name
self.__value = value
disable_maxes = config_manager.get_config_value_category(
"EDITOR", "DISABLE_MAXES"
)
self.max_value = max_value
if disable_maxes:
self.max_value = None
self.bannable = bannable
self.offset = offset
self.locale_manager = locale_handler.LocalManager.from_config()
def get_max_value(self) -> int:
if self.max_value is not None:
return self.max_value
return self.__value.get_max_value()
def show_ban_warning(self) -> bool:
if self.bannable is None:
return True
helper.colored_text(self.locale_manager.search_key("ban_warning") % self.name)
if self.bannable.work_around:
helper.colored_text(self.bannable.work_around)
return user_input_handler.get_yes_no(
self.locale_manager.search_key("ban_warning_leave")
)
def edit(self) -> None:
end = not self.show_ban_warning()
if end:
return
original_value = self.__value.value
helper.colored_text(
self.locale_manager.search_key("current_item_value")
% (self.name, self.get_value_off())
)
max_str = ""
if self.max_value is not None:
max_str = " " + self.locale_manager.search_key("max_str") % self.max_value
new_value = user_input_handler.get_int(
self.locale_manager.search_key("enter_value_text") % (self.name, max_str),
)
new_value -= self.offset
new_value = helper.clamp(new_value, 0, self.get_max_value())
self.__value.value = new_value
helper.colored_text(
self.locale_manager.search_key("item_value_changed")
% (
self.name,
0 if original_value is None else original_value,
self.get_value_off(),
)
)
if self.bannable is not None and self.__value.value != original_value:
new_value = self.__value.value
if original_value is None:
original_value = 0
info = user_info.UserInfo(self.bannable.inquiry_code)
info.update_item(self.bannable.type, self.__value.value - original_value)
def get_value_off(self) -> int:
if self.__value.value is None:
return 0
return self.__value.value + self.offset
def get_value(self) -> int:
if self.__value.value is None:
return 0
return self.__value.value
def get_value_none(self) -> Optional[int]:
return self.__value.value
def set_value(self, value: int) -> None:
self.__value.value = value
class IntItemGroup:
def __init__(self, group_name: str, items: list[IntItem]):
self.items = items
self.locale_manager = locale_handler.LocalManager.from_config()
self.group_name = group_name
def get_values(self) -> list[int]:
return [item.get_value() for item in self.items]
def get_values_none(self) -> list[Optional[int]]:
return [item.get_value_none() for item in self.items]
def get_values_off(self) -> list[int]:
return [item.get_value_off() for item in self.items]
def all_none(self) -> bool:
return all([item.get_value_none() is None for item in self.items])
def get_names(self) -> list[str]:
return [item.name for item in self.items]
def edit(self) -> None:
if not self.items:
return
ids, individual = user_input_handler.select_options(
self.get_names(),
self.locale_manager.search_key("select_l"),
self.get_values_off() if not self.all_none() else None,
)
if individual:
for id in ids:
self.items[id].edit()
else:
max_value = self.get_max_max_value()
offset = self.items[ids[0]].offset
max_str = ""
if self.items[ids[0]].max_value is not None:
max_str = " " + self.locale_manager.search_key("max_str") % (
max_value + offset
)
new_value = user_input_handler.get_int(
self.locale_manager.search_key("enter_value_text")
% (self.group_name, max_str)
)
new_value -= offset
entered_value = helper.clamp(new_value, 0, max_value)
for id in ids:
max_value = self.items[id].get_max_value()
value = helper.clamp(new_value, 0, max_value)
self.items[id].set_value(value)
helper.colored_text(
self.locale_manager.search_key("success_set")
% (self.group_name, entered_value + offset)
)
def get_max_max_value(self) -> int:
return max([item.get_max_value() for item in self.items])
@staticmethod
def from_lists(
names: list[str],
values: Optional[list[int]],
maxes: Union[list[int], int, None],
group_name: str,
offset: int = 0,
) -> "IntItemGroup":
items: list[IntItem] = []
for i in range(len(names)):
max_value = maxes[i] if isinstance(maxes, list) else maxes
try:
value = values[i] if values is not None else None
except IndexError:
value = None
items.append(
IntItem(
names[i],
Int(value),
max_value,
offset=offset,
)
)
return IntItemGroup(group_name, items)
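# Example (sketch, not part of the original module): building a group of ticket
# counters with a shared cap. IntItem reads the editor's config and locale
# files on construction, so this assumes a configured environment.
#   tickets = IntItemGroup.from_lists(
#       ["Normal Tickets", "Rare Tickets"], [5, 2], 9999, "Tickets"
#   )
#   tickets.edit()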
class StrItem:
def __init__(self, name: str, value: str):
self.name = name
self.value = value
self.locale_manager = locale_handler.LocalManager.from_config()
def edit(self) -> None:
original_value = self.value
helper.colored_text(
self.locale_manager.search_key("current_item_value")
% (self.name, self.value)
)
new_value = user_input_handler.colored_input(
self.locale_manager.search_key("enter_value_text") % (self.name, "")
)
self.value = new_value
helper.colored_text(
self.locale_manager.search_key("item_value_changed")
% (self.name, original_value, self.value)
)
def get_value(self) -> str:
return self.value
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/item.py
| 0.85186 | 0.157234 |
item.py
|
pypi
|
import os
from typing import Optional
import requests
from . import helper
URL = "https://raw.githubusercontent.com/fieryhenry/BCData/master/"
def download_file(
game_version: str,
pack_name: str,
file_name: str,
get_data: bool = True,
print_progress: bool = True,
) -> bytes:
"""
Downloads the file.
Args:
game_version (str): The game version to download from.
pack_name (str): The pack name to download from.
file_name (str): The file name to download.
get_data (bool, optional): Whether to return the data. Defaults to True.
print_progress (bool, optional): Whether to print the progress. Defaults to True.
Returns:
bytes: The data of the file.
"""
path = helper.get_file(os.path.join("game_data", game_version, pack_name))
file_path = os.path.join(path, file_name)
if os.path.exists(file_path):
if get_data:
return helper.read_file_bytes(file_path)
return b""
if print_progress:
helper.colored_text(
f"Downloading game data file &{file_name}& from &{pack_name}& with game version &{game_version}&",
helper.GREEN,
helper.WHITE,
)
url = URL + game_version + "/" + pack_name + "/" + file_name
response = requests.get(url)
helper.create_dirs(path)
helper.write_file_bytes(file_path, response.content)
return response.content
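# Example (sketch): fetching a single game data file for a known version. The
# version, pack and file names below are placeholders; real values come from
# get_latest_version() and the game's pack layout.
#   data = download_file("12.1.0", "DataLocal", "unitbuy.csv")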
def get_latest_versions() -> Optional[list[str]]:
"""
Gets the latest versions of the game data.
Returns:
Optional[list[str]]: The latest versions of the game data.
"""
try:
response = requests.get(URL + "latest.txt")
except requests.exceptions.ConnectionError:
return None
versions = response.text.splitlines()
return versions
def get_latest_version(is_jp: bool) -> Optional[str]:
"""
Gets the latest version of the game data.
Args:
is_jp (bool): Whether to get the japanese version.
Returns:
str: The latest version of the game data.
"""
versions = get_latest_versions()
if versions is None:
return None
if is_jp:
return versions[1]
else:
return versions[0]
def get_file_latest(pack_name: str, file_name: str, is_jp: bool) -> Optional[bytes]:
"""
Gets the latest version of the file.
Args:
pack_name (str): The pack name to find.
file_name (str): The file name to find.
is_jp (bool): Whether to get the japanese version.
Returns:
Optional[bytes]: The data of the file.
"""
version = get_latest_version(is_jp)
if version is None:
return None
return download_file(version, pack_name, file_name)
def get_file_latest_path(path: str, is_jp: bool) -> Optional[bytes]:
"""
Gets the latest version of the file.
Args:
path (str): The path to find.
is_jp (bool): Whether to get the japanese version.
Returns:
Optional[bytes]: The data of the file.
"""
version = get_latest_version(is_jp)
if version is None:
return None
packname, filename = path.split("/")
return download_file(version, packname, filename)
def get_path(pack_name: str, file_name: str, is_jp: bool) -> Optional[str]:
"""
Gets the path of the file.
Args:
pack_name (str): The pack name to find.
file_name (str): The file name to find.
is_jp (bool): Whether to get the japanese version.
Returns:
Optional[str]: The path of the file.
"""
version = get_latest_version(is_jp)
if version is None:
return None
return os.path.join("game_data", version, pack_name, file_name)
def check_remove(new_version: str, is_jp: bool):
"""
Checks if older game data is downloaded, and deletes if out of date.
Args:
new_version (str): The new version.
is_jp (bool): Whether to get the japanese version.
"""
all_versions = helper.get_dirs(helper.get_file("game_data"))
for version in all_versions:
if is_jp:
if "jp" not in version:
continue
if version != new_version:
helper.delete_dir(helper.get_file(os.path.join("game_data", version)))
else:
if "jp" in version:
continue
if version != new_version:
helper.delete_dir(helper.get_file(os.path.join("game_data", version)))
def check_remove_handler():
"""
Checks if older game data is downloaded, and deletes if out of date.
"""
versions = get_latest_versions()
if versions is None:
return None
check_remove(versions[0], is_jp=False)
check_remove(versions[1], is_jp=True)
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/game_data_getter.py
| 0.800692 | 0.283066 |
game_data_getter.py
|
pypi
|
import json
from typing import Any
from . import config_manager, managed_item, helper
import os
class ManagedItems:
def __init__(self, managed_items: dict[managed_item.ManagedItemType, int]):
self.managed_items = managed_items
def to_dict(self) -> dict[str, int]:
"""Convert to dict"""
return {
item.value: self.managed_items[item]
for item in managed_item.ManagedItemType
}
@staticmethod
def from_dict(data: dict[str, int]) -> "ManagedItems":
"""Convert from dict"""
managed_items = {
managed_item.ManagedItemType(item): data[item] for item in data
}
return ManagedItems(managed_items)
class UserInfo:
def __init__(self, inquiry_code: str):
self.inquiry_code = inquiry_code
self.read_user_info()
def get_path(self) -> str:
"""Get the path to the user info"""
app_data_folder = config_manager.get_app_data_folder()
path = os.path.join(app_data_folder, "user_info", self.inquiry_code + ".json")
os.makedirs(os.path.dirname(path), exist_ok=True)
return path
def create_empty_user_info(self):
managed_items = {item: 0 for item in managed_item.ManagedItemType}
managed_items = ManagedItems(managed_items)
data = {
"managedItems": managed_items.to_dict(),
"password": "",
"authToken": "",
}
self.write_user_info(data)
def read_user_info(self):
if not os.path.exists(self.get_path()):
self.create_empty_user_info()
data = helper.read_file_string(self.get_path())
try:
data = json.loads(data)
except json.JSONDecodeError:
self.create_empty_user_info()
data = helper.read_file_string(self.get_path())
data = json.loads(data)
self.managed_items = ManagedItems.from_dict(data["managedItems"])
self.password = data["password"]
self.auth_token = data["authToken"]
def write_user_info(self, data: dict[str, Any]):
helper.write_file_string(self.get_path(), json.dumps(data, indent=4))
def save(self):
data = {
"managedItems": self.managed_items.to_dict(),
"password": self.password,
"authToken": self.auth_token,
}
self.write_user_info(data)
def get_managed_items(self) -> ManagedItems:
return self.managed_items
def get_password(self) -> str:
return self.password
def get_auth_token(self) -> str:
return self.auth_token
def set_managed_items(self, managed_items: ManagedItems):
self.managed_items = managed_items
self.save()
def set_password(self, password: str):
self.password = password
self.save()
def set_auth_token(self, auth_token: str):
self.auth_token = auth_token
self.save()
def clear_managed_items(self):
self.managed_items = ManagedItems(
{item: 0 for item in managed_item.ManagedItemType}
)
self.save()
def get_managed_items_lst(self) -> list[managed_item.ManagedItem]:
items: list[managed_item.ManagedItem] = []
for item in self.managed_items.managed_items:
value = self.managed_items.managed_items[item]
if value > 0:
detail_type = managed_item.DetailType.GET
elif value < 0:
detail_type = managed_item.DetailType.USE
value = abs(value)
else:
continue
items.append(managed_item.ManagedItem(value, detail_type, item))
return items
def has_managed_items(self) -> bool:
for item in self.managed_items.managed_items:
value = self.managed_items.managed_items[item]
if value != 0:
return True
return False
def update_item(self, item_type: managed_item.ManagedItemType, amount: int):
self.managed_items.managed_items[item_type] += amount
self.save()
@staticmethod
def clear_all_items():
app_data_folder = config_manager.get_app_data_folder()
path = os.path.join(app_data_folder, "user_info")
os.makedirs(path, exist_ok=True)
files = helper.get_files_in_dir(path)
for file in files:
if file.endswith(".json"):
info = UserInfo(file.replace(".json", ""))
info.clear_managed_items()
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/user_info.py
| 0.572245 | 0.18954 |
user_info.py
|
pypi
|
import os
import subprocess
from typing import Optional
from . import helper
def get_data_path() -> str:
"""
Get the data path
Returns:
str: The data path
"""
return "/data/data/"
def get_installed_battlecats_versions() -> Optional[list[str]]:
"""
Get a list of installed battle cats versions
Returns:
Optional[list[str]]: A list of installed battle cats versions
"""
if not is_ran_as_root():
return None
path = get_data_path()
if not os.path.exists(path):
return None
versions: list[str] = []
for folder in os.listdir(path):
if folder == "jp.co.ponos.battlecats":
versions.append("jp")
elif folder.startswith("jp.co.ponos.battlecats"):
versions.append(folder.replace("jp.co.ponos.battlecats", ""))
return versions
def pull_save_data(game_version: str) -> Optional[str]:
"""
Pull save data from a game version
Args:
game_version (str): The game version to pull from
Returns:
Optional[str]: The path to the pulled save data
"""
if not is_ran_as_root():
return None
package_name = "jp.co.ponos.battlecats" + game_version.replace("jp", "")
path = get_data_path() + package_name + "/files/SAVE_DATA"
if not os.path.exists(path):
return None
return path
def is_ran_as_root() -> bool:
"""
    Check if the program is run as root
    Returns:
        bool: Whether the program is run as root
"""
if not helper.is_android():
return False
try:
os.listdir(get_data_path())
except PermissionError:
helper.colored_text(
"Root access is required to get installed game versions. Try adding sudo before the run command",
base=helper.RED,
)
return False
return True
def rerun_game(version: str) -> None:
"""
Rerun the game on the device without adb
Args:
version (str): The game version to rerun
"""
if not is_ran_as_root():
return
package_name = "jp.co.ponos.battlecats" + version.replace("jp", "")
subprocess.run(
f"sudo pkill -f {package_name}", capture_output=True, check=False, shell=True
)
subprocess.run(
f"sudo monkey -p {package_name} -c android.intent.category.LAUNCHER 1",
capture_output=True,
check=False,
shell=True,
)
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/root_handler.py
| 0.684791 | 0.23421 |
root_handler.py
|
pypi
|
import subprocess
from typing import Any, Optional
import requests
from . import config_manager, helper
def update(latest_version: str, command: str = "py") -> bool:
"""Update pypi package testing for py and python"""
helper.colored_text("Updating...", base=helper.GREEN)
try:
full_cmd = f"{command} -m pip install --upgrade battle-cats-save-editor=={latest_version}"
subprocess.run(
full_cmd,
shell=True,
capture_output=True,
check=True,
)
helper.colored_text("Update successful", base=helper.GREEN)
return True
except subprocess.CalledProcessError:
return False
def try_update(latest_version: str):
"""
Try to update the editor
Args:
latest_version (str): The latest version of the editor
"""
success = update(latest_version, "py")
if success:
return
success = update(latest_version, "python3")
if success:
return
success = update(latest_version, "python")
if success:
return
helper.colored_text(
"Update failed\nYou may need to manually update with py -m pip install -U battle-cats-save-editor",
base=helper.RED,
)
def get_local_version() -> str:
"""Returns the local version of the editor"""
return "1.8"
def get_version_info() -> Optional[tuple[str, str]]:
"""Gets the latest version of the program"""
package_name = "battle-cats-save-editor"
try:
response = requests.get(f"https://pypi.org/pypi/{package_name}/json")
response.raise_for_status()
data = response.json()
except requests.exceptions.RequestException:
return None
info = (
get_pypi_version(data),
get_latest_prerelease_version(data),
)
return info
def get_pypi_version(data: dict[str, Any]) -> str:
"""Get latest pypi version of the program"""
return data["info"]["version"]
def get_latest_prerelease_version(data: dict[str, Any]) -> str:
"""Get latest prerelease version of the program"""
releases = list(data["releases"])
releases.reverse()
for release in releases:
if "b" in release:
return release
return ""
def pypi_is_newer(local_version: str, pypi_version: str, remove_b: bool = True) -> bool:
"""Checks if the local version is newer than the pypi version"""
if remove_b:
if "b" in pypi_version:
pypi_version = pypi_version.split("b")[0]
if "b" in local_version:
local_version = local_version.split("b")[0]
return pypi_version > local_version
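# Note: this is a plain string comparison, which is lexicographic rather than
# numeric; e.g. "1.10" compares as smaller than "1.9". It behaves as intended
# only while each version component stays single-digit.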
def check_update(version_info: tuple[str, str]) -> tuple[bool, str]:
"""Checks if the editor is updated"""
local_version = get_local_version()
pypi_version, latest_prerelease_version = version_info
check_pre = "b" in local_version or config_manager.get_config_value_category(
"START_UP", "UPDATE_TO_BETAS"
)
if check_pre and pypi_is_newer(
local_version, latest_prerelease_version, remove_b=False
):
helper.colored_text("Prerelease update available\n", base=helper.GREEN)
return True, latest_prerelease_version
if pypi_is_newer(local_version, pypi_version):
helper.colored_text("Stable update available\n", base=helper.GREEN)
return True, pypi_version
helper.colored_text("No update available\n", base=helper.GREEN)
return False, local_version
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/updater.py
| 0.767646 | 0.178633 |
updater.py
|
pypi
|
import enum
import os
import re
import subprocess
from typing import Union
from . import helper, user_input_handler, config_manager
class ADBExceptionTypes(enum.Enum):
"""ADB exception types"""
NO_DEVICE = enum.auto()
DEVICE_OFFLINE = enum.auto()
PATH_NOT_FOUND = enum.auto()
ADB_NOT_INSTALLED = enum.auto()
MORE_THAN_ONE_DEVICE = enum.auto()
UNKNOWN = enum.auto()
class ADBException(Exception):
"""ADB exception"""
def __init__(self, exception_type: ADBExceptionTypes, message: str = ""):
"""Initialize exception."""
super().__init__(message)
self.message = message
self.exception_type = exception_type
def adb_pull(package_name: str, device_file_path: str, local_file_path: str):
"""Pull a file from a device"""
if local_file_path:
local_file_path = f'"{local_file_path}"'
path = f"/data/data/{package_name}/{device_file_path}"
run_adb_command(f'pull "{path}" {local_file_path}')
def find_game_versions() -> list[str]:
"""
Find installed game versions
Returns:
list[str]: List of game versions
"""
package_name = "jp.co.ponos.battlecats"
try:
run_adb_command("devices")
except ADBException as exception:
return adb_err_handler(exception)
try:
output = str(
subprocess.run(
"adb shell ls /data/data/", capture_output=True, check=True, shell=True
).stdout
)
except subprocess.CalledProcessError:
return []
package_names: list[str] = re.findall(f"{package_name}..", output)
for i, package_name in enumerate(package_names):
package_names[i] = (
package_name.replace("\\n", "jp")
.replace(" ", "jp")
.replace("jp.co.ponos.battlecats", "")
)
return package_names
def adb_push(package_name: str, local_file_path: str, device_file_path: str):
"""Push a file to a device"""
path = f"/data/data/{package_name}/{device_file_path}"
run_adb_command(f'push "{local_file_path}" "{path}"')
def adb_delete_file(package_name: str, device_file_path: str, options: str = ""):
"""Delete a file on a device"""
path = f"/data/data/{package_name}/{device_file_path}"
run_adb_command(f'shell "su 0 rm "{path}" {options}"')
def adb_close_process(package_name: str):
"""Close a process"""
run_adb_command(f"shell am force-stop {package_name}")
def adb_run_process(package_name: str):
"""Run a process"""
run_adb_command(f"shell monkey -p {package_name} -v 1")
def adb_reboot():
"""Reboot adb server"""
helper.run_in_background(adb_reboot_background)
def adb_reboot_background():
"""
Reboot adb server in background
"""
adb_kill_server()
is_adb_installed()
def adb_root():
"""Start adb server as root"""
subprocess.run("adb root", shell=True, check=True, text=True, capture_output=True)
def is_adb_installed():
"""Test if adb is installed"""
try:
subprocess.run(
"adb start-server",
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
check=True,
)
except subprocess.CalledProcessError:
return False
return True
def run_adb_command(command: str) -> bool:
"""Run an ADB command"""
command = f"adb {command}"
if not is_adb_installed():
raise ADBException(ADBExceptionTypes.ADB_NOT_INSTALLED)
try:
adb_root()
subprocess.run(command, shell=True, check=True, text=True, capture_output=True)
except subprocess.CalledProcessError as err:
adb_error_handler(err)
return True
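# Hedged usage sketch (illustrative only, not part of the original module): callers
# are expected to catch ADBException and hand it to adb_err_handler (defined below):
#     try:
#         run_adb_command("devices")
#     except ADBException as exc:
#         adb_err_handler(exc)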
def adb_kill_server():
"""Kill ADB server"""
try:
subprocess.run(
"adb kill-server", shell=True, check=True, text=True, capture_output=True
)
except subprocess.CalledProcessError as err:
adb_error_handler(err)
def adb_error_handler(err: subprocess.CalledProcessError):
"""Handle ADB errors"""
error_text = str(err.stderr).lower()
if not error_text:
error_text = str(err.stdout).lower()
if not error_text:
error_text = str(err.output).lower()
if "not found" in error_text:
raise ADBException(ADBExceptionTypes.NO_DEVICE)
if "offline" in error_text:
raise ADBException(ADBExceptionTypes.DEVICE_OFFLINE)
if "does not exist" in error_text:
raise ADBException(ADBExceptionTypes.PATH_NOT_FOUND)
if "'adb' is not recognized" in error_text:
raise ADBException(ADBExceptionTypes.ADB_NOT_INSTALLED)
if "more than one device" in error_text:
raise ADBException(ADBExceptionTypes.MORE_THAN_ONE_DEVICE)
if "no such file or directory" in error_text:
raise ADBException(ADBExceptionTypes.PATH_NOT_FOUND)
raise ADBException(ADBExceptionTypes.UNKNOWN, error_text)
def find_adb_path() -> Union[str, None]:
"""Find adb path automatically in common locations"""
drive_letters = ["C", "D", "E"]
paths = [
"LDPlayer\\LDPlayer4.0",
"LDPlayer\\LDPlayer9",
"Program Files (x86)\\Nox\\bin",
"adb",
]
found_paths: list[str] = []
for drive_letter in drive_letters:
for path in paths:
path = f"{drive_letter}:\\{path}"
if os.path.exists(path):
found_paths.append(path)
if found_paths:
for path in found_paths:
if "adb" not in path:
return path
return found_paths[0]
return None
def if_windows() -> bool:
"""Check if windows"""
return os.name == "nt"
def add_to_path() -> None:
"""Try to add adb to path environment variable automatically"""
if not if_windows():
helper.colored_text(
"ADB path not added to PATH environment variable.\n"
"Please add it manually to your system PATH variable.",
helper.RED,
)
return
adb_path = find_adb_path()
if not adb_path:
adb_path = input(
"Please enter the path to the folder than contains adb: download link here: https://dl.google.com/android/repository/platform-tools-latest-windows.zip:"
)
if os.path.isfile(adb_path):
adb_path = os.path.dirname(adb_path)
print(f"Adding {adb_path} to your path environment variable")
backup = os.environ["PATH"]
backup_path = os.path.join(config_manager.get_app_data_folder(), "path_backup.txt")
helper.write_file_string(backup_path, backup)
helper.colored_text(
f"Your old PATH environment variable has been backed up to &{backup_path}&"
)
subprocess.run(f'setx PATH "{adb_path};%PATH%"', shell=True, check=True, text=True)
print("Successfully added adb to path")
def adb_err_handler(err: ADBException):
"""Handle ADB errors"""
if err.exception_type in (
ADBExceptionTypes.NO_DEVICE,
ADBExceptionTypes.DEVICE_OFFLINE,
):
helper.colored_text(
"Error: No device with an adb connection can be found, please connect one and try again. (You may have to wait aprox 1min for the device to be detected)",
base=helper.RED,
)
adb_reboot()
elif err.exception_type == ADBExceptionTypes.PATH_NOT_FOUND:
helper.colored_text(
'Error: SAVE_DATA couldn\'t be located, please make sure you have loaded into the game and clicked "START" and try again.',
base=helper.RED,
)
elif err.exception_type == ADBExceptionTypes.ADB_NOT_INSTALLED:
add_adb = (
user_input_handler.colored_input(
"Error, adb is not in your Path environment variable. There is a tutorial in the github's readme. Would you like to try to add adb to your path now?(&y&/&n&):"
)
== "y"
)
if add_adb:
add_to_path()
print("Please re-run the editor to try again")
elif err.exception_type == ADBExceptionTypes.MORE_THAN_ONE_DEVICE:
helper.colored_text(
"Error: More than one device with an adb connection can be found, please make sure that only 1 device is connected. (You may have to wait aprox 1min for the device to be detected)",
base=helper.RED,
)
adb_reboot()
else:
helper.colored_text(
"Error: " + str(err.message),
base=helper.RED,
)
helper.exit_editor()
def adb_pull_save_data(game_version: str) -> str:
"""Pull save data from device"""
helper.colored_text(
"Pulling save data from device...",
base=helper.DARK_YELLOW,
)
try:
adb_pull(
get_package_name(game_version),
"files/SAVE_DATA",
helper.get_save_path_home(),
)
except ADBException as err:
adb_err_handler(err)
return helper.get_save_path_home()
def adb_push_save_data(game_version: str, path: str) -> None:
"""Push save data to device"""
helper.colored_text(
"Pushing save data to device...",
base=helper.DARK_YELLOW,
)
try:
adb_push(get_package_name(game_version), path, "files/SAVE_DATA")
except ADBException as err:
adb_err_handler(err)
def rerun_game(game_version: str) -> None:
"""Rerun game"""
helper.colored_text(
"Rerunning game...",
base=helper.DARK_YELLOW,
)
try:
adb_close_process(get_package_name(game_version))
adb_run_process(get_package_name(game_version))
except ADBException as err:
adb_err_handler(err)
def adb_clear_save_data(game_version: str) -> None:
"""Clear save data"""
try:
adb_delete_file(get_package_name(game_version), "/files/*SAVE_DATA*")
adb_delete_file(get_package_name(game_version), "/shared_prefs", "-r -f")
except ADBException as err:
adb_err_handler(err)
def get_package_name(game_version: str) -> str:
"""Get package name"""
if game_version == "jp":
game_version = ""
return f"jp.co.ponos.battlecats{game_version}"
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/adb_handler.py
| 0.611034 | 0.161949 |
adb_handler.py
|
pypi
|
from typing import Any, Union
from . import (
helper,
user_input_handler,
config_manager,
)
from .edits import basic, cats, gamototo, levels, other, save_management
def fix_elsewhere_old(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Fix the elsewhere error using 2 save files"""
main_token = save_stats["token"]
main_iq = save_stats["inquiry_code"]
input(
"Select a save file that is currently loaded in-game that doesn't have the elsehere error and is not banned\nPress enter to continue:"
)
new_path = helper.select_file(
"Select a clean save file",
helper.get_save_file_filetype(),
helper.get_save_path(),
)
if not new_path:
print("Please select a save file")
return save_stats
data = helper.load_save_file(new_path)
new_stats = data["save_stats"]
new_token = new_stats["token"]
new_iq = new_stats["inquiry_code"]
save_stats["token"] = new_token
save_stats["inquiry_code"] = new_iq
helper.colored_text(f"Replaced inquiry code: &{main_iq}& with &{new_iq}&")
helper.colored_text(f"Replaced token: &{main_token}& with &{new_token}&")
return save_stats
FEATURES: dict[str, Any] = {
"세이브 관리": {
"세이브 저장": save_management.save.save_save,
"세이브 저장후 기종변경 코드 받기": save_management.server_upload.save_and_upload,
"파일에 저장하기": save_management.save.save,
"Save changes and push save data to the game with adb (don't re-open game)": save_management.save.save_and_push,
"Save changes and push save data to the game with adb (re-open game)": save_management.save.save_and_push_rerun,
"세이브 데이터를 JSON으로 컴파일": save_management.other.export,
"Clear save data with adb (used to generate a new account without re-installing the game)": save_management.other.clear_data,
"Upload tracked bannable items (This is done automatically when saving or exiting)": save_management.server_upload.upload_metadata,
"새로운 세이브 데이터 로딩": save_management.load.select,
"세이브 데이터 국가 변경": save_management.convert.convert_save,
# "Manage Presets": preset_handler.preset_manager,
},
"아이템": {
"통조림": basic.basic_items.edit_cat_food,
"XP": basic.basic_items.edit_xp,
"티켓": {
"냥코 티켓": basic.basic_items.edit_normal_tickets,
"레어 티켓": basic.basic_items.edit_rare_tickets,
"플래티넘 티켓": basic.basic_items.edit_platinum_tickets,
"플래티넘 조각": basic.basic_items.edit_platinum_shards,
"레전드 티켓": basic.basic_items.edit_legend_tickets,
},
"NP": basic.basic_items.edit_np,
"리더쉽": basic.basic_items.edit_leadership,
"배틀 아이템": basic.basic_items.edit_battle_items,
"캣츠아이": basic.catseyes.edit_catseyes,
"개다래 / 수석": basic.catfruit.edit_catfruit,
"본능 구슬": basic.talent_orbs_new.edit_talent_orbs,
"고양이 드링크": basic.basic_items.edit_catamins,
"항목 구성표(금지할 수 없는 항목을 얻을 수 있음)": other.scheme_item.edit_scheme_data,
},
"가마토토 / 오토토": {
"오토토 조수": basic.basic_items.edit_engineers,
"성 재료": basic.ototo_base_mats.edit_base_mats,
"고양이 드링크": basic.basic_items.edit_catamins,
"가마토토 XP / 레벨": gamototo.gamatoto_xp.edit_gamatoto_xp,
"오토토 대포": gamototo.ototo_cat_cannon.edit_cat_cannon,
"가마토토 대원": gamototo.helpers.edit_helpers,
"가마토토가 게임을 튕기는 버그 수정": gamototo.fix_gamatoto.fix_gamatoto,
},
"캐릭터 / 특능": {
"캐릭터 획득 / 제거": {
"캐릭터 획득": cats.get_remove_cats.get_cat,
"캐릭터 제거": cats.get_remove_cats.remove_cats,
},
"캐릭터 업그레이드": cats.upgrade_cats.upgrade_cats,
"캐릭터 3단 진화": {
"3단 진화 획득": cats.evolve_cats.get_evolve,
"3단 진화 제거": cats.evolve_cats.remove_evolve,
"강제 3단 진화 (3단 진화가 없는 고양이의 경우 빈 고양이로 채워짐)": cats.evolve_cats.get_evolve_forced,
},
"본능": {
"선택한 각 고양이의 본능을 개별적으로 설정": cats.talents.edit_talents_individual,
"선택된 모든 고양이 본능 만렙 / 제거": cats.talents.max_all_talents,
},
"캐릭터 도감": {
"캐릭터 도감 획득 (통조림 X)": cats.clear_cat_guide.collect_cat_guide,
"캐릭터 도감 미확인 상태": cats.clear_cat_guide.remove_cat_guide,
},
'스테이지 드롭 보상 받기 (보수 상태 획득)': cats.chara_drop.get_character_drops,
"특능 편집": cats.upgrade_blue.upgrade_blue,
},
"스테이지 / 보물": {
"스테이지 클리어 / 언클리어": {
"선택한 모든 챕터의 모든 챕터에서 각 스테이지 클리어": levels.main_story.clear_all,
"선택한 각 챕터의 모든 챕터에서 각 스테이지 클리어": levels.main_story.clear_each,
},
"보물": {
"보물 그룹으로 설정": levels.treasures.treasure_groups,
"모든 보물을 따로따로 설정": levels.treasures.specific_stages,
"각 장별로 한번에 설정": levels.treasures.specific_stages_all_chapters,
},
"좀비스테이지": levels.outbreaks.edit_outbreaks,
"이벤트 스테이지": levels.event_stages.event_stages,
"구레전드 스테이지": levels.event_stages.stories_of_legend,
"신레전드 스테이지": levels.uncanny.edit_uncanny,
"마계편 클리어": levels.aku.edit_aku,
"마계의 문 열기": levels.unlock_aku_realm.unlock_aku_realm,
#"Gauntlets": levels.gauntlet.edit_gauntlet,
#"Collab Gauntlets": levels.gauntlet.edit_collab_gauntlet,
"탑": levels.towers.edit_tower,
"초수 스테이지": levels.behemoth_culling.edit_behemoth_culling,
"미래편 시간 점수": levels.itf_timed_scores.timed_scores,
"챌린지 배틀 점수": basic.basic_items.edit_challenge_battle,
"튜토리얼 클리어": levels.clear_tutorial.clear_tutorial,
"냥코 도장 점수": basic.basic_items.edit_dojo_score,
"발굴 스테이지 추가": levels.enigma_stages.edit_enigma_stages,
"필리버스터 스테이지 언클리어": levels.allow_filibuster_clearing.allow_filibuster_clearing,
"레전드 퀘스트": levels.legend_quest.edit_legend_quest,
},
"문의코드 / 토큰 / 계정": {
"문의코드": basic.basic_items.edit_inquiry_code,
"토큰": basic.basic_items.edit_token,
"언밴 / 오류 해결": other.fix_elsewhere.fix_elsewhere,
#"Old Fix elsewhere error / Unban account (needs 2 save files)": fix_elsewhere_old,
"새 문의코드와 토큰 생성": other.create_new_account.create_new_account,
},
"기타": {
"레어 뽑기 시드": basic.basic_items.edit_rare_gacha_seed,
"캐릭터 편성": basic.basic_items.edit_unlocked_slots,
"리스타트팩 설정": basic.basic_items.edit_restart_pack,
"냥코 메달 설정": other.meow_medals.medals,
"플레이타임 설정": other.play_time.edit_play_time,
"적 도감 설정": other.unlock_enemy_guide.enemy_guide,
"미션 설정": other.missions.edit_missions,
"일반 티켓 최대 거래 진행률(금지할 수 없는 희귀 티켓 허용)": other.trade_progress.set_trade_progress,
"골드패스 설정": other.get_gold_pass.get_gold_pass,
"모든 유저 랭크 보상 제거 / 획득 (아이템을 주지 않음)": other.claim_user_rank_rewards.edit_rewards,
"냥코 사당 레벨 / XP": other.cat_shrine.edit_shrine_xp,
},
"오류해결": {
"시간 오류 해결": other.fix_time_issues.fix_time_issues,
"캐릭터 편성 오류 해결": other.unlock_equip_menu.unlock_equip,
"튜토리얼 클리어": levels.clear_tutorial.clear_tutorial,
"언밴 / 오류 해결": other.fix_elsewhere.fix_elsewhere,
#"Old Fix elsewhere error / Unban account (needs 2 save files)": fix_elsewhere_old,
"가마토토 / 오토토 오류 해결": gamototo.fix_gamatoto.fix_gamatoto,
},
"에디터 설정 (사용 X)": {
"Edit LOCALIZATION": config_manager.edit_locale,
"Edit DEFAULT_COUNTRY_CODE": config_manager.edit_default_gv,
"Edit DEFAULT_SAVE_PATH": config_manager.edit_default_save_file_path,
"Edit FIXED_SAVE_PATH": config_manager.edit_fixed_save_path,
"Edit EDITOR settings": config_manager.edit_editor_settings,
"Edit START_UP settings": config_manager.edit_start_up_settings,
"Edit SAVE_CHANGES settings": config_manager.edit_save_changes_settings,
"Edit SERVER settings": config_manager.edit_server_settings,
"Edit config path": config_manager.edit_config_path,
},
"종료": helper.exit_check_changes,
}
def get_feature(
selected_features: Any, search_string: str, results: dict[str, Any]
) -> dict[str, Any]:
"""Search for a feature if the feature name contains the search string"""
for feature in selected_features:
feature_data = selected_features[feature]
if isinstance(feature_data, dict):
feature_data = get_feature(feature_data, search_string, results)
if search_string.lower().replace(" ", "") in feature.lower().replace(" ", ""):
results[feature] = selected_features[feature]
return results
def show_options(
save_stats: dict[str, Any], features_to_use: dict[str, Any]
) -> dict[str, Any]:
"""Allow the user to either enter a feature number or a feature name, and get the features that match"""
if (
not config_manager.get_config_value_category("EDITOR", "SHOW_CATEGORIES")
and FEATURES == features_to_use
):
user_input = ""
else:
prompt = (
"에딧할 것을 선택하세요."
)
if config_manager.get_config_value_category(
"EDITOR", "SHOW_FEATURE_SELECT_EXPLANATION"
):
prompt += "\n원하시는 메뉴의 번호를 입력해주세요"
user_input = user_input_handler.colored_input(f"{prompt}:\n")
user_int = helper.check_int(user_input)
results = []
if user_int is None:
results = get_feature(features_to_use, user_input, {})
else:
if user_int < 1 or user_int > len(features_to_use) + 1:
helper.colored_text("번호가 메뉴에 없습니다.", helper.RED)
return show_options(save_stats, features_to_use)
if FEATURES != features_to_use:
if user_int - 2 < 0:
return menu(save_stats)
results = features_to_use[list(features_to_use)[user_int - 2]]
else:
results = features_to_use[list(features_to_use)[user_int - 1]]
if not isinstance(results, dict):
save_stats_return = results(save_stats)
if save_stats_return is None:
return save_stats
return save_stats_return
if len(results) == 0:
helper.colored_text("해당 메뉴가 없습니다.", helper.RED)
return menu(save_stats)
if len(results) == 1 and isinstance(list(results.values())[0], dict):
results = results[list(results)[0]]
if len(results) == 1:
save_stats_return = results[list(results)[0]](save_stats)
if save_stats_return is None:
return save_stats
return save_stats_return
helper.colored_list(["뒤로가기"] + list(results))
return show_options(save_stats, results)
def menu(
save_stats: dict[str, Any], path_save: Union[str, None] = None
) -> dict[str, Any]:
"""Show the menu and allow the user to select a feature to edit"""
if path_save:
helper.set_save_path(path_save)
if config_manager.get_config_value_category("EDITOR", "SHOW_CATEGORIES"):
helper.colored_list(list(FEATURES))
save_stats = show_options(save_stats, FEATURES)
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/feature_handler.py
| 0.490968 | 0.286512 |
feature_handler.py
|
pypi
|
from typing import Any, Optional, Tuple, Union
from . import helper, locale_handler
def handle_all_at_once(
ids: list[int],
all_at_once: bool,
data: list[int],
names: list[Any],
item_name: str,
group_name: str,
explain_text: str = "",
) -> list[int]:
"""Handle all at once option"""
locale_manager = locale_handler.LocalManager.from_config()
first = True
value = None
for item_id in ids:
if all_at_once and first:
value = helper.check_int(
colored_input(
locale_manager.search_key("enter_item_name_explain")
% (item_name, explain_text)
)
)
first = False
elif not all_at_once:
value = helper.check_int(
colored_input(
locale_manager.search_key("enter_item_name_group_explain")
% (item_name, group_name, names[item_id], explain_text)
)
)
if value is None:
continue
data[item_id] = value
return data
def create_all_list(
ids: list[str],
max_val: int,
) -> dict[str, Any]:
"""Creates a list with an all at once option"""
all_at_once = False
if f"{max_val}" in ids:
ids_s = list(range(1, max_val))
ids = [format(x, "02d") for x in ids_s]
all_at_once = True
return {"ids": ids, "at_once": all_at_once}
def create_all_list_inc(ids: list[str], max_val: int) -> dict[str, Any]:
"""Creates a list with an all at once option and include all"""
return create_all_list(ids, max_val)
def create_all_list_not_inc(ids: list[str], max_val: int) -> list[str]:
"""Creates a list with an all at once option and don't include all"""
return create_all_list(ids, max_val)["ids"]
def get_range(
usr_input: str,
length: Union[int, None] = None,
min_val: int = 0,
all_ids: Union[list[int], None] = None,
) -> list[int]:
"""Get a range of numbers from user input"""
locale_manager = locale_handler.LocalManager.from_config()
ids: list[int] = []
for item in usr_input.split(" "):
if item.lower() == locale_manager.search_key("all_text").lower():
if length is None and all_ids is None:
helper.colored_text(
locale_manager.search_key("invalid_all"), helper.RED
)
return []
if all_ids:
return all_ids
if length is not None:
return list(range(min_val, length))
if "-" in item:
start_s, end_s = item.split("-")
start = helper.check_int(start_s)
end = helper.check_int(end_s)
if start is None or end is None:
helper.colored_text(
locale_manager.search_key("invalid_range_format"),
helper.RED,
)
return ids
if start > end:
start, end = end, start
ids.extend(list(range(start, end + 1)))
else:
item_id = helper.check_int(item)
if item_id is None:
helper.colored_text(
locale_manager.search_key("invalid_int"), helper.RED
)
return ids
ids.append(item_id)
return ids
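# Illustrative examples (added, not part of the original module), assuming the
# locale's "all" keyword is the English word "all":
#     get_range("1-3 7", length=10)    -> [1, 2, 3, 7]
#     get_range("all", length=5)       -> [0, 1, 2, 3, 4]
#     get_range("all", all_ids=[2, 4]) -> [2, 4]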
def colored_input(
dialog: str, base: Optional[str] = None, new: Optional[str] = None
) -> str:
"""Format dialog as a colored string"""
if base is None:
base = helper.WHITE
if new is None:
new = helper.DARK_YELLOW
helper.colored_text(dialog, end="", base=base, new=new)
return input()
def get_range_ids(group_name: str, length: int) -> list[int]:
"""Get a range of ids from user input"""
locale_manager = locale_handler.LocalManager.from_config()
ids = get_range(
colored_input(locale_manager.search_key("enter_range_text") % (group_name)),
length,
)
return ids
def select_options(
options: list[str],
mode: Optional[str] = None,
extra_data: Union[list[Any], None] = None,
offset: int = 0,
) -> Tuple[list[int], bool]:
"""Select an option or multiple options from a list"""
if len(options) == 1:
return [0], True
locale_manager = locale_handler.LocalManager.from_config()
if mode is None:
mode = locale_manager.search_key("edit_text")
helper.colored_list(options, extra_data=extra_data, offset=offset)
total = len(options)
helper.colored_text(f"{total+1}. {locale_manager.search_key('select_all')}")
ids_s = colored_input(
locale_manager.search_key("select_list") % (mode, mode)
).split(" ")
individual = True
if str(total + 1) in ids_s:
ids = list(range(1, total + 1))
individual = False
ids_s = helper.int_to_str_ls(ids)
ids = helper.parse_int_list(ids_s, -1)
for item_id in ids:
if item_id < 0 or item_id > total - 1:
helper.colored_text(
locale_manager.search_key("invalid_range") % (total + 1),
helper.RED,
)
return select_options(options, mode, extra_data, offset)
return ids, individual
def select_inc(
options: list[str],
mode: Optional[str] = None,
extra_data: Union[list[Any], None] = None,
offset: int = 0,
) -> Tuple[list[int], bool]:
"""Select an option or multiple options from a list and include all"""
return select_options(options, mode, extra_data, offset)
def select_not_inc(
options: list[str],
mode: Optional[str] = None,
extra_data: Union[list[Any], None] = None,
offset: int = 0,
) -> list[int]:
"""Select an option or multiple options from a list and don't include all"""
return select_options(options, mode, extra_data, offset)[0]
def select_single(
options: list[str],
mode: Optional[str] = None,
title: str = "",
allow_text: bool = False,
) -> int:
"Select a single option from a list"
locale_manager = locale_handler.LocalManager.from_config()
if not options:
raise ValueError(locale_manager.search_key("error_no_options"))
if len(options) == 1:
return 1
helper.colored_list(options)
if not title:
title = locale_manager.search_key("select_option_to") % (mode)
val = colored_input(title)
if allow_text:
if val in options:
return options.index(val) + 1
val = helper.check_int(val)
if val is None:
helper.colored_text(locale_manager.search_key("invalid_int"), helper.RED)
return select_single(options, mode, title, allow_text)
if val < 1 or val > len(options):
helper.colored_text(
locale_manager.search_key("invalid_range") % (len(options)),
helper.RED,
)
return select_single(options, mode, title, allow_text)
return val
def get_int(dialog: str, default: Optional[int] = None) -> int:
"""Get user input as an integer and keep asking until a valid integer is entered"""
helper.colored_text(dialog, end="")
locale_manager = locale_handler.LocalManager.from_config()
while True:
try:
val = input()
val = val.strip(" ")
return int(val)
except ValueError:
if default is not None:
return default
helper.colored_text(locale_manager.search_key("invalid_int"), helper.RED)
def ask_if_individual(item_name: str) -> bool:
"""Ask if the user wants to edit an individual item"""
locale_manager = locale_handler.LocalManager.from_config()
is_individual = (
colored_input(
locale_manager.search_key("ask_individual") % (item_name),
)
== "1"
)
return is_individual
def get_yes_no(dialog: str) -> bool:
"""Get user input as a yes or no"""
locale_manager = locale_handler.LocalManager.from_config()
while True:
val = colored_input(dialog)
if val:
if val.lower()[0] == "y":
return True
if val.lower()[0] == "n":
return False
helper.colored_text(locale_manager.search_key("invalid_yes_no"), helper.RED)
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/user_input_handler.py
| 0.776835 | 0.211753 |
user_input_handler.py
|
pypi
|
from typing import Any, Union
from ... import helper, user_input_handler
from . import cat_id_selector, cat_helper
def set_level_caps(save_stats: dict[str, Any]) -> dict[str, Any]:
"""
Set the level caps for the cats
Args:
save_stats (dict[str, Any]): The save stats
Returns:
dict[str, Any]: The save stats
"""
unit_max_data = cat_helper.get_unit_max_levels(helper.is_jp(save_stats))
rarities = cat_helper.get_rarities(helper.is_jp(save_stats))
for cat_id in range(len(save_stats["cats"])):
base_level = save_stats["cat_upgrades"]["Base"][cat_id]
if unit_max_data is not None:
max_base_level = cat_helper.get_unit_max_level(unit_max_data, cat_id)[0]
else:
max_base_level = 50000
try:
rarity = rarities[cat_id]
except IndexError:
rarity = 0
max_base_level_ur = cat_helper.get_max_level(save_stats, rarity, cat_id)
level_cap = cat_helper.get_level_cap_increase_amount(
min(base_level, max_base_level, max_base_level_ur)
)
save_stats["catseye_cat_data"][cat_id] = level_cap
save_stats["catseye_related_data"]["Base"][cat_id] = level_cap + 10
return save_stats
def set_user_popups(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Set user popups, stops the user rank popups from spamming up the screen"""
save_stats["user_rank_popups"]["Value"] = 0xFFFFFF
return save_stats
def get_plus_base(usr_input: str) -> tuple[Union[int, None], Union[int, None]]:
"""Get the base and plus level of an input"""
split = usr_input.split("+")
base = None
plus = None
if split[0]:
base = helper.check_int_max(split[0])
if len(split) == 2 and split[1]:
plus = helper.check_int_max(split[1])
if len(split) == 1:
plus = 0
return base, plus
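# Illustrative examples (added, not part of the original module), assuming
# helper.check_int_max parses plain integers:
#     get_plus_base("5+12") -> (5, 12)
#     get_plus_base("+12")  -> (None, 12)
#     get_plus_base("5")    -> (5, 0)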
def upgrade_cats(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Upgrade specific cats"""
ids = cat_id_selector.select_cats(save_stats)
return upgrade_cats_ids(save_stats, ids)
def upgrade_handler(
data: dict[str, Any], ids: list[int], item_name: str, save_stats: dict[str, Any]
) -> dict[str, Any]:
"""Handler for cat upgrades"""
ids = helper.check_cat_ids(ids, save_stats)
base = data["Base"]
plus = data["Plus"]
individual = True
if len(ids) > 1:
individual = user_input_handler.ask_if_individual(
f"upgrades for each {item_name}"
)
first = True
base_lvl = None
plus_lvl = None
for cat_id in ids:
if not individual and first:
levels = get_plus_base(
user_input_handler.colored_input(
'Enter the base level followed by a "&+&" then the plus level, e.g 5&+&12. If you want to ignore the base level do &+&12, if you want to ignore the plus level do 5&+&:\n'
)
)
base_lvl = levels[0]
plus_lvl = levels[1]
first = False
elif individual:
helper.colored_text(
f"The current upgrade level of id &{cat_id}& is &{base[cat_id]+1}&+&{plus[cat_id]}&"
)
levels = get_plus_base(
user_input_handler.colored_input(
f'Enter the base level for {item_name}: &{cat_id}& followed by a "&+&" then the plus level, e.g 5&+&12. If you want to ignore the base level do &+&12, if you want to ignore the plus level do 5&+&:\n'
)
)
base_lvl = levels[0]
plus_lvl = levels[1]
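        # Note (added comment): the block below stores the base level zero-indexed
        # (a displayed level of 30 is saved as 29), while the plus level is stored
        # as-is.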
if base_lvl is not None:
if base_lvl > 0:
base_lvl = helper.clamp(base_lvl, 0, 50000)
base[cat_id] = base_lvl - 1
if plus_lvl is not None:
plus_lvl = helper.clamp(plus_lvl, 0, 50000)
plus[cat_id] = plus_lvl
data["Base"] = base
data["Plus"] = plus
return data
def upgrade_cats_ids(save_stats: dict[str, Any], ids: list[int]) -> dict[str, Any]:
"""Upgrade cats by ids"""
save_stats["cat_upgrades"] = upgrade_handler(
data=save_stats["cat_upgrades"],
ids=ids,
item_name="cat",
save_stats=save_stats,
)
save_stats = set_user_popups(save_stats)
# save_stats = set_level_caps(save_stats)
print("Successfully set cat levels")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/cats/upgrade_cats.py
| 0.737064 | 0.191649 |
upgrade_cats.py
|
pypi
|
from typing import Any
from ... import helper, user_input_handler, csv_handler, game_data_getter
from . import cat_id_selector
def set_t_ids(save_stats: dict[str, Any]) -> dict[str, Any]:
"""handler for editing treasure ids"""
unit_drops_stats = save_stats["unit_drops"]
data = get_data(helper.check_data_is_jp(save_stats))
usr_t_ids = user_input_handler.get_range(
user_input_handler.colored_input(
"Enter treasures ids (Look up item drop cats battle cats to find ids)(You can enter &all& to get all, a range e.g &1&-&50&, or ids separate by spaces e.g &5 4 7&):"
),
all_ids=data["t_ids"],
)
unit_drops_stats = set_t_ids_val(unit_drops_stats, data, usr_t_ids)
save_stats["unit_drops"] = unit_drops_stats
return save_stats
def set_c_ids(save_stats: dict[str, Any]) -> dict[str, Any]:
"""handler for editing cat ids"""
unit_drops_stats = save_stats["unit_drops"]
data = get_data(helper.check_data_is_jp(save_stats))
ids = cat_id_selector.select_cats(save_stats)
usr_c_ids = helper.check_cat_ids(ids, save_stats)
unit_drops_stats = set_c_ids_val(unit_drops_stats, data, usr_c_ids)
save_stats["unit_drops"] = unit_drops_stats
return save_stats
def get_character_drops(save_stats: dict[str, Any]) -> dict[str, Any]:
"""handler for getting character drops"""
flag_t_ids = (
user_input_handler.colored_input(
"Do you want to select treasure ids &(1)&, or cat ids? &(2)&:"
)
== "1"
)
if flag_t_ids:
save_stats = set_t_ids(save_stats)
else:
save_stats = set_c_ids(save_stats)
print("Successfully set unit drops")
return save_stats
def get_data(is_jp: bool) -> dict[str, Any]:
"""gets all of the cat ids and treasure ids that can be dropped"""
file_data = game_data_getter.get_file_latest("DataLocal", "drop_chara.csv", is_jp)
if file_data is None:
helper.error_text("Failed to get drop_chara.csv")
return {"t_ids": [], "c_ids": [], "indexes": []}
character_data = helper.parse_int_list_list(
csv_handler.parse_csv(file_data.decode("utf-8"))[1:]
)
treasure_ids = helper.copy_first_n(character_data, 0)
indexes = helper.copy_first_n(character_data, 1)
cat_ids = helper.copy_first_n(character_data, 2)
return {"t_ids": treasure_ids, "indexes": indexes, "c_ids": cat_ids}
def set_t_ids_val(
unit_drops_stats: list[int], data: dict[str, Any], user_t_ids: list[int]
) -> list[int]:
"""sets the treasure ids of the unit drops"""
for t_id in user_t_ids:
if t_id in data["t_ids"]:
index = data["t_ids"].index(t_id)
save_index = data["indexes"][index]
unit_drops_stats[save_index] = 1
return unit_drops_stats
def set_c_ids_val(
unit_drops_stats: list[int], data: dict[str, Any], user_t_ids: list[int]
) -> list[int]:
"""sets the cat ids of the unit drops"""
for c_id in user_t_ids:
if c_id in data["c_ids"]:
index = data["c_ids"].index(c_id)
save_index = data["indexes"][index]
unit_drops_stats[save_index] = 1
return unit_drops_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/cats/chara_drop.py
| 0.651355 | 0.29988 |
chara_drop.py
|
pypi
|
from typing import Any
from ... import helper, csv_handler, game_data_getter
from . import cat_id_selector
def get_evolve(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for evolving cats"""
cat_ids = cat_id_selector.select_cats(save_stats)
return evolve_handler_ids(
save_stats=save_stats,
val=2,
string="set",
ids=cat_ids,
forced=False,
)
def get_evolve_forced(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for evolving cats without the form check"""
cat_ids = cat_id_selector.select_cats(save_stats)
return evolve_handler_ids(
save_stats=save_stats,
val=2,
string="set",
ids=cat_ids,
forced=True,
)
def remove_evolve(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for de-evolving cats"""
cat_ids = cat_id_selector.select_cats(save_stats)
return evolve_handler_ids(
save_stats=save_stats,
val=0,
string="removed",
ids=cat_ids,
forced=True,
)
def evolve_handler(
save_stats: dict[str, Any], val: int, string: str, forced: bool
) -> dict[str, Any]:
"""Evolve specific cats"""
ids = cat_id_selector.select_cats(save_stats)
return evolve_handler_ids(save_stats, val, string, ids, forced)
def get_evolve_data(is_jp: bool) -> list[int]:
"""Get max form of cats"""
file_data = game_data_getter.get_file_latest(
"DataLocal", "nyankoPictureBookData.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get evolve data")
return []
data = helper.parse_int_list_list(csv_handler.parse_csv(file_data.decode("utf-8")))
forms = helper.copy_first_n(data, 2)
forms = helper.offset_list(forms, -1)
return forms
def evolve_handler_ids(
save_stats: dict[str, Any], val: int, string: str, ids: list[int], forced: bool
) -> dict[str, Any]:
"""Evolve specific cats by ids"""
ids = helper.check_cat_ids(ids, save_stats)
evolves = save_stats["unlocked_forms"]
if not forced:
form_data = get_evolve_data(helper.check_data_is_jp(save_stats))
length = min([len(ids), len(form_data)])
for i in range(length):
try:
evolves[ids[i]] = form_data[ids[i]]
except IndexError:
pass
else:
for cat_id in ids:
evolves[cat_id] = val
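    # Note (added comment): the loop below raises each cat's current form to at
    # least its unlocked form value, and any unlocked-form value of 1 is then
    # reset to 0 so only true forms (2 and above) stay flagged as unlocked.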
for cat_id, (unlocked_flag, current_flag) in enumerate(
zip(evolves, save_stats["current_forms"])
):
save_stats["current_forms"][cat_id] = max(unlocked_flag, current_flag)
flags_evolved = [0 if form == 1 else form for form in evolves]
save_stats["unlocked_forms"] = flags_evolved
print(f"Successfully {string} true forms of cats")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/cats/evolve_cats.py
| 0.587707 | 0.238196 |
evolve_cats.py
|
pypi
|
import os
from multiprocessing import Process
from typing import Any, Callable, Optional
from ... import (
csv_handler,
game_data_getter,
helper,
user_input_handler,
)
from ..levels import treasures
from . import cat_helper
def select_cats(save_stats: dict[str, Any], current: bool = True) -> list[int]:
"""Select cats"""
options: dict[str, Callable[[dict[str, Any]], list[int]]] = {
"Select currently unlocked cats": select_current_cats,
"Select cats of a certain rarity": select_cats_rarity,
"Select specific cat ids": select_cats_range,
"Select cats of a specific gacha banner": select_cats_gatya_banner,
"Select all cats": get_all_cats,
"Search by cat name": select_cat_names,
"Select all obtainable cats": select_cats_obtainable,
}
if not current:
del options["Select currently unlocked cats"]
choice_index = (
user_input_handler.select_single(list(options.keys()), title="Select cats:") - 1
)
cat_ids = options[list(options)[choice_index]](save_stats)
return cat_ids
def select_cats_obtainable(save_stats: dict[str, Any]) -> list[int]:
"""
Select cats that can be obtained
Args:
save_stats (dict[str, Any]): Save stats
Returns:
list[int]: Cat ids
"""
return filter_obtainable_cats(save_stats, get_all_cats(save_stats))
def select_current_cats(save_stats: dict[str, Any]) -> list[int]:
"""Select current cats"""
cats = save_stats["cats"]
cat_ids: list[int] = []
for i, cat_val in enumerate(cats):
if cat_val == 1:
cat_ids.append(i)
return cat_ids
def select_cats_rarity(save_stats: dict[str, Any]) -> list[int]:
"""Select cats of a certain rarity"""
ids = user_input_handler.select_not_inc(
options=cat_helper.TYPES,
mode="select",
)
is_jp = helper.is_jp(save_stats)
cat_ids = cat_helper.get_rarity(ids, is_jp)
return cat_ids
def select_cats_range(save_stats: dict[str, Any]) -> list[int]:
"""Select cats in a range"""
ids = user_input_handler.get_range(
user_input_handler.colored_input(
"Enter cat ids (Look up cro battle cats to find ids)(You can enter &all& to get all, a range e.g &1&-&50&, or ids separate by spaces e.g &5 4 7&):"
),
length=len(save_stats["cats"]),
)
return ids
def select_cats_gatya_banner(save_stats: dict[str, Any]) -> list[int]:
"""Select cats for a specific gacha banner"""
is_jp = helper.is_jp(save_stats)
file_data = game_data_getter.get_file_latest(
"DataLocal", "GatyaDataSetR1.csv", is_jp
)
if file_data is None:
helper.colored_text("Failed to get gatya banners")
return []
data = helper.parse_int_list_list(csv_handler.parse_csv(file_data.decode("utf-8")))
ids = user_input_handler.get_range(
user_input_handler.colored_input(
"Enter gacha banner id (Look up the gacha banners you want, then click on the image at the top, and look for the last digits of the file name (e.g royal fest = 602))(You can enter &all& to get all, a range e.g &1&-&50&, or ids separate by spaces e.g &5 4 7&):"
),
length=len(data),
)
data = treasures.remove_negative_1(data)
cat_ids: list[int] = []
for c_id in ids:
cat_ids.extend(data[c_id])
return cat_ids
def get_all_cats(save_stats: dict[str, Any]) -> list[int]:
"""Get all cats"""
return list(range(len(save_stats["cats"])))
def select_cat_names(save_stats: dict[str, Any]) -> list[int]:
"""
select_cat_names
Args:
save_stats (dict[str, Any]): save stats
Returns:
list[int]: cat ids
"""
all_names = get_cat_names(save_stats)
if all_names is None:
return []
name = user_input_handler.colored_input("Enter cat name:")
found_names = search_cat_names(name, all_names)
found_names = filter_cat_names(found_names)
if not found_names:
print("No cats with that name found")
return []
cat_ids: list[int] = []
cat_ids_str: list[str] = []
cat_names: list[str] = []
for cat_name, cat_id, _ in found_names:
cat_ids.append(cat_id)
cat_name = cat_name.replace("&", "\\&")
cat_names.append(cat_name)
cat_ids_str.append(f"Cat id: &{cat_id}&")
print("Select indexes of cats to select (Not the cat id itself):")
indexes = user_input_handler.select_not_inc(
cat_names, mode="select", extra_data=cat_ids_str
)
selected_ids: list[int] = []
for index in indexes:
try:
selected_ids.append(cat_ids[index])
except IndexError:
helper.colored_text(
f"Option is too high: {index} - Make sure to select the index on the left rather than the cat id",
helper.RED,
)
return selected_ids
def get_cat_by_form_and_id(
all_names: list[tuple[str, int, int]], cat_id: int, form_id: int
) -> Optional[tuple[str, int, int]]:
"""
Get cat by form and id
Args:
all_names (list[tuple[str, int, int]]): all names
cat_id (int): cat id
form_id (int): form id
Returns:
Optional[tuple[str, int, int]]: cat data
"""
for cat in all_names:
if cat[1] == cat_id and cat[2] == form_id:
return cat
return None
def get_cat_by_id(
cat_names: list[tuple[str, int, int]], cat_id_to_search: int
) -> list[tuple[str, int, int]]:
"""
Get cat by id
Args:
cat_names (list[tuple[str, int, int]]): list of cat names
cat_id_to_search (int): cat id to search for
Returns:
Optional[tuple[str, int, int]]: cat name, cat id, cat form
"""
cats: list[tuple[str, int, int]] = []
for cat_name, cat_id, cat_form in cat_names:
if cat_id == cat_id_to_search:
cats.append((cat_name, cat_id, cat_form))
return cats
def filter_cat_names(
cat_names: list[tuple[str, int, int]]
) -> list[tuple[str, int, int]]:
"""
Filter cat names by only selecting one of the forms
Args:
cat_names (list[tuple[str, int, int]]): list of cat names
Returns:
list[tuple[str, int, int]]: filtered cat names
"""
filtered_cat_ids: list[int] = []
cat_data: list[tuple[str, int, int]] = []
for cat_name, cat_id, cat_form in cat_names:
if cat_id not in filtered_cat_ids:
filtered_cat_ids.append(cat_id)
cat_data.append((cat_name, cat_id, cat_form))
return cat_data
def search_cat_names(
name: str, cat_names: list[tuple[str, int, int]]
) -> list[tuple[str, int, int]]:
"""
Search cat names
Args:
name (str): name to search for
cat_names (list[tuple[str, int, int]]): list of cat names
Returns:
list[tuple[str, int, int]]: list of cat names that match the search
"""
found_names: list[tuple[str, int, int]] = []
for cat_name, cat_id, form_id in cat_names:
if name.lower().replace(" ", "") in cat_name.lower().replace(" ", ""):
found_names.append((cat_name, cat_id, form_id))
return found_names
def download_10_files(game_version: str, file_names: list[str]) -> None:
"""
Download 10 files
Args:
game_version (str): game version
file_names (list[str]): file names
"""
for file_name in file_names:
game_data_getter.download_file(game_version, "resLocal", file_name, False)
def get_cat_names(save_stats: dict[str, Any]) -> Optional[list[tuple[str, int, int]]]:
"""
Get cat names and ids
Args:
save_stats (dict[str, Any]): save stats
Returns:
Optional[list[tuple[str, int, int]]]: cat names and ids
"""
is_jp = helper.is_jp(save_stats)
path = game_data_getter.get_path("resLocal", "", is_jp)
if path is None:
helper.colored_text("Failed to get cat names", helper.RED)
return None
file_path_dir = os.path.dirname(helper.get_file(path))
helper.create_dirs(file_path_dir)
if len(helper.find_files_in_dir(file_path_dir, "Unit_Explanation")) < len(
save_stats["cats"]
):
helper.colored_text(
"Downloading cat names for the first time... (This may take some time, but next time it will be much faster)",
helper.GREEN,
)
funcs: list[Process] = []
version = game_data_getter.get_latest_version(is_jp)
if version is None:
helper.colored_text("Failed to get cat names", helper.RED)
return None
all_file_names: list[str] = []
for cat_id, _ in enumerate(save_stats["cats"]):
file_name = f"Unit_Explanation{cat_id+1}_{helper.get_lang(is_jp)}.csv"
all_file_names.append(file_name)
file_names_split = helper.chunks(all_file_names, 10)
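        # Note (added comment): the file list is split into batches of 10 and each
        # batch is downloaded in its own process via helper.run_in_parallel, to
        # speed up the one-time fetch of every Unit_Explanation csv.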
for file_names in file_names_split:
funcs.append(
Process(
target=download_10_files,
args=(version, file_names),
)
)
helper.run_in_parallel(funcs)
names: list[tuple[str, int, int]] = []
for cat_id, _ in enumerate(save_stats["cats"]):
file_path = os.path.join(
file_path_dir, f"Unit_Explanation{cat_id+1}_{helper.get_lang(is_jp)}.csv"
)
data = csv_handler.parse_csv(
helper.read_file_string(file_path),
delimeter=helper.get_text_splitter(is_jp),
)
for form_id, form in enumerate(data):
name = form[0]
names.append((name, cat_id, form_id))
return names
def get_obtainability(save_stats: dict[str, Any]) -> list[int]:
"""
Get obtainability of cats
Args:
save_stats (dict[str, Any]): save stats
Returns:
list[int]: obtainability of cats (0 = not obtainable, 1 = obtainable)
"""
file_data = game_data_getter.get_file_latest(
"DataLocal", "nyankoPictureBookData.csv", helper.is_jp(save_stats)
)
if file_data is None:
helper.colored_text("Failed to get obtainability", helper.RED)
return []
data = helper.parse_int_list_list(csv_handler.parse_csv(file_data.decode("utf-8")))
is_obtainable = helper.copy_first_n(data, 0)
return is_obtainable
def get_obtainable_cats(save_stats: dict[str, Any]) -> list[int]:
"""
Get obtainable cats
Args:
save_stats (dict[str, Any]): save stats
Returns:
list[int]: obtainable cats
"""
obtainability = get_obtainability(save_stats)
return [i for i, x in enumerate(obtainability) if x == 1]
def filter_obtainable_cats(save_stats: dict[str, Any], cat_ids: list[int]) -> list[int]:
"""
Filter obtainable cats in a list of cat ids
Args:
save_stats (dict[str, Any]): save stats
cat_ids (list[int]): cat ids
Returns:
list[int]: obtainable cats
"""
obtainable_cats = get_obtainable_cats(save_stats)
return [i for i in cat_ids if i in obtainable_cats]
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/cats/cat_id_selector.py
| 0.713731 | 0.24168 |
cat_id_selector.py
|
pypi
|
from typing import Any, Optional
from ... import helper, item, csv_handler, game_data_getter, user_input_handler
from . import cat_id_selector
def get_talent_data(save_stats: dict[str, Any]) -> Optional[dict[Any, Any]]:
"""Get talent data for all cats"""
file_data = game_data_getter.get_file_latest(
"DataLocal", "SkillAcquisition.csv", helper.check_data_is_jp(save_stats)
)
if file_data is None:
helper.error_text("Failed to get talent data")
return None
talent_data_raw = helper.parse_int_list_list(
csv_handler.parse_csv(
file_data.decode("utf-8"),
)
)
file_data = game_data_getter.get_file_latest(
"resLocal", "SkillDescriptions.csv", helper.check_data_is_jp(save_stats)
)
if file_data is None:
helper.error_text("Failed to get talent names")
return None
talent_names = csv_handler.parse_csv(
file_data.decode("utf-8"),
helper.get_text_splitter(helper.check_data_is_jp(save_stats)),
)
columns = helper.int_to_str_ls(talent_data_raw[0])
new_talent_data: dict[Any, Any] = {}
for j in range(1, len(talent_data_raw)):
data = talent_data_raw[j]
cat_id: int = int(data[0])
new_talent_data[cat_id] = {}
for data_i, column in zip(data, columns):
new_talent_data = replace_name(
cat_id=cat_id,
column=column,
data=data_i,
talent_names=talent_names,
new_data=new_talent_data,
)
return new_talent_data
def replace_name(
cat_id: int,
column: str,
data: int,
talent_names: list[list[str]],
new_data: dict[Any, Any],
) -> dict[str, Any]:
"""Replace the text ids with the corresponding names"""
new_data[cat_id][column] = data
if (
"textID" in column or "tFxtID_F" in column
): # ponos made a typo, should be textID_F
new_data[cat_id][column] = talent_names[data][1]
stop_at = "<br>"
if stop_at in new_data[cat_id][column]:
index = new_data[cat_id][column].index(stop_at)
new_data[cat_id][column] = new_data[cat_id][column][:index]
return new_data
def find_order(
cat_talents: list[dict[str, Any]], cat_talent_data: dict[str, Any]
) -> list[str]:
"""Find what talent slot each letter corresponds to"""
letters = ["A", "B", "C", "D", "E", "F", "G", "H"]
letter_order: list[str] = []
for talent in cat_talents:
talent_id = talent["id"]
for letter in letters:
key = f"abilityID_{letter}"
if key not in cat_talent_data:
continue
ability_id = int(cat_talent_data[key])
if ability_id == talent_id:
letter_order.append(letter)
break
return letter_order
def get_cat_talents(
cat_talents: list[dict[str, Any]], cat_talent_data: dict[str, Any]
) -> dict[Any, Any]:
"""Get the name and max value of each talent for a specific cat"""
data: dict[Any, Any] = {}
letter_order = find_order(cat_talents, cat_talent_data)
for i, letter in enumerate(letter_order):
cat_data = {}
if letter == "F":
text_id_str = "tFxtID_F" # ponos made a typo, should be textID_F
else:
text_id_str = f"textID_{letter}"
cat_data["name"] = cat_talent_data[text_id_str].strip("\n")
cat_data["max"] = int(cat_talent_data[f"MAXLv_{letter}"])
if cat_data["max"] == 0:
cat_data["max"] = 1
data[i] = cat_data
return data
def get_talent_levels(
talent_data: dict[int, Any], talents: dict[int, Any], cat_id: int
) -> list[int]:
"""Get the level of each talent for a specific cat"""
cat_talent_data = talent_data[cat_id]
cat_talents = talents[cat_id]
cat_talent_data_formatted = get_cat_talents(cat_talents, cat_talent_data)
cat_talents_levels: list[int] = []
for talent_formatted in cat_talent_data_formatted.values():
max_val = talent_formatted["max"]
cat_talents_levels.append(max_val)
return cat_talents_levels
def max_all_talents(save_stats: dict[str, Any]):
"""Max all talents for all cats"""
max_all = (
user_input_handler.colored_input(
"Do you want to max talents or reset talents? (&m&/&r&):"
)
== "m"
)
if not max_all:
return remove_all_talents(save_stats)
talents = save_stats["talents"]
ids = cat_id_selector.select_cats(save_stats)
talent_data = get_talent_data(save_stats)
if talent_data is None:
return save_stats
cat_talents_levels: list[int] = []
for cat_id in ids:
if cat_id not in talents or cat_id not in talent_data:
continue
cat_talents = talents[cat_id]
cat_talents_levels = get_talent_levels(talent_data, talents, cat_id)
for i, cat_talent_level in enumerate(cat_talents_levels):
cat_talents[i]["level"] = cat_talent_level
save_stats["talents"] = talents
print("Successfully set talents")
return save_stats
def remove_all_talents(save_stats: dict[str, Any]) -> dict[str, Any]:
"""
Remove all talents for all cats
Args:
save_stats (dict[str, Any]): The save stats
Returns:
dict[str, Any]: The save stats
"""
talents = save_stats["talents"]
ids = cat_id_selector.select_cats(save_stats)
talent_data = get_talent_data(save_stats)
if talent_data is None:
return save_stats
cat_talents_levels: list[int] = []
for cat_id in ids:
if cat_id not in talents or cat_id not in talent_data:
continue
cat_talents = talents[cat_id]
cat_talents_levels = get_talent_levels(talent_data, talents, cat_id)
for i in range(len(cat_talents_levels)):
cat_talents[i]["level"] = 0
save_stats["talents"] = talents
print("Successfully removed talents")
return save_stats
def edit_talents_individual(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing talents"""
talents = save_stats["talents"]
ids = cat_id_selector.select_cats(save_stats)
talent_data = get_talent_data(save_stats)
if talent_data is None:
return save_stats
for cat_id in ids:
cat_talents_levels: list[int] = []
if cat_id not in talents or cat_id not in talent_data:
            # don't spam the user with messages if they selected a lot of ids at once
if len(ids) < 20:
helper.colored_text(
f"Error cat &{cat_id}& does not have any talents",
helper.RED,
helper.WHITE,
)
continue
cat_talent_data = talent_data[cat_id]
cat_talents = talents[cat_id]
cat_talent_data_formatted = get_cat_talents(cat_talents, cat_talent_data)
names: list[str] = []
maxes: list[int] = []
for talent_index, cat_talent_formatted in cat_talent_data_formatted.items():
names.append(cat_talent_formatted["name"])
cat_talents_levels.append(cat_talents[talent_index]["level"])
maxes.append(cat_talent_formatted["max"])
helper.colored_text(f"Cat &{cat_id}& is selected:")
cat_talents_levels_g = item.IntItemGroup.from_lists(
names=names,
values=cat_talents_levels,
maxes=maxes,
group_name="Talents",
)
cat_talents_levels_g.edit()
cat_talents_levels = cat_talents_levels_g.get_values()
for i, cat_talent_level in enumerate(cat_talents_levels):
cat_talents[i]["level"] = cat_talent_level
talents[cat_id] = cat_talents
save_stats["talents"] = talents
print("Successfully set talents")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/cats/talents.py
| 0.732974 | 0.187802 |
talents.py
|
pypi
|
from typing import Any, Optional
from ... import csv_handler, game_data_getter, helper
from ..levels import main_story, uncanny
TYPES = [
"Normal",
"Special",
"Rare",
"Super Rare",
"Uber Super Rare",
"Legend Rare",
]
def get_level_cap_increase_amount(cat_base_level: int) -> int:
"""
Get the amount of levels to increase the level cap by
Args:
        cat_base_level (int): The stored base level of the cat (a displayed level of 30 is stored as 29)
Returns:
int: The amount of levels to increase the level cap by
"""
return max(0, cat_base_level - 29)
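# Illustrative examples (added, not part of the original module):
#     get_level_cap_increase_amount(29) -> 0   (stored level 29 = displayed level 30)
#     get_level_cap_increase_amount(39) -> 10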
def get_unit_max_levels(is_jp: bool) -> Optional[tuple[list[int], list[int]]]:
"""
Get the max base and plus levels for all cats
Args:
is_jp (bool): If the game is in Japanese
Returns:
tuple[list[int], list[int]]: The max base and plus levels for all cats
"""
file_data = game_data_getter.get_file_latest("DataLocal", "unitbuy.csv", is_jp)
if file_data is None:
helper.error_text("Could not get unitbuy.csv")
return None
data = helper.parse_int_list_list(csv_handler.parse_csv(file_data.decode("utf-8")))
max_base_level = helper.copy_first_n(data, 50)
max_plus_level = helper.copy_first_n(data, 51)
return max_base_level, max_plus_level
def get_unit_max_level(
data: tuple[list[int], list[int]], cat_id: int
) -> tuple[int, int]:
"""
Get the max base and plus levels for a cat
Args:
data (tuple[list[int], list[int]]): The max base and plus levels for all cats
cat_id (int): The id of the cat
Returns:
tuple[int, int]: The max base and plus levels for a cat
"""
try:
return data[0][cat_id], data[1][cat_id]
except IndexError:
return 0, 0
def get_rarities(is_jp: bool) -> list[int]:
"""Get all cat ids of each rarity"""
file_data = game_data_getter.get_file_latest(
"DataLocal", "unitbuy.csv", is_jp
)
if file_data is None:
helper.error_text("Could not get unitbuy.csv")
return []
data = helper.parse_int_list_list(csv_handler.parse_csv(file_data.decode("utf-8")))
rarity_ids = helper.copy_first_n(data, 13)
return rarity_ids
def get_rarity(rarity_ids: list[int], is_jp: bool) -> list[int]:
"""Get all cat ids of a certain rarity"""
rarities = get_rarities(is_jp)
cat_ids: list[int] = []
for rarity_id in rarity_ids:
for i, rarity_val in enumerate(rarities):
if int(rarity_val) == rarity_id:
cat_ids.append(i)
return cat_ids
def is_legend(cat_id: int) -> bool:
"""
Check if a cat is a legend
Args:
cat_id (int): The id of the cat
Returns:
bool: If the cat is a legend
"""
legends = [
24,
25,
130,
172,
268,
323,
352,
383,
426,
437,
462,
464,
532,
554,
568,
613,
622,
653,
]
if cat_id in legends:
return True
return False
def is_crazed(cat_id: int) -> bool:
"""
Check if a cat is crazed
Args:
cat_id (int): The id of the cat
Returns:
bool: If the cat is crazed
"""
crazed = [
91,
92,
93,
94,
95,
96,
97,
98,
99,
]
if cat_id in crazed:
return True
return False
def get_max_cat_level_normal(save_stats: dict[str, Any]) -> int:
"""
Get the max level a normal cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
Returns:
int: The max level of a normal cat
"""
if main_story.has_cleared_chapter(save_stats, 1):
return 20
return 10
def catseyes_unlocked(save_stats: dict[str, Any]) -> bool:
"""
Check if catseyes are unlocked
Args:
save_stats (dict[str, Any]): The save stats
Returns:
bool: If catseyes are unlocked
"""
return helper.calculate_user_rank(save_stats) >= 1600
def get_max_cat_level_special(save_stats: dict[str, Any], cat_id: int) -> int:
"""
Get the max level a special cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
cat_id (int): The id of the cat
Returns:
int: The max level of a special cat
"""
legend = is_legend(cat_id)
acient_curse_clear = uncanny.is_ancient_curse_clear(save_stats)
user_rank = helper.calculate_user_rank(save_stats)
catseyes = catseyes_unlocked(save_stats)
eoc_cleared_2 = main_story.has_cleared_chapter(save_stats, 1)
if not eoc_cleared_2:
return 10
if user_rank < 1600:
return 20
if not catseyes:
return 30
if not acient_curse_clear and not legend:
return 40
if not acient_curse_clear and legend:
return 30
if acient_curse_clear and legend:
return 40
return 50
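# Note (added comment): the checks above effectively cap special cats at
# 10 / 20 / 30 / 40 / 50 depending on Empire of Cats chapter 2 progress, user rank,
# whether the cat counts as a legend, and the Uncanny "ancient curse" clear flag.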
def get_max_cat_level_rare(save_stats: dict[str, Any]) -> int:
"""
Get the max level a cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
Returns:
int: The max level of a cat
"""
user_rank = helper.calculate_user_rank(save_stats)
catseyes = catseyes_unlocked(save_stats)
cleared_eoc_2 = main_story.has_cleared_chapter(save_stats, 1)
acient_curse_clear = uncanny.is_ancient_curse_clear(save_stats)
if not cleared_eoc_2:
return 10
if user_rank < 900:
return 20
if user_rank < 1200:
return 25
if not catseyes:
return 30
if not acient_curse_clear:
return 40
return 50
def get_max_level_super_rare(save_stats: dict[str, Any], cat_id: int) -> int:
"""
Get the max level a super rare cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
cat_id (int): The id of the cat
Returns:
int: The max level of a super rare cat
"""
user_rank = helper.calculate_user_rank(save_stats)
cleared_eoc_2 = main_story.has_cleared_chapter(save_stats, 1)
acient_curse_clear = uncanny.is_ancient_curse_clear(save_stats)
crazed = is_crazed(cat_id)
catseyes = catseyes_unlocked(save_stats)
if not cleared_eoc_2:
return 10
if crazed and user_rank < 3600:
return 20
if not crazed and user_rank < 1000:
return 20
if crazed and user_rank < 3650:
return 25
if not crazed and user_rank < 1300:
return 25
if not catseyes:
return 30
if not acient_curse_clear:
return 40
return 50
def get_max_level_uber_rare(save_stats: dict[str, Any]) -> int:
"""
    Get the max level an uber rare cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
Returns:
int: The max level of a uber rare cat
"""
user_rank = helper.calculate_user_rank(save_stats)
cleared_eoc_2 = main_story.has_cleared_chapter(save_stats, 1)
acient_curse_clear = uncanny.is_ancient_curse_clear(save_stats)
catseyes = catseyes_unlocked(save_stats)
if not cleared_eoc_2:
return 10
if user_rank < 1100:
return 20
if user_rank < 1400:
return 25
if not catseyes:
return 30
if not acient_curse_clear:
return 40
return 50
def get_max_level_legend_rare(save_stats: dict[str, Any]) -> int:
"""
Get the max level a legend rare cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
Returns:
int: The max level of a legend rare cat
"""
user_rank = helper.calculate_user_rank(save_stats)
cleared_eoc_2 = main_story.has_cleared_chapter(save_stats, 1)
acient_curse_clear = uncanny.is_ancient_curse_clear(save_stats)
catseyes = catseyes_unlocked(save_stats)
if not cleared_eoc_2:
return 10
if user_rank < 1110:
return 20
if user_rank < 1410:
return 25
if not catseyes:
return 30
if not acient_curse_clear:
return 40
return 50
def get_max_level(save_stats: dict[str, Any], rarity_index: int, cat_id: int) -> int:
"""
Get the max level a cat can be upgraded to
Args:
save_stats (dict[str, Any]): The save stats
rarity_index (int): The rarity index of the cat
cat_id (int): The id of the cat
Returns:
int: The max level of a cat
"""
if rarity_index == 0:
return get_max_cat_level_normal(save_stats)
if rarity_index == 1:
return get_max_cat_level_special(save_stats, cat_id)
if rarity_index == 2:
return get_max_cat_level_rare(save_stats)
if rarity_index == 3:
return get_max_level_super_rare(save_stats, cat_id)
if rarity_index == 4:
return get_max_level_uber_rare(save_stats)
if rarity_index == 5:
return get_max_level_legend_rare(save_stats)
return 0
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/cats/cat_helper.py
| 0.85555 | 0.324075 |
cat_helper.py
|
pypi
|
from typing import Any, Optional
from ... import user_input_handler, server_handler, helper, adb_handler
from ..levels import clear_tutorial
def select(save_stats: dict[str, Any]) -> dict[str, Any]:
helper.check_changes(None)
options = [
"Download save data from the game using transfer and confirmation codes",
"Select a save file from file",
"Use adb to pull the save from a rooted device",
"Load save data from json",
]
index = (
user_input_handler.select_single(
options, title="Select an option to get save data:"
)
- 1
)
save_path = handle_index(index)
if not save_path:
return save_stats
helper.set_save_path(save_path)
data = helper.load_save_file(save_path)
save_stats = data["save_stats"]
if save_path.endswith(".json"):
input(
"Your save data seems to be in json format. Please use to import json option if you want to load json data.\nPress enter to continue...:"
)
if not clear_tutorial.is_tutorial_cleared(save_stats):
save_stats = clear_tutorial.clear_tutorial(save_stats)
return save_stats
def handle_index(index: int) -> Optional[str]:
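    """Return a save file path for the selected option, or None if the option is not recognised"""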
path = None
if index == 0:
print("Enter details for data transfer:")
path = server_handler.download_handler()
elif index == 1:
print("Select save file:")
path = helper.select_file(
"Select a save file:",
helper.get_save_file_filetype(),
initial_file=helper.get_save_path_home(),
)
elif index == 2:
print("Enter details for save pulling:")
game_versions = adb_handler.find_game_versions()
if not game_versions:
game_version = helper.ask_cc()
else:
index = (
user_input_handler.select_single(
game_versions, "Select", "Select a game version to pull from:", True
)
- 1
)
game_version = game_versions[index]
path = adb_handler.adb_pull_save_data(game_version)
elif index == 3:
print("Select save data json file")
js_path = helper.select_file(
"Select save data json file",
[("Json", "*.json")],
initial_file=helper.get_save_path_home() + ".json",
)
if js_path:
path = helper.load_json_handler(js_path)
else:
helper.colored_text("Please enter a recognised option", base=helper.RED)
return None
return path
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/save_management/load.py
| 0.617167 | 0.293886 |
load.py
|
pypi
|
from typing import Any
from ... import helper, serialise_save, patcher, adb_handler, root_handler
def save(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Serialise the save data and exit"""
save_data = serialise_save.start_serialize(save_stats)
helper.write_save_data(
save_data, save_stats["version"], helper.get_save_path(), True
)
helper.check_managed_items(save_stats, helper.get_save_path())
return save_stats
def save_save(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Serialise the save data"""
save_data = serialise_save.start_serialize(save_stats)
helper.write_save_data(
save_data, save_stats["version"], helper.get_save_path(), False
)
helper.check_managed_items(save_stats, helper.get_save_path())
return save_stats
def save_and_push(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Serialise the save data and and push it to the game"""
save_data = serialise_save.start_serialize(save_stats)
save_data = patcher.patch_save_data(save_data, save_stats["version"])
helper.write_file_bytes(helper.get_save_path(), save_data)
helper.check_managed_items(save_stats, helper.get_save_path())
if not helper.is_android():
adb_handler.adb_push_save_data(save_stats["version"], helper.get_save_path())
return save_stats
def save_and_push_rerun(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Serialise the save data and push it to the game and restart the game"""
save_data = serialise_save.start_serialize(save_stats)
save_data = patcher.patch_save_data(save_data, save_stats["version"])
helper.write_file_bytes(helper.get_save_path(), save_data)
helper.check_managed_items(save_stats, helper.get_save_path())
if not helper.is_android():
adb_handler.adb_push_save_data(save_stats["version"], helper.get_save_path())
adb_handler.rerun_game(save_stats["version"])
else:
root_handler.rerun_game(save_stats["version"])
return save_stats
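# Note on the handlers above: save() and save_save() differ only in the final flag passed
# to helper.write_save_data; save_and_push() additionally patches the data and pushes it
# over adb when not running on Android, and save_and_push_rerun() also restarts the game
# (through adb_handler off-device, or root_handler when running on Android).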
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/save_management/save.py
| 0.737253 | 0.188063 |
save.py
|
pypi
|
from typing import Any
from ... import helper, serialise_save, server_handler, user_info
def upload_metadata(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Upload the metadata to the game server"""
_, save_stats = server_handler.meta_data_upload_handler(
save_stats, helper.get_save_path()
)
return save_stats
def set_managed_items(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Set the managed items for the save stats"""
data = server_handler.check_gen_token(save_stats)
token = data["token"]
save_stats = data["save_stats"]
if token is None:
helper.colored_text("Error generating token")
return save_stats
server_handler.update_managed_items(save_stats["inquiry_code"], token, save_stats)
return save_stats
def handle_upload_error(inquiry_code: str):
"""Show an error message"""
info = user_info.UserInfo(inquiry_code)
info.set_auth_token("")
info.set_password("")
helper.colored_text(
"Error uploading save data\nPlease try again. If error persists, please report this in #bug-reports"
)
def save_and_upload(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Serialise the save data, and upload it to the game server"""
save_data = serialise_save.start_serialize(save_stats)
save_data = helper.write_save_data(
save_data, save_stats["version"], helper.get_save_path(), False
)
upload_data = server_handler.upload_handler(save_stats, helper.get_save_path())
if upload_data is None:
handle_upload_error(save_stats["inquiry_code"])
return save_stats
upload_data, save_stats = upload_data
inquiry_code = save_stats["inquiry_code"]
if upload_data is None:
handle_upload_error(inquiry_code)
return save_stats
if "transferCode" not in upload_data:
handle_upload_error(inquiry_code)
return save_stats
else:
helper.colored_text(f"Transfer code : &{upload_data['transferCode']}&")
helper.colored_text(f"Confirmation Code : &{upload_data['pin']}&")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/save_management/server_upload.py
| 0.664105 | 0.186817 |
server_upload.py
|
pypi
|
from typing import Any, Optional
from ... import game_data_getter, helper, item, user_input_handler
def get_boundaries(is_jp: bool) -> Optional[list[int]]:
"""
Returns the xp requirements for each level
Args:
is_jp (bool): If the save file is japanese
Returns:
list[int]: The xp requirements for each level
"""
file_data = game_data_getter.get_file_latest("resLocal", "jinja_level.csv", is_jp)
if file_data is None:
helper.error_text("Failed to get jinja level data")
return None
boundaries = file_data.decode("utf-8").splitlines()
xp_requirements: list[int] = []
counter = 0
for line in boundaries:
requirement = int(line.split(helper.get_text_splitter(is_jp))[0])
counter += requirement
xp_requirements.append(counter)
return xp_requirements
def get_level_from_xp(shrine_xp: int, is_jp: bool) -> Optional[dict[str, Any]]:
"""
Returns the level, max level and max xp from the given xp
Args:
shrine_xp (int): The xp of the shrine
is_jp (bool): If the save file is japanese
Returns:
dict[str, Any]: The level, max level, and max xp
"""
xp_requirements = get_boundaries(is_jp)
if xp_requirements is None:
return None
level = 1
for requirement in xp_requirements:
if shrine_xp >= requirement:
level += 1
if level > len(xp_requirements):
level = len(xp_requirements)
return {
"level": level,
"max_level": len(xp_requirements),
"max_xp": xp_requirements[-2],
}
def get_xp_from_level(level: int, is_jp: bool) -> Optional[int]:
"""
Returns the xp required to reach the given level
Returns:
_type_: int
"""
xp_requirements = get_boundaries(is_jp)
if xp_requirements is None:
return None
if level <= 1:
shrine_xp = 0
else:
shrine_xp = xp_requirements[level - 2]
return shrine_xp
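# Illustrative sketch only: shows how the cumulative xp boundaries map to shrine levels.
# The boundary values below are made up; real values come from jinja_level.csv via
# get_boundaries().
def _shrine_level_example() -> None:
    xp_requirements = [10, 30, 60]  # assumed cumulative xp needed to reach levels 2-4
    for level in range(1, 5):
        # level 1 needs 0 xp; level n (n > 1) needs xp_requirements[n - 2] xp,
        # mirroring get_xp_from_level above
        needed = 0 if level <= 1 else xp_requirements[level - 2]
        print(f"level {level} -> {needed} xp")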
def edit_shrine_xp(save_stats: dict[str, Any]) -> dict[str, Any]:
"""
Edit the shrine xp of the save file
Args:
save_stats (dict[str, Any]): The save file stats
Returns:
dict[str, Any]: The edited save file stats
"""
shrine_xp = save_stats["cat_shrine"]["xp_offering"]
data = get_level_from_xp(shrine_xp, helper.check_data_is_jp(save_stats))
if data is None:
return save_stats
level = data["level"]
helper.colored_text(f"Shrine XP: &{shrine_xp}&\nLevel: &{level}&")
raw = (
user_input_handler.colored_input(
"Do you want to edit raw xp(&1&) or the level(&2&)?:"
)
== "1"
)
if raw:
cat_shrine_xp = item.IntItem(
name="Shrine XP",
value=item.Int(shrine_xp),
max_value=None,
)
cat_shrine_xp.edit()
shrine_xp = int(cat_shrine_xp.get_value())
else:
shrine_level = item.IntItem(
name="Shrine Level",
value=item.Int(level),
max_value=data["max_level"],
)
shrine_level.edit()
shrine_xp = get_xp_from_level(
int(shrine_level.get_value()), helper.check_data_is_jp(save_stats)
)
if shrine_xp is None:
return save_stats
shrine_data = get_level_from_xp(shrine_xp, helper.check_data_is_jp(save_stats))
if shrine_data is None:
return save_stats
shrine_level = shrine_data["level"]
if shrine_level > data["max_level"]:
shrine_level = data["max_level"]
save_stats["shrine_dialogs"]["Value"] = shrine_level - 1 # Level up dialog
save_stats["shrine_gone"] = 0
save_stats["cat_shrine"]["stamp_1"] = 0
save_stats["cat_shrine"]["stamp_2"] = 0
save_stats["cat_shrine"]["xp_offering"] = shrine_xp
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/other/cat_shrine.py
| 0.865253 | 0.321846 |
cat_shrine.py
|
pypi
|
from typing import Any, Optional
from ... import user_input_handler, game_data_getter, csv_handler, helper
def get_mission_conditions(is_jp: bool) -> Optional[dict[Any, Any]]:
"""Get the mission data and what you need to do to complete it"""
file_data = game_data_getter.get_file_latest(
"DataLocal", "Mission_Condition.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get mission conditions")
return None
mission_condition_data = file_data.decode("utf-8")
mission_conditions_list = helper.parse_int_list_list(
csv_handler.parse_csv(mission_condition_data)
)
mission_conditions: dict[Any, Any] = {}
for line in mission_conditions_list[1:]:
mission_id = line[0]
mission_conditions[mission_id] = {
"mission_type": line[1],
"conditions_type": line[2],
"progress_count": line[3],
"conditions_value": line[4:],
}
return mission_conditions
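# Illustrative shape of the returned dict (ids and values made up):
# {1: {"mission_type": 0, "conditions_type": 2, "progress_count": 3,
#      "conditions_value": [10, 0]}}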
def get_mission_names(is_jp: bool) -> Optional[dict[int, Any]]:
"""Get all mission names"""
file_data = game_data_getter.get_file_latest("resLocal", "Mission_Name.csv", is_jp)
if file_data is None:
helper.error_text("Failed to get mission names")
return None
mission_name = file_data.decode("utf-8")
mission_name_list = mission_name.split("\n")
mission_names: dict[int, Any] = {}
for mission_name in mission_name_list:
line_data = mission_name.split(helper.get_text_splitter(is_jp))
if helper.check_int(line_data[0]) is None:
continue
mission_id = int(line_data[0])
name = line_data[1]
name = name.replace("&", "\\&")
mission_names[mission_id] = name
return mission_names
def get_mission_names_from_ids(
ids: list[int], mission_names: dict[int, Any]
) -> list[str]:
"""Get the mission names from the ids"""
names: list[str] = []
for mission_id in ids:
if mission_id in mission_names:
names.append(mission_names[mission_id])
return names
def get_mission_ids(
missions: dict[str, Any], conditions: dict[int, Any], names: dict[int, Any]
) -> tuple[list[int], list[str]]:
"""Get the mission ids and names from the conditions"""
mission_ids_to_use: list[int] = []
for mission_id in missions["states"]:
if mission_id in conditions:
mission_ids_to_use.append(mission_id)
names_to_use = get_mission_names_from_ids(mission_ids_to_use, names)
return mission_ids_to_use, names_to_use
def set_missions(
missions: dict[str, Any],
ids: list[int],
conditions: dict[Any, Any],
mission_ids_to_use: list[int],
re_claim: bool,
) -> dict[str, Any]:
"""Set the missions"""
for mission_id in ids:
mission_id = helper.clamp(mission_id, 1, len(mission_ids_to_use))
mission_id = mission_ids_to_use[mission_id]
if re_claim:
claim = True
elif not re_claim and missions["states"][mission_id] != 4:
claim = True
else:
claim = False
if claim:
missions["states"][mission_id] = 2
missions["requirements"][mission_id] = conditions[mission_id][
"progress_count"
]
return missions
def edit_missions(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editting catnip missions"""
missions = save_stats["missions"]
names = get_mission_names(helper.check_data_is_jp(save_stats))
conditions = get_mission_conditions(helper.check_data_is_jp(save_stats))
if names is None or conditions is None:
return save_stats
mission_ids_to_use, names_to_use = get_mission_ids(missions, conditions, names)
ids = user_input_handler.select_not_inc(
options=names_to_use,
mode="complete",
)
re_claim = (
user_input_handler.colored_input(
"Do you want to re-complete already claimed missions &(1)& (Allows you to get the rewards again) or only complete non-claimed missions&(2)&:"
)
== "1"
)
missions = set_missions(missions, ids, conditions, mission_ids_to_use, re_claim)
save_stats["missions"] = missions
print("Successfully completed missions")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/other/missions.py
| 0.740925 | 0.260251 |
missions.py
|
pypi
|
from typing import Any
from ... import csv_handler, game_data_getter, helper, user_input_handler
def get_item_names(is_jp: bool) -> list[str]:
"""Get the item names
Args:
is_jp (bool): If the data is for jp
Returns:
list[str]: The item names
"""
item_names = game_data_getter.get_file_latest(
"resLocal", "GatyaitemName.csv", is_jp
)
if item_names is None:
helper.error_text("Failed to get item names")
return []
item_names = csv_handler.parse_csv(
item_names.decode("utf-8"),
delimeter=helper.get_text_splitter(is_jp),
)
names: list[str] = []
for item in item_names:
names.append(item[0])
return names
def get_scheme_data(is_jp: bool) -> list[list[int]]:
"""Get the scheme data
Args:
is_jp (bool): If the data is for jp
Returns:
list[list[int]]: The scheme data
"""
scheme_data = game_data_getter.get_file_latest(
"DataLocal", "schemeItemData.tsv", is_jp
)
if scheme_data is None:
helper.error_text("Failed to get scheme data")
return []
scheme_data_data = helper.parse_int_list_list(
csv_handler.parse_csv(
scheme_data.decode("utf-8"),
delimeter="\t",
)
)
return scheme_data_data
def get_scheme_names(is_jp: bool, scheme_data: list[list[int]]) -> dict[int, str]:
"""Get the scheme names"""
file_data = game_data_getter.get_file_latest("resLocal", "localizable.tsv", is_jp)
if file_data is None:
helper.error_text("Failed to get scheme names")
return {}
localizable = csv_handler.parse_csv(
file_data.decode("utf-8"),
delimeter="\t",
)
names: dict[int, str] = {}
for scheme in scheme_data[1:]:
scheme_id = scheme[0]
for name in localizable:
scheme_str = f"scheme_popup_{scheme_id}"
if name[0] == scheme_str:
scheme_name = name[1].replace("<flash>", "").replace("</flash>", "")
names[scheme_id] = scheme_name
break
return names
def get_cat_name(cat_id: int, is_jp: bool, cc: str) -> str:
"""Get the cat name"""
file_data = game_data_getter.get_file_latest(
"resLocal", f"Unit_Explanation{cat_id+1}_{cc}.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get cat names")
return ""
cat_name = csv_handler.parse_csv(
file_data.decode("utf-8"),
delimeter=helper.get_text_splitter(is_jp),
)
return cat_name[0][0]
def edit_scheme_data(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing scheme data"""
is_jp = helper.check_data_is_jp(save_stats)
data = get_scheme_data(is_jp)
names = get_scheme_names(is_jp, data)
item_names = get_item_names(is_jp)
options: list[str] = []
for scheme in data[1:]:
scheme_id = scheme[0]
is_cat = scheme[2] == 1
item_id = scheme[3]
amount = scheme[4]
try:
scheme_name = names[scheme_id]
except KeyError:
continue
string = "\n\t"
if is_cat:
cat_name = get_cat_name(item_id, is_jp, helper.get_lang(is_jp))
string += scheme_name.replace("%@", cat_name)
else:
try:
item_name = item_names[item_id]
except IndexError:
continue
string += scheme_name
first_index = string.find("%@")
second_index = string.find("%@", first_index + 1)
string = (
string[:first_index]
+ str(amount)
+ " "
+ item_name
+ string[second_index + 2 :]
)
string = string.replace("<br>", "\n\t")
options.append(string)
scheme_ids = user_input_handler.select_not_inc(options, "get")
scheme_data = save_stats["item_schemes"]
for scheme_index in scheme_ids:
try:
scheme_id = data[scheme_index + 1][0]
except IndexError:
continue
obtain_ids: list[int] = scheme_data["to_obtain_ids"]
obtain_ids.append(scheme_id)
received_ids: list[int] = scheme_data["received_ids"]
if scheme_id in received_ids:
received_ids.remove(scheme_id)
scheme_data["to_obtain_ids"] = obtain_ids
scheme_data["received_ids"] = received_ids
save_stats["item_schemes"] = scheme_data
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/other/scheme_item.py
| 0.794305 | 0.240351 |
scheme_item.py
|
pypi
|
import json
from typing import Any, Optional
from SC_Editor import game_data_getter, csv_handler, helper, user_input_handler
class RawOrbInfo:
def __init__(
self,
orb_id: int,
grade_id: int,
effect_id: int,
value: list[int],
attribute_id: int,
):
"""Initialize the RawOrbInfo class
Args:
orb_id (int): The id of the orb
grade_id (int): The id of the grade
effect_id (int): The id of the effect
            value (list[int]): The value(s) associated with the effect (exact meaning unclear)
attribute_id (int): The id of the attribute
"""
self.orb_id = orb_id
self.grade_id = grade_id
self.effect_id = effect_id
self.value = value
self.attribute_id = attribute_id
class OrbInfo:
def __init__(
self,
raw_orb_info: RawOrbInfo,
grade: str,
attribute: str,
effect: str,
):
"""Initialize the OrbInfo class
Args:
raw_orb_info (RawOrbInfo): The raw orb info
grade (str): The grade of the orb (e.g. "S")
attribute (str): The attribute of the orb (e.g. "Red")
effect (str): The effect of the orb (e.g. "Attack Up %@: %@")
"""
self.raw_orb_info = raw_orb_info
self.grade = grade
self.attribute = attribute
self.effect = effect
def __str__(self) -> str:
"""Get the string representation of the OrbInfo
Returns:
str: The string representation of the OrbInfo
"""
effect_text = self.effect.replace("%@", "{}")
text = effect_text.format(self.grade, self.attribute).strip()
return text
def to_colortext(self) -> str:
"""Get the string representation of the OrbInfo with color
Returns:
str: The string representation of the OrbInfo with color
"""
effect_text = self.effect.replace("%@", "&{}&")
text = effect_text.format(self.grade, self.attribute).strip()
return text
@staticmethod
def create_unknown(orb_id: int) -> "OrbInfo":
"""Create an unknown OrbInfo
Args:
orb_id (int): The id of the orb
Returns:
OrbInfo: The unknown OrbInfo
"""
return OrbInfo(
RawOrbInfo(orb_id, 0, 0, [], 0),
"Unknown",
"",
"%@:%@",
)
class OrbInfoList:
equipment_data_file_name = "DataLocal/equipmentlist.json"
grade_list_file_name = "DataLocal/equipmentgrade.csv"
attribute_list_file_name = "resLocal/attribute_explonation.tsv"
effect_list_file_name = "resLocal/equipment_explonation.tsv"
def __init__(self, orb_info_list: list[OrbInfo]):
"""Initialize the OrbInfoList class
Args:
orb_info_list (list[OrbInfo]): The list of OrbInfo
"""
self.orb_info_list = orb_info_list
@staticmethod
def create(is_jp: bool) -> Optional["OrbInfoList"]:
"""Create an OrbInfoList
Args:
is_jp (bool): Whether the game is in Japanese
Returns:
Optional[OrbInfoList]: The OrbInfoList
"""
json_data_file = game_data_getter.get_file_latest_path(
OrbInfoList.equipment_data_file_name, is_jp
)
grade_list_file = game_data_getter.get_file_latest_path(
OrbInfoList.grade_list_file_name, is_jp
)
attribute_list_file = game_data_getter.get_file_latest_path(
OrbInfoList.attribute_list_file_name, is_jp
)
equipment_list_file = game_data_getter.get_file_latest_path(
OrbInfoList.effect_list_file_name, is_jp
)
if (
json_data_file is None
or grade_list_file is None
or attribute_list_file is None
or equipment_list_file is None
):
return None
json_data = json_data_file.decode("utf-8")
grade_list = grade_list_file.decode("utf-8")
attribute_list = attribute_list_file.decode("utf-8")
equipment_list = equipment_list_file.decode("utf-8")
raw_orbs = OrbInfoList.parse_json_data(json_data)
orbs = OrbInfoList.load_names(
raw_orbs, grade_list, attribute_list, equipment_list
)
return OrbInfoList(orbs)
@staticmethod
def parse_json_data(json_data: str) -> list[RawOrbInfo]:
"""Parse the json data of the equipment
Args:
json_data (str): The json data
Returns:
list[RawOrbInfo]: The list of RawOrbInfo
"""
data: dict[str, Any] = json.loads(json_data)
orb_info_list: list[RawOrbInfo] = []
for id, orb in enumerate(data["ID"]):
grade_id = orb["gradeID"]
content = orb["content"]
value = orb["value"]
attribute = orb["attribute"]
orb_info_list.append(RawOrbInfo(id, grade_id, content, value, attribute))
return orb_info_list
@staticmethod
def load_names(
raw_orb_info: list[RawOrbInfo],
grade_data: str,
attribute_data: str,
effect_data: str,
) -> list[OrbInfo]:
"""Load the names of the equipment
Args:
raw_orb_info (list[RawOrbInfo]): The list of RawOrbInfo
grade_data (str): Raw data of the grade list
attribute_data (str): Raw data of the attribute list
effect_data (str): Raw data of the effect list
Returns:
list[OrbInfo]: The list of OrbInfo
"""
grade_csv = csv_handler.parse_csv(grade_data)
attribute_tsv = csv_handler.parse_csv(attribute_data, "\t")
effect_csv = csv_handler.parse_csv(effect_data, "\t")
orb_info_list: list[OrbInfo] = []
for orb in raw_orb_info:
grade = grade_csv[orb.grade_id][3]
attribute = attribute_tsv[orb.attribute_id][0]
effect = effect_csv[orb.effect_id][0]
orb_info_list.append(OrbInfo(orb, grade, attribute, effect))
return orb_info_list
def get_orb_info(self, orb_id: int) -> Optional[OrbInfo]:
"""Get the OrbInfo from the id
Args:
orb_id (int): The id of the orb
Returns:
Optional[OrbInfo]: The OrbInfo
"""
if orb_id >= len(self.orb_info_list):
return None
return self.orb_info_list[orb_id]
def get_orb_from_components(
self,
grade: str,
attribute: str,
effect: str,
) -> Optional[OrbInfo]:
"""Get the OrbInfo from the components
Args:
grade (str): The grade of the orb
attribute (str): The attribute of the orb
effect (str): The effect of the orb
Returns:
Optional[OrbInfo]: The OrbInfo
"""
for orb in self.orb_info_list:
if (
orb.grade == grade
and orb.attribute == attribute
and orb.effect == effect
):
return orb
return None
def get_orbs_from_component_fuzzy(
self,
grade: str,
attribute: str,
effect: str,
) -> list[OrbInfo]:
"""Get the OrbInfo from the components matching the first word of the effect and lowercased
Args:
grade (str): The grade of the orb
attribute (str): The attribute of the orb
effect (str): The effect of the orb
Returns:
list[OrbInfo]: The list of OrbInfo
"""
orbs: list[OrbInfo] = []
for orb in self.orb_info_list:
if (
(orb.grade.lower() == grade.lower() or grade == "*")
and (orb.attribute.lower() == attribute.lower() or attribute == "*")
and (
orb.effect.lower().split(" ")[0] == effect.lower().split(" ")[0]
or effect == "*"
)
):
orbs.append(orb)
return orbs
def get_all_grades(self) -> list[str]:
"""Get all the grades
Returns:
list[str]: The list of grades
"""
return list(set([orb.grade for orb in self.orb_info_list]))
def get_all_attributes(self) -> list[str]:
"""Get all the attributes
Returns:
list[str]: The list of attributes
"""
return list(set([orb.attribute for orb in self.orb_info_list]))
def get_all_effects(self) -> list[str]:
"""Get all the effects
Returns:
list[str]: The list of effects
"""
return list(set([orb.effect for orb in self.orb_info_list]))
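# Illustrative sketch only: demonstrates the fuzzy matching rules above with hand-made
# OrbInfo objects; real data comes from OrbInfoList.create().
def _fuzzy_match_example() -> list[OrbInfo]:
    attack = OrbInfo(RawOrbInfo(0, 4, 0, [], 0), "S", "Red", "Attack Up %@: %@")
    defense = OrbInfo(RawOrbInfo(1, 4, 1, [], 0), "S", "Red", "Defense Up %@: %@")
    orb_list = OrbInfoList([attack, defense])
    # Grades and attributes are compared case-insensitively, "*" acts as a wildcard and
    # only the first word of the effect is compared, so this returns just the attack orb.
    return orb_list.get_orbs_from_component_fuzzy("s", "red", "attack up")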
class SaveOrb:
"""Represents a saved orb in the save file"""
def __init__(self, orb: OrbInfo, count: int):
"""Initialize the SaveOrb class
Args:
orb (OrbInfo): The OrbInfo
count (int): The amount of the orb
"""
self.count = count
self.orb = orb
class SaveOrbs:
def __init__(
self,
orbs: dict[int, SaveOrb],
orb_info_list: OrbInfoList,
):
"""Initialize the SaveOrbs class
Args:
orbs (dict[int, SaveOrb]): The orbs
orb_info_list (OrbInfoList): The orb info list
"""
self.orbs = orbs
self.orb_info_list = orb_info_list
@staticmethod
def from_save_stats(save_stats: dict[str, Any]) -> Optional["SaveOrbs"]:
"""Create a SaveOrbs from the save stats
Args:
save_stats (dict[str, Any]): The save stats
Returns:
Optional[SaveOrbs]: The SaveOrbs
"""
is_jp = helper.is_jp(save_stats)
orb_info_list = OrbInfoList.create(is_jp)
if orb_info_list is None:
return None
orbs: dict[int, SaveOrb] = {}
for orb_id, amount in save_stats["talent_orbs"].items():
try:
orb_info = orb_info_list.orb_info_list[int(orb_id)]
except IndexError:
orb_info = OrbInfo.create_unknown(int(orb_id))
orbs[int(orb_id)] = SaveOrb(orb_info, amount)
return SaveOrbs(orbs, orb_info_list)
def print(self):
"""Print the orbs as a formatted list"""
self.sort_orbs()
helper.colored_text(
f"Total current orbs: &{sum([orb.count for orb in self.orbs.values()])}&"
)
helper.colored_text(f"Total current types: &{len(self.orbs)}&")
print("Current Orbs:")
for orb in self.orbs.values():
helper.colored_text(f"&{orb.count}& {orb.orb.to_colortext()}")
def sort_orbs(self):
"""Sort the orbs by attribute, effect, grade and id in that order with attribute being the most important"""
orbs = list(self.orbs.values())
orbs.sort(key=lambda orb: orb.orb.raw_orb_info.orb_id)
orbs.sort(key=lambda orb: orb.orb.raw_orb_info.grade_id)
orbs.sort(key=lambda orb: orb.orb.raw_orb_info.effect_id)
orbs.sort(key=lambda orb: orb.orb.raw_orb_info.attribute_id)
def edit(self):
"""Edit the orbs"""
self.print()
all_grades = self.orb_info_list.get_all_grades()
all_grades = [grade.lower() for grade in all_grades]
all_grades.sort()
all_attributes = self.orb_info_list.get_all_attributes()
all_attributes = [attribute.lower() for attribute in all_attributes]
all_attributes.sort()
all_effects = self.orb_info_list.get_all_effects()
all_effects = [effect.lower().split(" ")[0] for effect in all_effects]
all_effects.sort()
all_grades_str = "&,& ".join(all_grades)
all_attributes_str = "&,& ".join(all_attributes)
all_effects_str = "&,& ".join(all_effects)
help_text = f"""Help:
Available grades: &{all_grades_str}&
Available attributes: &{all_attributes_str}&
Available effects: &{all_effects_str}&
&Note: Not all grades and effects will be available for all attributes.&
Example inputs:
&aku& - selects &all aku& orbs
&red s& - selects &all red &orbs with &s& grade
&alien d attack& - selects the &alien &orb with &d& grade that increases &attack&.
These can be switched around, so you can also do stuff like:
&d alien attack&
&s red&
&attack d alien&
If you want to select &all& orbs then input:
&*&
If you want to do &multiple selections& then separate them with a &comma& like this:
&s black tough&,&d red massive&,&floating&
"""
helper.colored_text(help_text)
orb_input_selection = (
input("Select orbs:").lower().replace("angle", "angel").split(",")
)
orb_selection: list[OrbInfo] = []
for orb_input in orb_input_selection:
grade = None
attribute = None
effect = None
orb_input = orb_input.strip()
parts = orb_input.split(" ")
parts = [part for part in parts if part != ""]
if len(parts) == 0:
continue
if parts[0] == "*":
orb_selection = self.orb_info_list.orb_info_list
break
for available_grade in all_grades:
if available_grade in parts:
grade = available_grade
break
for available_attribute in all_attributes:
if available_attribute in parts:
attribute = available_attribute
break
for available_effect in all_effects:
if available_effect in parts:
effect = available_effect
break
if grade is None:
grade = "*"
if attribute is None:
attribute = "*"
if effect is None:
effect = "*"
orbs = self.orb_info_list.get_orbs_from_component_fuzzy(
grade, attribute, effect
)
orb_selection.extend(orbs)
orb_selection = list(set(orb_selection))
orb_selection.sort(key=lambda orb: orb.raw_orb_info.orb_id)
orb_selection.sort(key=lambda orb: orb.raw_orb_info.grade_id)
orb_selection.sort(key=lambda orb: orb.raw_orb_info.effect_id)
orb_selection.sort(key=lambda orb: orb.raw_orb_info.attribute_id)
print("Selected orbs:")
for orb in orb_selection:
helper.colored_text((orb.to_colortext()))
individual = (
input("Edit orb amounts individually? or all at once? (i/a)") == "i"
)
if individual:
for orb in orb_selection:
orb_id = orb.raw_orb_info.orb_id
try:
orb_count = self.orbs[orb_id].count
except KeyError:
orb_count = 0
orb_count = user_input_handler.colored_input(
f"What do you want to set the amount of {orb.to_colortext()} to? (currently &{orb_count}&) (&q& to exit):"
)
if orb_count == "q":
break
orb_count = helper.check_int_max(orb_count)
if orb_count is None:
continue
self.orbs[orb_id] = SaveOrb(orb, orb_count)
else:
orb_count = user_input_handler.get_int(
"What do you want to set the amount of the selected orbs to?:"
)
orb_count = helper.clamp_int(orb_count)
for orb in orb_selection:
orb_id = orb.raw_orb_info.orb_id
self.orbs[orb_id] = SaveOrb(orb, orb_count)
self.print()
def save(self, save_stats: dict[str, Any]):
"""Save the orbs to the save_stats
Args:
save_stats (dict[str, Any]): The save_stats to save the orbs to
"""
for orb_id, orb in self.orbs.items():
save_stats["talent_orbs"][orb_id] = orb.count
def edit_talent_orbs(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Edit the talent orbs
Args:
save_stats (dict[str, Any]): The save_stats to edit the orbs in
Returns:
dict[str, Any]: The edited save_stats
"""
save_orbs = SaveOrbs.from_save_stats(save_stats)
if save_orbs is None:
print("Failed to load orbs")
return save_stats
save_orbs.edit()
save_orbs.save(save_stats)
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/basic/talent_orbs_new.py
| 0.869936 | 0.156652 |
talent_orbs_new.py
|
pypi
|
from typing import Any
from ... import item, managed_item
def edit_cat_food(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing cat food"""
cat_food = item.IntItem(
name="고양이 통조림",
value=item.Int(save_stats["cat_food"]["Value"]),
max_value=45000,
bannable=item.Bannable(
managed_item.ManagedItemType.CATFOOD, save_stats["inquiry_code"]
),
)
cat_food.edit()
save_stats["cat_food"]["Value"] = cat_food.get_value()
return save_stats
def edit_xp(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing xp"""
experience = item.IntItem(
name="XP",
value=item.Int(save_stats["xp"]["Value"]),
max_value=99999999,
)
experience.edit()
save_stats["xp"]["Value"] = experience.get_value()
return save_stats
def edit_normal_tickets(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing normal tickets"""
normal_tickets = item.IntItem(
name="냥코 티켓",
value=item.Int(save_stats["normal_tickets"]["Value"]),
max_value=2999,
)
normal_tickets.edit()
save_stats["normal_tickets"]["Value"] = normal_tickets.get_value()
return save_stats
def edit_rare_tickets(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing rare tickets"""
rare_tickets = item.IntItem(
name="레어 티켓",
value=item.Int(save_stats["rare_tickets"]["Value"]),
max_value=299,
bannable=item.Bannable(
inquiry_code=save_stats["inquiry_code"],
work_around='밴 위험이 높으니 신중히 사용하세요.',
type=managed_item.ManagedItemType.RARE_TICKET,
),
)
rare_tickets.edit()
save_stats["rare_tickets"]["Value"] = rare_tickets.get_value()
return save_stats
def edit_platinum_tickets(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing platinum tickets"""
platinum_tickets = item.IntItem(
name="플래티넘 티켓",
value=item.Int(save_stats["platinum_tickets"]["Value"]),
max_value=9,
bannable=item.Bannable(
inquiry_code=save_stats["inquiry_code"],
work_around="밴 위험이 높으니 신중히 사용하세요.",
type=managed_item.ManagedItemType.PLATINUM_TICKET,
),
)
platinum_tickets.edit()
save_stats["platinum_tickets"]["Value"] = platinum_tickets.get_value()
return save_stats
def edit_platinum_shards(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing platinum shards"""
ticket_amount = save_stats["platinum_tickets"]["Value"]
max_value = 99 - (ticket_amount * 10)
platinum_shards = item.IntItem(
name="플래티넘 조각",
value=item.Int(save_stats["platinum_shards"]["Value"]),
max_value=max_value,
)
platinum_shards.edit()
save_stats["platinum_shards"]["Value"] = platinum_shards.get_value()
return save_stats
def edit_np(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing np"""
nyanko_points = item.IntItem(
name="NP",
value=item.Int(save_stats["np"]["Value"]),
max_value=9999,
)
nyanko_points.edit()
save_stats["np"]["Value"] = nyanko_points.get_value()
return save_stats
def edit_leadership(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing leadership"""
leadership = item.IntItem(
name="리더쉽",
value=item.Int(save_stats["leadership"]["Value"]),
max_value=9999,
)
leadership.edit()
save_stats["leadership"]["Value"] = leadership.get_value()
return save_stats
def edit_battle_items(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing battle items"""
battle_items = item.IntItemGroup.from_lists(
names=[
"스피드 업",
"트레져 레이더",
"고양이 도령",
"야옹컴",
"고양이 박사",
"스냥이퍼",
],
values=save_stats["battle_items"],
maxes=9999,
group_name="배틀 아이템",
)
battle_items.edit()
save_stats["battle_items"] = battle_items.get_values()
return save_stats
def edit_engineers(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing ototo engineers"""
engineers = item.IntItem(
name="오토토 조수",
value=item.Int(save_stats["engineers"]["Value"]),
max_value=5,
)
engineers.edit()
save_stats["engineers"]["Value"] = engineers.get_value()
return save_stats
def edit_catamins(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing catamins"""
catamins = item.IntItemGroup.from_lists(
names=[
"드링크 A",
"드링크 B",
"드링크 C",
],
values=save_stats["catamins"],
maxes=9999,
group_name="고양이 드링크",
)
catamins.edit()
save_stats["catamins"] = catamins.get_values()
return save_stats
def edit_inquiry_code(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing the inquiry code"""
print(
"경고: 문의 코드 편집은 자신이 무엇을 하고 있는지 알고 있는 경우에만 수행해야 합니다! 올바르게 수행하지 않으면 게임 내에서 다른 오류가 발생합니다!"
)
inquiry_code = item.StrItem(
name="문의코드",
value=save_stats["inquiry_code"],
)
inquiry_code.edit()
save_stats["inquiry_code"] = inquiry_code.get_value()
return save_stats
def edit_rare_gacha_seed(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing the rare gacha seed"""
rare_gacha_seed = item.IntItem(
name="레어 뽑기 시드",
value=item.Int(save_stats["rare_gacha_seed"]["Value"], signed=False),
max_value=None,
)
rare_gacha_seed.edit()
save_stats["rare_gacha_seed"]["Value"] = rare_gacha_seed.get_value()
return save_stats
def edit_unlocked_slots(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing the amount of unlocked slots"""
unlocked_slots = item.IntItem(
name="캐릭터 편성",
value=item.Int(save_stats["unlocked_slots"]["Value"]),
max_value=len(save_stats["slots"]),
)
unlocked_slots.edit()
save_stats["unlocked_slots"]["Value"] = unlocked_slots.get_value()
return save_stats
def edit_token(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing the password-refresh-token"""
print(
"경고: 토큰 편집은 수행 중인 작업을 알고 있는 경우에만 수행해야 합니다! 올바르게 수행하지 않으면 게임 내에서 다른 오류가 발생합니다!"
)
token = item.StrItem(
name="토큰",
value=save_stats["token"],
)
token.edit()
save_stats["token"] = token.get_value()
return save_stats
def edit_restart_pack(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for giving the restart pack"""
save_stats["restart_pack"]["Value"] = 1
print("Successfully gave the restart pack")
return save_stats
def edit_challenge_battle(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing the score of the challenge battle"""
challenge_battle = item.IntItem(
name="챌린지 배틀",
value=item.Int(save_stats["challenge"]["Score"]["Value"]),
max_value=None,
)
challenge_battle.edit()
save_stats["challenge"]["Score"]["Value"] = challenge_battle.get_value()
save_stats["challenge"]["Cleared"]["Value"] = 1
return save_stats
def edit_legend_tickets(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing legend tickets"""
legend_tickets = item.IntItem(
name="레전드 티켓",
value=item.Int(save_stats["legend_tickets"]["Value"]),
max_value=4,
bannable=item.Bannable(
inquiry_code=save_stats["inquiry_code"],
type=managed_item.ManagedItemType.LEGEND_TICKET,
),
)
legend_tickets.edit()
save_stats["legend_tickets"]["Value"] = legend_tickets.get_value()
return save_stats
def edit_dojo_score(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing the dojo score"""
if not save_stats["dojo_data"]:
save_stats["dojo_data"] = {0: {0: 0}}
dojo_score = item.IntItem(
name="도장 점수",
value=item.Int(save_stats["dojo_data"][0][0]),
max_value=None,
)
dojo_score.edit()
save_stats["dojo_data"][0][0] = dojo_score.get_value()
return save_stats
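# Illustrative sketch only: every handler above repeats the same IntItem pattern. This
# helper is a hypothetical generalisation, not part of the editor; key, name and cap are
# whatever the caller passes in.
def _edit_int_field_example(
    save_stats: dict[str, Any], key: str, name: str, cap: int
) -> dict[str, Any]:
    field = item.IntItem(
        name=name,
        value=item.Int(save_stats[key]["Value"]),
        max_value=cap,
    )
    field.edit()
    save_stats[key]["Value"] = field.get_value()
    return save_stats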
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/basic/basic_items.py
| 0.656878 | 0.34183 |
basic_items.py
|
pypi
|
from typing import Any
from ... import helper, user_input_handler
def edit_all_orbs(save_stats: dict[str, Any], orb_list: list[str]) -> dict[str, Any]:
"""Handler for editing all talent orbs"""
val = user_input_handler.colored_input(
"What do you want to set the value of all talent orbs to?:"
)
val = helper.check_int_max(val)
if val is None:
print("Error please enter a number")
return save_stats
    for orb_id in range(len(orb_list)):
        save_stats["talent_orbs"][orb_id] = val
helper.colored_text(f"Set all talent orbs to &{val}&")
return save_stats
def edit_talent_orbs(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing talent orbs"""
orb_list = get_talent_orbs_types()
talent_orbs = save_stats["talent_orbs"]
print("You have:")
for orb in talent_orbs:
amount = talent_orbs[orb]
text = "orbs" if amount != 1 else "orb"
try:
helper.colored_text(f"&{amount}& {orb_list[orb]} {text}")
except IndexError:
helper.colored_text(f"&{amount}& Unknown {orb} {text}")
orbs_str = user_input_handler.colored_input(
"Enter the name of the orb that you want. You can enter multiple orb names separated by &spaces& to edit multiple at once or you can enter &all& to select all talent orbs to edit (e.g &angel a massive red d strong black b resistant&):"
).split(" ")
if orbs_str[0] == "all":
return edit_all_orbs(save_stats, orb_list)
length = len(orbs_str) // 3
orbs_to_set: list[int] = []
for i in range(length):
orb_name = " ".join(orbs_str[i * 3 : i * 3 + 3]).lower()
orb_name = orb_name.replace("angle", "angel").title()
try:
orbs_to_set.append(orb_list.index(orb_name))
except ValueError:
helper.colored_text(
f"Error orb &{orb_name}& does not exist or is not recognized"
)
for orb_id in orbs_to_set:
name = orb_list[orb_id]
val = helper.check_int_max(
user_input_handler.colored_input(
f"What do you want to set the value of &{name}& to?:"
)
)
if val is None:
print("Error please enter a number")
continue
talent_orbs[orb_id] = val
save_stats["talent_orbs"] = talent_orbs
return save_stats
ATTRIBUTES = [
"Red",
"Floating",
"Black",
"Metal",
"Angel",
"Alien",
"Zombie",
]
EFFECTS = [
"Attack",
"Defense",
"Strong",
"Massive",
"Resistant",
]
GRADES = [
"D",
"C",
"B",
"A",
"S",
]
def create_orb_list(
attributes: list[str], effects: list[str], grades: list[str], incl_metal: bool
) -> list[str]:
"""Create a list of all possible talent orbs"""
orb_list: list[str] = []
for attribute in attributes:
effects_trim = effects
if attribute == "Metal" and incl_metal:
effects_trim = [effects[1]]
if attribute == "Metal" and not incl_metal:
effects_trim = []
for effect in effects_trim:
for grade in grades:
orb_list.append(f"{attribute} {grade} {effect}")
return orb_list
def create_aku_orbs(effects: list[str], grades: list[str]) -> list[str]:
"""Create a list of all possible aku orbs"""
orb_list: list[str] = []
for effect in effects:
for grade in grades:
orb_list.append(f"Aku {grade} {effect}")
return orb_list
def get_talent_orbs_types() -> list[str]:
"""Get a list of all possible talent orbs"""
orb_list = create_orb_list(ATTRIBUTES, EFFECTS[0:2], GRADES, True)
orb_list += create_orb_list(ATTRIBUTES, EFFECTS[2:], GRADES, False)
orb_list += create_aku_orbs(EFFECTS, GRADES)
print(orb_list)
return orb_list
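# Note: the resulting list starts "Red D Attack", "Red C Attack", ... and ends with the
# aku orbs ("Aku S Resistant"); an orb's index in this list is the orb id used in
# save_stats["talent_orbs"] above.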
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/basic/talent_orbs.py
| 0.526586 | 0.261997 |
talent_orbs.py
|
pypi
|
from typing import Any, Optional
from ... import item, game_data_getter, helper
def get_gamatoto_helpers(is_jp: bool) -> Optional[dict[str, Any]]:
"""Get the rarities of all gamatoto helpers"""
if is_jp:
country_code = "ja"
else:
country_code = "en"
file_data = game_data_getter.get_file_latest(
"resLocal", f"GamatotoExpedition_Members_name_{country_code}.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get gamatoto helper data")
return None
data = file_data.decode("utf-8").splitlines()[1:]
helpers: dict[str, Any] = {}
for line in data:
line_data = line.split(helper.get_text_splitter(is_jp))
if len(line_data) < 5:
break
helper_id = line_data[0]
rarity = int(line_data[1])
type_str = line_data[4]
helpers[helper_id] = {"Rarity_id": rarity, "Rarity_name": type_str}
return helpers
def generate_helpers(user_input: list[int], helper_data: dict[str, Any]) -> list[int]:
"""Generate unique helpers from amounts of each"""
final_helpers: list[int] = []
values = list(helper_data.values())
for i, usr_input in enumerate(user_input):
for j, value in enumerate(values):
if value["Rarity_id"] == i:
final_helpers += list(range(j + 1, j + 1 + usr_input))
break
return final_helpers
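# Illustrative sketch only: shows how per-rarity counts become helper ids, using a
# made-up three-entry helper table instead of the real csv data.
def _generate_helpers_example() -> list[int]:
    helper_data = {
        "1": {"Rarity_id": 0, "Rarity_name": "Intern"},
        "2": {"Rarity_id": 0, "Rarity_name": "Intern"},
        "3": {"Rarity_id": 1, "Rarity_name": "Employee"},
    }
    # Asking for two rarity-0 helpers and one rarity-1 helper yields ids [1, 2, 3]
    return generate_helpers([2, 1], helper_data)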
def get_helper_rarities(helper_data: dict[str, Any]) -> list[str]:
"""Get the rarities of all gamatoto helpers"""
rarities: list[str] = []
for helpers in helper_data.values():
if helpers["Rarity_name"] not in rarities:
rarities.append(helpers["Rarity_name"])
return rarities
def get_helpers(helpers: list[int], helper_data: dict[str, Any]) -> dict[str, Any]:
"""Get the amount of each type of helper"""
current_helpers: dict[int, Any] = {}
rarities = get_helper_rarities(helper_data)
helper_count: dict[str, int] = {}
for rarity in rarities:
helper_count[rarity] = 0
for helper_id in helpers:
if helper_id == 0xFFFFFFFF:
break
current_helpers[helper_id] = helper_data[str(helper_id)]
helper_count[current_helpers[helper_id]["Rarity_name"]] += 1
return helper_count
def add_empty_helper_slots(helpers: list[int], final_helpers: list[int]):
"""Add empty helper slots to the end of the list"""
empty_slots = len(helpers) - len(final_helpers)
if empty_slots > 0:
final_helpers += [0xFFFFFFFF] * empty_slots
return final_helpers
def edit_helpers(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for gamatoto helpers"""
helpers = save_stats["helpers"]
helper_data = get_gamatoto_helpers(helper.check_data_is_jp(save_stats))
if helper_data is None:
return save_stats
helper_count = get_helpers(helpers, helper_data)
helpers_counts_input = item.IntItemGroup.from_lists(
names=list(helper_count.keys()),
values=list(helper_count.values()),
group_name="Gamatoto Helpers",
maxes=10,
)
helpers_counts_input.edit()
final_helpers = generate_helpers(helpers_counts_input.get_values(), helper_data)
helpers = add_empty_helper_slots(helpers, final_helpers)
save_stats["helpers"] = helpers
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/gamototo/helpers.py
| 0.686895 | 0.292248 |
helpers.py
|
pypi
|
from typing import Any, Optional
from ... import helper, user_input_handler, item, game_data_getter
def get_boundaries(is_jp: bool) -> Optional[list[int]]:
"""Get the xp requirements for each level"""
file_data = game_data_getter.get_file_latest(
"DataLocal", "GamatotoExpedition.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get gamatoto xp requirements")
return None
boundaries = file_data.decode("utf-8").splitlines()
    previous = 0
    xp_requirements: list[int] = []
for line in boundaries:
requirement = int(line.split(",")[0])
if previous >= requirement:
break
xp_requirements.append(requirement)
previous = requirement
return xp_requirements
def get_level_from_xp(gamatoto_xp: int, is_jp: bool) -> Optional[dict[str, Any]]:
"""Get the level from the xp amount"""
xp_requirements = get_boundaries(is_jp)
if xp_requirements is None:
return None
level = 1
for requirement in xp_requirements:
if gamatoto_xp >= requirement:
level += 1
return {
"level": level,
"max_level": len(xp_requirements),
"max_xp": xp_requirements[-2],
}
def get_xp_from_level(level: int, is_jp: bool) -> Optional[int]:
"""Get the xp amount from the level"""
xp_requirements = get_boundaries(is_jp)
if xp_requirements is None:
return None
if level <= 1:
gamatoto_xp = 0
else:
gamatoto_xp = xp_requirements[level - 2]
return gamatoto_xp
def edit_gamatoto_xp(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for gamatoto xp"""
gamatoto_xp = save_stats["gamatoto_xp"]
data = get_level_from_xp(gamatoto_xp["Value"], helper.check_data_is_jp(save_stats))
if data is None:
return save_stats
level = data["level"]
helper.colored_text(f"Gamatoto xp: &{gamatoto_xp['Value']}&\nLevel: &{level}&")
raw = (
user_input_handler.colored_input(
"Do you want to edit raw xp(&1&) or the level(&2&)?:"
)
== "1"
)
if raw:
gam_xp = item.IntItem(
name="Gamatoto XP",
value=item.Int(gamatoto_xp["Value"]),
max_value=None,
)
gam_xp.edit()
gamatoto_xp["Value"] = gam_xp.get_value()
else:
gam_level = item.IntItem(
name="Gamatoto Level",
value=item.Int(level),
max_value=data["max_level"],
)
gam_level.edit()
gamatoto_xp["Value"] = get_xp_from_level(
gam_level.get_value(), helper.check_data_is_jp(save_stats)
)
save_stats["gamatoto_xp"] = gamatoto_xp
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/gamototo/gamatoto_xp.py
| 0.747063 | 0.310342 |
gamatoto_xp.py
|
pypi
|
from typing import Any
from ... import user_input_handler, helper
from ...edits.other import meow_medals
def set_stage_data(
stage_data_edit: dict[str, Any],
stage_id: int,
stars: int,
lengths: dict[str, int],
unlock_next: bool,
) -> dict[str, Any]:
"""Set the stage data for a stage"""
if stage_id >= len(stage_data_edit["Value"]["clear_progress"]):
return stage_data_edit
stage_data_edit = set_clear_progress(stage_data_edit, stage_id, stars, lengths)
if unlock_next and stage_id + 1 < len(stage_data_edit["Value"]["clear_progress"]):
stage_data_edit = set_unlock_next(stage_data_edit, stage_id, stars, lengths)
stage_data_edit = set_clear_amount(stage_data_edit, stage_id, stars, lengths)
return stage_data_edit
def set_clear_progress(
stage_data: dict[str, Any], stage_id: int, stars: int, lengths: dict[str, int]
) -> dict[str, Any]:
"""Set the clear progress for a stage"""
stage_data["Value"]["clear_progress"][stage_id] = ([lengths["stages"]] * stars) + (
[0] * (lengths["stars"] - stars)
)
return stage_data
def set_unlock_next(
stage_data: dict[str, Any], stage_id: int, stars: int, lengths: dict[str, int]
) -> dict[str, Any]:
"""Set the unlock next for a stage"""
stage_data["Value"]["unlock_next"][stage_id + 1] = (
[lengths["stars"] - 1] * stars
) + ([0] * (lengths["stars"] - stars))
return stage_data
def set_clear_amount(
stage_data: dict[str, Any], stage_id: int, stars: int, lengths: dict[str, int]
) -> dict[str, Any]:
"""Set the clear amount for a stage"""
stage_data["Value"]["clear_amount"][stage_id] = (
[[1] * lengths["stages"]] * stars
) + ([[0] * lengths["stages"]] * (lengths["stars"] - stars))
return stage_data
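# Illustrative sketch only: shows the list shapes the setters above produce, using
# made-up lengths instead of real stage data.
def _stage_layout_example() -> None:
    lengths = {"stages": 3, "stars": 2}
    stage_data: dict[str, Any] = {
        "Value": {"clear_progress": [[0, 0]], "clear_amount": [[[0] * 3] * 2]}
    }
    stage_data = set_clear_progress(stage_data, 0, 1, lengths)
    stage_data = set_clear_amount(stage_data, 0, 1, lengths)
    # clear_progress[0] is now [3, 0]; clear_amount[0] is [[1, 1, 1], [0, 0, 0]]
    print(stage_data["Value"]["clear_progress"][0])
    print(stage_data["Value"]["clear_amount"][0])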
def set_medals(
stage_stats: dict[str, Any],
medal_stats: dict[str, Any],
valid_range: tuple[int, int],
offset: int,
is_jp: bool,
) -> tuple[dict[str, Any], dict[str, Any]]:
"""Set the medals for completed stages"""
medal_data = meow_medals.get_medal_data(is_jp)
if medal_data is None:
return stage_stats, medal_stats
unlock_next = stage_stats["Value"]["unlock_next"]
for medal in medal_data.stages:
if not medal.maps:
continue
completed = True
for map_id in medal.maps:
star = medal.star
if map_id < 0:
continue
if map_id < valid_range[0] or map_id > valid_range[1]:
completed = False
break
map_id += offset
next_chapter = unlock_next[map_id + 1]
if star is None:
star = 0
if next_chapter[star] == 0:
completed = False
break
if completed:
if medal.medal_id not in medal_stats["medal_data_1"]:
medal_stats["medal_data_1"].append(medal.medal_id)
medal_stats["medal_data_2"][medal.medal_id] = 1
return stage_stats, medal_stats
def stage_handler(
stage_data: dict[str, Any], ids: list[int], offset: int, unlock_next: bool = True
) -> dict[str, Any]:
"""Clear stages from a set of ids"""
lengths = stage_data["Lengths"]
individual = True
if len(ids) > 1:
individual = user_input_handler.ask_if_individual(
"stars / crowns for each stage"
)
first = True
stars = 0
stage_data_edit = stage_data
for stage_id in ids:
if not individual and first:
stars = helper.check_int(
user_input_handler.colored_input(
f"Enter the number of stars/crowns (max &{lengths['stars']}&):"
)
)
if stars is None:
print("Please enter a valid number")
break
stars = helper.clamp(stars, 0, lengths["stars"])
first = False
elif individual:
stars = helper.check_int(
user_input_handler.colored_input(
f"Enter the number of stars/crowns for subchapter &{stage_id}& (max &{lengths['stars']}&):"
)
)
if stars is None:
print("Please enter a valid number")
break
stars = helper.clamp(stars, 0, lengths["stars"])
stage_id += offset
stage_data_edit = stage_data
stage_data_edit = set_stage_data(
stage_data_edit, stage_id, stars, lengths, unlock_next
)
print("Successfully set subchapters")
return stage_data_edit
def stories_of_legend(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for clearing stories of legend"""
stage_data = save_stats["event_stages"]
ids = user_input_handler.get_range(
user_input_handler.colored_input(
"Enter subchapter ids (e.g &1& = legend begins, &2& = passion land)(You can enter &all& to get all, a range e.g &1&-&49&, or ids separate by spaces e.g &5 4 7&):"
),
50,
)
offset = -1
save_stats["event_stages"] = stage_handler(stage_data, ids, offset)
save_stats["event_stages"], save_stats["medals"] = set_medals(
save_stats["event_stages"],
save_stats["medals"],
(0, 50),
0,
helper.check_data_is_jp(save_stats),
)
return save_stats
def event_stages(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for clearing event stages"""
stage_data = save_stats["event_stages"]
lengths = stage_data["Lengths"]
ids = user_input_handler.get_range(
user_input_handler.colored_input(
"Enter subchapter ids (Look up &Event Release Order battle cats& to find ids)(You can enter &all& to get all, a range e.g &1&-&50&, or ids separate by spaces e.g &5 4 7&):"
),
lengths["total"] - 400,
)
offset = 400
save_stats["event_stages"] = stage_handler(stage_data, ids, offset)
save_stats["event_stages"], save_stats["medals"] = set_medals(
save_stats["event_stages"],
save_stats["medals"],
(0, len(save_stats["event_stages"]["Value"]["unlock_next"])),
-600,
helper.check_data_is_jp(save_stats),
)
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/levels/event_stages.py
| 0.584508 | 0.249807 |
event_stages.py
|
pypi
|
from typing import Optional
from ... import user_input_handler, helper
from . import main_story
def select_specific_chapters() -> list[int]:
"""Select specific levels"""
print("What chapters do you want to select?")
ids = user_input_handler.select_not_inc(main_story.CHAPTERS, "clear")
return ids
def get_option():
"""Get option"""
options = [
"Select specific levels with stage ids",
"Select all levels up to a certain stage",
"Select all levels",
]
return user_input_handler.select_single(options)
def select_levels(
chapter_id: Optional[int], forced_option: Optional[int] = None, total: int = 48
) -> list[int]:
"""Select levels"""
if forced_option is None:
choice = get_option()
else:
choice = forced_option
if choice == 1:
return select_specific_levels(chapter_id, total)
if choice == 2:
return select_levels_up_to(chapter_id, total)
if choice == 3:
return select_all(total)
return []
def select_specific_levels(chapter_id: Optional[int], total: int) -> list[int]:
"""Select specific levels"""
print("What levels do you want to select?")
if chapter_id is not None:
helper.colored_text(
f"Chapter: &{chapter_id+1}& : &{main_story.CHAPTERS[chapter_id]}&"
)
ids = user_input_handler.get_range_ids(
"Level ids (e.g &1&=korea, &2&=mongolia)", total
)
ids = helper.check_clamp(ids, total, 1, -1)
return ids
def select_levels_up_to(chapter_id: Optional[int], total: int) -> list[int]:
"""Select levels up to a certain level"""
print("What levels do you want to select?")
if chapter_id is not None:
helper.colored_text(
f"Chapter: &{chapter_id+1}& : &{main_story.CHAPTERS[chapter_id]}&"
)
stage_id = user_input_handler.get_int(
f"Enter the stage id that you want to clear/unclear up to (and including) (e.g &1&=korea cleared, &2&=korea &and& mongolia cleared, &{total}&=all)?:"
)
stage_id = helper.clamp(stage_id, 1, total)
return list(range(0, stage_id))
def select_all(total: int) -> list[int]:
"""Select all levels"""
return list(range(0, total))
def select_level_progress(
chapter_id: Optional[int], total: int, examples: Optional[list[str]] = None
) -> int:
"""Select level progress"""
if examples is None:
examples = [
"korea",
"mongolia",
]
print("What level do you want to clear up to and including?")
if chapter_id is not None:
helper.colored_text(
f"Chapter: &{chapter_id+1}& : &{main_story.CHAPTERS[chapter_id]}&"
)
progress = user_input_handler.get_int(
f"Enter the stage id that you want to clear/unclear (e.g &1&={examples[0]} cleared, &2&={examples[0]} &and& {examples[1]} cleared, &{total}&=all, &0&=unclear all)?:"
)
progress = helper.clamp(progress, 0, total)
return progress
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/levels/story_level_id_selector.py
| 0.785144 | 0.259842 |
story_level_id_selector.py
|
pypi
|
from typing import Any, Optional
from ... import user_input_handler, helper
from . import main_story
def get_available_chapters(outbreaks: dict[int, Any]) -> list[str]:
"""Get available chapters"""
available_chapters: list[str] = []
for chapter_index in outbreaks:
if chapter_index > 2:
chapter_index -= 1
if chapter_index > 6:
continue
available_chapters.append(main_story.CHAPTERS[chapter_index])
return available_chapters
def set_outbreak(
chapter_data: dict[int, int], val_to_set: int, total: Optional[int] = None
) -> dict[int, int]:
"""Set a chapter of an outbreak"""
if total is None:
total = len(chapter_data)
for level_id in range(total):
chapter_data[level_id] = val_to_set
return chapter_data
def set_outbreaks(
outbreaks: dict[int, Any],
current_outbreaks: dict[int, Any],
ids: list[int],
clear: bool = True,
) -> tuple[dict[int, Any], dict[int, Any]]:
"""Set outbreaks"""
for chapter_id in ids:
outbreaks[chapter_id] = set_outbreak(
outbreaks[chapter_id], 1 if clear else 0, 48
)
if chapter_id in current_outbreaks:
if clear:
current_outbreaks[chapter_id] = {}
return outbreaks, current_outbreaks
def edit_outbreaks(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editting outbreaks"""
outbreaks = save_stats["outbreaks"]
current_outbreaks = save_stats["current_outbreaks"]
clear = (
user_input_handler.colored_input(
"Do you want to clear or un-clear outbreaks? (&c&/&u&): "
)
== "c"
)
available_chapters = get_available_chapters(outbreaks)
print("What chapter do you want to edit:")
ids = user_input_handler.select_not_inc(
options=available_chapters,
mode="clear the outbreaks for?",
)
ids = helper.check_clamp(ids, len(available_chapters) + 1, 0, 0)
ids = main_story.format_story_ids(ids)
outbreaks, current_outbreaks = set_outbreaks(
outbreaks, current_outbreaks, ids, clear
)
save_stats["outbreaks"] = outbreaks
save_stats["current_outbreaks"] = current_outbreaks
print("Successfully set outbreaks")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/levels/outbreaks.py
| 0.707101 | 0.255367 |
outbreaks.py
|
pypi
|
from typing import Any
from ... import helper
from . import story_level_id_selector
CHAPTERS = [
"Empire of Cats 1",
"Empire of Cats 2",
"Empire of Cats 3",
"Into the Future 1",
"Into the Future 2",
"Into the Future 3",
"Cats of the Cosmos 1",
"Cats of the Cosmos 2",
"Cats of the Cosmos 3",
]
def clear_specific_level_ids(
save_stats: dict[str, Any], chapter_id: int, progress: int
) -> dict[str, Any]:
"""Clear specific levels in a chapter"""
story_chapters = save_stats["story_chapters"]
progress = helper.clamp(progress, 0, 48)
if progress == 0:
story_chapters["Chapter Progress"][chapter_id] = 0
story_chapters["Times Cleared"][chapter_id] = [0] * 51
else:
stage_index = progress - 1
story_chapters["Chapter Progress"][chapter_id] = progress
# set all levels before the one being cleared to 1
story_chapters["Times Cleared"][chapter_id][stage_index] = 1
for i in range(stage_index):
story_chapters["Times Cleared"][chapter_id][i] = 1
# set all levels after the one being cleared to 0
for i in range(stage_index + 1, get_total_stages(save_stats, chapter_id) + 3):
story_chapters["Times Cleared"][chapter_id][i] = 0
save_stats["story_chapters"] = story_chapters
return save_stats
def has_cleared_chapter(save_stats: dict[str, Any], chapter_id: int) -> bool:
"""
Check if a chapter has been cleared
Args:
save_stats (dict[str, Any]): Save stats
chapter_id (int): Chapter ID
Returns:
bool: True if cleared, False if not
"""
chapter_id = format_story_id(chapter_id)
return save_stats["story_chapters"]["Chapter Progress"][chapter_id] >= 48
def format_story_ids(ids: list[int]) -> list[int]:
"""For some reason there is a gap after EoC 3. This adds that"""
formatted_ids: list[int] = []
for story_id in ids:
formatted_ids.append(format_story_id(story_id))
return formatted_ids
def format_story_id(chapter_id: int) -> int:
"""For some reason there is a gap after EoC 3. This adds that"""
if chapter_id > 2:
chapter_id += 1
return chapter_id
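# Illustrative sketch (not part of the original module): the resulting mapping,
# accounting for the gap after Empire of Cats 3.
#
#   >>> format_story_ids([0, 1, 2, 3, 4, 5])
#   [0, 1, 2, 4, 5, 6]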
def clear_levels(
story_chapters: dict[str, Any],
treasures: list[list[int]],
ids: list[int],
val: int,
chapter_progress: int,
clear: bool,
) -> tuple[dict[str, Any], list[list[int]]]:
"""Clear levels in a chapter"""
for chapter_id in ids:
story_chapters["Chapter Progress"][chapter_id] = chapter_progress
story_chapters["Times Cleared"][chapter_id] = (
([val] * chapter_progress) + ([0] * (48 - chapter_progress)) + ([0] * 3)
)
if not clear:
treasures[chapter_id] = [0] * 49
return story_chapters, treasures
def get_total_stages(save_stats: dict[str, Any], chapter_id: int) -> int:
"""Get the total number of stages in a chapter"""
return len(save_stats["story_chapters"]["Times Cleared"][chapter_id]) - 3
def clear_each(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Clear stages for each chapter"""
chapter_ids = story_level_id_selector.select_specific_chapters()
for chapter_id in chapter_ids:
helper.colored_text(f"Chapter: &{chapter_id+1}& : &{CHAPTERS[chapter_id]}&")
formatted_id = format_story_id(chapter_id)
progress = story_level_id_selector.select_level_progress(
chapter_id, get_total_stages(save_stats, formatted_id)
)
save_stats = clear_specific_level_ids(save_stats, formatted_id, progress)
helper.colored_text("Successfully set main story chapters")
return save_stats
def clear_all(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Clear whole chapters"""
chapter_ids = story_level_id_selector.select_specific_chapters()
text = ""
for chapter_id in chapter_ids:
text += f"Chapter: &{chapter_id+1}& : &{CHAPTERS[chapter_id]}&\n"
helper.colored_text(text.strip("\n"))
progress = story_level_id_selector.select_level_progress(
None, get_total_stages(save_stats, 0)
)
for chapter_id in chapter_ids:
chapter_id = format_story_id(chapter_id)
save_stats = clear_specific_level_ids(save_stats, chapter_id, progress)
helper.colored_text("Successfully set main story chapters")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/levels/main_story.py
| 0.708717 | 0.383295 |
main_story.py
|
pypi
|
from typing import Any, Optional
from ... import helper, user_input_handler, item, csv_handler, game_data_getter
from . import story_level_id_selector, main_story
def get_stages(is_jp: bool) -> Optional[list[list[list[int]]]]:
"""Get what stages belong to which treasure group"""
treasures_values: list[list[list[int]]] = []
file_data = game_data_getter.get_file_latest(
"DataLocal", "treasureData0.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get treasureData0.csv")
return None
eoc_treasures = helper.parse_int_list_list(
csv_handler.parse_csv(
file_data.decode("utf-8"),
)
)[11:22]
file_data = game_data_getter.get_file_latest(
"DataLocal", "treasureData1.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get treasureData1.csv")
return None
itf_treasures = helper.parse_int_list_list(
csv_handler.parse_csv(
file_data.decode("utf-8"),
)
)[11:22]
file_data = game_data_getter.get_file_latest(
"DataLocal", "treasureData2_0.csv", is_jp
)
if file_data is None:
helper.error_text("Failed to get treasureData2_0.csv")
return None
cotc_treasures = helper.parse_int_list_list(
csv_handler.parse_csv(
file_data.decode("utf-8"),
)
)[11:22]
treasures_values.append(remove_negative_1(eoc_treasures))
treasures_values.append(remove_negative_1(itf_treasures))
treasures_values.append(remove_negative_1(cotc_treasures))
return treasures_values
def remove_negative_1(data: list[list[int]]) -> list[list[int]]:
"""Remove items from a list that have a negative value of 1"""
new_data = data.copy()
for i, val in enumerate(data):
if -1 in val:
new_data[i] = new_data[i][:-1]
return new_data
def get_names(is_jp: bool) -> Optional[list[list[list[str]]]]:
"""Get the names of all of the treasure groups"""
names: list[list[list[str]]] = []
if is_jp:
country_code = "ja"
else:
country_code = "en"
file_data = game_data_getter.get_file_latest(
"resLocal", f"Treasure3_0_{country_code}.csv", is_jp
)
if file_data is None:
helper.error_text(f"Failed to get Treasure3_0_{country_code}.csv")
return None
eoc_names = csv_handler.parse_csv(
file_data.decode("utf-8"),
delimeter=helper.get_text_splitter(is_jp),
)[:11]
file_data = game_data_getter.get_file_latest(
"resLocal", f"Treasure3_1_AfterFirstEncounter_{country_code}.csv", is_jp
)
if file_data is None:
helper.error_text(
f"Failed to get Treasure3_1_AfterFirstEncounter_{country_code}.csv"
)
return None
itf_names = csv_handler.parse_csv(
file_data.decode("utf-8"),
delimeter=helper.get_text_splitter(is_jp),
)[:11]
file_data = game_data_getter.get_file_latest(
"resLocal", f"Treasure3_2_0_{country_code}.csv", is_jp
)
if file_data is None:
helper.error_text(f"Failed to get Treasure3_2_0_{country_code}.csv")
return None
cotc_names = csv_handler.parse_csv(
file_data.decode("utf-8"),
delimeter=helper.get_text_splitter(is_jp),
)[:11]
names.append(helper.copy_first_n(eoc_names, 0))
names.append(helper.copy_first_n(itf_names, 0))
names.append(helper.copy_first_n(cotc_names, 0))
return names
def get_treasure_groups(is_jp: bool) -> Optional[dict[str, Any]]:
"""Get the names and stages of all of the treasure groups"""
treasure_stages = get_stages(is_jp)
treasure_names = get_names(is_jp)
if treasure_stages is None or treasure_names is None:
return None
return {"names": treasure_names, "stages": treasure_stages}
def set_treasures(
treasure_stats: list[list[int]], user_levels: list[int]
) -> list[list[int]]:
"""Set the treasure stats of a set of levels"""
for i, level in enumerate(user_levels):
if level == -1:
continue
if i > 2:
i += 1
treasure_stats[i] = [level] * 48 + [0]
return treasure_stats
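# Illustrative sketch (not part of the original module): chapters marked -1 are
# skipped and indices above 2 are shifted to account for the chapter gap. The
# arguments below are hypothetical.
#
#   >>> stats = [[0] * 49 for _ in range(10)]
#   >>> stats = set_treasures(stats, [3, -1, -1, 2])
#   # stats[0] is now [3] * 48 + [0]; stats[1] and stats[2] are untouched;
#   # the fourth selected chapter is written to stats[4] as [2] * 48 + [0]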
def set_specific_treasures(
treasure_stats: list[list[int]], treasure_data: list[int], chapter_id: int
) -> list[list[int]]:
"""Set the treasure stats of specific treasures"""
for i, stage in enumerate(treasure_data):
if stage == -1:
continue
if i > 45:
stage_id = i
else:
stage_id = 45 - i
treasure_stats[chapter_id][stage_id] = stage
return treasure_stats
def set_treasure_groups(
treasures_stats: list[list[int]],
treasure_grps: dict[str, Any],
treasure_levels: list[Optional[int]],
type_id: int,
chapter_id: int,
) -> list[list[int]]:
"""Set the treasure stats of a group of treasures"""
for i, treasure_level in enumerate(treasure_levels):
stages = treasure_grps["stages"][type_id][i]
for stage in stages:
if treasure_level is not None:
treasures_stats[chapter_id][stage] = treasure_level
return treasures_stats
def treasure_groups(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing treasure groups"""
treasure_grps = get_treasure_groups(helper.check_data_is_jp(save_stats))
if treasure_grps is None:
return save_stats
treasures_stats = save_stats["treasures"]
ids = user_input_handler.select_not_inc(main_story.CHAPTERS, "select")
for chapter_id in ids:
helper.colored_text(f"Chapter: &{main_story.CHAPTERS[chapter_id]}&")
type_id = chapter_id // 3
if chapter_id > 2:
chapter_id += 1
names = treasure_grps["names"][type_id]
treasure_levels = [-1] * len(names)
helper.colored_text("&0& = None, &1& = Inferior, &2& = Normal, &3& = Superior")
treasure_levels = item.IntItemGroup.from_lists(
names=names,
values=None,
maxes=None,
group_name="Treasures",
)
treasure_levels.edit()
treasures_stats = set_treasure_groups(
treasures_stats,
treasure_grps,
treasure_levels.get_values_none(),
type_id,
chapter_id,
)
save_stats["treasures"] = treasures_stats
return save_stats
def specific_stages(save_stats: dict[str, Any]):
"""Handler for editing specific stages"""
treasure_stats = save_stats["treasures"]
chapter_ids = story_level_id_selector.select_specific_chapters()
choice = story_level_id_selector.get_option()
for chapter_id in chapter_ids:
chapter_id = main_story.format_story_id(chapter_id)
stage_ids = story_level_id_selector.select_levels(chapter_id, choice)
treasure_data = [-1] * 48
treasure_data = user_input_handler.handle_all_at_once(
stage_ids,
False,
treasure_data,
list(range(1, 49)),
"treasure level",
"stage",
"(&0&=none, &1&=inferior, &2&=normal, &3&=superior)",
)
treasure_stats = set_specific_treasures(
treasure_stats, treasure_data, chapter_id
)
save_stats["treasures"] = treasure_stats
print("Successfully set treasures")
return save_stats
def specific_stages_all_chapters(save_stats: dict[str, Any]) -> dict[str, Any]:
"""Handler for editing treasure levels"""
chapter_ids = story_level_id_selector.select_specific_chapters()
treasure_stats = save_stats["treasures"]
stage_ids = story_level_id_selector.select_levels(None)
treasure_data = [-1] * 48
for i, chapter_id in enumerate(chapter_ids):
chapter_id = main_story.format_story_id(chapter_id)
if i == 0:
treasure_data = user_input_handler.handle_all_at_once(
stage_ids,
True,
treasure_data,
list(range(0, 48)),
"treasure level",
"stage",
"(&0&=none, &1&=inferior, &2&=normal, &3&=superior)",
)
treasure_stats = set_specific_treasures(
treasure_stats, treasure_data, chapter_id
)
save_stats["treasures"] = treasure_stats
print("Successfully set treasures")
return save_stats
|
/sc-editor-1.93.tar.gz/sc-editor-1.93/src/SC_Editor/edits/levels/treasures.py
| 0.656768 | 0.171234 |
treasures.py
|
pypi
|
from githooks.base_check import BaseCheck, Severity
from githooks.config import config
from githooks.git import CommittedFile
class CommittedFileCheck(BaseCheck):
"""Parent class for checks on a single committed file
    To check the files, the check clones itself when it is being prepared
    for a CommittedFile. Subclasses add further logic on top of this to
    filter themselves out in some cases.
"""
committed_file = None
def prepare(self, obj):
new = super(CommittedFileCheck, self).prepare(obj)
if not new or not isinstance(obj, CommittedFile):
return new
new = new.clone()
new.committed_file = obj
return new
def __str__(self):
return '{} on {}'.format(type(self).__name__, self.committed_file)
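    # Illustrative sketch (not part of the original module): one possible way a
    # hook could drive these checks; the commit object here is hypothetical and
    # prepare()/clone() come from githooks.base_check.
    #
    #   check = CommittedFileSizeCheck()
    #   for committed_file in commit.get_changed_files():
    #       prepared = check.prepare(committed_file)
    #       if prepared:
    #           for severity, message in prepared.get_problems():
    #               print(severity, message)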
class CommittedFileSizeCheck(CommittedFileCheck):
"""Special check for committed file size"""
def get_problems(self):
if self.committed_file.get_file_size() >= config.get("commit_check.commit_file_max_size"):
yield (
Severity.ERROR,
'提交 {} 的文件 {} 大小超过 {}, 即 {} MB'
.format(self.committed_file.commit, self.committed_file.path,
config.get("commit_check.commit_file_max_size"),
config.get("commit_check.commit_file_max_size") / 1024 / 1024)
)
class CommittedFileExtensionCheck(CommittedFileCheck):
"""Special check for committed file extension"""
def get_problems(self):
illegal_suffixes = config.get("commit_check.binary_file_illegal_suffixes")
illegal_suffixes_list = illegal_suffixes.split(",")
legal_binary_filenames = config.get("commit_check.legal_binary_filenames")
legal_binary_filenames_list = legal_binary_filenames.split(",")
filename = self.committed_file.get_filename()
if filename in legal_binary_filenames_list:
return
extension = self.committed_file.get_extension()
if extension in illegal_suffixes_list:
yield (
Severity.ERROR,
'提交 {} 的文件 {} 在不允许的提交文件后缀名清单中 {}'
.format(self.committed_file.commit, self.committed_file.path,
illegal_suffixes_list)
)
|
/sc-githooks-0.2.0.tar.gz/sc-githooks-0.2.0/githooks/file_checks.py
| 0.466603 | 0.154121 |
file_checks.py
|
pypi
|
from os.path import isabs, join as joinpath, normpath
from subprocess import check_output
from githooks.utils import get_exe_path, get_extension, decode_str
git_exe_path = get_exe_path('git')
class CommitList(list):
"""Routines on a list of sequential commits"""
ref_path = None
def __init__(self, other, branch_name):
super(CommitList, self).__init__(other)
self.branch_name = branch_name
def __str__(self):
name = '{}..{}'.format(self[0], self[-1])
if self.ref_path:
name += ' ({})'.format(self.branch_name)
return name
class Commit(object):
"""Routines on a single commit"""
null_commit_id = '0000000000000000000000000000000000000000'
def __init__(self, commit_id, commit_list=None):
self.commit_id = commit_id
self.commit_list = commit_list
self.content_fetched = False
self.changed_files = None
self.binary_files = None
def __str__(self):
return self.commit_id[:8]
def __bool__(self):
return self.commit_id != Commit.null_commit_id
def __nonzero__(self):
return self.__bool__()
def __eq__(self, other):
return isinstance(other, Commit) and self.commit_id == other.commit_id
def get_new_commit_list(self, branch_name):
"""Get the list of parent new commits in order"""
output = decode_str(check_output([
git_exe_path,
'rev-list',
self.commit_id,
'--not',
'--all',
'--reverse',
]))
commit_list = CommitList([], branch_name)
for commit_id in output.splitlines():
commit = Commit(commit_id, commit_list)
commit_list.append(commit)
return commit_list
def _fetch_content(self):
content = check_output(
[git_exe_path, 'cat-file', '-p', self.commit_id]
)
self._parents = []
self._message_lines = []
# The commit message starts after the empty line. We iterate until
# we find one, and then consume the rest as the message.
lines = iter(content.splitlines())
for line in lines:
if not line:
break
if line.startswith(b'parent '):
self._parents.append(Commit(line[len(b'parent '):].rstrip()))
elif line.startswith(b'author '):
self._author = Contributor.parse(line[len(b'author '):])
elif line.startswith(b'committer '):
self._committer = Contributor.parse(line[len(b'committer '):])
for line in lines:
self._message_lines.append(decode_str(line))
self.content_fetched = True
def get_parents(self):
if not self.content_fetched:
self._fetch_content()
return self._parents
def get_author(self):
if not self.content_fetched:
self._fetch_content()
return self._author
def get_committer(self):
if not self.content_fetched:
self._fetch_content()
return self._committer
def get_contributors(self):
yield self.get_author()
yield self._committer
def get_message_lines(self):
if not self.content_fetched:
self._fetch_content()
return self._message_lines
def get_summary(self):
return self.get_message_lines()[0]
def parse_tags(self):
tags = []
rest = self.get_summary()
while rest.startswith('[') and ']' in rest:
end_index = rest.index(']')
tags.append(rest[1:end_index])
rest = rest[end_index + 1:]
return tags, rest
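    # Illustrative sketch (not part of the original module): parse_tags splits
    # leading bracketed tags off the commit summary.
    #
    #   >>> commit.get_summary()
    #   '[WIP][MESS] rework the build'
    #   >>> commit.parse_tags()
    #   (['WIP', 'MESS'], ' rework the build')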
def content_can_fail(self):
return not any(
t in ['HOTFIX', 'MESS', 'TEMP', 'WIP']
for t in self.parse_tags()[0]
)
def get_changed_files(self):
"""Return the list of added or modified files on a commit"""
if self.changed_files is None:
output = decode_str(check_output([
git_exe_path,
'diff-tree',
'-r',
'--root', # Get the initial commit as additions
'--no-commit-id', # We already know the commit id.
'--break-rewrites', # Get rewrites as additions
'--no-renames', # Get renames as additions
'--diff-filter=AM', # Only additions and modifications
self.commit_id,
]))
changed_files = []
for line in output.splitlines():
line_split = line.split(None, 5)
assert len(line_split) == 6
assert line_split[0].startswith(':')
file_mode = line_split[1]
# sc add object_id
object_id = line_split[3]
file_path = line_split[5]
changed_files.append(CommittedFile(file_path, self, file_mode, object_id))
self.changed_files = changed_files
return self.changed_files
def get_binary_files(self):
"""Return the binary files on a commit"""
if self.binary_files is None:
output = decode_str(check_output([
git_exe_path,
'log',
'--pretty=format:%H -M100%', # pretty format
'--numstat', # number state of the file
'--no-commit-id', # We already know the commit id.
'--break-rewrites', # Get rewrites as additions
'--no-renames', # Get renames as additions
'--diff-filter=AM', # Only additions and modifications
"{}^!".format(self.commit_id),
]))
binary_files = []
for line in output.splitlines():
line_split = line.split('\t')
if len(line_split) == 3:
if "-" == line_split[0] and "-" == line_split[1]:
binary_files.append(line_split[2])
self.binary_files = binary_files
return self.binary_files
class Contributor(object):
"""Routines on contribution properties of a commit"""
def __init__(self, name, email, timestamp):
self.name = name
self.email = email
self.timestamp = timestamp
@classmethod
def parse(cls, line):
"""Parse the contribution line as bytes"""
name, line = line.split(b' <', 1)
email, line = line.split(b'> ', 1)
timestamp, line = line.split(b' ', 1)
return cls(decode_str(name), decode_str(email), int(timestamp))
def get_email_domain(self):
return self.email.split('@', 1)[-1]
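    # Illustrative sketch (not part of the original module): parsing a raw
    # author/committer line; the address below is made up.
    #
    #   >>> c = Contributor.parse(b'Jane Doe <[email protected]> 1600000000 +0200')
    #   >>> (c.name, c.email, c.timestamp)
    #   ('Jane Doe', '[email protected]', 1600000000)
    #   >>> c.get_email_domain()
    #   'example.com'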
class CommittedFile(object):
"""Routines on a single committed file"""
def __init__(self, path, commit=None, mode=None, object_id=None):
self.path = path
self.commit = commit
assert mode is None or len(mode) == 6
self.mode = mode
self.content = None
# sc add object id
self.object_id = object_id
def __str__(self):
return '文件 {} 位于提交 {}'.format(self.path, self.commit)
def __eq__(self, other):
return (
isinstance(other, CommittedFile) and
self.path == other.path and
self.commit == other.commit
)
def exists(self):
return bool(check_output([
git_exe_path,
'ls-tree',
'--name-only',
'-r',
self.commit.commit_id,
self.path,
]))
def changed(self):
return self in self.commit.get_changed_files()
def regular(self):
return self.mode[:2] == '10'
def symlink(self):
return self.mode[:2] == '12'
def owner_can_execute(self):
owner_bits = int(self.mode[-3])
return bool(owner_bits & 1)
def get_object_id(self):
return self.object_id
def get_filename(self):
return self.path.rsplit('/', 1)[-1]
def get_file_size(self):
output = check_output([
git_exe_path,
'cat-file',
'-s', # show file size
self.object_id,
])
return int(output)
def get_extension(self):
return get_extension(self.path)
def get_content(self):
"""Get the file content as binary"""
if self.content is None:
self.content = check_output([
git_exe_path, 'show', self.commit.commit_id + ':' + self.path
])
return self.content
def get_shebang(self):
"""Get the shebang from the file content"""
if not self.regular():
return None
content = self.get_content()
if not content.startswith(b'#!'):
return None
content = content[len(b'#!'):].strip()
return decode_str(content.split(None, 1)[0])
def get_shebang_exe(self):
"""Get the executable from the shebang"""
shebang = self.get_shebang()
if not shebang:
return None
if shebang == '/usr/bin/env':
rest = self.get_content().splitlines()[0][len(b'#!/usr/bin/env'):]
rest_split = rest.split(None, 1)
if rest_split:
return decode_str(rest_split[0])
return shebang.rsplit('/', 1)[-1]
def get_symlink_target(self):
"""Get the symlink target as same kind of instance
We just return None, if the target has no chance to be on
the repository."""
content = self.get_content()
if isabs(content):
return None
path = normpath(joinpath(self.path, '..', decode_str(content)))
if path.startswith('..'):
return None
return type(self)(path, self.commit)
|
/sc-githooks-0.2.0.tar.gz/sc-githooks-0.2.0/githooks/git.py
| 0.581541 | 0.154217 |
git.py
|
pypi
|
import logging
class Handler(object):
"""The root handler"""
def handle(self, data):
return True
class BaseHandler(Handler):
"""Base handler for decorating use"""
_handler: Handler = None
def __init__(self, handler: Handler = None) -> None:
self._handler = handler
@property
def handler(self):
return self._handler
def handle(self, data):
if self._handler:
return self._handler.handle(data)
return True
class NoOpHandler(Handler):
"""No Operation handler
"""
def handle(self, data):
return True
class UnknownOpHandler(Handler):
"""Unknown Operation handler
"""
def handle(self, data):
event_type = data.get('event_type')
event_name = data.get('event_name')
logging.getLogger(__name__).error("Unknown event, type: %s, name: %s", event_type, event_name)
return False
class UserCreateHandler(BaseHandler):
"""A handler for creating user
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class UserDestroyHandler(BaseHandler):
"""A handler for destroying user
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class GroupCreateHandler(BaseHandler):
"""A handler for creating group
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class GroupRenameHandler(BaseHandler):
"""A handler for renaming group
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class GroupDestroyHandler(BaseHandler):
"""A handler for destroying group
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class UserAddToGroupHandler(BaseHandler):
"""A handler for adding user to group
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class UserRemoveFromGroupHandler(BaseHandler):
"""A handler for removing user from group
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class ProjectCreateHandler(BaseHandler):
"""A handler for creating project
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class ProjectRenameHandler(BaseHandler):
"""A handler for renaming project
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class ProjectUpdateHandler(BaseHandler):
"""A handler for updating project
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class ProjectDestroyHandler(BaseHandler):
"""A handler for destroying project
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class PushHandler(BaseHandler):
"""A handler for pushing commits
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class TagPushHandler(BaseHandler):
"""A handler for pushing tag
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class RepositoryUpdateHandler(BaseHandler):
"""A handler for updating repository
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_name = data.get('event_name')
logging.getLogger(__name__).info("Handling event: %s", event_name)
return True
class MergeRequestHandler(BaseHandler):
"""A handler for merging request
"""
def handle(self, data):
result = BaseHandler.handle(self, data)
if not result:
return False
event_type = data.get('event_type')
logging.getLogger(__name__).info("Handling event: %s", event_type)
return True
class HandlerFactory:
_HANDLERS = {
"user_create": UserCreateHandler(),
"user_destroy": UserDestroyHandler(),
"group_create": GroupCreateHandler(),
"group_rename": GroupRenameHandler(),
"group_destroy": GroupDestroyHandler(),
"user_add_to_group": UserAddToGroupHandler(),
"user_remove_from_group": UserRemoveFromGroupHandler(),
"project_create": ProjectCreateHandler(),
"project_rename": ProjectRenameHandler(),
"project_update": ProjectUpdateHandler(),
"project_destroy": ProjectDestroyHandler(),
"push": PushHandler(),
"repository_update": RepositoryUpdateHandler(),
"merge_request": MergeRequestHandler(),
}
@staticmethod
def get_handler(event_type: str, event_name: str) -> Handler:
if event_type:
if event_type in HandlerFactory._HANDLERS.keys():
return HandlerFactory._HANDLERS.get(event_type)
if event_name in HandlerFactory._HANDLERS.keys():
return HandlerFactory._HANDLERS.get(event_name)
return UnknownOpHandler()
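    # Illustrative sketch (not part of the original module): dispatching a
    # decoded GitLab system-hook payload; the payload dict is hypothetical.
    #
    #   payload = {"event_name": "project_create", "path_with_namespace": "grp/proj"}
    #   handler = HandlerFactory.get_handler(payload.get("event_type"), payload.get("event_name"))
    #   handler.handle(payload)  # logs "Handling event: project_create" and returns True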
|
/sc-gitlab-msg-consumer-0.0.2.tar.gz/sc-gitlab-msg-consumer-0.0.2/consumer/handlers.py
| 0.720663 | 0.247325 |
handlers.py
|
pypi
|
import decimal
import json
import logging
from sc_config.config import Config
from sc_utilities import Singleton
from .configs.default import DEFAULT_CONFIG
class ConfigUtils(metaclass=Singleton):
"""
    Utility class for configuration files
"""
_config = None
def __init__(self):
pass
@classmethod
def load_configurations(cls):
"""
        Load the configuration file
:return:
"""
try:
# load configurations
cls._config = Config.create(project_name="sc-gz-dashboard", defaults=DEFAULT_CONFIG)
except Exception as error:
cls._config = {}
logging.getLogger(__name__).exception("failed to read configuration", exc_info=error)
@classmethod
def get_config(cls):
"""
        Get the configuration
        :return: the configuration dictionary
"""
if cls._config is None:
cls.load_configurations()
return cls._config
class MonthUtils:
@classmethod
def calculate_month(cls, year_str, month_str):
"""
        Calculate comparison months: start of year, start of quarter, start of month
        :param year_str: year
        :param month_str: month
:return: (
(year, yearly_compare_month),
(year, seasonal_compare_month),
(year, monthly_compare_month)
        ), i.e. the (year, month) pairs for the start of the year, the start of the quarter and the start of the month
"""
illegal_result = (None, None), (None, None), (None, None)
try:
year = int(year_str)
except ValueError:
logging.getLogger(__name__).error("年份参数错误:{}".format(year_str))
return illegal_result
except TypeError:
logging.getLogger(__name__).error("年份参数错误:{}".format(year_str))
return illegal_result
try:
month = int(month_str)
except ValueError:
logging.getLogger(__name__).error("月份参数错误:{}".format(month_str))
return illegal_result
except TypeError:
logging.getLogger(__name__).error("月份参数错误:{}".format(month_str))
return illegal_result
        # The start-of-year comparison month is December of the previous year
yearly = (year - 1, 12)
        # The first month of each quarter
seasons = [1, 4, 7, 10, 13]
        # Determine the start of the quarter
if seasons[0] <= month < seasons[1]:
seasonal_start = seasons[0]
elif seasons[1] <= month < seasons[2]:
seasonal_start = seasons[1]
elif seasons[2] <= month < seasons[3]:
seasonal_start = seasons[2]
elif seasons[3] <= month < seasons[4]:
seasonal_start = seasons[3]
else:
logging.getLogger(__name__).error("月份参数错误:{}".format(month_str))
return illegal_result
        # Calculate the quarter-start comparison month
last_month = seasonal_start - 1
if last_month == 0:
seasonal = yearly
else:
seasonal = (year, last_month)
        # Calculate the month-start comparison month, i.e. the previous month
last_month = month - 1
if last_month == 0:
monthly = yearly
else:
monthly = (year, last_month)
return yearly, seasonal, monthly
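    # Illustrative sketch (not part of the original module): for May 2023 the
    # comparison months are December 2022 (start of year), March 2023 (start of
    # quarter) and April 2023 (start of month).
    #
    #   >>> MonthUtils.calculate_month("2023", "5")
    #   ((2022, 12), (2023, 3), (2023, 4))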
class DecimalEncoder(json.JSONEncoder):
"""
    JSON encoder for Decimal values
"""
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
super(DecimalEncoder, self).default(o)
__all__ = {
"ConfigUtils",
"MonthUtils",
"DecimalEncoder",
}
|
/sc_gz_dashboard-0.0.14-py3-none-any.whl/sc_gz_dashboard/utils.py
| 0.422266 | 0.160825 |
utils.py
|
pypi
|
import logging
import pandas as pd
from sc_analyzer_base import BranchUtils
from sc_utilities import Singleton, calculate_column_index
class ManifestUtils(metaclass=Singleton):
"""
花名册相关工具类
"""
# 名单DataFrame
_df: pd.DataFrame = None
# 花名册姓名与所在部门对应关系DataFrame
_name_branch_df: pd.DataFrame = None
_id_column_name: str = ""
_name_column_name: str = ""
_branch_column_name: str = ""
_sales_performance_attribution_column_name: str = ""
_config = None
@classmethod
def set_config(cls, config):
cls._config = config
@classmethod
def get_manifest_branch_column_name(cls):
return "花名册" + cls.get_branch_column_name()
@classmethod
def get_name_branch_data_frame(cls) -> pd.DataFrame:
"""
花名册姓名与所在部门对应关系
:return:
"""
return cls._name_branch_df
@classmethod
def get_id_column_name(cls) -> str:
"""
工号列名
:return: 工号列名
"""
return cls._id_column_name
@classmethod
def get_name_column_name(cls) -> str:
"""
姓名列名
:return: 姓名列名
"""
return cls._name_column_name
@classmethod
def get_branch_column_name(cls) -> str:
"""
所属机构列名
:return: 所属机构列名
"""
return cls._branch_column_name
@classmethod
def get_sales_performance_attribution_column_name(cls) -> str:
"""
业绩归属机构列名
:return: 业绩归属机构列名
"""
return cls._sales_performance_attribution_column_name
@classmethod
def load_manifest(cls):
"""
加载花名册
:return:
"""
config = cls._config
src_file_path = config.get("manifest.source_file_path")
# 业绩归属机构列名
cls._sales_performance_attribution_column_name = config.get("branch.sales_performance_attribution_column_name")
sheet_name = config.get("manifest.sheet_name")
header_row = config.get("manifest.sheet_config.header_row")
# 工号列索引
id_column_config = config.get("manifest.sheet_config.id_column")
try:
id_column = calculate_column_index(id_column_config)
except ValueError as e:
logging.getLogger(__name__).error("id_column configuration is invalid", exc_info=e)
raise e
# 姓名列索引
name_column_config = config.get("manifest.sheet_config.name_column")
try:
name_column = calculate_column_index(name_column_config)
except ValueError as e:
logging.getLogger(__name__).error("name_column configuration is invalid", exc_info=e)
raise e
# 所属机构列索引
branch_column_config = config.get("manifest.sheet_config.branch_column")
try:
branch_column = calculate_column_index(branch_column_config)
except ValueError as e:
logging.getLogger(__name__).error("branch_column configuration is invalid", exc_info=e)
raise e
logging.getLogger(__name__).info("加载花名册:{}".format(src_file_path))
df = pd.read_excel(src_file_path, sheet_name=sheet_name, header=header_row)
df = df.iloc[:, [id_column, name_column, branch_column]]
cls._id_column_name = df.columns[0]
cls._name_column_name = df.columns[1]
cls._branch_column_name = df.columns[2]
# 添加公共户相关行
for branch in set(BranchUtils.get_branch_name_mapping().values()):
df = pd.concat(
[df, pd.DataFrame(
{
cls._id_column_name: 0,
cls._name_column_name: branch,
cls._branch_column_name: branch,
},
index=[1]) # 必须添加此index参数,否则会报错
],
ignore_index=True, # 忽略上一步添加的index,使用系统生成的index
)
mapping = BranchUtils.get_branch_name_mapping()
# 替换机构名称
df = df.replace({cls._branch_column_name: mapping})
# 添加业绩归属列
df[cls._sales_performance_attribution_column_name] = df[cls._branch_column_name]
# 业绩归属机构配置
mapping = BranchUtils.get_sales_performance_attribution_mapping()
# 处理业绩归属机构
result = df.replace({cls._sales_performance_attribution_column_name: mapping})
# 花名册姓名与所在部门对应关系
cls._name_branch_df = result[[cls._name_column_name, cls._branch_column_name]].copy()
cls._name_branch_df.set_index(cls._name_column_name, inplace=True)
cls._name_branch_df.rename(columns={
cls._branch_column_name: ManifestUtils.get_manifest_branch_column_name(),
}, inplace=True)
cls._df = result
@classmethod
def fix_name_error(cls, data: pd.DataFrame, id_column_name: str, name_column_name: str) -> pd.DataFrame:
"""
解决姓名与工号不匹配的问题
:param data: 原始数据
:param id_column_name: 工号列名称
:param name_column_name: 姓名列名称
:return: 解决姓名与工号不匹配的问题后的数据
"""
for row_i, row in data.iterrows():
id_value = row[id_column_name]
if id_value == 0:
continue
name_in_manifest = cls._df.loc[cls._df[cls._id_column_name] == id_value][cls._name_column_name]
if name_in_manifest.empty:
continue
name = row[name_column_name]
if name != name_in_manifest.values[0]:
data.at[row_i, name_column_name] = name_in_manifest.values[0]
return data
@classmethod
def merge_with_manifest(cls, *,
manifest_data: pd.DataFrame,
data: pd.DataFrame,
id_column_name: str = None,
how: str = "left",
name_column_name: str = None) -> pd.DataFrame:
"""
与花名册合并
:param manifest_data: 花名册数据,左边表
:param data: 待合并DataFrame,右边表
:param how: 如何合并,即连接方式,默认使用left连接,即左连接,保证花名册的数据完整
:param id_column_name: 工号列名称
:param name_column_name: 姓名列名称
:return: 花名册与目标DataFrame合并后的DataFrame
"""
if id_column_name is None and name_column_name is None:
# ID与名称列全为空,则返回原结果
return data
if id_column_name is None and name_column_name is not None:
# 如果没有工号列,则按姓名列Join
return manifest_data.merge(data, how=how, left_on=[cls._df.columns[1]], right_on=[name_column_name])
if name_column_name is None and id_column_name is not None:
# 如果没有姓名列,则按工号列Join
return manifest_data.merge(data, how=how, left_on=[cls._df.columns[0]], right_on=[id_column_name])
# ID与姓名都不为空,则按ID和姓名两列Join
return manifest_data.merge(data, how=how, left_on=[cls._df.columns[0], cls._df.columns[1]],
right_on=[id_column_name, name_column_name])
@classmethod
def get_manifest_df(cls) -> pd.DataFrame:
return cls._df
@classmethod
def get_all_names_in_manifest(cls) -> list:
return list(cls._df[cls._name_column_name].values)
|
/sc_inclusive_analysis-0.0.4-py3-none-any.whl/sc_inclusive/manifest_utils.py
| 0.409457 | 0.171338 |
manifest_utils.py
|
pypi
|
import logging
import pandas as pd
from sc_analyzer_base import BranchUtils
from sc_utilities import Singleton, calculate_column_index
class ManifestUtils(metaclass=Singleton):
"""
花名册相关工具类
"""
# 名单DataFrame
_df: pd.DataFrame = None
# 花名册姓名与所在部门对应关系DataFrame
_name_branch_df: pd.DataFrame = None
_id_column_name: str = ""
_name_column_name: str = ""
_branch_column_name: str = ""
_sales_performance_attribution_column_name: str = ""
_config = None
@classmethod
def set_config(cls, config):
cls._config = config
@classmethod
def get_manifest_branch_column_name(cls):
return "花名册" + cls.get_branch_column_name()
@classmethod
def get_name_branch_data_frame(cls) -> pd.DataFrame:
"""
花名册姓名与所在部门对应关系
:return:
"""
return cls._name_branch_df
@classmethod
def get_id_column_name(cls) -> str:
"""
工号列名
:return: 工号列名
"""
return cls._id_column_name
@classmethod
def get_name_column_name(cls) -> str:
"""
姓名列名
:return: 姓名列名
"""
return cls._name_column_name
@classmethod
def get_branch_column_name(cls) -> str:
"""
所属机构列名
:return: 所属机构列名
"""
return cls._branch_column_name
@classmethod
def get_sales_performance_attribution_column_name(cls) -> str:
"""
业绩归属机构列名
:return: 业绩归属机构列名
"""
return cls._sales_performance_attribution_column_name
@classmethod
def load_manifest(cls):
"""
加载花名册
:return:
"""
config = cls._config
src_file_path = config.get("manifest.source_file_path")
# 业绩归属机构列名
cls._sales_performance_attribution_column_name = config.get("branch.sales_performance_attribution_column_name")
sheet_name = config.get("manifest.sheet_name")
header_row = config.get("manifest.sheet_config.header_row")
# 工号列索引
id_column_config = config.get("manifest.sheet_config.id_column")
try:
id_column = calculate_column_index(id_column_config)
except ValueError as e:
logging.getLogger(__name__).error("id_column configuration is invalid", exc_info=e)
raise e
# 姓名列索引
name_column_config = config.get("manifest.sheet_config.name_column")
try:
name_column = calculate_column_index(name_column_config)
except ValueError as e:
logging.getLogger(__name__).error("name_column configuration is invalid", exc_info=e)
raise e
# 所属机构列索引
branch_column_config = config.get("manifest.sheet_config.branch_column")
try:
branch_column = calculate_column_index(branch_column_config)
except ValueError as e:
logging.getLogger(__name__).error("branch_column configuration is invalid", exc_info=e)
raise e
logging.getLogger(__name__).info("加载花名册:{}".format(src_file_path))
df = pd.read_excel(src_file_path, sheet_name=sheet_name, header=header_row)
df = df.iloc[:, [id_column, name_column, branch_column]]
cls._id_column_name = df.columns[0]
cls._name_column_name = df.columns[1]
cls._branch_column_name = df.columns[2]
# 添加公共户相关行
for branch in set(BranchUtils.get_branch_name_mapping().values()):
df = pd.concat(
[df, pd.DataFrame(
{
cls._id_column_name: 0,
cls._name_column_name: branch,
cls._branch_column_name: branch,
},
index=[1]) # 必须添加此index参数,否则会报错
],
ignore_index=True, # 忽略上一步添加的index,使用系统生成的index
)
mapping = BranchUtils.get_branch_name_mapping()
# 替换机构名称
df = df.replace({cls._branch_column_name: mapping})
# 添加业绩归属列
df[cls._sales_performance_attribution_column_name] = df[cls._branch_column_name]
# 业绩归属机构配置
mapping = BranchUtils.get_sales_performance_attribution_mapping()
# 处理业绩归属机构
result = df.replace({cls._sales_performance_attribution_column_name: mapping})
# 花名册姓名与所在部门对应关系
cls._name_branch_df = result[[cls._name_column_name, cls._branch_column_name]].copy()
cls._name_branch_df.set_index(cls._name_column_name, inplace=True)
cls._name_branch_df.rename(columns={
cls._branch_column_name: ManifestUtils.get_manifest_branch_column_name(),
}, inplace=True)
cls._df = result
@classmethod
def fix_name_error(cls, data: pd.DataFrame, id_column_name: str, name_column_name: str) -> pd.DataFrame:
"""
解决姓名与工号不匹配的问题
:param data: 原始数据
:param id_column_name: 工号列名称
:param name_column_name: 姓名列名称
:return: 解决姓名与工号不匹配的问题后的数据
"""
for row_i, row in data.iterrows():
id_value = row[id_column_name]
if id_value == 0:
continue
name_in_manifest = cls._df.loc[cls._df[cls._id_column_name] == id_value][cls._name_column_name]
if name_in_manifest.empty:
continue
name = row[name_column_name]
if name != name_in_manifest.values[0]:
data.at[row_i, name_column_name] = name_in_manifest.values[0]
return data
@classmethod
def merge_with_manifest(cls, *,
manifest_data: pd.DataFrame,
data: pd.DataFrame,
id_column_name: str = None,
how: str = "left",
name_column_name: str = None) -> pd.DataFrame:
"""
与花名册合并
:param manifest_data: 花名册数据,左边表
:param data: 待合并DataFrame,右边表
:param how: 如何合并,即连接方式,默认使用left连接,即左连接,保证花名册的数据完整
:param id_column_name: 工号列名称
:param name_column_name: 姓名列名称
:return: 花名册与目标DataFrame合并后的DataFrame
"""
if id_column_name is None and name_column_name is None:
# ID与名称列全为空,则返回原结果
return data
if id_column_name is None and name_column_name is not None:
# 如果没有工号列,则按姓名列Join
return manifest_data.merge(data, how=how, left_on=[cls._df.columns[1]], right_on=[name_column_name])
if name_column_name is None and id_column_name is not None:
# 如果没有姓名列,则按工号列Join
return manifest_data.merge(data, how=how, left_on=[cls._df.columns[0]], right_on=[id_column_name])
# ID与姓名都不为空,则按ID和姓名两列Join
return manifest_data.merge(data, how=how, left_on=[cls._df.columns[0], cls._df.columns[1]],
right_on=[id_column_name, name_column_name])
@classmethod
def get_manifest_df(cls) -> pd.DataFrame:
return cls._df
@classmethod
def get_all_names_in_manifest(cls) -> list:
return list(cls._df[cls._name_column_name].values)
|
/sc_inclusive_balance_detail_analysis-0.0.4-py3-none-any.whl/sc_inclusive_balance_detail_analysis/manifest_utils.py
| 0.409457 | 0.171338 |
manifest_utils.py
|
pypi
|
# Supercollider Jupyter Kernel
This kernel allows running [SuperCollider](https://supercollider.github.io/) Code in a [Jupyter](https://jupyter.org/) environment.

## Installation
Please make sure one has installed [SuperCollider](https://supercollider.github.io/) and
[Python 3 with pip](https://realpython.com/installing-python).
* To install the kernel for Jupyter execute
```shell
pip3 install --upgrade sc-kernel
```
This will also install [Jupyter Lab](https://jupyter.org/) if it is not already installed on the system.
* Start a new Jupyter Lab instance by executing `jupyter lab` in a console.
* Click on the SuperCollider icon
If one has not installed SuperCollider in the default location, one has to set an environment variable
called `SCLANG_PATH` which points to the sclang executable.
To uninstall the kernel execute
```shell
jupyter kernelspec uninstall sc_kernel
```
### As a Docker container
It is also possible to run sc-kernel in a Docker container, although sound output is not possible in this case.
Assuming you have cloned the repository and opened a terminal in its directory.
```shell
# build container - takes some time b/c we build supercollider
docker build -t sc_kernel .
# run container
# -v mounts the current directory to the container
# -p passes the container port to our host
docker run -v ${PWD}:/home/sc_kernel -p 8888:8888 sc_kernel
```
## Usage
Contrary to the ScIDE, each document runs in its own interpreter and not in a shared one.
This is the default behavior of Jupyter, but this may be changed at a later point.
Currently it is only possible to use the default config - if you encounter missing classes,
it is probably because they are not available in the default config.
### Stop sound
Currently the `Cmd + .` shortcut is not bound. Instead, create a new cell with a single dot
```supercollider
.
```
and execute this cell. This will transform the command to `CommandPeriod.run;` which is what is actually called on the `Cmd + .` press in the IDE.
### Recording
`sc_kernel` provides an easy way to record audio to the local directory and store it embedded in the notebook
so one can transfer the notebook into a website which has the audio files included.
The audio is stored in FLAC with 16 bit resolution.
The provided function `record` takes 2 arguments:
* Duration in seconds
* Filename which will be used for the recording, using the path of the notebook as base path.
Assuming one has started the server, simply execute
```supercollider
Ndef(\sine, {
var sig = SinOsc.ar(LFDNoise0.kr(1.0!2).exprange(100, 400));
sig = sig * \amp.kr(0.2);
sig;
}).play;
record.(4.0);
```

### Plotting
`sc_kernel` also provides a way to embed images of SuperCollider windows into the Jupyter document.
First create a window that you want to embed into the document
```supercollider
w = {SinOsc.ar(2.0)}.plot(1.0);
```
After the server has finished plotting, we can save an image of the window
to a file and embed the image into the document via a SuperCollider helper method that is made available.
```supercollider
plot.(w);
```

The image will be saved relative to the directory where `jupyter lab` was executed.
The optional second argument can be the filename.
> Note that `{}.plot` returns a `Plotter` rather than a `Window`, but `sc_kernel`
> accesses the window of a `Plotter` automatically.
>
> For plotting e.g. the server meter you need to pass the proper window, so
>
> ```supercollider
> a = s.meter;
> // a is a ServerMeter
>
> // new cell
> plot.(a.window, "meter.png");
> ```
### Autocomplete
Simply push `Tab` to see available autocompletions.
This is currently limited to scanning for available classes.
### Documentation
To display the documentation of a Class, simply prepend a `?` to it and execute it, e.g.
```supercollider
?SinOsc
```
You can also hit `shift <tab>` if the cursor is placed after a class name to trigger the inline documentation.

### Real Time Collaboration
Jupyter Lab allows for real time collaboration in which multiple users can write in the same document from different computers by visiting the Jupyter server via their browser.
Each user can write and execute sclang statements on your local sclang interpreter, and the cursor of each user is shown to everyone.
This allows for interactive, shared sessions which can make for interesting live coding sessions.
> Be aware that this can be a security threat, as it allows other people on the network to execute arbitrary sclang commands on your computer
To start such a session you can spin Jupyter Lab via
```shell
jupyter lab --ip 0.0.0.0 --collaborative --NotebookApp.token='sclang'
```
where the `NotebookApp.token` is the necessary password to login - set it to `''` if no password is wanted.
Check out the [documentation on Jupyter Lab](https://jupyterlab.readthedocs.io/en/stable/user/rtc.html) about *Real Time Collaboration*.
## Development
Any PR is welcome! Please state the changes in an Issue.
To contribute, please
* Fork the repository and clone it to a local directory
* Create a virtual environment and install the dev dependencies
in it with
```shell
pip3 install -e ".[dev]"
```
* If one wants to add the kernel to an existing Jupyter installation one can execute
```shell
jupyter kernelspec install sc_kernel
```
and run `jupyter lab` from within the cloned directory as
we need to have access to `sc_kernel`.
* Run `./run_tests.sh` and make a PR :)
Use `black sc_kernel test` to format the source code.
## Maintainers
* [Dennis Scheiba](https://dennis-scheiba.com)
|
/sc_kernel-0.4.0.tar.gz/sc_kernel-0.4.0/README.md
| 0.80271 | 0.962179 |
README.md
|
pypi
|
from ._leflib import parse as _parse
def parse(path):
''' Parses LEF file.
Given a path to a LEF file, this function parses the file and returns a
dictionary representing the contents of the LEF file. If there's an error
while reading or parsing the file, this function returns None instead.
Note that this function does not return all information contained in the
LEF. The subset of information returned includes:
* LEF version
* Bus bit characters
* Divider characters
* Units
* Manufacturing grid
* Use min spacing
* Clearance measure
* Fixed mask
* Layer information
* Type
* Width
* Direction
* Offset
* Pitch
* Max stack via
* Viarules
* Sites
* Macro information
* Size
* Pins
* Obstructions
The dictionary returned by this function is designed to mimic the structure
of the LEF file as closely as possible, and this function does minimal
legality checking. The order all top-level objects appear in the dictionary
is guaranteed to match the LEF file. It looks like follows:
.. code-block:: python
{
'version': 5.8,
'busbitchars': '<>',
'dividerchar': ':',
'units': {
'capacitance': 10.0,
'current': 10000.0,
'database': 20000.0,
'frequency': 10.0,
'power': 10000.0,
'resistance': 10000.0,
'time': 100.0,
'voltage': 1000.0
},
'manufacturinggrid': 0.05,
'useminspacing': {'OBS': 'OFF'},
'clearancemeasure': 'MAXXY',
'fixedmask': True,
'layers': {
'M1': {
'type': 'ROUTING',
'direction': 'HORIZONTAL',
'offset': (0.1, 0.2),
'pitch': 1.8,
'width': 1.0
},
'V1': {
'type': 'CUT',
},
...
},
'maxviastack': {'range': {'bottom': 'm1', 'top': 'm7'}, 'value': 4},
'viarules': {
'<name>': {
'generate': True,
'layers': [
{'enclosure': {'overhang1': 1.4,
'overhang2': 1.5},
'name': 'M1',
'width': {'max': 19.0, 'min': 0.1}},
{'enclosure': {'overhang1': 1.4,
'overhang2': 1.5},
'name': 'M2',
'width': {'max': 1.9, 'min': 0.2}},
{'name': 'M3',
'rect': (-0.3, -0.3, -0.3, 0.3),
'resistance': 0.5,
'spacing': {'x': 5.6, 'y': 7.0}}
]
},
'<name>': {
{'layers': [
{'direction': 'VERTICAL',
'name': 'M1',
'width': {'max': 9.6, 'min': 9.0}},
{'direction': 'HORZIONTAL',
'name': 'M1',
'width': {'max': 3.0, 'min': 3.0}}
]}
},
...
}
'macros': {
'<name>': {
'size': {
'width': 5,
'height': 8
},
'pins': {
'<name>': {
'ports': [{
'class': 'CORE',
'layer_geometries': [{
'layer': 'M1',
'exceptpgnet': True,
'spacing': 0.01,
'designrulewidth': 0.05,
'width': 1.5,
'shapes': [
{
'rect': (0, 0, 5, 5),
'mask': 1,
'iterate': {
'num_x': 2,
'num_y': 3,
'space_x': 1,
'space_y': 4
}
},
{
'path': [(0, 0), (5, 0), (0, 5)],
'iterate': ...
},
{
'polygon': [(0, 0), (5, 0), (0, 5)],
'iterate': ...
}
],
'via': {
'pt': (2, 3),
'name': 'via1',
'iterate': ...
}
}]
}]
},
...
}
},
...
}
}
If some entry is not specified in the LEF, the corresponding key will not be
present in the dictionary.
Args:
path (str): Path to LEF file to parse.
'''
return _parse(path)
|
/sc_leflib-0.1.0-cp311-cp311-macosx_10_15_x86_64.whl/sc_leflib/__init__.py
| 0.769817 | 0.497986 |
__init__.py
|
pypi
|
from PyQt5 import QtCore, QtGui, QtWidgets
class SideGrip(QtWidgets.QWidget):
def __init__(self, parent, edge):
QtWidgets.QWidget.__init__(self, parent)
if edge == QtCore.Qt.LeftEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.resizeFunc = self.resizeLeft
elif edge == QtCore.Qt.TopEdge:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.resizeFunc = self.resizeTop
elif edge == QtCore.Qt.RightEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.resizeFunc = self.resizeRight
else:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.resizeFunc = self.resizeBottom
self.mousePos = None
def resizeLeft(self, delta):
window = self.window()
width = max(window.minimumWidth(), window.width() - delta.x())
geo = window.geometry()
geo.setLeft(geo.right() - width)
window.setGeometry(geo)
def resizeTop(self, delta):
window = self.window()
height = max(window.minimumHeight(), window.height() - delta.y())
geo = window.geometry()
geo.setTop(geo.bottom() - height)
window.setGeometry(geo)
def resizeRight(self, delta):
window = self.window()
width = max(window.minimumWidth(), window.width() + delta.x())
window.resize(width, window.height())
def resizeBottom(self, delta):
window = self.window()
height = max(window.minimumHeight(), window.height() + delta.y())
window.resize(window.width(), height)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.mousePos = event.pos()
def mouseMoveEvent(self, event):
if self.mousePos is not None:
delta = event.pos() - self.mousePos
self.resizeFunc(delta)
def mouseReleaseEvent(self, event):
self.mousePos = None
class CSizeGrip(QtWidgets.QSizeGrip):
def __init__(self, parent):
QtWidgets.QSizeGrip.__init__(self, parent)
self.setStyleSheet("background-color:rgba(0, 0, 0, 0);")
class ResizableMainWindow(QtWidgets.QMainWindow):
_gripSize = 8
resizable = True
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.sideGrips = [
SideGrip(self, QtCore.Qt.LeftEdge),
SideGrip(self, QtCore.Qt.TopEdge),
SideGrip(self, QtCore.Qt.RightEdge),
SideGrip(self, QtCore.Qt.BottomEdge),
]
self.cornerGrips = [CSizeGrip(self) for i in range(4)]
@property
def gripSize(self):
return self._gripSize
def setGripSize(self, size):
if size == self._gripSize:
return
elif size == 0:
self._gripSize = 0
else:
self._gripSize = max(2, size)
self.updateGrips()
def updateGrips(self):
self.setContentsMargins(*[self.gripSize] * 4)
outRect = self.rect()
inRect = outRect.adjusted(self.gripSize, self.gripSize,
-self.gripSize, -self.gripSize)
self.cornerGrips[0].setGeometry(
QtCore.QRect(outRect.topLeft(), inRect.topLeft()))
self.cornerGrips[1].setGeometry(
QtCore.QRect(outRect.topRight(), inRect.topRight()).normalized())
self.cornerGrips[2].setGeometry(
QtCore.QRect(inRect.bottomRight(), outRect.bottomRight()))
self.cornerGrips[3].setGeometry(
QtCore.QRect(outRect.bottomLeft(), inRect.bottomLeft()).normalized())
self.sideGrips[0].setGeometry(
0, inRect.top(), self.gripSize, inRect.height())
self.sideGrips[1].setGeometry(
inRect.left(), 0, inRect.width(), self.gripSize)
self.sideGrips[2].setGeometry(
inRect.left() + inRect.width(),
inRect.top(), self.gripSize, inRect.height())
self.sideGrips[3].setGeometry(
self.gripSize, inRect.top() + inRect.height(),
inRect.width(), self.gripSize)
def resizeEvent(self, event):
if self.resizable == True:
QtWidgets.QMainWindow.resizeEvent(self, event)
self.updateGrips()
elif self.gripSize != 0:
self.setContentsMargins(*[8] * 4)
else:
self.setContentsMargins(*[0] * 4)
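    # Illustrative sketch (not part of the original module): the grips are
    # typically combined with a frameless window, roughly like this.
    #
    #   import sys
    #   app = QtWidgets.QApplication(sys.argv)
    #   win = ResizableMainWindow()
    #   win.setWindowFlags(QtCore.Qt.FramelessWindowHint)
    #   win.setGripSize(8)
    #   win.show()
    #   sys.exit(app.exec_())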
|
/sc_mainwindow-1.2.tar.gz/sc_mainwindow-1.2/sc_mainwindow/components/resizable_mwindow.py
| 0.602529 | 0.189765 |
resizable_mwindow.py
|
pypi
|
import smtplib, os
from collections import namedtuple
from email.message import EmailMessage
from typing import List, Union
attachable = namedtuple('attachable', ['fn', 'maintype', 'subtype'])
def email_text(content: str, recipient: str = None, subject: str = 'Build Status Update', sender: str = None):
"""
Sends a basic email from Bob the Builder to the specified recipient.
:param content: The text to include in the email body
:param recipient: The email of the person you're sending this to
:param subject: The subject line of the email
:param sender: The address to send from. If not present, we use the EMAIL_SENDER environment var
"""
message = EmailMessage()
message.set_content(content)
if not sender:
try:
sender = os.environ['EMAIL_SENDER']
        except KeyError:
raise Exception('No email sender provided')
if not recipient:
try:
recipient = os.environ['EMAIL_RECIPIENT']
        except KeyError:
raise Exception('No email recipient provided')
message['Subject'] = subject
message['From'] = sender
message['To'] = recipient
s = smtplib.SMTP_SSL('smtp.mail.yahoo.com', port=465)
s.login(sender, os.environ['EMAIL_APP_KEY']) # app password
s.send_message(message)
s.quit()
return message
def email_attachment(content: str, attachment: Union[List[attachable], attachable], recipient: str = None, subject: str = 'Build Update! Files attached.', sender: str = None):
"""
Sends an email with some number of attachments from Bob the Builder to the specified recipient
:param content: The text to include in the email body
:param attachment: The file or list of files to attach. These are tuples with 'fn', 'maintype', and 'subtype' fields
:param recipient: The email of the person you're sending this to
:param subject: The subject of the email
:param sender: The address to send from. If not present, we use the EMAIL_SENDER environment var
"""
message = EmailMessage()
message.set_content(content)
if not sender:
sender = os.environ['EMAIL_SENDER']
if not recipient:
recipient = os.environ['EMAIL_RECIPIENT']
message['Subject'] = subject
message['From'] = sender
message['To'] = recipient
if type(attachment) != list:
attachment = [attachment]
for a in attachment:
        # Use a context manager so the file handle is always closed, and no
        # NameError can occur if open() itself fails.
        with open(a.fn, 'rb') as att:
            att_content = att.read()
            message.add_attachment(att_content, maintype=a.maintype, subtype=a.subtype, filename=a.fn.split('/')[-1])
s = smtplib.SMTP_SSL('smtp.mail.yahoo.com', port=465)
s.login('[email protected]', os.environ['EMAIL_APP_KEY']) # app password
s.send_message(message)
s.quit()
return message
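# Illustrative sketch (not part of the original module): attaching a PNG and a
# PDF in one call. Assumes EMAIL_SENDER, EMAIL_RECIPIENT and EMAIL_APP_KEY are
# set in the environment; the file paths are made up.
#
#   files = [
#       attachable('reports/build.png', 'image', 'png'),
#       attachable('reports/build.pdf', 'application', 'pdf'),
#   ]
#   email_attachment('Build finished, see attachments.', files)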
def email_text_attachment(content: str, attachment: str, recipient: str = None, subject: str = 'Build Update! Text files attached.', sender: str = None):
"""
Sends an email with a text document attachment from Bob the Builder to the specified recipient
:param content: The text to include in the email body
:param attachment: The text file to attach to the email
:param recipient: The email of the person you're sending this to
:param subject: The subject of the email
:param sender: The address to send from. If not present, we use the EMAIL_SENDER environment var
"""
return email_attachment(content, [attachable(attachment, 'text', 'plain')], recipient, subject, sender)
def email_html_attachment(content: str, attachment: str, recipient: str = None, subject: str = 'Build Update! Text files attached.', sender: str = None):
"""
Sends an email with a HTML attachment from Bob the Builder to the specified recipient
:param content: The text to include in the email body
:param attachment: The html file to attach to the email
:param recipient: The email of the person you're sending this to
:param subject: The subject of the email
:param sender: The address to send from. If not present, we use the EMAIL_SENDER environment var
"""
return email_attachment(content, [attachable(attachment, 'text', 'html')], recipient, subject, sender)
def email_png_attachment(content: str, attachment: str, recipient: str = None, subject: str = 'Build Update! Text files attached.', sender: str = None):
"""
Sends an email with a PNG image attachment from Bob the Builder to the specified recipient
:param content: The text to include in the email body
:param attachment: The png file to attach to the email
:param recipient: The email of the person you're sending this to
:param subject: The subject of the email
:param sender: The address to send from. If not present, we use the EMAIL_SENDER environment var
"""
return email_attachment(content, [attachable(attachment, 'image', 'png')], recipient, subject, sender)
def email_pdf_attachment(content: str, attachment: str, recipient: str = None, subject: str = 'Build Update! Files attached.', sender = None):
"""
Sends an email with a PDF attachment from Bob the Builder to the specified recipient
:param content: The text to include in the email body
:param attachment: The pdf file to attach to the email
:param recipient: The email of the person you're sending this to
:param subject: The subject of the email
:param sender: The address to send from. If not present, we use the EMAIL_SENDER environment var
"""
return email_attachment(content, [attachable(attachment, 'application', 'pdf')], recipient, subject, sender)
|
/sc_notify-1.2.0-py3-none-any.whl/notify/mail.py
| 0.713432 | 0.204362 |
mail.py
|
pypi
|
from io import StringIO
_DEFAULT_EXAMPLES = {
"string": "string",
"integer": 1,
"number": 1.0,
"boolean": True,
"array": [],
}
_DEFAULT_STRING_EXAMPLES = {
"date": "2020-01-01",
"date-time": "2020-01-01T01:01:01Z",
"password": "********",
"byte": "QG1pY2hhZWxncmFoYW1ldmFucw==",
"ipv4": "127.0.0.1",
"ipv6": "::1",
}
def example_from_schema(schema):
"""
Generates an example request/response body from the provided schema.
>>> schema = {
... "type": "object",
... "required": ["id", "name"],
... "properties": {
... "id": {
... "type": "integer",
... "format": "int64"
... },
... "name": {
... "type": "string",
... "example": "John Smith"
... },
... "tag": {
... "type": "string"
... }
... }
... }
>>> example = example_from_schema(schema)
>>> assert example == {
... "id": 1,
... "name": "John Smith",
... "tag": "string"
... }
"""
# If an example was provided then we use that
if "example" in schema:
return schema["example"]
elif "oneOf" in schema:
return example_from_schema(schema["oneOf"][0])
elif "anyOf" in schema:
return example_from_schema(schema["anyOf"][0])
elif "allOf" in schema:
# Combine schema examples
example = {}
for sub_schema in schema["allOf"]:
example.update(example_from_schema(sub_schema))
return example
elif "enum" in schema:
return schema["enum"][0]
elif "type" not in schema:
# Any type
return _DEFAULT_EXAMPLES["integer"]
elif schema["type"] == "object" or "properties" in schema:
example = {}
for prop, prop_schema in schema.get("properties", {}).items():
example[prop] = example_from_schema(prop_schema)
return example
elif schema["type"] == "array":
items = schema["items"]
min_length = schema.get("minItems", 0)
max_length = schema.get("maxItems", max(min_length, 2))
assert min_length <= max_length
        # Try to generate at least 2 example array items
gen_length = min(2, max_length) if min_length <= 2 else min_length
example_items = []
if items == {}:
# Any-type arrays
example_items.extend(_DEFAULT_EXAMPLES.values())
elif isinstance(items, dict) and "oneOf" in items:
# Mixed-type arrays
example_items.append(_DEFAULT_EXAMPLES[sorted(items["oneOf"])[0]])
else:
example_items.append(example_from_schema(items))
# Generate array containing example_items and satisfying min_length and max_length
return [example_items[i % len(example_items)] for i in range(gen_length)]
elif schema["type"] == "string":
example_string = _DEFAULT_STRING_EXAMPLES.get(
schema.get("format", None), _DEFAULT_EXAMPLES["string"]
)
min_length = schema.get("minLength", 0)
max_length = schema.get("maxLength", max(min_length, len(example_string)))
gen_length = (
min(len(example_string), max_length)
if min_length <= len(example_string)
else min_length
)
assert 0 <= min_length <= max_length
if min_length <= len(example_string) <= max_length:
return example_string
else:
example_builder = StringIO()
for i in range(gen_length):
example_builder.write(example_string[i % len(example_string)])
example_builder.seek(0)
return example_builder.read()
elif schema["type"] in ("integer", "number"):
example = _DEFAULT_EXAMPLES[schema["type"]]
if "minimum" in schema and "maximum" in schema:
# Take average
example = schema["minimum"] + (schema["maximum"] - schema["minimum"]) / 2
elif "minimum" in schema and example <= schema["minimum"]:
example = schema["minimum"] + 1
elif "maximum" in schema and example >= schema["maximum"]:
example = schema["maximum"] - 1
return float(example) if schema["type"] == "number" else int(example)
else:
return _DEFAULT_EXAMPLES[schema["type"]]
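# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how example_from_schema resolves string formats and numeric bounds defined
# above; the schema below is illustrative only.
if __name__ == "__main__":
    demo_schema = {
        "type": "object",
        "properties": {
            "created": {"type": "string", "format": "date-time"},
            "count": {"type": "integer", "minimum": 0, "maximum": 10},
        },
    }
    # Expected: {'created': '2020-01-01T01:01:01Z', 'count': 5}
    print(example_from_schema(demo_schema))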
|
/sc-oa-0.7.0.12.tar.gz/sc-oa-0.7.0.12/sphinxcontrib/openapi/schema_utils.py
| 0.711932 | 0.511534 |
schema_utils.py
|
pypi
|
from docutils.parsers.rst import directives
from . import abc
from .. import openapi20, openapi30, utils
class HttpdomainOldRenderer(abc.RestructuredTextRenderer):
option_spec = {
# A list of endpoints to be rendered. Endpoints must be whitespace
# delimited.
"paths": lambda s: s.split(),
# Regular expression patterns to includes/excludes endpoints to/from
# rendering. Similar to paths, the patterns must be whitespace
# delimited.
"include": lambda s: s.split(),
"exclude": lambda s: s.split(),
# Endpoints to be included based on HTTP method names.
"methods": lambda s: s.split(),
# Render the request body structure when passed.
"request": directives.flag,
# Render request/response examples when passed.
"examples": directives.flag, # render examples when passed
# Render request/response examples in one block or inline with the response codes.
"contextpath": directives.flag, # use the server path as prefix in service URL
"group_examples": directives.flag, # render examples in one block
# Include links to entity description
"entities": directives.flag,
# Group endpoints by tags when passed. By default, no grouping is
# applied and endpoints are rendered in the order they met in spec.
"group": directives.flag,
# Markup format to render OpenAPI descriptions.
"format": str,
}
def __init__(self, state, options):
self._state = state
self._options = options
def render_restructuredtext_markup(self, spec):
# OpenAPI spec may contain JSON references, common properties, etc.
# Trying to render the spec "As Is" will require to put multiple if-s
# around the code. In order to simplify rendering flow, let's make it
# have only one (expected) schema, i.e. normalize it.
utils.normalize_spec(spec, **self._options)
# We support both OpenAPI 2.0 (f.k.a. Swagger) and OpenAPI 3.0.0, so
# determine which version we are parsing here.
spec_version = spec.get("openapi", spec.get("swagger", "2.0"))
if spec_version.startswith("2."):
openapihttpdomain = openapi20.openapihttpdomain
elif spec_version.startswith("3."):
openapihttpdomain = openapi30.openapihttpdomain
else:
raise ValueError("Unsupported OpenAPI version (%s)" % spec_version)
yield from openapihttpdomain(spec, **self._options)
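# --- Hypothetical sketch (not part of the original module) ---
# Demonstrates the whitespace-delimited option parsers declared in option_spec; the
# endpoint paths are illustrative only.
if __name__ == "__main__":
    parse_paths = HttpdomainOldRenderer.option_spec["paths"]
    print(parse_paths("/pets /pets/{petId}"))  # -> ['/pets', '/pets/{petId}']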
|
/sc-oa-0.7.0.12.tar.gz/sc-oa-0.7.0.12/sphinxcontrib/openapi/renderers/_httpdomain_old.py
| 0.749087 | 0.226302 |
_httpdomain_old.py
|
pypi
|
try:
import keras
import tensorflow as tf
from keras.layers import (
Activation,
BatchNormalization,
Dense,
Dropout,
Input,
Lambda,
)
from keras.models import Model
from keras.utils import np_utils
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
confusion_matrix,
plot_confusion_matrix,
)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
except ImportError:
pass
import datetime
import os
import pickle
import anndata
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
import yaml
def preprocess_one_hot(adata, obs_key):
X = adata.X
if type(X) != np.ndarray:
X_array = X.toarray()
else:
X_array = X
y = adata.obs[obs_key]
y_array = y.array
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(y)
encoded_y = encoder.transform(y)
decoded_y = encoder.inverse_transform(encoded_y)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_y)
X_train, X_test, y_train, y_test = train_test_split(
X_array, dummy_y, test_size=0.2, random_state=42
)
return X_train, X_test, y_train, y_test, encoder
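# Hypothetical usage sketch (not part of the original module); assumes an AnnData
# object `adata` with a categorical obs column named "celltype" (names illustrative):
#   X_train, X_test, y_train, y_test, encoder = preprocess_one_hot(adata, "celltype")
#   print(X_train.shape, y_train.shape, list(encoder.classes_))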
def build_predictor(input_size, nb_classes):
inputs = Input(shape=(input_size,))
# a layer instance is callable on a tensor, and returns a tensor
denses = Dense(128, activation="relu")(inputs)
denses = Dense(64, activation="relu")(denses)
predictions = Dense(nb_classes, activation="softmax")(denses)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["acc"]
)
model.summary()
return model
def batch_generator_training(X, y, batch_size):
samples_per_epoch = X.shape[0]
number_of_batches = samples_per_epoch / batch_size
counter = 0
shuffle_index = np.arange(np.shape(y)[0])
np.random.shuffle(shuffle_index)
X = X[shuffle_index, :]
y = y[shuffle_index]
while 1:
index_batch = shuffle_index[
batch_size * counter : batch_size * (counter + 1)
]
X_batch = X[index_batch, :].todense()
X_batch = np.array(X_batch).reshape(
X_batch.shape[0], X_batch.shape[1], 1
)
y_batch = y[index_batch, :]
counter += 1
yield (np.array(X_batch), np.array(y_batch))
if counter >= number_of_batches:
np.random.shuffle(shuffle_index)
counter = 0
class MLP_Predictor:
def __init__(
self,
latent_space,
predict_key,
predictor_hidden_sizes,
predictor_epochs,
predictor_batch_size,
unlabeled_category,
predictor_activation="softmax",
): # random_state gives the split for train_test split during the MLP predictor training
self.predict_key = predict_key
self.adata = latent_space
self.predictor_hidden_sizes = predictor_hidden_sizes
self.predictor_epochs = predictor_epochs
self.predictor_batch_size = predictor_batch_size
self.predictor_activation = predictor_activation
self.unlabeled_category = unlabeled_category
## Removing the unlabeled cells from the prediction training set
to_keep = self.adata.obs["train_split"].copy()
UNK_cells = self.adata.obs[self.predict_key] == self.unlabeled_category
print("nb of nan")
print(UNK_cells.sum())
to_keep[UNK_cells] = "val"
self.adata.obs["train_split"] = to_keep
self.adata_train = self.adata[
self.adata.obs["train_split"] == "train", :
].copy()
self.adata_test = self.adata[
self.adata.obs["train_split"] == "test", :
].copy()
self.adata_val = self.adata[
self.adata.obs["train_split"] == "val", :
].copy()
self.train_index = self.adata_train.obs.index
self.test_index = self.adata_test.obs.index
self.val_index = self.adata_val.obs.index
self.y = self.adata.obs[self.predict_key]
self.categories = self.y.unique()
self.train_categories = []
self.model = keras.Model()
self.X_array = np.array([])
self.X_train = np.array([])
self.X_test = np.array([])
self.y_train = pd.Series()
self.y_test = pd.Series()
self.y_train_onehot = np.array([])
self.y_test_onehot = np.array([])
self.y_pred_raw = np.array(
[]
) # The output of the model ie result of the softmax/sigmoid layer
self.y_pred = pd.Series()
self.prediction_table = pd.DataFrame()
self.encoder = LabelEncoder()
self.train_adata = anndata.AnnData()
self.test_adata = anndata.AnnData()
self.val_adata = anndata.AnnData()
self.train_history = keras.callbacks.History()
self.history = dict()
self.is_trained = False
self.training_time = datetime.datetime.today()
def preprocess_one_hot(self):
self.X_array = self.adata.X
if type(self.X_array) != np.ndarray:
self.X_array = self.X_array.toarray()
self.X_train = self.adata_train.X
if type(self.X_train) != np.ndarray:
self.X_train = self.X_train.toarray()
self.X_test = self.adata_test.X
if type(self.X_test) != np.ndarray:
self.X_test = self.X_test.toarray()
self.X_val = self.adata_val.X
if type(self.X_val) != np.ndarray:
self.X_val = self.X_val.toarray()
self.y_train = self.y[self.train_index]
self.y_test = self.y[self.test_index]
self.y_val = self.y[self.val_index]
self.train_categories = self.y_train.unique()
# encode class values as integers
self.encoder = LabelEncoder()
self.encoder.fit(self.y)
self.y_train_onehot = self.encoder.transform(self.y_train)
self.y_val_onehot = self.encoder.transform(self.y_val)
# self.y_test_onehot = self.encoder.transform(self.y_test)
self.y_train_onehot = np_utils.to_categorical(self.y_train_onehot)
self.y_val_onehot = np_utils.to_categorical(self.y_val_onehot)
# self.y_test_onehot = np_utils.to_categorical(self.y_test_onehot)
self.train_adata = self.adata[self.train_index, :]
self.test_adata = self.adata[self.test_index, :]
self.val_adata = self.adata[self.val_index, :]
return (
self.X_train,
self.X_test,
self.X_val,
self.y_train,
self.y_val,
self.y_test,
self.encoder,
)
def build_predictor(self):
print("building predictor on :")
print(self.adata_train)
print(self.X_train)
print(self.categories)
print(self.train_categories)
print(self.adata_train.obs[self.predict_key].value_counts())
input_size = self.X_train.shape[1]
nb_classes = len(self.train_categories)
inputs = Input(shape=(input_size,))
denses = Dense(self.predictor_hidden_sizes, activation="relu")(inputs)
predictions = Dense(nb_classes, activation=self.predictor_activation)(
denses
)
model = Model(inputs=inputs, outputs=predictions)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["acc"]
)
model.summary()
self.model = model
return model
def train_model(self):
self.training_time = datetime.datetime.today()
batch_size = self.predictor_batch_size
epochs = self.predictor_epochs
es = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
mode="min",
patience=10,
restore_best_weights=True,
verbose=1,
)
callbacks = [es]
self.train_history = self.model.fit(
x=self.X_train,
y=self.y_train_onehot,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_split=0.2,
) # Predictor is trained only on the annotated data in a fully supervised way
self.is_trained = True
def predict_on_test(self):
self.y_pred_raw = self.model.predict(self.X_array)
self.y_pred = pd.Series(
self.encoder.inverse_transform(np.argmax(self.y_pred_raw, axis=1)),
index=self.y.index,
)
self.prediction_table = pd.DataFrame(
{
"y_true": self.y,
"y_pred": self.y_pred,
"is_test": self.y.index.isin(self.test_index),
},
index=self.y.index,
)
self.adata.obs["y_pred"] = self.y_pred
self.adata.obsm["y_pred_raw"] = self.y_pred_raw
self.adata.uns["prediction_decoder"] = self.encoder.classes_
def save_model(self, save_path):
if not os.path.isdir(save_path):
os.mkdir(save_path)
self.model.save(save_path)
def save_results(self):
self.prediction_table.to_csv(self.predict_save_path)
if not os.path.isdir(self.model_predict_save_path):
os.mkdir(self.model_predict_save_path)
self.model.save(self.model_predict_save_path + "/model")
with open(self.model_predict_save_path + "/history", "wb") as file_pi:
pickle.dump(self.train_history.history, file_pi)
def load_prediction_results(self):
self.prediction_table = pd.read_csv(
self.predict_save_path, index_col=0
)
self.history = pickle.load(
open(self.model_predict_save_path + "/history", "rb")
)
self.adata.obs[
f"{self.predict_key}_predictions"
] = self.prediction_table["y_pred"]
misclassified = self.adata.obs[self.predict_key].astype("str")
misclassified[
self.adata.obs[f"{self.predict_key}_predictions"]
!= self.adata.obs[self.predict_key]
] = "misclassified"
self.adata.obs[f"misclassified"] = misclassified
misclassified_and_test = self.adata.obs["misclassified"].astype("str")
misclassified_and_test[
(self.adata.obs["misclassified"] == "misclassified")
& (self.prediction_table["is_test"])
] = "misclassified_and_test"
self.adata.obs[f"misclassified_and_test"] = misclassified_and_test
self.adata.obs[f"is_test"] = self.prediction_table["is_test"].replace(
{True: "test", False: "train"}
)
def plot_prediction_results(self, test_only, normalize="true", ax=None):
if not test_only:
y_true = self.prediction_table["y_true"]
y_pred = self.prediction_table["y_pred"]
else:
y_true = self.prediction_table.loc[
self.prediction_table["is_test"], "y_true"
]
y_pred = self.prediction_table.loc[
self.prediction_table["is_test"], "y_pred"
]
labels = sorted(y_true.unique())
self.confusion_matrix = confusion_matrix(
y_true, y_pred, labels=labels, normalize=normalize
)
confusion_to_plot = pd.DataFrame(
self.confusion_matrix, index=labels, columns=labels
)
self.balanced_accuracy_score = balanced_accuracy_score(y_true, y_pred)
self.accuracy_score = accuracy_score(y_true, y_pred)
plt.figure(figsize=(15, 15))
ax = sns.heatmap(confusion_to_plot, annot=True, ax=ax)
return (
self.confusion_matrix,
self.balanced_accuracy_score,
self.accuracy_score,
ax,
)
def plot_training_performances(self):
        plt.figure(figsize=(5, 5))
plt.plot(self.history["acc"])
plt.plot(self.history["val_acc"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(self.history["loss"])
plt.plot(self.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
def get_colors(self, color=None):
if not color:
color = self.predict_key
if not self.adatas:
print("please load at least one dataset")
else:
adata = self.adatas[0]
sc.pl.umap(adata, color=color, show=False)
self.colors[color] = adata.uns[f"{color}_colors"]
self.colors_UNK[f"{color} + _UNK"] = self.colors[color] + [
"#FF0000"
]
def plot_misclassified_umap(self, title=None):
if self.prediction_table.empty:
self.load_prediction_results()
sc.pl.umap(self.adata, color=self.predict_key, title=title)
self.adata.uns[f"misclassified_colors"] = self.adata.uns[
f"{self.predict_key}_colors"
] + ["#FF0000"]
self.adata.uns[f"misclassified_and_test_colors"] = (
self.adata.uns[f"{self.predict_key}_colors"]
+ ["#FF0000"]
+ ["#08ff00"]
)
sc.pl.umap(self.adata, color="misclassified", title=title)
sizes = pd.Series([2] * self.adata.n_obs, index=self.adata.obs.index)
sizes[
self.adata.obs[f"{self.predict_key}_predictions"]
!= self.adata.obs[self.predict_key]
] = 20
sc.pl.umap(
self.adata,
color=[self.predict_key, f"{self.predict_key}_predictions"],
size=sizes,
)
sc.pl.umap(self.adata, color=["misclassified", "is_test"], size=sizes)
sc.pl.umap(
self.adata,
color=["misclassified_and_test", "dca_split"],
size=sizes,
)
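# --- Hypothetical end-to-end sketch (not part of the original module) ---
# Assumes `latent` is a latent-space AnnData with a "train_split" obs column and a
# "celltype" obs key containing an "UNK" unlabeled category (all names illustrative):
#   predictor = MLP_Predictor(latent_space=latent, predict_key="celltype",
#                             predictor_hidden_sizes=64, predictor_epochs=100,
#                             predictor_batch_size=128, unlabeled_category="UNK")
#   predictor.preprocess_one_hot()
#   predictor.build_predictor()
#   predictor.train_model()
#   predictor.predict_on_test()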
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/predictor.py
| 0.817174 | 0.439747 |
predictor.py
|
pypi
|
import tensorflow as tf
from keras.engine.base_layer import InputSpec
from keras.engine.topology import Layer
from keras.layers import Dense, Lambda
from tensorflow.compat.v1.keras import backend as K
class ConstantDispersionLayer(Layer):
"""
    An identity layer which allows us to inject extra parameters,
    such as dispersion, into Keras models
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
self.theta = self.add_weight(
shape=(1, input_shape[1]),
initializer="zeros",
trainable=True,
name="theta",
)
self.theta_exp = tf.clip_by_value(K.exp(self.theta), 1e-3, 1e4)
super().build(input_shape)
def call(self, x):
return tf.identity(x)
def compute_output_shape(self, input_shape):
return input_shape
class SliceLayer(Layer):
def __init__(self, index, **kwargs):
self.index = index
super().__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError("Input should be a list")
super().build(input_shape)
def call(self, x):
assert isinstance(x, list), "SliceLayer input is not a list"
return x[self.index]
def compute_output_shape(self, input_shape):
return input_shape[self.index]
class ElementwiseDense(Dense):
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
assert (input_dim == self.units) or (
self.units == 1
), "Input and output dims are not compatible"
        # shape=(self.units,) makes this elementwise because of broadcasting
self.kernel = self.add_weight(
shape=(self.units,),
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
# use * instead of tf.matmul, we need broadcasting here
output = inputs * self.kernel
if self.use_bias:
output = output + self.bias
if self.activation is not None:
output = self.activation(output)
return output
nan2zeroLayer = Lambda(lambda x: tf.where(tf.math.is_nan(x), tf.zeros_like(x), x))
ColwiseMultLayer = Lambda(
lambda l: l[0] * tf.reshape(l[1], (-1, 1)), name="reconstruction"
)
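# --- Hypothetical sketch (not part of the original module) ---
# Wires ElementwiseDense and ColwiseMultLayer into a tiny Keras graph; the layer
# sizes and the Input/Model imports are illustrative assumptions.
#   from keras.layers import Input
#   from keras.models import Model
#   counts_in = Input(shape=(8,))
#   sf_in = Input(shape=(1,))
#   scaled = ElementwiseDense(8)(counts_in)       # per-feature affine transform
#   rescaled = ColwiseMultLayer([scaled, sf_in])  # multiply each row by its size factor
#   model = Model(inputs=[counts_in, sf_in], outputs=rescaled)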
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/layers.py
| 0.916987 | 0.428532 |
layers.py
|
pypi
|
import json
import os
import pickle
import keras.optimizers as opt
import numpy as np
from hyperopt import Trials, fmin, hp, tpe
from kopt import CompileFN, test_fn
from . import io
from .network import AE_types
def hyper(args):
print("entering_hyper")
adata = io.read_dataset(
args.input, transpose=args.transpose, test_split=False
)
hyper_params = {
"data": {
"norm_input_log": hp.choice("d_norm_log", (True, False)),
"norm_input_zeromean": hp.choice("d_norm_zeromean", (True, False)),
"norm_input_sf": hp.choice("d_norm_sf", (True, False)),
},
"model": {
"lr": hp.loguniform("m_lr", np.log(1e-3), np.log(1e-2)),
"ridge": hp.loguniform("m_ridge", np.log(1e-7), np.log(1e-1)),
"l1_enc_coef": hp.loguniform(
"m_l1_enc_coef", np.log(1e-7), np.log(1e-1)
),
"hidden_size": hp.choice(
"m_hiddensize",
(
(64, 32, 64),
(32, 16, 32),
(64, 64),
(32, 32),
(16, 16),
(16,),
(32,),
(64,),
(128,),
),
),
"activation": hp.choice(
"m_activation",
("relu", "selu", "elu", "PReLU", "linear", "LeakyReLU"),
),
"aetype": hp.choice("m_aetype", ("zinb", "zinb-conddisp")),
"batchnorm": hp.choice("m_batchnorm", (True, False)),
"dropout": hp.uniform("m_do", 0, 0.7),
"input_dropout": hp.uniform("m_input_do", 0, 0.8),
},
"fit": {"epochs": args.hyperepoch},
}
def data_fn(norm_input_log, norm_input_zeromean, norm_input_sf):
ad = adata.copy()
ad = io.normalize(
ad,
size_factors=norm_input_sf,
logtrans_input=norm_input_log,
normalize_input=norm_input_zeromean,
)
x_train = {"count": ad.X, "size_factors": ad.obs.size_factors}
y_train = ad.raw.X
return ((x_train, y_train),)
def model_fn(
train_data,
lr,
hidden_size,
activation,
aetype,
batchnorm,
dropout,
input_dropout,
ridge,
l1_enc_coef,
):
net = AE_types[aetype](
train_data[1].shape[1],
hidden_size=hidden_size,
l2_coef=0.0,
l1_coef=0.0,
l2_enc_coef=0.0,
l1_enc_coef=l1_enc_coef,
ridge=ridge,
hidden_dropout=dropout,
input_dropout=input_dropout,
batchnorm=batchnorm,
activation=activation,
init="glorot_uniform",
debug=args.debug,
)
net.build()
net.model.summary()
optimizer = opt.__dict__["RMSprop"](lr=lr, clipvalue=5.0)
net.model.compile(loss=net.loss, optimizer=optimizer)
return net.model
output_dir = os.path.join(args.outputdir, "hyperopt_results")
objective = CompileFN(
"autoencoder_hyperpar_db",
"myexp1",
data_fn=data_fn,
model_fn=model_fn,
loss_metric="loss",
loss_metric_mode="min",
valid_split=0.2,
save_model=None,
save_results=True,
use_tensorboard=False,
save_dir=output_dir,
)
test_fn(objective, hyper_params, save_model=None)
trials = Trials()
best = fmin(
objective,
hyper_params,
trials=trials,
algo=tpe.suggest,
max_evals=args.hypern,
catch_eval_exceptions=True,
)
with open(os.path.join(output_dir, "trials.pickle"), "wb") as f:
pickle.dump(trials, f)
# TODO: map indices in "best" back to choice-based hyperpars before saving
with open(os.path.join(output_dir, "best.json"), "wt") as f:
json.dump(best, f, sort_keys=True, indent=4)
print(best)
# TODO: not just save the best conf but also train the model with these params
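# --- Hypothetical invocation sketch (not part of the original module) ---
# The attribute names mirror those accessed above (input, transpose, debug,
# hyperepoch, hypern, outputdir); the file path and values are illustrative.
#   from types import SimpleNamespace
#   hyper(SimpleNamespace(input="counts.h5ad", transpose=False, debug=False,
#                         hyperepoch=100, hypern=20, outputdir="results"))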
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/hyper.py
| 0.450601 | 0.217535 |
hyper.py
|
pypi
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
import scipy as sp
import seaborn as sns
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface
nb_zero = lambda t, mu: (t / (mu + t)) ** t
zinb_zero = lambda t, mu, p: p + ((1.0 - p) * ((t / (mu + t)) ** t))
sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
logit = lambda x: np.log(x + 1e-7) - np.log(1.0 - x + 1e-7)
tf_logit = lambda x: tf.cast(
tf.math.log(x + 1e-7) - tf.math.log(1.0 - x + 1e-7), "float32"
)
log_loss = lambda pred, label: np.sum(
-(label * np.log(pred + 1e-7))
- ((1.0 - label) * np.log(1.0 - pred + 1e-7))
)
def _lrt(ll_full, ll_reduced, df_full, df_reduced):
# Compute the difference in degrees of freedom.
delta_df = df_full - df_reduced
# Compute the deviance test statistic.
delta_dev = 2 * (ll_full - ll_reduced)
    # Compute the p-values based on the deviance and its expectation under the chi-square distribution.
pvals = 1.0 - sp.stats.chi2(delta_df).cdf(delta_dev)
return pvals
def _fitquad(x, y):
coef, res, _, _ = np.linalg.lstsq(
(x**2)[:, np.newaxis], y - x, rcond=None
)
ss_exp = res[0]
ss_tot = np.var(y - x) * len(x)
r2 = 1 - (ss_exp / ss_tot)
# print('Coefs:', coef)
return np.array([coef[0], 1, 0]), r2
def _tf_zinb_zero(mu, t=None):
a, b = tf.Variable([-1.0], dtype="float32"), tf.Variable(
[0.0], dtype="float32"
)
if t is None:
t_log = tf.Variable([-10.0], dtype="float32")
t = tf.math.exp(t_log)
p = tf.math.sigmoid((tf.math.log(mu + 1e-7) * a) + b)
pred = p + ((1.0 - p) * ((t / (mu + t)) ** t))
pred = tf.cast(pred, "float32")
return pred, a, b, t
def _optimize_zinb(mu, dropout, theta=None):
pred, a, b, t = _tf_zinb_zero(mu, theta)
# loss = tf.math.reduce_mean(tf.abs(tf_logit(pred) - tf_logit(dropout)))
loss = tf.compat.v1.losses.log_loss(
labels=dropout.astype("float32"), predictions=pred
)
optimizer = ScipyOptimizerInterface(loss, options={"maxiter": 100})
with tf.compat.v1.Session() as sess:
sess.run(tf.global_variables_initializer())
optimizer.minimize(sess)
ret_a = sess.run(a)
ret_b = sess.run(b)
if theta is None:
ret_t = sess.run(t)
else:
ret_t = t
return ret_a, ret_b, ret_t
def plot_mean_dropout(ad, title, ax, opt_zinb_theta=False, legend_out=False):
expr = ad.X
mu = expr.mean(0)
do = np.mean(expr == 0, 0)
v = expr.var(axis=0)
coefs, r2 = _fitquad(mu, v)
theta = 1.0 / coefs[0]
# zinb fit
coefs = _optimize_zinb(mu, do, theta=theta if not opt_zinb_theta else None)
print(coefs)
# pois_pred = np.exp(-mu)
nb_pred = nb_zero(theta, mu)
zinb_pred = zinb_zero(
coefs[2], mu, sigmoid((np.log(mu + 1e-7) * coefs[0]) + coefs[1])
)
# calculate log loss for all distr.
# pois_ll = log_loss(pois_pred, do)
nb_ll = log_loss(nb_pred, do)
zinb_ll = log_loss(zinb_pred, do)
ax.plot(mu, do, "o", c="black", markersize=1)
ax.set(xscale="log")
# sns.lineplot(mu, pois_pred, ax=ax, color='blue')
sns.lineplot(mu, nb_pred, ax=ax, color="red")
sns.lineplot(mu, zinb_pred, ax=ax, color="green")
ax.set_title(title)
ax.set_ylabel("Empirical dropout rate")
ax.set_xlabel(r"Mean expression")
leg_loc = "best" if not legend_out else "upper left"
leg_bbox = None if not legend_out else (1.02, 1.0)
ax.legend(
[
"Genes",
# r'Poisson $L=%.4f$' % pois_ll,
r"NB($\theta=%.2f)\ L=%.4f$" % ((1.0 / theta), nb_ll),
r"ZINB($\theta=%.2f,\pi=\sigma(%.2f\mu%+.2f)) \ L=%.4f$"
% (1.0 / coefs[2], coefs[0], coefs[1], zinb_ll),
],
loc=leg_loc,
bbox_to_anchor=leg_bbox,
)
zinb_pval = _lrt(-zinb_ll, -nb_ll, 3, 1)
print("p-value: %e" % zinb_pval)
def plot_mean_var(ad, title, ax):
ad = ad.copy()
sc.pp.filter_cells(ad, min_counts=1)
sc.pp.filter_genes(ad, min_counts=1)
m = ad.X.mean(axis=0)
v = ad.X.var(axis=0)
coefs, r2 = _fitquad(m, v)
ax.set(xscale="log", yscale="log")
ax.plot(m, v, "o", c="black", markersize=1)
poly = np.poly1d(coefs)
sns.lineplot(m, poly(m), ax=ax, color="red")
ax.set_title(title)
ax.set_ylabel("Variance")
ax.set_xlabel(r"$\mu$")
sns.lineplot(m, m, ax=ax, color="blue")
ax.legend(
["Genes", r"NB ($\theta=%.2f)\ r^2=%.3f$" % (coefs[0], r2), "Poisson"]
)
return coefs[0]
def plot_zeroinf(ad, title, mean_var_plot=False, opt_theta=True):
if mean_var_plot:
f, axs = plt.subplots(1, 2, figsize=(15, 5))
plot_mean_var(ad, title, ax=axs[0])
plot_mean_dropout(
ad, title, axs[1], opt_zinb_theta=opt_theta, legend_out=True
)
plt.tight_layout()
else:
f, ax = plt.subplots(1, 1, figsize=(10, 5))
plot_mean_dropout(
ad, title, ax, opt_zinb_theta=opt_theta, legend_out=True
)
plt.tight_layout()
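# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes `adata` is an AnnData object whose .X holds dense raw counts; the title
# string is illustrative.
#   plot_zeroinf(adata, "demo dataset", mean_var_plot=True, opt_theta=True)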
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/utils.py
| 0.731346 | 0.59884 |
utils.py
|
pypi
|
try:
from .clust_compute import *
from .dataset import Dataset
from .load import load_runfile
from .model import DCA_Permuted
from .predictor import MLP_Predictor
from .runfile_handler import RunFile
from .workflow import Workflow
except ImportError:
from load import load_runfile
from dataset import Dataset
from predictor import MLP_Predictor
from model import DCA_Permuted
from workflow import Workflow
from runfile_handler import RunFile
from clust_compute import *
import math
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sns
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
confusion_matrix,
plot_confusion_matrix,
)
metrics_list = [
"balanced_accuracy_scores",
"balanced_accuracy_scores_test",
"balanced_accuracy_scores_val",
"balanced_accuracy_scores_train",
"accuracy_scores",
"accuracy_scores_test",
"accuracy_scores_val",
"accuracy_scores_train",
# "silhouette_true",
# "silhouette_pred",
"davies_bouldin_true",
"davies_bouldin_pred",
"nmi",
"batch_entropy_mixing",
]
class AnalysisWorkflow:
def __init__(self, working_dir, id_list):
"""
        At the moment, most of the functions might not support workflows coming from different datasets (in particular, it is better if every latent space has the same number of obs)
"""
self.id_list = id_list
self.working_dir = working_dir
self.runfile_paths = {
wf_id: RunFile(
working_dir=working_dir, workflow_ID=wf_id
).run_file_path
for wf_id in self.id_list
}
self.workflow_list = {
wf_id: Workflow(working_dir=working_dir, yaml_name=rf_path)
for wf_id, rf_path in self.runfile_paths.items()
}
self.true_class = dict()
self.pred_class = dict()
self.corrected_counts = dict()
self.balanced_accuracy_scores = dict()
self.accuracy_scores = dict()
self.metric_results_path = (
self.working_dir + "/results/metric_results.csv"
)
self.metric_results_df = pd.read_csv(
self.metric_results_path, index_col="index"
)
self.runfile_csv_path = (
self.working_dir + "/runfile_dir/runfile_list.csv"
)
print(self.id_list)
print(self.runfile_csv_path)
self.runfile_df = pd.read_csv(
self.runfile_csv_path, index_col="index"
).loc[self.id_list, :]
self.metrics_computed_solo = []
self.metrics_computed_solo_df = pd.DataFrame()
self.metrics_table = self.metric_results_df.copy().loc[self.id_list, :]
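    # Hypothetical usage sketch (not part of the original module); the working
    # directory and workflow IDs are illustrative:
    #   aw = AnalysisWorkflow(working_dir="/path/to/experiment", id_list=[1, 2, 3])
    #   aw.load_latent_spaces()
    #   aw.load_metrics()
    #   aw.update_metrics()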
def load_metrics(self):
"""
Loads the metrics from the result folder
"""
self.metrics_computed_solo = [
pd.read_csv(wf.metric_path, index_col=0)
for wf in self.workflow_list.values()
] # The metrics as saved in the result folder
self.metrics_computed_solo_df = pd.concat(
self.metrics_computed_solo
) # Merge them into a dataframe
self.metrics_computed_solo_df.index.name = "index"
def update_metrics(self):
"""
Updates the metrics in the metrics table
"""
self.metric_results_df.update(self.metrics_computed_solo_df)
self.metric_results_df.to_csv(self.metric_results_path)
self.metrics_table = self.metric_results_df.copy().loc[self.id_list, :]
def load_corrected_counts(self):
for workflow in self.workflow_list.values():
workflow.load_corrected()
self.corrected_counts = {
wf_id: workflow.corrected_count
for wf_id, workflow in self.workflow_list.items()
}
def load_latent_spaces(self, verbose=False):
def verbose_print(to_print):
if verbose:
print(to_print)
failed_ID = []
for ID, workflow in self.workflow_list.items():
verbose_print(f"workflow {ID} loaded")
workflow.load_results()
try:
self.true_class[ID] = workflow.latent_space.obs[
f"true_{workflow.class_key}"
]
except:
failed_ID.append(ID)
if failed_ID:
failed_ID = [str(i) for i in failed_ID]
print(
Exception(
                    f'The following IDs did not have a true_class_key obs: {"_".join(failed_ID)}'
)
)
            return failed_ID  # returns the IDs that failed
self.pred_class = {
wf_id: workflow.latent_space.obs[f"{workflow.class_key}_pred"]
for wf_id, workflow in self.workflow_list.items()
}
self.latent_spaces = {
wf_id: workflow.latent_space
for wf_id, workflow in self.workflow_list.items()
}
self.pred_tables = {
wf_id: workflow.latent_space.obs.loc[
:,
[
f"true_{workflow.class_key}",
f"{workflow.class_key}_pred",
"train_split",
],
]
for wf_id, workflow in self.workflow_list.items()
}
# for wf_id,workflow in self.workflow_list.items():
# class_key = self.workflow_list[wf_id].class_key
# true_celltype = self.workflow_list[wf_id].true_celltype
# false_celltype = self.workflow_list[wf_id].false_celltype
# false_true_only = pd.Series(['Other'] * workflow.latent_space.n_obs, index = workflow.latent_space.obs.index)
# false_true_only[workflow.latent_space.obs[f'true_{class_key}'] == true_celltype] = true_celltype
# false_true_only[workflow.latent_space.obs[f'true_{class_key}'] == false_celltype] = false_celltype
# false_true_only[workflow.latent_space.obs['faked']] = f'fake {false_celltype} - true {true_celltype}'
# workflow.latent_space.obs['false_true_only'] = false_true_only
# false_only = pd.Series(['Other'] * workflow.latent_space.n_obs, index = workflow.latent_space.obs.index)
# false_only[workflow.latent_space.obs['faked']] = f'fake {false_celltype} - true {true_celltype}'
# workflow.latent_space.obs['false_only'] = false_only
# self.latent_spaces = {wf_id: workflow.latent_space for wf_id, workflow in self.workflow_list.items()}
def subsample_true_false(self, keep="true and false"):
"""
        Use only when celltypes have been faked in the workflows. Keeps only the cells belonging to the relevant celltypes.
keep is either 'true', 'false' or 'true and false'.
"""
for wf_id, latent_space in self.latent_spaces.items():
class_key = self.workflow_list[wf_id].class_key
if keep == "true":
keep_celltype = [self.workflow_list[wf_id].true_celltype]
elif keep == "false":
keep_celltype = [self.workflow_list[wf_id].false_celltype]
elif keep == "true and false":
keep_celltype = [
self.workflow_list[wf_id].false_celltype,
self.workflow_list[wf_id].true_celltype,
]
self.latent_spaces[wf_id] = latent_space[
latent_space.obs[f"true_{class_key}"].isin(keep_celltype), :
].copy()
self.workflow_list[wf_id].latent_space = latent_space[
latent_space.obs[f"true_{class_key}"].isin(keep_celltype), :
].copy()
self.true_class = {
wf_id: self.latent_spaces[wf_id].obs[
f"true_{workflow.class_key}"
]
for wf_id, workflow in self.workflow_list.items()
}
self.pred_class = {
wf_id: self.latent_spaces[wf_id].obs[
f"{workflow.class_key}_pred"
]
for wf_id, workflow in self.workflow_list.items()
}
self.pred_tables = {
wf_id: self.latent_spaces[wf_id].obs.loc[
:,
[
f"true_{workflow.class_key}",
f"{workflow.class_key}_pred",
"train_split",
],
]
for wf_id, workflow in self.workflow_list.items()
}
def subsample_on_obs(self, obs_filter, condition=True):
"""
Reduces every workflow on a common obs condition. If obs_filter is a boolean Series, no need to specify condition.
        The True and False values may differ between workflows; therefore, workflows won't share the same cells after this operation
"""
if (type(condition) == str) or (type(condition) == bool):
condition = [condition]
for wf_id, latent_space in self.latent_spaces.items():
self.latent_spaces[wf_id] = latent_space[
latent_space.obs[obs_filter].isin(condition), :
].copy()
self.workflow_list[wf_id].latent_space = latent_space[
latent_space.obs[obs_filter].isin(condition), :
].copy()
self.true_class = {
wf_id: self.latent_spaces[wf_id].obs[
f"true_{workflow.class_key}"
]
for wf_id, workflow in self.workflow_list.items()
}
self.pred_class = {
wf_id: self.latent_spaces[wf_id].obs[
f"{workflow.class_key}_pred"
]
for wf_id, workflow in self.workflow_list.items()
}
self.pred_tables = {
wf_id: self.latent_spaces[wf_id].obs.loc[
:,
[
f"true_{workflow.class_key}",
f"{workflow.class_key}_pred",
"train_split",
],
]
for wf_id, workflow in self.workflow_list.items()
}
def subsample_on_index(self, cell_index):
"""
Reduces every workflow to cells specified in cell_index
"""
for wf_id, latent_space in self.latent_spaces.items():
self.latent_spaces[wf_id] = latent_space[cell_index, :].copy()
self.workflow_list[wf_id].latent_space = latent_space[
cell_index, :
].copy()
self.true_class = {
wf_id: self.latent_spaces[wf_id].obs[
f"true_{workflow.class_key}"
]
for wf_id, workflow in self.workflow_list.items()
}
self.pred_class = {
wf_id: self.latent_spaces[wf_id].obs[
f"{workflow.class_key}_pred"
]
for wf_id, workflow in self.workflow_list.items()
}
self.pred_tables = {
wf_id: self.latent_spaces[wf_id].obs.loc[
:,
[
f"true_{workflow.class_key}",
f"{workflow.class_key}_pred",
"train_split",
],
]
for wf_id, workflow in self.workflow_list.items()
}
def add_obs_metadata(self, metadata):
"""
        Add metadata to every latent_space. metadata should be a pandas DataFrame with the correct number of observations
"""
for wf_id, latent_space in self.latent_spaces.items():
assert (
latent_space.obs.shape[0] == metadata.shape[0]
), "metadata doesn't have the same number of cell as the latent spaces"
latent_space.obs = pd.concat([latent_space.obs, metadata], axis=1)
def compute_metrics_solo(self, ID, add_to_csv=True, save_adata=True):
"""
        Computes the metrics of interest for one workflow and returns them as a one-row DataFrame
        (for convenience when merging with metrics_table); the result is also stored in the uns['metrics'] section of the adata.
        ID : ID of the adata to compute the metrics on
        add_to_csv : whether to add the results to the metrics_table csv
        save_adata : whether to save the updated adata object
"""
wf = self.workflow_list[ID]
latent_space = self.latent_spaces[ID]
if os.path.exists(wf.metric_path):
metric_series = pd.read_csv(wf.metric_path, index_col=0)
else:
try:
metric_series = latent_space.uns["metrics"]
except:
metric_series = pd.Series(index=metrics_list, name=ID)
metric_series = pd.DataFrame(
[metric_series.values], index=[ID], columns=metric_series.index
)
# metric_series = pd.read_csv(wf.metric_path, index_col = 0)
metric_clone = metric_series.copy()
print(f"computing ID {ID}")
for metric in metrics_list:
if (
(metric not in metric_series.columns)
or (metric_series.isna().loc[ID, metric])
or (metric_series.loc[ID, metric] == "NC")
):
print(f"computing metric : {metric}")
if metric == "balanced_accuracy_scores":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=latent_space,
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "balanced_accuracy_scores_test":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=latent_space[
latent_space.obs["train_split"] == "test"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "balanced_accuracy_scores_val":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=latent_space[
latent_space.obs["train_split"] == "val"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "balanced_accuracy_scores_train":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=latent_space[
latent_space.obs["train_split"] == "train"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores":
try:
metric_series.loc[ID, metric] = accuracy(
adata=latent_space,
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores_test":
try:
metric_series.loc[ID, metric] = accuracy(
adata=latent_space[
latent_space.obs["train_split"] == "test"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores_val":
try:
metric_series.loc[ID, metric] = accuracy(
adata=latent_space[
latent_space.obs["train_split"] == "val"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores_train":
try:
metric_series.loc[ID, metric] = accuracy(
adata=latent_space[
latent_space.obs["train_split"] == "train"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_true":
try:
metric_series.loc[ID, metric] = silhouette(
adata=latent_space,
partition_key=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_pred":
try:
metric_series.loc[ID, metric] = silhouette(
adata=latent_space,
partition_key=f"{wf.class_key}_pred",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_true":
try:
metric_series.loc[ID, metric] = davies_bouldin(
adata=latent_space,
partition_key=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_pred":
try:
metric_series.loc[ID, metric] = davies_bouldin(
adata=latent_space,
partition_key=f"{wf.class_key}_pred",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "nmi":
try:
metric_series.loc[ID, metric] = nmi(
adata=latent_space,
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "batch_entropy_mixing":
for b_k in ["manip", "sample"]:
if b_k in latent_space.obs.columns:
batch_key = b_k
try:
metric_series.loc[ID, metric] = batch_entropy_mixing(
adata=latent_space, batch_key=batch_key
)
except:
metric_series.loc[ID, metric] = "NC"
if not metric_series.equals(metric_clone):
metric_series.to_csv(wf.metric_path)
print(f"metric_series saved for ID {ID}")
if add_to_csv:
self.metric_results_df.update(metric_series)
self.metric_results_df.to_csv(self.metric_results_path)
self.metrics_table = self.metric_results_df.copy().loc[
self.id_list, :
]
if save_adata:
latent_space.uns["metrics"] = metric_series
latent_space.write(wf.adata_path)
return metric_series
def compute_metrics_csv(self):
for ID, wf in self.workflow_list.items():
metric_series = self.metric_results_df.loc[ID, metrics_list].copy()
metric_series = pd.DataFrame(
[metric_series.values], index=[ID], columns=metric_series.index
)
# metric_series = pd.read_csv(wf.metric_path, index_col = 0)
metric_clone = metric_series.copy()
print(f"computing ID {ID}")
for metric in metrics_list:
if (metric not in metric_series.columns) or (
metric_series.isna().loc[ID, metric]
):
print("computing metric")
if metric == "balanced_accuracy_scores":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=wf.latent_space,
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "balanced_accuracy_scores_test":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=wf.latent_space[
wf.latent_space.obs["train_split"]
== "test"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "balanced_accuracy_scores_val":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=wf.latent_space[
wf.latent_space.obs["train_split"] == "val"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "balanced_accuracy_scores_train":
try:
metric_series.loc[ID, metric] = balanced_accuracy(
adata=wf.latent_space[
wf.latent_space.obs["train_split"]
== "train"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores":
try:
metric_series.loc[ID, metric] = accuracy(
adata=wf.latent_space,
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores_test":
try:
metric_series.loc[ID, metric] = accuracy(
adata=wf.latent_space[
wf.latent_space.obs["train_split"]
== "test"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores_val":
try:
metric_series.loc[ID, metric] = accuracy(
adata=wf.latent_space[
wf.latent_space.obs["train_split"] == "val"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "accuracy_scores_train":
try:
metric_series.loc[ID, metric] = accuracy(
adata=wf.latent_space[
wf.latent_space.obs["train_split"]
== "train"
],
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_true":
try:
metric_series.loc[ID, metric] = silhouette(
adata=wf.latent_space,
partition_key=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_pred":
try:
metric_series.loc[ID, metric] = silhouette(
adata=wf.latent_space,
partition_key=f"{wf.class_key}_pred",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_true":
try:
metric_series.loc[ID, metric] = davies_bouldin(
adata=wf.latent_space,
partition_key=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "silhouette_pred":
try:
metric_series.loc[ID, metric] = davies_bouldin(
adata=wf.latent_space,
partition_key=f"{wf.class_key}_pred",
)
except:
metric_series.loc[ID, metric] = "NC"
elif metric == "nmi":
try:
metric_series.loc[ID, metric] = nmi(
adata=wf.latent_space,
partition_key=f"{wf.class_key}_pred",
reference=f"true_{wf.class_key}",
)
except:
metric_series.loc[ID, metric] = "NC"
if not metric_series.equals(metric_clone):
metric_series.to_csv(wf.metric_path)
print(f"metric_series saved for ID {ID}")
self.metric_results_df.update(metric_series)
self.metric_results_df.to_csv(self.metric_results_path)
self.metrics_table = self.metric_results_df.copy().loc[self.id_list, :]
# def compute_metrics(self, verbose=False): # y_iterate = lambda:zip(self.workflow_list.keys(),self.true_class.values(),self.pred_class.values())
# def verbose_print(to_print):
# if verbose:
# print(to_print)
# verbose_print('computing balanced_accuracy_scores')
# self.balanced_accuracy_scores = {wf_id: balanced_accuracy(adata=workflow.latent_space,
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.balanced_accuracy_scores = pd.Series(self.balanced_accuracy_scores, name = 'balanced_accuracy_scores')
# self.balanced_accuracy_scores_test = {wf_id: balanced_accuracy(adata=workflow.latent_space[workflow.latent_space.obs['train_split'] == 'test'],
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.balanced_accuracy_scores_test = pd.Series(self.balanced_accuracy_scores_test, name = 'balanced_accuracy_scores_test')
# self.balanced_accuracy_scores_val = {wf_id: balanced_accuracy(adata=workflow.latent_space[workflow.latent_space.obs['train_split'] == 'val'],
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.balanced_accuracy_scores_val = pd.Series(self.balanced_accuracy_scores_val, name = 'balanced_accuracy_scores_val')
# self.balanced_accuracy_scores_train = {wf_id: balanced_accuracy(adata=workflow.latent_space[workflow.latent_space.obs['train_split'] == 'train'],
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.balanced_accuracy_scores_train = pd.Series(self.balanced_accuracy_scores_train, name = 'balanced_accuracy_scores_train')
# verbose_print('computing accuracy_scores')
# self.accuracy_scores = {wf_id: accuracy(adata=workflow.latent_space,
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.accuracy_scores = pd.Series(self.accuracy_scores, name = 'accuracy_scores')
# self.accuracy_scores_test = {wf_id: accuracy(adata=workflow.latent_space[workflow.latent_space.obs['train_split'] == 'test'],
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.accuracy_scores_test = pd.Series(self.accuracy_scores_test, name = 'accuracy_scores_test')
# self.accuracy_scores_val = {wf_id: accuracy(adata=workflow.latent_space[workflow.latent_space.obs['train_split'] == 'val'],
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.accuracy_scores_val = pd.Series(self.accuracy_scores_val, name = 'accuracy_scores_val')
# self.accuracy_scores_train = {wf_id: accuracy(adata=workflow.latent_space[workflow.latent_space.obs['train_split'] == 'train'],
# partition_key=f'{workflow.class_key}_pred',
# reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# self.accuracy_scores_train = pd.Series(self.accuracy_scores_train, name = 'accuracy_scores_train')
# # verbose_print('computing silhouette_true')
# # self.silhouette_true = {wf_id: silhouette(adata=workflow.latent_space,
# # partition_key=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# # self.silhouette_true = pd.Series(self.silhouette_true, name = 'silhouette_true')
# # verbose_print('computing silhouette_pred')
# # self.silhouette_pred = {wf_id: silhouette(adata=workflow.latent_space,
# # partition_key=f'{workflow.class_key}_pred') for wf_id, workflow in self.workflow_list.items()}
# # self.silhouette_pred = pd.Series(self.silhouette_pred, name = 'silhouette_pred')
# # verbose_print('computing davies_bouldin_true')
# # self.davies_bouldin_true = {wf_id: davies_bouldin(adata=workflow.latent_space,
# # partition_key=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# # self.davies_bouldin_true = pd.Series(self.davies_bouldin_true, name = 'davies_bouldin_true')
# # verbose_print('computing davies_bouldin_pred')
# # self.davies_bouldin_pred = {wf_id: davies_bouldin(adata=workflow.latent_space,
# # partition_key=f'{workflow.class_key}_pred') for wf_id, workflow in self.workflow_list.items()}
# # self.davies_bouldin_pred = pd.Series(self.davies_bouldin_pred, name = 'davies_bouldin_pred')
# # verbose_print('computing nmi')
# # self.nmi = {wf_id: nmi(adata=workflow.latent_space,
# # partition_key=f'{workflow.class_key}_pred',
# # reference=f'true_{workflow.class_key}') for wf_id, workflow in self.workflow_list.items()}
# # self.nmi = pd.Series(self.nmi, name = 'nmi')
# metrics_table_add = pd.concat([ self.accuracy_scores,
# self.accuracy_scores_test,
# self.accuracy_scores_val,
# self.accuracy_scores_train,
# self.balanced_accuracy_scores,
# self.balanced_accuracy_scores_test,
# self.balanced_accuracy_scores_val,
# self.balanced_accuracy_scores_train,
# # self.silhouette_true,
# # self.silhouette_pred,
# # self.davies_bouldin_true,
# # self.davies_bouldin_pred,
# # self.nmi
# ], axis = 1)
# self.metrics_table = pd.concat([metrics_table_add, self.metrics_table], axis=1)
# self.metrics_table = self.metrics_table.loc[:,~self.metrics_table.columns.duplicated()].copy() # Removing duplicate cols
def compute_clustering_metrics(self, cluster_key, verbose=False):
"""
        Compute the clustering metrics for a selected clustering key, for example batches
"""
self.davies_bouldin_cluster = {
wf_id: davies_bouldin(
adata=workflow.latent_space, partition_key=cluster_key
)
for wf_id, workflow in self.workflow_list.items()
}
self.davies_bouldin_cluster = pd.Series(
self.davies_bouldin_cluster, name=f"davies_bouldin_{cluster_key}"
)
self.silhouette_cluster = {
wf_id: silhouette(
adata=workflow.latent_space,
partition_key=f"true_{workflow.class_key}",
)
for wf_id, workflow in self.workflow_list.items()
}
self.silhouette_cluster = pd.Series(
self.silhouette_cluster, name=f"silhouette_{cluster_key}"
)
metrics_table_add = pd.concat(
[self.davies_bouldin_cluster, self.silhouette_cluster], axis=1
)
self.metrics_table = pd.concat(
[metrics_table_add, self.metrics_table], axis=1
)
def plot_confusion_matrices_single(
self, workflow_ID, test_only, normalize="true", ax=None, **kwargs
):
if not test_only:
y_true = self.true_class[workflow_ID]
y_pred = self.pred_class[workflow_ID]
else:
y_true = self.true_class[workflow_ID][
self.pred_tables[workflow_ID]["train_split"] == "test"
]
y_pred = self.pred_class[workflow_ID][
self.pred_tables[workflow_ID]["train_split"] == "test"
]
labels = sorted(list(set(y_true.unique()) | set(y_pred.unique())))
conf_mat = confusion_matrix(
y_true, y_pred, labels=labels, normalize=normalize
)
confusion_to_plot = pd.DataFrame(
conf_mat, index=labels, columns=labels
)
plt.figure(figsize=(15, 15))
ax = sns.heatmap(confusion_to_plot, annot=True, ax=ax, **kwargs)
return confusion_to_plot, ax
def compute_confusion_matrix(
self, workflow_ID, test_only, normalize="true"
):
if not test_only:
y_true = self.true_class[workflow_ID]
y_pred = self.pred_class[workflow_ID]
else:
y_true = self.true_class[workflow_ID][
self.pred_tables[workflow_ID]["train_split"] == "test"
]
y_pred = self.pred_class[workflow_ID][
self.pred_tables[workflow_ID]["train_split"] == "test"
]
labels = sorted(list(set(y_true.unique()) | set(y_pred.unique())))
conf_mat = confusion_matrix(
y_true, y_pred, labels=labels, normalize=normalize
)
confusion_to_plot = pd.DataFrame(
conf_mat, index=labels, columns=labels
)
return confusion_to_plot
def plot_average_conf_matrix(
self, split_by, test_only, normalize="true", **kwargs
):
"""
split_by can be a list of obs columns in which case they'll be regrouped in a meta split_by
"""
metrics_to_plot = self.metrics_table.copy()
if (
type(split_by) == list
): # Creating the "meta split by" ie potentially a combination of several obs fields
meta_split_by = "-".join(split_by)
col_to_plot = pd.Series(
metrics_to_plot[split_by[0]].astype(str),
index=metrics_to_plot.index,
)
for h in split_by[1:]:
col_to_plot = (
col_to_plot + "-" + metrics_to_plot[h].astype(str)
)
metrics_to_plot[meta_split_by] = col_to_plot
else:
meta_split_by = split_by
split_id = {}
for split in metrics_to_plot[meta_split_by].unique():
print(split)
split_id[str(split)] = metrics_to_plot.loc[
metrics_to_plot[meta_split_by] == split, "workflow_ID"
]
confusion_dict = {
wf_id: self.compute_confusion_matrix(wf_id, test_only, normalize)
for wf_id in self.workflow_list.keys()
}
avg_conf_dict = {}
for split, IDs in split_id.items():
conf_concat = pd.concat([confusion_dict[ID] for ID in IDs])
by_row_index = conf_concat.groupby(conf_concat.index)
avg_conf = (
by_row_index.mean()
) # Averaging the confusion matrices by split_by conditions. Note that we're averaging normalized values (depending on 'normalize').
avg_conf_dict[split] = avg_conf
for split, mat in avg_conf_dict.items():
plt.figure(figsize=(15, 15))
ax = sns.heatmap(mat, annot=True, **kwargs)
split_title = split.split("-")
plt.title(
"average conf matrix, split by "
+ " ".join(
[f"{sp}={s}" for sp, s in zip(split_by, split_title)]
)
)
def plot_confusion_matrices_multiple(
self, IDs_to_plot="all", params=["dataset_name"]
):
"""
This plots confusion matrices
IDs_to_plot should be either a list of the IDs to plot or 'all' in which case all the indices of the object are plotted
"""
if IDs_to_plot == "all":
IDs_to_plot = list(self.workflow_list.keys())
n_plots = len(IDs_to_plot)
f, axes = plt.subplots(n_plots, 2, figsize=(30, 12.5 * n_plots))
i = 0
for wf_id in IDs_to_plot:
            self.plot_confusion_matrices_single(
workflow_ID=wf_id,
test_only=False,
ax=axes[i, 0],
normalize="true",
)
axes[i, 0].set_title(
[
f"{param} = {getattr(self.workflow_list[wf_id], param)}"
for param in params
]
)
axes[i, 0].set_xlabel(
f"worflow_{wf_id}, acc={round(self.accuracy_scores[wf_id],2)}, weighted_acc={round(self.balanced_accuracy_scores[wf_id],2)}"
)
            self.plot_confusion_matrices_single(
workflow_ID=wf_id,
test_only=True,
ax=axes[i, 1],
normalize="true",
)
axes[i, 1].set_xlabel(
f"worflow_{wf_id}_test_only, acc={round(self.accuracy_scores[wf_id],2)}, weighted_acc={round(self.balanced_accuracy_scores[wf_id],2)}"
)
i += 1
return f
def plot_umaps(self, IDs_to_plot="all", params=["dataset"], **kwargs):
"""
params are the params to specify in the umap title
"""
if IDs_to_plot == "all":
IDs_to_plot = self.workflow_list.keys()
for wf_id in IDs_to_plot:
sc.pl.umap(
self.latent_spaces[wf_id],
title=f"workflow_{wf_id}"
+ str(
[
f"{param} = {getattr(self.workflow_list[wf_id], param)}"
for param in params
]
),
**kwargs,
)
def plot_comparative_boxplot(
self, x, y, hue, title=None, IDs_to_plot="all", **kwargs
):
metrics_to_plot = self.metrics_table.copy()
if IDs_to_plot != "all":
metrics_to_plot = metrics_to_plot.loc[
metrics_to_plot["workflow_ID"].isin(IDs_to_plot)
]
print(metrics_to_plot)
if type(hue) == list:
meta_hue = "_".join(hue)
col_to_plot = pd.Series(
metrics_to_plot[hue[0]].astype(str),
index=metrics_to_plot.index,
)
for h in hue[1:]:
col_to_plot = (
col_to_plot + "_" + metrics_to_plot[h].astype(str)
)
metrics_to_plot[meta_hue] = col_to_plot
else:
meta_hue = hue
sns.set(rc={"figure.figsize": (11.7, 8.27)})
sns.boxplot(x=x, y=y, hue=meta_hue, data=metrics_to_plot, **kwargs)
plt.xticks(rotation=90)
plt.legend(title=x)
if title:
plt.title(title)
else:
plt.title(f"{y} split by {x}")
def plot_class_confidence(self, ID, mode="box", layout=True, **kwargs):
"""
mode is either bar (average) or box (boxplot)
"""
adata = self.latent_spaces[ID]
workflow = self.workflow_list[ID]
true_key = f"true_{workflow.class_key}"
pred_key = f"{workflow.class_key}_pred"
y_pred_raw = pd.DataFrame(
adata.obsm["y_pred_raw"],
index=adata.obs_names,
columns=adata.uns["prediction_decoder"],
) # n_obs x n_class matrix
y_pred_raw = y_pred_raw[adata.obs[true_key].cat.categories]
class_df_dict = {
ct: y_pred_raw.loc[adata.obs[true_key] == ct, :]
for ct in adata.obs[true_key].cat.categories
}
mean_acc_dict = {
ct: df.mean(axis=0) for ct, df in class_df_dict.items()
}
n = math.ceil(np.sqrt(len(adata.obs[true_key].cat.categories)))
f, axes = plt.subplots(n, n, constrained_layout=layout)
# plt.constrained_layout()
i = 0
for ct, df in class_df_dict.items():
r = i // n
c = i % n
ax = axes[r, c]
if mode == "box":
df.plot.box(
ax=ax,
figsize=(20, 15),
ylim=(-0.01, 1.01),
xlabel=adata.obs[true_key].cat.categories,
**kwargs,
)
if mode == "bar":
df = mean_acc_dict[ct]
df.plot.bar(ax=ax, figsize=(20, 15), ylim=(0, 1), **kwargs)
ax.tick_params(axis="x", labelrotation=90)
ax.set_title(ct + f"- {class_df_dict[ct].shape[0]} cells")
i += 1
def plot_size_conf_correlation(self, ID):
adata = self.latent_spaces[ID]
workflow = self.workflow_list[ID]
true_key = f"true_{workflow.class_key}"
pred_key = f"{workflow.class_key}_pred"
y_pred_raw = pd.DataFrame(
adata.obsm["y_pred_raw"],
index=adata.obs_names,
columns=adata.uns["prediction_decoder"],
) # n_obs x n_class matrix
class_df_dict = {
ct: y_pred_raw.loc[adata.obs[true_key] == ct, :]
for ct in adata.obs[true_key].cat.categories
} # The order of the plot is defined here (adata.obs['true_louvain'].cat.categories)
mean_acc_dict = {
ct: df.mean(axis=0) for ct, df in class_df_dict.items()
}
f, axes = plt.subplots(1, 2, figsize=(10, 5))
f.suptitle("correlation between confidence and class size")
pd.Series(
{ct: class_df_dict[ct].shape[0] for ct in mean_acc_dict.keys()}
).plot.bar(ax=axes[0])
pd.Series(
{ct: mean_acc_dict[ct][ct] for ct in mean_acc_dict.keys()}
).plot.bar(ax=axes[1])
def plot_class_accuracy(self, ID, layout=True, **kwargs):
"""
mode is either bar (average) or box (boxplot)
"""
adata = self.latent_spaces[ID]
workflow = self.workflow_list[ID]
true_key = f"true_{workflow.class_key}"
pred_key = f"{workflow.class_key}_pred"
labels = adata.obs[true_key].cat.categories
conf_mat = pd.DataFrame(
confusion_matrix(
adata.obs[true_key], adata.obs[pred_key], labels=labels
),
index=labels,
columns=labels,
)
n = math.ceil(np.sqrt(len(labels)))
f, axes = plt.subplots(n, n, constrained_layout=layout)
f.suptitle("Accuracy & confusion by celltype")
# plt.constrained_layout()
i = 0
for ct in labels:
r = i // n
c = i % n
ax = axes[r, c]
df = conf_mat.loc[ct, :] / conf_mat.loc[ct, :].sum()
df.plot.bar(ax=ax, figsize=(20, 15), ylim=(0, 1), **kwargs)
ax.tick_params(axis="x", labelrotation=90)
ax.set_title(ct + f"- {conf_mat.loc[ct,:].sum()} cells")
i += 1
def plot_history(self, split_by, DR_or_pred, value, params=None):
"""
        Plots training history information.
        split_by : parameter to split the plots by
        value : one of 'loss', 'lr' (DR only), 'acc', 'val_acc', 'val_loss' (pred only)
params : parameters to put in the legend
"""
metrics_to_plot = self.metrics_table.copy()
if type(split_by) == list:
meta_split_by = "_".join(split_by)
col_to_plot = pd.Series(
metrics_to_plot[split_by[0]].astype(str),
index=metrics_to_plot.index,
)
for h in split_by[1:]:
col_to_plot = (
col_to_plot + "_" + metrics_to_plot[h].astype(str)
)
metrics_to_plot[meta_split_by] = col_to_plot
else:
meta_split_by = split_by
split_id = {}
for split in metrics_to_plot[meta_split_by].unique():
split_id[split] = metrics_to_plot.loc[
metrics_to_plot[meta_split_by] == split, "workflow_ID"
]
for split, IDs in split_id.items():
plt.figure(figsize=(10, 8))
sub_metrics = self.runfile_df.loc[
self.runfile_df["workflow_ID"].isin(IDs)
]
legend = pd.Series("", index=sub_metrics.index)
if params:
for param in params:
legend += (
param + "_" + sub_metrics[param].astype(str) + "_"
)
else:
nunique = sub_metrics.nunique()
cols_to_leg = list(nunique[nunique > 1].index)
for col in cols_to_leg:
legend += col + "_" + sub_metrics[col].astype(str) + "_"
for ID in IDs:
DR_hist_path = self.workflow_list[ID].DR_history_path
pred_hist_path = self.workflow_list[ID].pred_history_path
if DR_or_pred == "DR":
try:
with open(DR_hist_path, "rb") as f:
data = pickle.load(f)
except FileNotFoundError:
print(f"DR hist was not found for ID {ID}")
elif DR_or_pred == "pred":
try:
with open(pred_hist_path, "rb") as f:
data = pickle.load(f)
except FileNotFoundError:
print(f"pred hist was not found for ID {ID}")
plt.plot(data[value])
plt.title(split)
plt.tight_layout()
plt.legend(legend, loc="center left", bbox_to_anchor=(1, 0.5))
def plot_single_pred_hist(self, IDs_to_plot=None, params=None):
metrics_to_plot = self.metrics_table.copy()
if not IDs_to_plot:
IDs_to_plot = metrics_to_plot["workflow_ID"]
for ID in IDs_to_plot:
plt.figure()
pred_hist_path = self.workflow_list[ID].pred_history_path
            try:
                with open(pred_hist_path, "rb") as f:
                    data = pickle.load(f)
            except FileNotFoundError:
                print(f"pred hist was not found for ID {ID}")
                continue  # skip this workflow if its history file is missing
            pd.DataFrame(data).plot(figsize=(8, 5))
if params:
title = ""
for param in params:
title += (
param
+ "_"
+ metrics_to_plot.loc[ID, param].astype(str)
+ "_"
)
else:
title = "worklow_ID_" + str(
metrics_to_plot.loc[ID, "workflow_ID"]
)
plt.title(title)
plt.tight_layout()
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/analysis.py
| 0.621541 | 0.170301 |
analysis.py
|
pypi
|
import yaml
def load_runfile(runfile_path):
workflow_ID = 1
yaml_config_dict = {
"dataset": "dataset", # keys are the name in the runfile, values are the "official" keys
"dataset_name": "dataset_name",
"filter_min_counts": "filter_min_counts",
"normalize_size_factors": "normalize_size_factors",
"scale_input": "scale_input",
"logtrans_input": "logtrans_input",
"class_key": "class_key",
"model_spec": "model_spec",
"ae_type": "ae_type",
"hidden_size": "hidden_size",
"hidden_dropout": "hidden_dropout",
"batchnorm": "batchnorm",
"activation": "activation",
"init": "init",
"training_spec": "training_spec",
"epochs": "epochs",
"reduce_lr": "reduce_lr",
"early_stop": "early_stop",
"batch_size": "batch_size",
"optimizer": "optimizer",
"verbose": "verbose",
"threads": "threads",
"learning_rate": "learning_rate",
"n_perm": "n_perm",
"permute": "permute",
"semi_sup": "semi_sup",
"unlabelled_category": "unlabelled_category",
"train_split": "train_split",
"mode": "mode",
"pct_split": "pct_split",
"obs_key": "obs_key",
"n_keep": "n_keep",
"keep_obs": "keep_obs",
"random_seed": "random_seed",
"obs_subsample": "obs_subsample",
"train_test_random_seed": "train_test_random_seed",
"fake_annotation": "fake_annotation",
"true_celltype": "true_celltype",
"false_celltype": "false_celltype",
"pct_false": "pct_false",
"predictor_activation": "predictor_activation",
}
    with open(runfile_path) as yml:
        parsed_yml = yaml.load(yml, Loader=yaml.FullLoader)
n_hidden = parsed_yml["model_spec"]["hidden_size"]
for key in parsed_yml.keys():
if type(parsed_yml[key]) == dict:
for ki in parsed_yml[key].keys():
if parsed_yml[key][ki] == "None":
parsed_yml[key][ki] = None
if (
type(parsed_yml[key][ki]) == str
and "(" in parsed_yml[key][ki]
): # Tuples are parsed as string, we convert them back to tuples
parsed_yml[key][ki] = tuple(
[
int(i)
for i in parsed_yml[key][ki]
.strip("(")
.strip(")")
.replace(" ", "")
.split(",")
]
)
return parsed_yml
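# A minimal usage sketch (the runfile path below is hypothetical): load_runfile
# parses the YAML runfile, turns the string "None" back into None and converts
# tuple-looking strings such as "(64, 32, 64)" back into tuples of ints.
if __name__ == "__main__":
    params = load_runfile("runfiles/example_runfile.yaml")
    print(params["model_spec"]["hidden_size"])   # e.g. (64, 32, 64)
    print(params["training_spec"]["epochs"])     # e.g. 300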
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/load.py
| 0.437824 | 0.394318 |
load.py
|
pypi
|
import os
import random
import shutil
import tempfile
import anndata
import numpy as np
import scanpy as sc
try:
import tensorflow as tf
except ImportError:
raise ImportError(
"DCA requires tensorflow. Please follow instructions"
" at https://www.tensorflow.org/install/ to install"
" it."
)
from .io import normalize, read_dataset
from .network import AE_types
from .train import train
def dca(
adata,
mode="denoise",
ae_type="zinb-conddisp",
normalize_per_cell=True,
scale=True,
log1p=True,
hidden_size=(64, 32, 64), # network args
hidden_dropout=0.0,
batchnorm=True,
activation="relu",
init="glorot_uniform",
network_kwds={},
epochs=300, # training args
reduce_lr=10,
early_stop=15,
batch_size=32,
optimizer="RMSprop",
learning_rate=None,
random_state=0,
threads=None,
verbose=False,
training_kwds={},
return_model=False,
return_info=False,
copy=False,
permute=True,
):
"""Deep count autoencoder(DCA) API.
Fits a count autoencoder to the count data given in the anndata object
in order to denoise the data and capture hidden representation of
cells in low dimensions. Type of the autoencoder and return values are
determined by the parameters.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
An anndata file with `.raw` attribute representing raw counts.
mode : `str`, optional. `denoise`(default), or `latent`.
`denoise` overwrites `adata.X` with denoised expression values.
In `latent` mode DCA adds `adata.obsm['X_dca']` to given adata
        object. This matrix represents the latent representation of cells computed by DCA.
ae_type : `str`, optional. `zinb-conddisp`(default), `zinb`, `nb-conddisp` or `nb`.
        Type of the autoencoder. Return values and the architecture are
        determined by the type, e.g. `nb` does not provide dropout
probabilities.
normalize_per_cell : `bool`, optional. Default: `True`.
If true, library size normalization is performed using
the `sc.pp.normalize_per_cell` function in Scanpy and saved into adata
        object. The mean layer re-introduces library size differences by
scaling the mean value of each cell in the output layer. See the
manuscript for more details.
scale : `bool`, optional. Default: `True`.
If true, the input of the autoencoder is centered using
`sc.pp.scale` function of Scanpy. Note that the output is kept as raw
counts as loss functions are designed for the count data.
log1p : `bool`, optional. Default: `True`.
If true, the input of the autoencoder is log transformed with a
pseudocount of one using `sc.pp.log1p` function of Scanpy.
hidden_size : `tuple` or `list`, optional. Default: (64, 32, 64).
Width of hidden layers.
hidden_dropout : `float`, `tuple` or `list`, optional. Default: 0.0.
Probability of weight dropout in the autoencoder (per layer if list
or tuple).
batchnorm : `bool`, optional. Default: `True`.
If true, batch normalization is performed.
activation : `str`, optional. Default: `relu`.
Activation function of hidden layers.
init : `str`, optional. Default: `glorot_uniform`.
Initialization method used to initialize weights.
network_kwds : `dict`, optional.
Additional keyword arguments for the autoencoder.
epochs : `int`, optional. Default: 300.
Number of total epochs in training.
reduce_lr : `int`, optional. Default: 10.
Reduces learning rate if validation loss does not improve in given number of epochs.
early_stop : `int`, optional. Default: 15.
Stops training if validation loss does not improve in given number of epochs.
batch_size : `int`, optional. Default: 32.
Number of samples in the batch used for SGD.
learning_rate : `float`, optional. Default: None.
Learning rate to use in the training.
optimizer : `str`, optional. Default: "RMSprop".
Type of optimization method used for training.
random_state : `int`, optional. Default: 0.
Seed for python, numpy and tensorflow.
threads : `int` or None, optional. Default: None
Number of threads to use in training. All cores are used by default.
verbose : `bool`, optional. Default: `False`.
If true, prints additional information about training and architecture.
training_kwds : `dict`, optional.
Additional keyword arguments for the training process.
return_model : `bool`, optional. Default: `False`.
If true, trained autoencoder object is returned. See "Returns".
return_info : `bool`, optional. Default: `False`.
If true, all additional parameters of DCA are stored in `adata.obsm` such as dropout
probabilities (obsm['X_dca_dropout']) and estimated dispersion values
(obsm['X_dca_dispersion']), in case that autoencoder is of type
zinb or zinb-conddisp.
copy : `bool`, optional. Default: `False`.
If true, a copy of anndata is returned.
Returns
-------
If `copy` is true and `return_model` is false, AnnData object is returned.
In "denoise" mode, `adata.X` is overwritten with the denoised values. In "latent" mode, latent
        low dimensional representation of cells is stored in `adata.obsm['X_dca']` and `adata.X`
is not modified. Note that these values are not corrected for library size effects.
If `return_info` is true, all estimated distribution parameters are stored in AnnData such as:
- `.obsm["X_dca_dropout"]` which is the mixture coefficient (pi) of the zero component
in ZINB, i.e. dropout probability. (Only if ae_type is zinb or zinb-conddisp)
- `.obsm["X_dca_dispersion"]` which is the dispersion parameter of NB.
- `.uns["dca_loss_history"]` which stores the loss history of the training.
Finally, the raw counts are stored as `.raw`.
        If `return_model` is true, the trained model is returned. When both `copy` and `return_model`
are true, a tuple of anndata and model is returned in that order.
"""
print("entering_dca")
assert isinstance(
adata, anndata.AnnData
), "adata must be an AnnData instance"
assert mode in ("denoise", "latent"), "%s is not a valid mode." % mode
# set seed for reproducibility
random.seed(random_state)
np.random.seed(random_state)
tf.set_random_seed(random_state)
os.environ["PYTHONHASHSEED"] = "0"
# this creates adata.raw with raw counts and copies adata if copy==True
adata = read_dataset(adata, transpose=False, test_split=False, copy=copy)
# check for zero genes
nonzero_genes, _ = sc.pp.filter_genes(adata.X, min_counts=1)
assert (
nonzero_genes.all()
), "Please remove all-zero genes before using DCA."
adata = normalize(
adata,
filter_min_counts=False, # no filtering, keep cell and gene idxs same
size_factors=normalize_per_cell,
normalize_input=scale,
logtrans_input=log1p,
)
network_kwds = {
**network_kwds,
"hidden_size": hidden_size,
"hidden_dropout": hidden_dropout,
"batchnorm": batchnorm,
"activation": activation,
"init": init,
}
input_size = output_size = adata.n_vars
net = AE_types[ae_type](
input_size=input_size, output_size=output_size, **network_kwds
)
net.save()
net.build()
training_kwds = {
**training_kwds,
"epochs": epochs,
"reduce_lr": reduce_lr,
"early_stop": early_stop,
"batch_size": batch_size,
"optimizer": optimizer,
"verbose": verbose,
"threads": threads,
"learning_rate": learning_rate,
"permute": permute,
}
hist = train(adata[adata.obs.dca_split == "train"], net, **training_kwds)
res = net.predict(adata, mode, return_info, copy)
adata = res if copy else adata
if return_info:
adata.uns["dca_loss_history"] = hist.history
if return_model:
return (adata, net) if copy else net
else:
return adata if copy else None
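# A minimal usage sketch (synthetic counts, hedged): a real analysis would start from an
# AnnData object holding raw counts in `.X`; the parameter values below are illustrative only.
if __name__ == "__main__":
    counts = np.random.negative_binomial(5, 0.3, size=(200, 100)).astype(float)
    toy = anndata.AnnData(counts)
    # "latent" mode leaves toy.X untouched and stores the bottleneck in toy.obsm["X_dca"]
    dca(toy, mode="latent", epochs=10, verbose=True, permute=False)
    print(toy.obsm["X_dca"].shape)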
|
/sc_permut-0.1.1-py3-none-any.whl/sc_permut/api.py
| 0.863823 | 0.61438 |
api.py
|
pypi
|
import logging
import numpy as np
import pandas as pd
from config42 import ConfigManager
from sc_analyzer_base import ManifestUtils
from sc_retail_analysis.analyzer.base_analyzer import BaseAnalyzer
class CreditCardAmountAnalyzer(BaseAnalyzer):
"""
    Analysis of large-amount credit card installment issuance volume
"""
def __init__(self, *, config: ConfigManager, excel_writer: pd.ExcelWriter):
super().__init__(config=config, excel_writer=excel_writer)
self._key_enabled = "retail.credit_card.credit_amount.enabled"
self._key_business_type = "retail.credit_card.credit_amount.business_type"
self._key_export_column_list = "retail.credit_card.credit_amount.sheet_config.export_column_list"
def _read_config(self, *, config: ConfigManager):
        # Path of the credit card large installment report file
        self._src_filepath = config.get("retail.credit_card.credit_amount.source_file_path")
        # Sheet name
        self._sheet_name = config.get("retail.credit_card.credit_amount.sheet_name")
        # Header row index
        self._header_row = config.get("retail.credit_card.credit_amount.sheet_config.header_row")
        # Employee ID column index
        self._id_column = self._calculate_column_index_from_config(
            config, "retail.credit_card.credit_amount.sheet_config.id_column"
        )
        # Employee name column index
        self._name_column = self._calculate_column_index_from_config(
            config, "retail.credit_card.credit_amount.sheet_config.name_column"
        )
        # Pairs of column indices to aggregate and their output column names
self._init_value_column_config(config, "retail.credit_card.credit_amount.sheet_config.value_column_pairs")
def _read_src_file(self) -> pd.DataFrame:
logging.getLogger(__name__).info("读取源文件:{}".format(self._src_filepath))
data = pd.read_excel(self._src_filepath, sheet_name=self._sheet_name, header=self._header_row)
self._id_column_name = data.columns[self._id_column]
self._name_column_name = data.columns[self._name_column]
self._init_value_column_pairs(data)
if not data.empty:
            # Columns before the employee ID column may contain total rows, drop them
for index in range(self._id_column):
                # Filter out rows marked '合计' (total)
if not data.empty:
criterion = data[data.columns[index]].map(lambda x: x != '合计')
data = data[criterion].copy()
return data
def _add_export_column_manifest_branch(self, origin_data: pd.DataFrame):
if origin_data is None or origin_data.empty:
return origin_data
        # Merge with the staff roster to add the roster department column
data = origin_data.merge(
ManifestUtils.get_name_branch_data_frame(),
how="left",
left_on=[self._name_column_name],
right_on=[ManifestUtils.get_name_column_name()]
)
return data
def _rename_target_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
df = data.rename(columns=self._value_column_pairs)
return df
def _pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
index_columns = [self._id_column_name, self._name_column_name]
value_columns = self._get_value_columns()
logging.getLogger(__name__).info("按{} 透视数据项:{}".format(
index_columns,
value_columns,
))
if data.empty:
return pd.DataFrame(columns=index_columns + value_columns)
table = pd.pivot_table(data, values=value_columns,
index=index_columns,
aggfunc=np.sum, fill_value=0)
return table
def _after_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.reset_index()
def _merge_with_manifest(self, *, manifest_data: pd.DataFrame, data: pd.DataFrame) -> pd.DataFrame:
logging.getLogger(__name__).info("与花名册合并...")
merge_result = ManifestUtils.merge_with_manifest(manifest_data=manifest_data, data=data,
id_column_name=self._id_column_name,
name_column_name=self._name_column_name)
return merge_result
def _drop_duplicated_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.drop(columns=[self._id_column_name, self._name_column_name])
def _add_target_columns(self) -> None:
self._add_value_pair_target_columns()
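# A small illustrative sketch (synthetic data, invented column names) of what the
# _pivot_table step above produces: value columns summed per employee ID and name.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "employee_id": [1001, 1001, 1002],
        "employee_name": ["A", "A", "B"],
        "amount": [10.0, 5.0, 7.5],
    })
    table = pd.pivot_table(demo, values=["amount"], index=["employee_id", "employee_name"],
                           aggfunc=np.sum, fill_value=0)
    print(table.reset_index())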
|
/sc_retail_analysis-0.0.49-py3-none-any.whl/sc_retail_analysis/credit_card/credit_card_amount_analyzer.py
| 0.411584 | 0.16848 |
credit_card_amount_analyzer.py
|
pypi
|
import logging
import numpy as np
import pandas as pd
from config42 import ConfigManager
from sc_analyzer_base import ManifestUtils
from sc_retail_analysis.analyzer.base_analyzer import BaseAnalyzer
class LargeInstallmentsAnalyzer(BaseAnalyzer):
"""
    Analysis of large-amount credit card installment balances
"""
def __init__(self, *, config: ConfigManager, excel_writer: pd.ExcelWriter):
super().__init__(config=config, excel_writer=excel_writer)
self._key_enabled = "retail.loan.large_installments.enabled"
self._key_business_type = "retail.loan.large_installments.business_type"
self._key_export_column_list = "retail.loan.large_installments.sheet_config.export_column_list"
def _read_config(self, *, config: ConfigManager):
        # Path of the credit card large installment report file
        self._src_filepath = config.get("retail.loan.large_installments.source_file_path")
        # Sheet name
        self._sheet_name = config.get("retail.loan.large_installments.sheet_name")
        # Header row index
        self._header_row = config.get("retail.loan.large_installments.sheet_config.header_row")
        # Employee ID column index
        self._id_column = self._calculate_column_index_from_config(
            config, "retail.loan.large_installments.sheet_config.id_column"
        )
        # Employee name column index
        self._name_column = self._calculate_column_index_from_config(
            config, "retail.loan.large_installments.sheet_config.name_column"
        )
        # Pairs of column indices to aggregate and their output column names
key = "retail.loan.large_installments.sheet_config.value_column_pairs"
self._init_value_column_config(config, key)
def _read_src_file(self) -> pd.DataFrame:
logging.getLogger(__name__).info("读取源文件:{}".format(self._src_filepath))
data = pd.read_excel(self._src_filepath, sheet_name=self._sheet_name, header=self._header_row)
self._id_column_name = data.columns[self._id_column]
self._name_column_name = data.columns[self._name_column]
self._init_value_column_pairs(data)
if not data.empty:
            # Columns before the employee ID column may contain total rows, drop them
for index in range(self._id_column):
                # Filter out rows marked '合计' (total)
if not data.empty:
criterion = data[data.columns[index]].map(lambda x: x != '合计')
data = data[criterion].copy()
return data
def _add_export_column_manifest_branch(self, origin_data: pd.DataFrame):
if origin_data is None or origin_data.empty:
return origin_data
        # Merge with the staff roster to add the roster department column
data = origin_data.merge(
ManifestUtils.get_name_branch_data_frame(),
how="left",
left_on=[self._name_column_name],
right_on=[ManifestUtils.get_name_column_name()]
)
return data
def _rename_target_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
df = data.rename(columns=self._value_column_pairs)
return df
def _pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
index_columns = [self._id_column_name, self._name_column_name]
value_columns = self._get_value_columns()
logging.getLogger(__name__).info("按{} 透视数据项:{}".format(
index_columns,
value_columns,
))
if data.empty:
return pd.DataFrame(columns=index_columns + value_columns)
table = pd.pivot_table(data, values=value_columns,
index=index_columns,
aggfunc=np.sum, fill_value=0)
return table
def _after_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.reset_index()
def _merge_with_manifest(self, *, manifest_data: pd.DataFrame, data: pd.DataFrame) -> pd.DataFrame:
logging.getLogger(__name__).info("与花名册合并...")
merge_result = ManifestUtils.merge_with_manifest(manifest_data=manifest_data, data=data,
id_column_name=self._id_column_name,
name_column_name=self._name_column_name)
return merge_result
def _drop_duplicated_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.drop(columns=[self._id_column_name, self._name_column_name])
def _add_target_columns(self) -> None:
self._add_value_pair_target_columns()
|
/sc_retail_analysis-0.0.49-py3-none-any.whl/sc_retail_analysis/loan/large_installments_analyzer.py
| 0.406037 | 0.157105 |
large_installments_analyzer.py
|
pypi
|
import logging
import numpy as np
import pandas as pd
from config42 import ConfigManager
from sc_analyzer_base import BranchUtils, ManifestUtils
from sc_retail_analysis.analyzer.base_analyzer import BaseAnalyzer
class EasyLoanAnalyzer(BaseAnalyzer):
"""
    Analysis of the Easy Loan (易得贷) product
"""
def __init__(self, *, config: ConfigManager, excel_writer: pd.ExcelWriter):
super().__init__(config=config, excel_writer=excel_writer)
self._key_enabled = "retail.loan.easy_loan.enabled"
self._key_business_type = "retail.loan.easy_loan.business_type"
self._key_export_column_list = "retail.loan.easy_loan.sheet_config.export_column_list"
def _read_config(self, *, config: ConfigManager):
        # Path of the Easy Loan account manager performance report file
        self._src_filepath = config.get("retail.loan.easy_loan.source_file_path")
        # Sheet name
        self._sheet_name = config.get("retail.loan.easy_loan.sheet_name")
        # Header row index
        self._header_row = config.get("retail.loan.easy_loan.sheet_config.header_row")
        # Employee ID column index
        self._id_column = self._calculate_column_index_from_config(
            config, "retail.loan.easy_loan.sheet_config.id_column"
        )
        # Account manager name column index
        self._name_column = self._calculate_column_index_from_config(
            config, "retail.loan.easy_loan.sheet_config.name_column"
        )
        # Branch column index
        self._branch_column = self._calculate_column_index_from_config(
            config, "retail.loan.easy_loan.sheet_config.branch_column"
        )
        # Pairs of column indices to aggregate and their output column names
key = "retail.loan.easy_loan.sheet_config.value_column_pairs"
self._init_value_column_config(config, key)
def _read_src_file(self) -> pd.DataFrame:
logging.getLogger(__name__).info("读取源文件:{}".format(self._src_filepath))
data = pd.read_excel(self._src_filepath, sheet_name=self._sheet_name, header=self._header_row)
self._id_column_name = data.columns[self._id_column]
self._name_column_name = data.columns[self._name_column]
self._branch_column_name = data.columns[self._branch_column]
self._init_value_column_pairs(data)
return data
def _add_export_column_manifest_branch(self, origin_data: pd.DataFrame):
if origin_data is None or origin_data.empty:
return origin_data
        # Merge with the staff roster to add the roster department column
data = origin_data.merge(
ManifestUtils.get_name_branch_data_frame(),
how="left",
left_on=[self._name_column_name],
right_on=[ManifestUtils.get_name_column_name()]
)
return data
def _rename_target_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
df = data.rename(columns=self._value_column_pairs)
return df
def _mapping_leave_employee(self, *, data: pd.DataFrame) -> pd.DataFrame:
"""
        Map employees who have left to the corresponding account manager
        :param data: the original DataFrame
        :return: the DataFrame after the mapping
"""
mapping = ManifestUtils.get_leave_employee_mapping()
name_mapping = dict.fromkeys(mapping, np.nan)
data = data.replace({self._name_column_name: name_mapping})
return data
def _replace_common_account(self, *, df: pd.DataFrame, id_column_name: str, name_column_name: str,
branch_column_name: str):
"""
        Handle common (shared) accounts.
        If the account manager is a common account, attribute it to the corresponding branch
        by replacing the account manager name with the branch name.
        :param df: DataFrame
        :param id_column_name: name of the ID column
        :param name_column_name: name of the account manager column
        :param branch_column_name: name of the branch column
        :return: DataFrame with common accounts and branch names replaced
"""
for row_i, row in df.iterrows():
id_value = row[id_column_name]
name = row[name_column_name]
branch_name = row[branch_column_name]
new_branch_name = BranchUtils.replace_branch_name(branch_name=branch_name)
if name is np.nan or id_value == 0:
df.at[row_i, name_column_name] = new_branch_name
df.at[row_i, id_column_name] = 0
def _pre_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
data = self._mapping_leave_employee(data=data)
logging.getLogger(__name__).info("开始处理公共户信息...")
self._replace_common_account(df=data, id_column_name=self._id_column_name,
name_column_name=self._name_column_name,
branch_column_name=self._branch_column_name)
logging.getLogger(__name__).info("解决姓名与工号不匹配的问题...")
df = ManifestUtils.fix_name_error(data, self._id_column_name, self._name_column_name)
return df
def _pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
index_columns = [self._id_column_name, self._name_column_name]
value_columns = self._get_value_columns()
logging.getLogger(__name__).info("按{} 透视数据项:{}".format(
index_columns,
value_columns,
))
if data.empty:
return pd.DataFrame(columns=index_columns + value_columns)
table = pd.pivot_table(data, values=value_columns,
index=index_columns,
aggfunc=np.sum, fill_value=0)
return table
def _after_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.reset_index()
def _merge_with_manifest(self, *, manifest_data: pd.DataFrame, data: pd.DataFrame) -> pd.DataFrame:
logging.getLogger(__name__).info("与花名册合并...")
merge_result = ManifestUtils.merge_with_manifest(manifest_data=manifest_data, data=data,
id_column_name=self._id_column_name,
name_column_name=self._name_column_name)
return merge_result
def _drop_duplicated_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.drop(columns=[self._name_column_name])
def _add_target_columns(self) -> None:
self._add_value_pair_target_columns()
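# A small illustrative sketch (synthetic data) of the common-account handling above:
# rows whose account manager is missing (or whose ID is 0) are attributed to their branch.
if __name__ == "__main__":
    demo = pd.DataFrame({
        "id": [1001, 0],
        "manager": ["A", np.nan],
        "branch": ["Branch X", "Branch Y"],
    })
    for i, row in demo.iterrows():
        if row["manager"] is np.nan or row["id"] == 0:
            demo.at[i, "manager"] = row["branch"]
            demo.at[i, "id"] = 0
    print(demo)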
|
/sc_retail_analysis-0.0.49-py3-none-any.whl/sc_retail_analysis/loan/easy_loan_analyzer.py
| 0.409575 | 0.191781 |
easy_loan_analyzer.py
|
pypi
|
import logging
import numpy as np
import pandas as pd
from config42 import ConfigManager
from sc_analyzer_base import ManifestUtils
from sc_retail_analysis.analyzer.base_analyzer import BaseAnalyzer
class LoanDetailAnalyzer(BaseAnalyzer):
"""
    Loan detail analysis
"""
def __init__(self, *, config: ConfigManager, excel_writer: pd.ExcelWriter):
self._business_type_mapping_loaded = False
self._business_type_mapping = dict()
super().__init__(config=config, excel_writer=excel_writer)
self._key_enabled = "retail.loan.loan_detail.enabled"
self._key_business_type = "retail.loan.loan_detail.business_type"
self._key_export_column_list = "retail.loan.loan_detail.sheet_config.export_column_list"
        # Raw data keyed by client name
        self._client_origin_data = pd.DataFrame()
        # Data aggregated by client name
        self._client_data = pd.DataFrame()
def get_business_type_mapping(self) -> dict:
"""
        Mapping between business type and business category
        :return: the mapping between business type and business category
"""
return self._business_type_mapping
def _load_business_type_mapping(self, config: ConfigManager):
"""
        Load the mapping between business type and business category
:return:
"""
if self._business_type_mapping_loaded:
return
mapping = config.get("retail.loan.loan_detail.business_type_mapping")
self._business_type_mapping.update(mapping)
self._business_type_mapping_loaded = True
def _read_config(self, *, config: ConfigManager):
self._load_business_type_mapping(config)
        # Path of the loan detail report file
        self._src_filepath = config.get("retail.loan.loan_detail.source_file_path")
        # Sheet name
        self._sheet_name = config.get("retail.loan.loan_detail.sheet_name")
        # Header row index
        self._header_row = config.get("retail.loan.loan_detail.sheet_config.header_row")
        # Account manager column index
        self._name_column = self._calculate_column_index_from_config(
            config, "retail.loan.loan_detail.sheet_config.name_column"
        )
        # Client name column index
        self._client_name_column = self._calculate_column_index_from_config(
            config, "retail.loan.loan_detail.sheet_config.client_name_column"
        )
        # Client ID number column index
        self._client_id_column = self._calculate_column_index_from_config(
            config, "retail.loan.loan_detail.sheet_config.client_id_column"
        )
        # Balance column index
        self._balance_column = self._calculate_column_index_from_config(
            config, "retail.loan.loan_detail.sheet_config.balance_column"
        )
        # Business type column index
        self._business_type_column = self._calculate_column_index_from_config(
            config, "retail.loan.loan_detail.sheet_config.business_type_column"
        )
        self._business_genre_column_name = "业务大类"  # output column name ("business category"), kept in Chinese because it is written to the report
def _read_src_file(self) -> pd.DataFrame:
logging.getLogger(__name__).info("读取源文件:{}".format(self._src_filepath))
data = pd.read_excel(self._src_filepath, sheet_name=self._sheet_name, header=self._header_row)
self._name_column_name = data.columns[self._name_column]
self._client_name_column_name = data.columns[self._client_name_column]
self._client_id_column_name = data.columns[self._client_id_column]
self._balance_column_name = data.columns[self._balance_column]
self._business_type_column_name = data.columns[self._business_type_column]
        # Mapping between business type and business category
        mapping = self.get_business_type_mapping()
        # Keep only the business types that need to be aggregated
        if not data.empty:
            criterion = data[self._business_type_column_name].map(lambda x: x in mapping.keys())
            data = data[criterion].copy()
        # Keep only records with a non-zero balance
if not data.empty:
criterion = data[self._balance_column_name].map(lambda x: x != 0)
data = data[criterion].copy()
return data
def _add_export_column_manifest_branch(self, origin_data: pd.DataFrame):
if origin_data is None or origin_data.empty:
return origin_data
        # Merge with the staff roster to add the roster department column
data = origin_data.merge(
ManifestUtils.get_name_branch_data_frame(),
how="left",
left_on=[self._name_column_name],
right_on=[ManifestUtils.get_name_column_name()]
)
return data
def _mapping_leave_employee(self, *, data: pd.DataFrame) -> pd.DataFrame:
"""
        Map employees who have left to the corresponding account manager
        :param data: the original DataFrame
        :return: the DataFrame after the mapping
"""
mapping = ManifestUtils.get_leave_employee_mapping()
result = data.replace({self._name_column_name: mapping})
return result
def _pre_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
        # Add the business category column
        data[self._business_genre_column_name] = data[self._business_type_column_name]
        # Mapping between business type and business category
        mapping = self.get_business_type_mapping()
        # Map each business type to its business category
        data = data.replace({self._business_genre_column_name: mapping})
        # Keep only the business categories that need to be aggregated
if not data.empty:
criterion = data[self._business_genre_column_name].map(lambda x: x in mapping.values())
data = data[criterion].copy()
data = self._mapping_leave_employee(data=data)
self._client_origin_data = data.copy()
return data
def _pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
index_columns = [self._name_column_name]
value_columns = [self._balance_column_name]
if data.empty:
return pd.DataFrame(columns=index_columns + value_columns)
table = pd.pivot_table(
data=data, values=value_columns,
index=index_columns,
columns=[self._business_genre_column_name],
aggfunc=np.sum,
fill_value=0,
)
if self._client_origin_data is not None and (not self._client_origin_data.empty):
index_columns = [
self._client_name_column_name,
self._client_id_column_name,
self._name_column_name,
]
self._client_data = pd.pivot_table(
data=self._client_origin_data,
values=value_columns,
index=index_columns,
columns=[self._business_genre_column_name],
aggfunc=np.sum,
fill_value=0,
)
return table
def _after_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
data.columns = data.columns.droplevel(0)
self._client_data.columns = self._client_data.columns.droplevel(0)
self._client_data = self._client_data.reset_index()
return data.reset_index()
def _merge_with_manifest(self, *, manifest_data: pd.DataFrame, data: pd.DataFrame) -> pd.DataFrame:
logging.getLogger(__name__).info("与花名册合并...")
merge_result = ManifestUtils.merge_with_manifest(
manifest_data=manifest_data,
data=data,
name_column_name=self._name_column_name,
)
self._client_data = ManifestUtils.merge_with_manifest(
manifest_data=manifest_data,
data=self._client_data,
name_column_name=self._name_column_name,
how="right",
)
return merge_result
def _drop_duplicated_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.drop(columns=[self._name_column_name])
def _add_target_columns(self) -> None:
        # Mapping between business type and business category
        mapping = self.get_business_type_mapping()
        # Business categories that need to be aggregated
for column in mapping.values():
self._add_target_column(column)
def write_detail_report(self, data: pd.DataFrame):
super().write_detail_report(data=data)
        # Return immediately if this analyzer is not enabled
        if not self._enabled():
            return
        # No data to write
        if self._client_data is None or self._client_data.empty:
            return
        # Write out the data aggregated at client level
self._client_data.to_excel(
excel_writer=self._excel_writer,
index=False,
sheet_name=self._business_type + "-按客户统计",
)
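# A small illustrative sketch (synthetic data) of the category mapping and pivot used above:
# business types are mapped to categories, then balances are summed per manager with one
# output column per category, mirroring the droplevel(0) step in _after_pivot_table.
if __name__ == "__main__":
    mapping = {"type_a": "category_1", "type_b": "category_2"}
    demo = pd.DataFrame({
        "manager": ["A", "A", "B"],
        "business_type": ["type_a", "type_b", "type_a"],
        "balance": [100.0, 50.0, 75.0],
    })
    demo["business_genre"] = demo["business_type"].map(mapping)
    table = pd.pivot_table(demo, values=["balance"], index=["manager"],
                           columns=["business_genre"], aggfunc=np.sum, fill_value=0)
    table.columns = table.columns.droplevel(0)
    print(table.reset_index())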
|
/sc_retail_analysis-0.0.49-py3-none-any.whl/sc_retail_analysis/loan/loan_detail_analyzer.py
| 0.420362 | 0.175326 |
loan_detail_analyzer.py
|
pypi
|
import logging
import numpy as np
import pandas as pd
from config42 import ConfigManager
from sc_analyzer_base import BranchUtils, ManifestUtils
from sc_retail_analysis.analyzer.base_analyzer import BaseAnalyzer
class DepositAnalyzer(BaseAnalyzer):
"""
    Deposit analysis
"""
def __init__(self, *, config: ConfigManager, excel_writer: pd.ExcelWriter):
super().__init__(config=config, excel_writer=excel_writer)
self._key_enabled = "retail.non_interest.deposit.enabled"
self._key_business_type = "retail.non_interest.deposit.business_type"
self._key_export_column_list = "retail.non_interest.deposit.sheet_config.export_column_list"
def _read_config(self, *, config: ConfigManager):
self._src_filepath = config.get("retail.non_interest.deposit.source_file_path")
self._sheet_name = config.get("retail.non_interest.deposit.sheet_name")
self._header_row = config.get("retail.non_interest.deposit.sheet_config.header_row")
        # Employee ID column index
        self._id_column = self._calculate_column_index_from_config(
            config, "retail.non_interest.deposit.sheet_config.id_column"
        )
        # Name column index
        self._name_column = self._calculate_column_index_from_config(
            config, "retail.non_interest.deposit.sheet_config.name_column"
        )
        # Branch column index
        self._branch_column = self._calculate_column_index_from_config(
            config, "retail.non_interest.deposit.sheet_config.branch_column"
        )
        # Pairs of column indices to aggregate and their output column names
key = "retail.non_interest.deposit.sheet_config.value_column_pairs"
self._init_value_column_config(config, key)
def _read_src_file(self) -> pd.DataFrame:
logging.getLogger(__name__).info("读取源文件:{}".format(self._src_filepath))
data = pd.read_excel(self._src_filepath, sheet_name=self._sheet_name, header=self._header_row, thousands=",")
self._id_column_name = data.columns[self._id_column]
self._name_column_name = data.columns[self._name_column]
self._branch_column_name = data.columns[self._branch_column]
self._init_value_column_pairs(data)
return data
def _add_export_column_manifest_branch(self, origin_data: pd.DataFrame):
if origin_data is None or origin_data.empty:
return origin_data
        # Merge with the staff roster to add the roster department column
data = origin_data.merge(
ManifestUtils.get_name_branch_data_frame(),
how="left",
left_on=[self._name_column_name],
right_on=[ManifestUtils.get_name_column_name()]
)
return data
def _rename_target_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
df = data.rename(columns=self._value_column_pairs)
return df
def _replace_common_account(self, *, df: pd.DataFrame, id_column_name: str, name_column_name: str,
branch_column_name: str):
"""
        Handle common (shared) accounts.
        If the account manager is a common account, attribute it to the corresponding branch
        by replacing the account manager name with the branch name.
        :param df: DataFrame
        :param id_column_name: name of the ID column
        :param name_column_name: name of the name column
        :param branch_column_name: name of the branch column
        :return: DataFrame with common accounts and branch names replaced
"""
for row_i, row in df.iterrows():
name = row[name_column_name]
branch_name = row[branch_column_name]
new_branch_name = BranchUtils.replace_branch_name(branch_name=branch_name)
if BranchUtils.is_common_account(name):
df.at[row_i, name_column_name] = new_branch_name
df.at[row_i, id_column_name] = 0
def _pre_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
logging.getLogger(__name__).info("开始处理公共户信息...")
self._replace_common_account(df=data, id_column_name=self._id_column_name,
name_column_name=self._name_column_name,
branch_column_name=self._branch_column_name)
logging.getLogger(__name__).info("解决姓名与工号不匹配的问题...")
# 将工号变成数字,好与匹配花名册,修正员工名称错误的问题
data[self._id_column_name] = data[self._id_column_name].astype("int64")
data = ManifestUtils.fix_name_error(data, self._id_column_name, self._name_column_name)
return data
def _pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
index_columns = [
self._id_column_name,
self._name_column_name,
]
value_columns = self._get_value_columns()
logging.getLogger(__name__).info("按{} 透视数据项:{}".format(
index_columns,
value_columns,
))
if data.empty:
return pd.DataFrame(columns=index_columns + value_columns)
table = pd.pivot_table(data,
values=value_columns,
index=index_columns,
aggfunc=np.sum, fill_value=0)
return table
def _after_pivot_table(self, *, data: pd.DataFrame) -> pd.DataFrame:
data.reset_index(inplace=True)
        # Reorder the columns
columns_list = [
self._id_column_name,
self._name_column_name,
]
columns_list.extend(self._value_column_pairs.values())
data = data[columns_list]
return data
def _merge_with_manifest(self, *, manifest_data: pd.DataFrame, data: pd.DataFrame) -> pd.DataFrame:
logging.getLogger(__name__).info("与花名册合并...")
merge_result = ManifestUtils.merge_with_manifest(manifest_data=manifest_data, data=data,
id_column_name=self._id_column_name,
name_column_name=self._name_column_name)
return merge_result
def _drop_duplicated_columns(self, *, data: pd.DataFrame) -> pd.DataFrame:
return data.drop(columns=[self._id_column_name, self._name_column_name])
def _add_target_columns(self) -> None:
self._add_value_pair_target_columns()
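# A small illustrative sketch (synthetic data): the thousands="," option used in
# _read_src_file lets pandas parse values like "1,234.50" as numbers, and the
# astype("int64") cast in _pre_pivot_table makes the employee ID comparable to the roster.
if __name__ == "__main__":
    import io
    csv = io.StringIO('id,name,balance\n001,A,"1,234.50"\n')
    demo = pd.read_csv(csv, thousands=",")
    demo["id"] = demo["id"].astype("int64")
    print(demo.dtypes)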
|
/sc_retail_analysis-0.0.49-py3-none-any.whl/sc_retail_analysis/non_interest_income/deposit_analyzer.py
| 0.423339 | 0.162015 |
deposit_analyzer.py
|
pypi
|
import json
import logging
from urllib.parse import urljoin
import requests
import urllib3
from .exception import *
class RequestClient(object):
"""
A class to interact with Http
"""
def __init__(self, *, url, username=None, password=None, x509_verify=True):
"""
Create a RequestClient object.
:param url: the request url.
:param username: the user name.
:param password: password.
:param x509_verify: Whether to validate the x509 certificate when using https
"""
self._url = url
self._username = username
self._password = password
self._x509_verify = x509_verify
@property
def url(self):
"""
        Current url.
:rtype: str
"""
return self._url
@property
def username(self):
"""
Current username.
:rtype: str
"""
return self._username
@property
def password(self):
"""
Current password.
:rtype: str
"""
return self._password
@property
def x509_verify(self):
"""
Whether to validate the x509 certificate when using https
        :rtype: bool
"""
return self._x509_verify
def http_request(self, method, endpoint, **kwargs):
"""
Performs a HTTP request to the Nexus REST API on the specified
endpoint.
:param method: one of ``get``, ``put``, ``post``, ``delete``.
:type method: str
:param endpoint: URI path to be appended to the service URL.
:type endpoint: str
:param kwargs: as per :py:func:`requests.request`.
:rtype: requests.Response
"""
url = urljoin(self._url, endpoint)
try:
response = requests.request(
method=method, auth=(self._username, self._password), url=url,
verify=self._x509_verify, timeout=(3.15, 27), **kwargs)
except requests.exceptions.ConnectionError as e:
logging.error("failed to connect to %s, cause: %s", url, e)
raise HttpClientAPIError(e)
except urllib3.exceptions.ReadTimeoutError as e:
logging.error("read timeout error to %s, cause: %s", url, e)
raise HttpClientAPIError(e)
except requests.exceptions.ReadTimeout as e:
logging.error("read timeout to %s, cause: %s", url, e)
raise HttpClientAPIError(e)
if response.status_code == 400:
raise BadRequestException(response.text)
if response.status_code == 401:
raise HttpClientInvalidCredentials("Invalid credential {0}, {1}".format(
self._username, self._password))
return response
def http_get(self, endpoint):
"""
Performs a HTTP GET request on the given endpoint.
:param endpoint: name of the Nexus REST API endpoint.
:type endpoint: str
:rtype: requests.Response
"""
return self.http_request('get', endpoint, stream=True)
def http_head(self, endpoint):
"""
Performs a HTTP HEAD request on the given endpoint.
:param endpoint: name of the Nexus REST API endpoint.
:type endpoint: str
:rtype: requests.Response
"""
return self.http_request('head', endpoint)
def _get_paginated(self, endpoint, **request_kwargs):
"""
Performs a GET request using the given args and kwargs. If the response
is paginated, the method will repeat the request, manipulating the
`params` keyword argument each time in order to receive all pages of
the response.
Items in the responses are sent in "batches": when all elements of a
response have been yielded, a new request is made and the process
repeated.
        :param request_kwargs: passed verbatim to the http_request() method, except
for the argument needed to paginate requests.
        :return: a generator that yields one response item at a time.
:rtype: typing.Iterator[dict]
"""
response = self.http_request('get', endpoint, **request_kwargs)
if response.status_code == 404:
raise HttpClientAPIError(response.reason)
try:
content = response.json()
except json.decoder.JSONDecodeError:
raise HttpClientAPIError(response.content)
while True:
for item in content.get('items'):
yield item
continuation_token = content.get('continuationToken')
if continuation_token is None:
break
request_kwargs['params'].update(
{'continuationToken': continuation_token})
response = self.http_request('get', endpoint, **request_kwargs)
content = response.json()
def http_post(self, endpoint, **kwargs):
"""
Performs a HTTP POST request on the given endpoint.
:param endpoint: name of the Nexus REST API endpoint.
:type endpoint: str
:param kwargs: as per :py:func:`requests.request`.
:rtype: requests.Response
"""
return self.http_request('post', endpoint, **kwargs)
def http_put(self, endpoint, **kwargs):
"""
Performs a HTTP PUT request on the given endpoint.
:param endpoint: name of the Nexus REST API endpoint.
:type endpoint: str
:param kwargs: as per :py:func:`requests.request`.
:rtype: requests.Response
"""
return self.http_request('put', endpoint, **kwargs)
def http_delete(self, endpoint, **kwargs):
"""
Performs a HTTP DELETE request on the given endpoint.
:param endpoint: name of the Nexus REST API endpoint.
:type endpoint: str
:param kwargs: as per :py:func:`requests.request`.
:rtype: requests.Response
"""
return self.http_request('delete', endpoint, **kwargs)
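# A minimal usage sketch (placeholder host and endpoint): any service exposing a
# Nexus-style paginated JSON API ({"items": [...], "continuationToken": ...}) fits
# the _get_paginated helper above.
if __name__ == "__main__":
    client = RequestClient(url="https://nexus.example.com/", username="user", password="secret")
    response = client.http_get("service/rest/v1/repositories")
    print(response.status_code)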
|
/sc-search-gav-0.0.2.tar.gz/sc-search-gav-0.0.2/sc_gav/request_api.py
| 0.813201 | 0.16228 |
request_api.py
|
pypi
|
from enum import Enum
from typing import Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
import seaborn as sb
from adjustText import adjust_text
from matplotlib import colors
from rich import print
class Colormaps(Enum):
"""Useful Colormaps for e.g. UMAPs."""
grey_red = colors.LinearSegmentedColormap.from_list("grouping", ["lightgray", "red", "darkred"], N=128)
grey_green = colors.LinearSegmentedColormap.from_list("grouping", ["lightgray", "limegreen", "forestgreen"], N=128)
grey_yellow = colors.LinearSegmentedColormap.from_list("grouping", ["lightgray", "yellow", "gold"], N=128)
grey_violet = colors.LinearSegmentedColormap.from_list(
"grouping", ["lightgray", "mediumvioletred", "indigo"], N=128
)
grey_blue = colors.LinearSegmentedColormap.from_list("grouping", ["lightgray", "cornflowerblue", "darkblue"], N=128)
def custom_plot_size(width: int, height: int, dpi: int):
"""Create a custom axis object of desired sizes.
Args:
width: Desired plot width
height: Desired plot height
dpi: Desired plot DPI.
Returns: Axis of desired sizes
"""
fig, ax = plt.subplots(figsize=(width, height), dpi=dpi)
return fig.gca()
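# A minimal usage sketch: build a 6 x 4 inch axis at 100 dpi and plot into it.
# Purely illustrative; nothing in this module requires it.
if __name__ == "__main__":
    ax = custom_plot_size(6, 4, 100)
    ax.plot([0, 1, 2], [0, 1, 4])
    plt.show()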
def standard_lineplot(
data,
order: List,
xlabel: str,
ylabel: str,
hue=None,
gene=None,
smooth: Optional[bool] = None,
palette=None,
title=None,
rotation: Optional[int] = None,
figsize: Tuple[int, int] = (15, 5),
tick_size=None,
label_size=None,
order_smooth: int = 3,
confidence_interval=None,
scatter=None,
save: Optional[str] = None,
):
"""Draws a standard line plot based on Seaborn's lmplot.
Args:
data: Data frame containing averaged expression values
order: Order of x-axis labels from left to right
xlabel: x-axis label
ylabel: y-axis label
hue: Subsets of the data which will be drawn on separate facets in the grid. Example: "condition"
gene: Gene of interest
smooth: Whether to smoothen (interpolate) the curve
palette: Color palette. For example a list of colors.
title: Title of the plot
rotation: Rotation of the x-axis labels
figsize: Size of the figure as specified in matplotlib
tick_size: Size of the ticks as specified in matplotlib
label_size: Size of the labels as specified in matplotlib
order_smooth: If greater than 1, numpy.polyfit is used to estimate a polynomial regression
confidence_interval: Confidence interval
scatter: Set to true in order to add mean expression per sample in form of scatter point
save: Path to save the plot to
"""
if smooth:
# Possible to set alpha of scatter with scatter_kws={'alpha': 0.1}
if hue:
cat = sb.lmplot(
data=data,
x=xlabel,
y=gene,
ci=confidence_interval,
order=order_smooth,
scatter=scatter,
hue=hue,
truncate=True,
palette=palette,
)
else:
cat = sb.lmplot(
data=data,
x=xlabel,
y=gene,
ci=confidence_interval,
order=order_smooth,
scatter=scatter,
palette=palette,
)
else:
# Removed Parameter order = order, as order should be given numerically anyways.
if hue:
cat = sb.catplot(data=data, x=xlabel, y=gene, linestyles="-", kind="point", hue=hue, palette=palette)
else:
cat = sb.catplot(data=data, x=xlabel, y=gene, linestyles="-", kind="point", palette=palette)
if scatter:
cat2 = sb.stripplot(data=data, x=xlabel, y=gene, palette=palette, hue=hue, size=7)
if hue:
cat2.legend_.remove()
cat.set(xticks=np.unique(data.loc[:, xlabel]))
cat.set_xticklabels(order)
cat.fig.set_size_inches(figsize)
if rotation:
cat.ax.set_xticklabels(order, rotation="vertical")
cat.ax.set_title(title, size=label_size)
cat.ax.set_xlabel(xlabel, size=label_size)
cat.ax.set_ylabel(ylabel, size=label_size)
cat.ax.tick_params(labelsize=tick_size)
if save:
full_save_name = f"{gene}_{save}"
cat.fig.savefig(f"{full_save_name}", bbox_inches="tight")
print(f"[bold blue]Saving figure to {full_save_name}")
plt.show()
plt.close()
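# A minimal usage sketch (synthetic values): average expression of one gene over days,
# drawn without smoothing. Column names are invented for illustration.
if __name__ == "__main__":
    demo = pd.DataFrame({"days": [0, 0, 3, 3, 7, 7], "Gene1": [0.1, 0.2, 0.5, 0.4, 0.9, 1.1]})
    standard_lineplot(demo, order=[0, 3, 7], xlabel="days", ylabel="average expression", gene="Gene1")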
def average_expression(
gene_expression,
genes,
order: List[str],
id_label: str = "identifier",
xlabel: str = "days",
cluster: str = "all",
hue=None,
palette: str = "tab:blue",
figsize: Tuple[int, int] = (15, 6),
smooth=None,
rotation: Optional[int] = None,
order_smooth=None,
conf_int=None,
scatter=None,
save: Optional[str] = None,
):
"""Draw a line plot showing the gene expression over time. Expression values are averaged by individual sample.
Args:
gene_expression: Data frame containing gene expression values
genes: List of genes for which individual line plots will be generated
order: Order of x-axis labels from left to right
id_label: Adata column in which sample id information is stored
xlabel: x-axis label
        cluster: Which clusters to plot. Select 'all' if all clusters should be drawn.
hue: Which value to color by
figsize: Size of the figure as specified in matplotlib
smooth: Set to true for smoothened line plot using polynomial regression
rotation: set to True to rotate x-axis labels 90 degrees
order_smooth: If greater than 1, use numpy.polyfit to estimate a polynomial regression
conf_int: Size of the confidence interval for the regression estimate
scatter: Set to True to add average expression values per sample ID as dots
save: Path to save the plot to
Example smooth:
.. image:: /_images/average_expression_smooth.png
Example raw:
.. image:: /_images/average_expression_raw.png
"""
for gene in genes:
meanpid = gene_expression.groupby([id_label, xlabel])[gene].mean().reset_index()
# cluster_label = ", ".join(cluster)
cluster_label = ", ".join(cluster) if isinstance(cluster, list) else cluster
standard_lineplot(
meanpid,
order=order,
xlabel=xlabel,
ylabel=f"Average expression in cluster {cluster_label}",
hue=hue,
gene=gene,
smooth=smooth,
palette=palette,
title=gene,
rotation=rotation,
figsize=figsize,
save=save,
order_smooth=order_smooth,
confidence_interval=conf_int,
scatter=scatter,
)
def average_expression_per_cluster(
gene_expression,
genes,
order,
obs=None,
id_label: str = "identifier",
xlabel: str = "days",
cluster: str = "all",
hue=None,
figsize: Tuple[int, int] = (15, 6),
smooth=None,
rotation=None,
tick_size: int = 12,
label_size: int = 15,
order_smooth=None,
conf_int=None,
palette=None,
scatter=None,
save: Optional[str] = None,
):
"""Plots gene expression over time split by cluster identity.
One line per cluster.
Args:
gene_expression: Data frame containing gene expression values
genes: List of genes for which individual line plots will be generated
order: Order of x-axis labels from left to right
obs: Data frame containing meta data information
xlabel: x-axis label
        cluster: Which clusters to plot. Select 'all' if all clusters should be drawn.
id_label: Meta data column in which sample id information is stored
hue: Split expression values by this grouping, one line per category will be drawn
figsize: Size of the figure as specified in matplotlib
smooth: Set to True for smoothened line plot using polynomial regression
rotation: Set to True to rotate x-axis labels 90 degrees
tick_size: Size of the ticks as specified in matplotlib
label_size: Size of the labels as specified in matplotlib
order_smooth: If greater than 1, use numpy.polyfit to estimate a polynomial regression
conf_int: Size of the confidence interval for the regression estimate
palette: Color palette that gets passed to Seaborn's lineplot. For example a list of colors.
scatter: Set to True to add average expression values per sample ID as dots
save: Path to save the plot to
"""
for gene in genes:
meanpid = gene_expression.groupby([id_label, xlabel])[gene].mean().reset_index()
if hue:
cell_types = {}
combis = obs.groupby([id_label, hue]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
meanpid[hue] = [cell_types[label] for label in meanpid.identifier]
# cluster_label = ", ".join(cluster)
cluster_label = ", ".join(cluster) if isinstance(cluster, list) else cluster
standard_lineplot(
meanpid,
order=order,
xlabel=xlabel,
ylabel=f"Average expression in cluster {cluster_label}",
hue=hue,
gene=gene,
smooth=smooth,
palette=palette,
title=gene,
tick_size=tick_size,
label_size=label_size,
rotation=rotation,
figsize=figsize,
save=save,
order_smooth=order_smooth,
confidence_interval=conf_int,
scatter=scatter,
)
def average_expression_split_cluster(
gene_expression,
genes,
order,
id_label="identifier",
xlabel="days",
hue="genotype",
cluster=None,
figsize=(15, 6),
smooth=None,
rotation=None,
cols=None,
tick_size=12,
label_size=15,
order_smooth=None,
conf_int=None,
scatter=None,
save=None,
):
"""
Plot average gene expression as line plots for multiple clusters at once.
Args:
gene_expression: Data frame containing gene expression values
genes: List of genes for which individual line plots will be generated
order: Order of x-axis labels from left to right
id_label: Meta data column in which sample id information is stored
xlabel: x-axis label
hue: Split expression values by this grouping, one line per category, will be drawn
        cluster: Which clusters to plot. Select 'all' if all clusters should be drawn.
figsize: Size of the figure as specified in matplotlib
smooth: Set to True for smoothened line plot using polynomial regression
rotation: x-axis label rotation
cols: List of colors to use for line plot
tick_size: Size of the ticks as specified in matplotlib
label_size: Size of the labels as specified in matplotlib
order_smooth: If greater than 1, numpy.polyfit is used to estimate a polynomial regression
conf_int: Size of the confidence interval for the regression estimate
scatter: Set to True to add average expression values per sample ID as dots
save: Path to save the plot to
Example smooth:
.. image:: /_images/average_expression_per_cluster_smooth.png
Example raw:
.. image:: /_images/average_expression_per_cluster_raw.png
"""
if cluster:
if isinstance(cluster, list):
ylab = f"Average expression in {', '.join(cluster)}"
else:
ylab = f"Average expression in {cluster}"
else:
ylab = "Average expression"
for gene in genes:
meanpid = gene_expression.groupby([id_label, hue, xlabel])[gene].mean().reset_index()
standard_lineplot(
meanpid,
order=order,
xlabel=xlabel,
ylabel=ylab,
hue=hue,
gene=gene,
smooth=smooth,
palette=cols,
title=gene,
tick_size=tick_size,
label_size=label_size,
rotation=rotation,
figsize=figsize,
save=save,
order_smooth=order_smooth,
confidence_interval=conf_int,
scatter=scatter,
)
def average_expression_per_cell(
gene_expression,
genes,
order,
xlabel: str = "days",
cluster: str = "all",
hue=None,
figsize: Tuple[int, int] = (15, 6),
smooth=None,
rotation=None,
tick_size=12,
label_size=15,
order_smooth=None,
conf_int=None,
scatter=None,
cols=None,
save: Optional[str] = None,
):
"""
Plots the average gene expression as a line plot per cell.
Ideally used when the scatter point should not be sample wise, but cell wise.
Args:
gene_expression: Data frame containing gene expression values
genes: List of genes for which individual line plots will be generated
order: Order of x-axis labels from left to right
xlabel: x-axis label
        cluster: Which clusters to plot. Select 'all' if all clusters should be drawn.
hue: Split expression values by this grouping, one line per category, will be drawn
figsize: Size of the figure as specified in matplotlib
smooth: Set to true for smoothened line plot using polynomial regression
rotation: Set to True to rotate x-axis labels 90 degrees
tick_size: Size of the ticks as specified in matplotlib
label_size: Size of the labels as specified in matplotlib
order_smooth: If greater than 1, use numpy.polyfit to estimate a polynomial regression
conf_int: Size of the confidence interval for the regression estimate
scatter: Set to True to add average expression values per sample ID as dots
cols: List of colors to use for line plot
save: Path to save the plot to
"""
for gene in genes:
cluster_label = ", ".join(cluster) if isinstance(cluster, list) else cluster
standard_lineplot(
gene_expression,
order=order,
xlabel=xlabel,
ylabel=f"Average expression in cluster {cluster_label}",
hue=hue,
gene=gene,
smooth=smooth,
palette=cols,
title=gene,
tick_size=tick_size,
label_size=label_size,
rotation=rotation,
figsize=figsize,
save=save,
order_smooth=order_smooth,
confidence_interval=conf_int,
scatter=scatter,
)
def gene_expression_dpt_ordered(
data,
genes,
xlabel,
order=3,
conf_int=95,
figsize: Tuple[int, int] = (12, 6),
condition=None,
label_size: int = 15,
cols=None,
scale=None,
ylim=None,
save: Optional[str] = None,
):
"""
Plot smoothed expression of all cells ordered by pseudo time.
Args:
data: AnnData object
genes: List of genes for which individual line plots will be generated
xlabel: x-axis label
order: Order of x-axis labels from left to right
conf_int: Size of the confidence interval for the regression estimate
figsize: Size of the figure as specified in matplotlib
condition: Split expression values by this grouping, one line per category will be drawn
label_size: Size of the labels as specified in matplotlib
cols: List of colors to use for line plot
scale: Set to True to scale expression value to a range between 0 and 1
ylim: Upper limit on the y-axis if desired
save: Path to save the plot to
Example:
.. image:: /_images/gene_expression_dpt_ordered.png
Example with columns:
.. image:: /_images/gene_expression_dpt_ordered_col.png
"""
import matplotlib.patches as mpatches
patches = []
data = data.copy()
fig, ax = plt.subplots(figsize=figsize)
# use rainbow colour palette if no colours are specified
if cols is None:
from matplotlib import colors
bins = len(np.unique(data.loc[:, condition])) if condition else len(genes)
cmap = plt.cm.rainbow
cmaplist = [cmap(i) for i in range(cmap.N)]
cmap = colors.LinearSegmentedColormap.from_list("colours", cmaplist, N=bins)
cols = [cmap(i) for i in range(bins)]
# only working for one gene at a time for now
if condition:
conditions = np.unique(data.loc[:, condition])
gene = genes[0]
data = pd.pivot(data, columns=[condition])
columns = [
f"{data.columns.get_level_values(0)[i]}_{data.columns.get_level_values(1)[i]}"
for i in range(len(data.columns.values))
]
data.columns = columns
data[xlabel] = data.filter(like=xlabel).sum(axis=1).values
for i, con in enumerate(conditions):
col = f"{gene}_{con}"
if scale:
data[col] = np.interp(data[col], (data[col].min(), data[col].max()), (0, +1))
cat = sb.regplot(
data=data, x=xlabel, y=col, scatter=False, order=order, truncate=True, ax=ax, color=cols[i], ci=conf_int
)
patches.append(mpatches.Patch(color=cols[i], label=col))
else:
for i, gene in enumerate(genes):
if scale:
data[gene] = np.interp(data[gene], (data[gene].min(), data[gene].max()), (0, +1))
cat = sb.regplot(
data=data,
x=xlabel,
y=gene,
scatter=False,
order=order,
truncate=True,
ax=ax,
color=cols[i],
ci=conf_int,
)
patches.append(mpatches.Patch(color=cols[i], label=gene))
cat.set_ylabel("expression", size=label_size)
cat.set_xlabel(xlabel, size=label_size)
cat.tick_params(labelsize=label_size)
sb.despine()
plt.legend(handles=patches, loc="center left", bbox_to_anchor=(1.02, 0.5), prop={"size": label_size}, frameon=False)
if ylim:
cat.set(ylim=ylim)
if save:
        plt.savefig(f"{save}", bbox_inches="tight")
        print(f"[bold blue]Saving figure to {save}")
plt.show()
plt.close()
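# Example usage (illustrative sketch with placeholder names). `pt_df` is assumed to
# be a DataFrame with one row per cell, a pseudotime column "dpt_pseudotime", the
# expression of the plotted genes and, optionally, a "condition" column:
#
#   gene_expression_dpt_ordered(
#       pt_df, genes=["Gene1"], xlabel="dpt_pseudotime",
#       order=3, conf_int=95, condition="condition", scale=True,
#   )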
def relative_frequencies_boxplots(
relative_frequencies: pd.DataFrame,
cluster,
cols,
order,
xlabel: str = "days",
hue: str = "batch",
figsize: Tuple[int, int] = (15, 6),
width: float = 0.5,
jitter=None,
save=None,
) -> None:
"""Plots the relative frequencies as split boxplots.
Use calc_relative_frequencies to get the required input format.
Args:
relative_frequencies: Calculated by calc_relative_frequencies as Pandas DataFrame
cluster: Cluster to be plotted
cols: List of colors to use for boxes
order: Order of x-axis labels from left to right
xlabel: x-axis label
hue: Value to color by
figsize: Size of the figure as specified in matplotlib
width: Width of the plot as specified in matplotlib
jitter: Set to True for individual dots per sample
save: Path to save the plot to
Example:
.. image:: /_images/relative_frequencies_boxplots.png
"""
# Subset according to order
relative_frequencies = relative_frequencies.loc[relative_frequencies[xlabel].isin(order)]
split_boxplot(
relative_frequencies,
order=order,
xlabel=xlabel,
ylabel="relative frequency",
hue=hue,
column=cluster,
cols=cols,
width=width,
title=cluster,
figsize=figsize,
jitter=jitter,
save=save,
)
def split_boxplot(
table,
order,
xlabel: str,
ylabel: str,
column=None,
hue=None,
cols=None,
width: float = 1,
title=None,
figsize: Tuple[int, int] = (15, 6),
jitter=None,
save: Optional[str] = None,
) -> None:
    """Draws a boxplot split by hue.
Args:
table: Table containing the data to draw the boxplots for
order: Order of the boxplot labels
xlabel: x-axis label
ylabel: y-axis label
        column: Column of the table to plot on the y-axis, e.g. the cluster name
hue: Value to split relative frequencies by
cols: List of colors to use for boxes
width: Width of the desired plot
title: Title of the plot
figsize: Size of the figure as specified in matplotlib
jitter: Set to True for individual dots per sample
save: Path to save the plot to
"""
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
if cols is not None:
fig = sb.boxplot(data=table, hue=hue, x=xlabel, y=column, order=order, width=width, palette=cols)
else:
fig = sb.boxplot(data=table, hue=hue, x=xlabel, y=column, order=order, width=width)
if jitter is not None:
fig = sb.swarmplot(data=table, color="black", x=xlabel, y=column, order=order)
if hue is not None:
plt.legend(loc="upper right")
if title:
fig.set_title(title, size=15)
fig.set_xlabel(xlabel, size=15)
fig.set_ylabel(ylabel, size=15)
fig.tick_params(labelsize=12)
if save:
        fig.get_figure().savefig(f"{save}")
plt.show()
plt.close()
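# Example usage (illustrative sketch; `freqs` and the labels are placeholder names).
# `freqs` is assumed to be a relative-frequency table with "days" and "batch" columns
# and one column per cluster:
#
#   split_boxplot(
#       freqs, order=["d0", "d3"], xlabel="days", ylabel="relative frequency",
#       column="AT2 cells", hue="batch", title="AT2 cells", jitter=True,
#   )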
def marker_dendrogram(
marker_table: pd.DataFrame,
threshold: float = 0.7,
column: str = "cluster",
log_fc_key: str = "log_FC",
label_size: int = 10,
orientation: str = "top",
figsize: Tuple[int, int] = (10, 4),
save: Optional[str] = None,
):
    """Plots a dendrogram of the used marker genes.
Args:
marker_table: A marker table as generated by sct.calc.extended_marker_table
threshold: Threshold for the log fold change
column: Column to create pivot by; usually just the clusters
log_fc_key: Key for the stored log fold changes in the marker table
label_size: Font size of the labels
orientation: Orientation of the figure; Currently just 'top' or no orientation
figsize: Size of the figure as specified in matplotlib
save: Path to save the plot to
Example:
.. image:: /_images/marker_dendrogram.png
"""
import scipy.cluster.hierarchy as hc
marker_table = marker_table[marker_table[log_fc_key] > threshold]
marker_table = marker_table.pivot(index="gene", columns=column, values=log_fc_key)
marker_table.fillna(value=0, inplace=True)
corr = 1 - marker_table.corr()
corr = hc.distance.squareform(corr) # convert to condensed
z = hc.linkage(corr, method="complete")
plt.figure(figsize=figsize)
rot = 90 if orientation == "top" else 0
hc.dendrogram(
z,
labels=marker_table.columns,
leaf_rotation=rot,
color_threshold=0,
orientation=orientation,
leaf_font_size=label_size,
above_threshold_color="black",
)
plt.yticks(size=label_size)
if save is None:
plt.show()
else:
        plt.savefig(f"{save}")
print(f"[bold blue]Saving figure to {save}")
plt.close()
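# Example usage (illustrative sketch). `markers` is assumed to be the output of
# tools.extended_marker_table, i.e. a table with "gene", "cell_type" and "log_FC"
# columns:
#
#   marker_dendrogram(markers, threshold=0.7, column="cell_type", log_fc_key="log_FC")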
def volcano_plot(
table,
fdr_thresh: Optional[float] = None,
log_fc_thresh: float = 0,
adj_p_val: str = "adj_p_val",
log_fc: str = "avg_logFC",
gene: str = "gene",
sig_col: str = "tab:orange",
col: str = "tab:blue",
figsize: Tuple[int, int] = (8, 6),
save=None,
):
"""
Scatter plot of differential gene expression results generated by diffxpy
Args:
table: diffxpy generated table of results
fdr_thresh: -log(FDR) threshold for labeling genes. If set to None, we
will consider the 99th percentile of -log(FDR) values the threshold.
log_fc_thresh: absolute(log_fc) threshold for labeling genes.
adj_p_val: Label of the adjusted p value, these are considered FDRs
log_fc: Label of the log fold change
gene: Label of column with gene names
col: Color of dots
sig_col: Colour of dots surpassing defined FDR threshold
figsize: Size of the figure as specified in matplotlib
save: Path to save the plot to
Example:
.. image:: /_images/diffxpy_volcano.png
"""
table["-log_FDR"] = -np.log(table[adj_p_val])
# take the 99% quantile by default for highlighting
if not fdr_thresh:
fdr_thresh = np.percentile(table.loc[:, "-log_FDR"], 99)
if not log_fc_thresh:
log_fc_thresh = 0
lowqval_highfc_de = table.loc[(table["-log_FDR"] > fdr_thresh) & (abs(table[log_fc]) >= log_fc_thresh)]
other_de = table.loc[~table.index.isin(lowqval_highfc_de.index)]
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
sb.regplot(x=other_de[log_fc], y=other_de["-log_FDR"], fit_reg=False, scatter_kws={"s": 6})
sb.regplot(x=lowqval_highfc_de[log_fc], y=lowqval_highfc_de["-log_FDR"], fit_reg=False, scatter_kws={"s": 6})
ax.set_xlabel("log2 FC", fontsize=20)
ax.set_ylabel("-log Q-value", fontsize=20)
ax.tick_params(labelsize=15)
ax.grid(False)
# Label names and positions
x = [i - 0.1 for i in lowqval_highfc_de[log_fc]]
y = [i + 0.1 for i in lowqval_highfc_de["-log_FDR"]]
labels = lowqval_highfc_de[gene]
max_n_labels = 50
if len(labels) > max_n_labels:
print(f"[bold yellow]Warning: given your thresholds, more than {max_n_labels} genes would have to be labeled.")
print(
"[bold yellow]To prevent overcrowding of your plot, make your thresholds stricter.\n"
"We will leave out the labels for now."
)
else:
# plot labels, and use adjust_text to make sure that labels don't overlap:
labels = [
plt.text(x, y, label, ha="center", va="center") for x, y, label in zip(x, y, labels) if not x == np.inf
]
adjust_text(labels)
if save:
fig.savefig(f"{save}")
else:
plt.show()
plt.close()
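# Example usage (illustrative sketch). `de_table` is assumed to be a diffxpy-style
# result table with "gene", "avg_logFC" and "adj_p_val" columns:
#
#   volcano_plot(de_table, fdr_thresh=None, log_fc_thresh=1, save="volcano.pdf")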
def cluster_composition_stacked_barplot(
relative_frequencies: pd.DataFrame,
xlabel: str = "name",
figsize: Tuple[int, int] = (6, 10),
width: float = 0.8,
order=None,
error_bar=None,
label_size: int = 15,
tick_size: int = 13,
capsize: Optional[int] = None,
margins: Tuple[float, float] = (0.02, 0.04),
colors=None,
save: Optional[str] = None,
):
"""Plot relative frequencies as a stacked barplot.
Args:
relative_frequencies: Data frame containing relative Frequencies as calculated by calc_relFreq()
xlabel: x-axis label
figsize: Size of the figure as specified in matplotlib
width: Width of the bars
order: Order of x-axis labels from left to right
error_bar: Set to True to add error bars (only possible when grouping the frequencies)
tick_size: Size of the ticks as specified in matplotlib
label_size: Size of the labels as specified in matplotlib
capsize: Size of the horizontal lines of the error bar
margins: Change margins of the plot if desired
colors: List of colors to use for the bands
save: Path to save the plot to
Example:
.. image:: /_images/cluster_composition_stacked_barplot.png
"""
import matplotlib.patches as mpatches
if not colors:
raise ValueError("Colors was not passed. Obtain them from e.g. adata.uns['cluster_key_colors']")
patches = []
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
order = np.unique(relative_frequencies.loc[:, xlabel]) if order is None else order
ci = 95 if error_bar else None
ax.margins(margins[0], margins[1])
cell_types = np.flip([col for col in relative_frequencies.columns if col not in ["identifier", xlabel]])
# cell_types = np.flip(np.setdiff1d(relFreqs.columns, ["identifier", xlabel]))
bars = pd.DataFrame(index=order, data=np.zeros(len(order)))
plot_data = pd.DataFrame(relative_frequencies.loc[:, xlabel])
for i, typ in enumerate(cell_types):
sum_up = [
relative_frequencies.loc[:, typ].values[i] + bars.loc[g].values[0]
for i, g in enumerate(relative_frequencies.loc[:, xlabel])
]
plot_data[typ] = sum_up
bars.iloc[:, 0] = (
bars.iloc[:, 0] + relative_frequencies.loc[:, [typ, xlabel]].groupby(xlabel).mean().loc[order, typ]
)
for i, typ in enumerate(reversed(cell_types)):
fig = sb.barplot(
data=plot_data, x=xlabel, y=typ, order=order, ci=ci, errcolor="black", color=colors[i], capsize=capsize
)
patches.append(mpatches.Patch(color=colors[i], label=typ))
ax.set_xlabel(xlabel, size=label_size)
ax.set_ylabel("relative frequency", size=label_size)
ax.tick_params(labelsize=tick_size)
ax.set_xticklabels(labels=order, rotation="vertical")
# Change the bar width
for bar in fig.patches:
centre = bar.get_x() + bar.get_width() / 2.0
bar.set_x(centre - width / 2.0)
bar.set_width(width)
plt.legend(handles=patches, loc="center left", bbox_to_anchor=(1.02, 0.5), prop={"size": tick_size}, frameon=False)
if save:
plt.savefig(f"{save}")
print(f"[bold blue]Saving Figure to {save}")
plt.show()
plt.close()
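# Example usage (illustrative sketch; colors and labels are placeholders). `freqs`
# is assumed to come from tools.relative_frequencies, and the color list is assumed
# to match the number of cell type columns, e.g. adata.uns["cell_type_colors"]:
#
#   cluster_composition_stacked_barplot(
#       freqs, xlabel="days", order=["d0", "d3"], error_bar=True,
#       colors=list(adata.uns["cell_type_colors"]),
#   )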
def gene_boxplot(
table,
palette: List[str],
xlabel: str = "cell_types",
hue: Optional[str] = None,
figsize: Tuple[int, int] = (10, 5),
legend=True,
score="Axin2",
scatter=None,
rotate=False,
width=0.7,
save=None,
):
"""Plot gene values as split boxplots.
Args:
        table: Pandas DataFrame containing one row per cell or sample
        palette: List of colors to use for the boxes
        xlabel: x-axis label and column to group the boxes by
        hue: Column to additionally split each box by
        figsize: Size of the figure as specified in matplotlib
        legend: Whether to draw a legend or not
        score: Column containing the values to plot, e.g. a gene or score name
        scatter: Set to True to add the individual values as dots
        rotate: Set to True to rotate the x-axis labels by 90 degrees
        width: Width of the desired plot
save: Path to save the plot to
Example:
.. image:: /_images/gene_boxplot.png
"""
sb.set_style("ticks")
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
sf = False if scatter else True
if hue:
fig = sb.boxplot(data=table, x=xlabel, y=score, width=width, hue=hue, showfliers=sf, palette=palette)
if scatter:
fig = sb.stripplot(data=table, x=xlabel, y=score, palette=["black"], size=4, hue=hue, dodge=True)
else:
fig = sb.boxplot(data=table, x=xlabel, y=score, width=width, showfliers=sf, palette=palette)
if scatter:
fig = sb.stripplot(data=table, x=xlabel, y=score, palette=["black"], size=4, dodge=True)
if rotate:
fig.set_xticklabels(fig.get_xticklabels(), rotation=90)
else:
fig.set_xticklabels(fig.get_xticklabels())
if legend:
ax.legend(bbox_to_anchor=(1.05, 1.06))
else:
ax.legend_.remove()
plt.setp(ax.artists, edgecolor="black")
plt.setp(ax.lines, color="black")
    sb.despine()  # do not show the outline box
if save:
print(f"Saving to {save}")
        plt.savefig(save, bbox_inches="tight")
plt.show()
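# Example usage (illustrative sketch with placeholder names). `score_df` is assumed
# to hold one row per cell with "cell_types", "condition" and "Axin2" columns:
#
#   gene_boxplot(
#       score_df, palette=["#1f77b4", "#ff7f0e"], xlabel="cell_types",
#       hue="condition", score="Axin2", scatter=True, rotate=True,
#   )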
def colors_overview(colors: Dict, ncols: int = 2, figsize: Tuple[int, int] = (8, 5), save: Optional[str] = None):
"""Draw an overview plot of all used colors.
Args:
colors: Dictionary of color name and color
ncols: How many columns for the plot
figsize: Size of the figure as specified in matplotlib
save: Path to save the plot to
Example:
.. image:: /_images/colors.png
"""
from matplotlib import colors as mcolors
# Sort colors by hue, saturation, value and name.
by_hsv = sorted((tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name) for name, color in colors.items())
sorted_names = [name for hsv, name in by_hsv]
n = len(sorted_names)
nrows = n // ncols + 1
fig, ax = plt.subplots(figsize=figsize)
# Get height and width
x, y = fig.get_dpi() * fig.get_size_inches()
h = y / (nrows + 1)
w = x / ncols
for i, name in enumerate(sorted_names):
col = i % ncols
row = i // ncols
y = y - (row * h) - h
xi_line = w * (col + 0.05)
xf_line = w * (col + 0.25)
xi_text = w * (col + 0.3)
ax.text(
xi_text,
y,
"%s %s" % (name, colors[name]),
fontsize=(h * 0.4),
horizontalalignment="left",
verticalalignment="center",
)
ax.hlines(y + h * 0.1, xi_line, xf_line, color=colors[name], linewidth=(h * 0.6))
ax.set_xlim(0, x)
ax.set_ylim(0, y)
ax.set_axis_off()
fig.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0, wspace=0)
if save:
print(f"Saving to {save}")
        plt.savefig(save, bbox_inches="tight")
plt.show()
def relative_frequencies_lineplot(
relative_frequencies: pd.DataFrame,
order,
cluster,
xlabel: str = "days",
ylabel: str = "relative frequency",
hue: Optional[str] = None,
smooth: Optional[bool] = None,
cols=None,
title: Optional[str] = None,
rotation: Optional[int] = None,
figsize: Tuple[int, int] = (15, 5),
tick_size: Optional[int] = None,
label_size: Optional[int] = None,
order_smooth: int = 3,
conf_int=None,
scatter=None,
save: Optional[str] = None,
):
"""Plot relative frequencies as a line plot.
Args:
relative_frequencies: Data frame containing relative Frequencies as calculated by calc_relFreq()
order: Order of x-axis labels from left to right
cluster: Which cluster to plot
xlabel: x-axis label
ylabel: y-axis label
hue: Value to color by
smooth: Whether to smoothen the plot
cols: List of colors to use for line plot
title: Title of the plot
rotation: Rotation of the x-axis labels
figsize: Size of the figure as specified in matplotlib
tick_size: Size of the ticks as specified in matplotlib
label_size: Size of the labels as specified in matplotlib
order_smooth: If greater than 1, numpy.polyfit is used to estimate a polynomial regression
conf_int: Size of the confidence interval for the regression estimate
scatter: Set to True to add average expression values per sample ID as dots
save: Path to save the plot to
Example:
.. image:: /_images/relative_frequencies_lineplots.png
"""
if hue:
sub_freqs = relative_frequencies.loc[:, [cluster] + [xlabel, hue]]
sub_freqs = pd.melt(sub_freqs, id_vars=[xlabel, hue])
else:
sub_freqs = relative_frequencies.loc[:, [cluster] + [xlabel]]
sub_freqs = pd.melt(sub_freqs, id_vars=[xlabel])
standard_lineplot(
sub_freqs,
order=order,
xlabel=xlabel,
ylabel=ylabel,
hue=hue,
gene="value",
smooth=smooth,
palette=cols,
title=title,
rotation=rotation,
figsize=figsize,
tick_size=tick_size,
label_size=label_size,
order_smooth=order_smooth,
confidence_interval=conf_int,
scatter=scatter,
save=save,
)
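# Example usage (illustrative sketch). `freqs` is assumed to come from
# tools.relative_frequencies with "days" and "batch" columns:
#
#   relative_frequencies_lineplot(
#       freqs, order=["d0", "d3", "d7"], cluster="AT2 cells",
#       xlabel="days", hue="batch", smooth=True, scatter=True,
#   )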
def annotated_cell_type_umap(
adata,
primary_color: Union[str, Sequence[str]],
cell_type_color: str,
legend_loc: str = "on data",
legend_fontsize: int = 8,
title: str = "Plot title",
palette=None,
cmap=None,
figsize=(8, 6),
save=None,
):
"""Plots a UMAP which is colored by the primary_color, but also draws all labels on top of all clusters.
Args:
adata: AnnData object
primary_color: Primary color to color all cells by, e.g. 'genotype'
cell_type_color: Key containing all cell types, e.g. 'cell_type'
legend_loc: Location of the legend (default: 'on data')
legend_fontsize: Font size of the legend (default: 8)
title: Title of the plot
        palette: Color palette used for the categorical primary color
cmap: Color map of the UMAP
figsize: Size of the figure
save: Path to save the plot to
Returns:
fig and axs Matplotlib objects
Example:
.. image:: /_images/annotated_cell_type_umap.png
"""
fig, axs = plt.subplots(figsize=figsize)
sc.pl.umap(adata, color=primary_color, show=False, palette=palette, cmap=cmap, ax=axs)
sc.pl.umap(
adata,
color=cell_type_color,
alpha=0,
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
title=title,
show=False,
ax=axs,
)
if save:
fig.savefig(save, dpi=1200, format="pdf", bbox_inches="tight")
return fig, axs
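# Example usage (illustrative sketch; the obs keys are placeholders):
#
#   fig, axs = annotated_cell_type_umap(
#       adata, primary_color="genotype", cell_type_color="cell_type",
#       title="Genotype with cell type labels",
#   )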
def genotype_vs_genotype_umaps(
adata,
genotype_key: str,
genotype_label_1: str,
genotype_label_2: str,
color: str,
hide_one_legend: bool = True,
figsize: Tuple[int, int] = (12, 6),
):
"""Plots a two UMAPs of genotypes next to each other displaying only the colors of the second UMAP.
Args:
adata: AnnData object
genotype_key: Key of the genotypes
genotype_label_1: Name of the first genotype; Must be contained in the genotypes
genotype_label_2: Name of the second genotype; Must be contained in the genotypes
color: Key to color by
hide_one_legend: Whether to hide the legend of the genotype_label_1
figsize: Size of the figure
Example:
.. image:: /_images/genotype_vs_genotype_umaps.png
"""
genotype_data_1 = adata[adata.obs[genotype_key].isin([genotype_label_1])].copy()
genotype_data_2 = adata[adata.obs[genotype_key].isin([genotype_label_2])].copy()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
sc.pl.umap(
genotype_data_1,
color=color,
ax=ax1,
palette=sc.pl.palettes.default_20,
legend_fontsize="xx-small",
size=40,
show=False,
)
if hide_one_legend:
ax1.get_legend().remove()
ax1.set_title(genotype_label_1)
sc.pl.umap(
genotype_data_2,
color=color,
ax=ax2,
palette=sc.pl.palettes.default_20,
legend_fontsize="xx-small",
size=40,
show=False,
)
_ = ax2.set_title(genotype_label_2)
|
/sc_toolbox-0.12.3-py3-none-any.whl/sc_toolbox/plot/__init__.py
| 0.963446 | 0.528473 |
__init__.py
|
pypi
|
from __future__ import annotations
import os
from typing import List
from pandas import Categorical
from statsmodels.stats.multitest import fdrcorrection
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
from typing import Optional
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import AnnData
from rich import print
WORKING_DIRECTORY = os.path.dirname(__file__)
def generate_expression_table(
adata,
cluster: str = "all",
subset_by: str = "cell_type",
xlabel: Optional[str] = None,
condition: Optional[str] = None,
use_raw: Optional[bool] = None,
):
"""Generates a table of cells by genes of expression values as a Pandas DataFrame.
Args:
adata: Anndata object
cluster: Which label of the subsets to generate the table for. Use 'all' if for all subsets.
subset_by: Which label to subset the clusters by
xlabel: Label that will be used for subsequent line plots as x-axis label. Typically a time series such as "days".
condition: Column name of the condition to include.
use_raw: Whether to use adata.raw.X for the calculations
Returns:
Gene expression table.
"""
if cluster == "all":
cells = adata.obs_names
else:
cells = [True if val in cluster else False for val in adata.obs[subset_by]]
if use_raw:
gen_expression_table = pd.DataFrame(
adata[cells].raw.X.todense(), index=adata[cells].obs_names, columns=adata[cells].raw.var_names
)
else:
gen_expression_table = pd.DataFrame(
adata[cells].X, index=adata[cells].obs_names, columns=adata[cells].var_names
)
gen_expression_table["identifier"] = adata[cells].obs["identifier"]
if xlabel:
gen_expression_table[xlabel] = adata[cells].obs[xlabel]
if condition:
# For multiple cluster, split internally per condition
if isinstance(cluster, list) and len(cluster) > 1 and subset_by != condition:
gen_expression_table[condition] = [
f"{t}_{c}" for t, c in zip(adata[cells].obs[condition], adata[cells].obs[subset_by])
]
else:
gen_expression_table[condition] = adata[cells].obs[condition]
return gen_expression_table
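# Example usage (illustrative sketch; obs keys are placeholders). Produces one row
# per cell of the selected cluster with expression values plus "identifier", "days"
# and "batch" columns:
#
#   expr_df = generate_expression_table(
#       adata, cluster=["AT2 cells"], subset_by="cell_type",
#       xlabel="days", condition="batch", use_raw=True,
#   )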
def relative_frequencies(adata, group_by: str = "cell_type", xlabel: str = "days", condition: str = "batch"):
"""Calculates the relative frequencies of conditions grouped by an observation.
Args:
        adata: AnnData object containing the data
group_by: Column name to group by
xlabel: x-axis label
        condition: Column name of an additional condition to annotate each sample with, e.g. 'batch'
Returns:
Relative frequencies in a Pandas DataFrame
"""
freqs = adata.obs.groupby(["identifier", group_by]).size()
samples = np.unique(adata.obs["identifier"])
ind = adata.obs[group_by].cat.categories
relative_frequencies = [freqs[ident] / sum(freqs[ident]) for ident in samples]
relative_frequencies = pd.DataFrame(relative_frequencies, columns=ind, index=samples).fillna(0)
# relFreqs[xlabel] = grouping.loc[samples, xlabel] ## when using Grouping Table
cell_types = {}
combis = adata.obs.groupby(["identifier", xlabel]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[xlabel] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
# Todo, add for condition
if condition:
combis = adata.obs.groupby(["identifier", condition]).groups.keys()
for c in combis:
cell_types[c[0]] = c[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
def relative_frequency_per_cluster(adata, group_by: str = "cell_type", xlabel: str = "days", condition=None):
"""
Calculates relative frequencies per cluster
Args:
adata: AnnData object containing the data
group_by: The label to group by for the clusters
xlabel: x-axis label
condition: condition to combine by
Returns:
Pandas DataFrame of relative frequencies
"""
frequencies = adata.obs.groupby([group_by, xlabel]).size()
celltypes = np.unique(adata.obs[group_by])
ind = adata.obs[xlabel].cat.categories
relative_frequencies = [frequencies[ident] / sum(frequencies[ident]) for ident in celltypes]
relative_frequencies = pd.DataFrame(relative_frequencies, columns=ind, index=celltypes).fillna(0)
cell_types = {}
combinations = adata.obs.groupby([group_by, xlabel]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[group_by] = relative_frequencies.index # type: ignore
# Todo, add for condition
if condition:
combinations = adata.obs.groupby([group_by, condition]).groups.keys()
for combination in combinations:
cell_types[combination[0]] = combination[1]
relative_frequencies[condition] = [cell_types[label] for label in relative_frequencies.index] # type: ignore
return relative_frequencies
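# Example usage (illustrative sketch; obs keys are placeholders):
#
#   freqs_per_cluster = relative_frequency_per_cluster(
#       adata, group_by="cell_type", xlabel="days"
#   )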
def correlate_to_signature(
adata,
marker: pd.DataFrame,
log_fc_threshold: float = 0.7,
cell_type: str = "AT2 cells",
cell_type_label: str = "cell_type",
log_fc_label: str = "logfoldchange",
gene_label: str = "gene",
use_raw: bool = True,
):
"""
    Correlation score (based on the log fold changes of a cell type signature) - an alternative to sc.tl.score
Args:
adata: AnnData object containing the data
marker: Pandas DataFrame containing marker genes
        log_fc_threshold: Minimum log fold change for a marker gene to be included in the signature
        cell_type: Cell type to calculate the correlation for
        cell_type_label: Column of the marker table containing the cell type labels
        log_fc_label: Column of the marker table containing the log fold changes
        gene_label: Column of the marker table containing the gene names
use_raw: Whether to use adata.raw.X
Returns:
List of correlations
"""
from scipy.sparse import issparse
topmarker = marker[marker.loc[:, cell_type_label] == cell_type]
topmarker = topmarker.loc[topmarker.loc[:, log_fc_label] > log_fc_threshold, [gene_label, log_fc_label]]
gene_names = list(np.intersect1d(adata.var_names, topmarker.loc[:, gene_label].astype(str)))
topmarker = topmarker[topmarker.loc[:, gene_label].isin(gene_names)]
print(f"[bold blue]{len(gene_names)} genes used for correlation score to {cell_type}")
if use_raw:
if issparse(adata.raw.X):
gene_expression = adata.raw[:, gene_names].X.todense()
else:
gene_expression = adata.raw[:, gene_names].X
else:
if issparse(adata.X):
gene_expression = adata[:, gene_names].X.todense()
else:
gene_expression = adata[:, gene_names].X
gene_expression = pd.DataFrame(gene_expression.T, index=gene_names)
# For each cell separately
gene_expression = pd.DataFrame.fillna(gene_expression, value=0)
res = [
np.correlate(topmarker.loc[:, log_fc_label], gene_expression.iloc[:, c])[0]
for c in range(gene_expression.shape[1])
]
return res
def remove_outliers(cords, eps: int = 1, min_samples: int = 2) -> Categorical:
"""Remove outlying cells based on UMAP embeddings with DBScan (density based clustering).
Call as: sub.obs["d_cluster"] = remove_outliers(sub.obsm["X_umap"], min_samples = 10)
Args:
cords: adata UMAP coordinates, typically adata.obsm["X_umap"]
eps: Maximum distance between two clusters to still be considered neighbors
min_samples: Minimum samples of a cluster
Returns:
Pandas Categorical of clusters
"""
from natsort import natsorted
from sklearn.cluster import DBSCAN
clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(cords)
cluster = clustering.labels_.astype("U")
return pd.Categorical(cluster, categories=natsorted(np.unique(cluster)))
def add_percentages(adata, table, ids, group_by: str, threshold: int = 0, gene_label: str = "gene"):
"""Add columns to existing diffxpy table specifying percentage of expressing cells.
Args:
adata: AnnData object containing the data
table: Table as generated by diffxpy
ids: Identifiers to add percentages for.
group_by: Label to group by
threshold: Cell count threshold.
gene_label: Label of the genes
Returns:
Table containing percentage of expressing cells
"""
for ident in ids:
cells = adata.obs_names[adata.obs[group_by] == ident]
data_temp = pd.DataFrame(
((adata[cells].layers["counts"] > threshold).sum(0) / adata[cells].layers["counts"].shape[0]).T,
index=adata.var_names,
)
if gene_label == "index":
table[f"pct.{ident}s"] = data_temp.reindex(table.index.values).values
else:
table[f"pct.{ident}s"] = data_temp.reindex(table.loc[:, gene_label]).values
return table
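# Example usage (illustrative sketch). `de_table` is assumed to be a DE result table
# with a "gene" column, and adata is assumed to carry raw counts in layers["counts"]
# and a "treatment" column in obs:
#
#   de_table = add_percentages(
#       adata, table=de_table, ids=["treated", "control"],
#       group_by="treatment", gene_label="gene",
#   )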
def ranksums_between_groups(
table, id1: str = "bystander", id2: str = "infected", xlabel: str = "condition", cells=None, score: str = "Axin2"
):
"""
Perform Wilcoxon Rank-sum test between two groups.
Args:
        table: Pandas DataFrame containing the values to test
        id1: Label of the first group in the xlabel column
        id2: Label of the second group in the xlabel column
        xlabel: Column containing the group labels
        cells: Optional index of cells to subset the table to
        score: Column containing the values to test, e.g. a gene or score name
Returns:
Pandas DataFrame containing test statistic and p-value
"""
from scipy import stats
if cells is not None:
table = table.loc[cells].copy()
group1 = table[table.loc[:, xlabel] == id1].copy()
group2 = table[table.loc[:, xlabel] == id2].copy()
t, p = stats.ranksums(group1.loc[:, score], group2.loc[:, score])
result = pd.DataFrame(columns=["wilcoxon_ranksum", "pval"])
result.loc[0] = [t, p]
return result
def generate_count_object(
adata,
hue: str = "disease",
cell_type_label: str = "cell_type",
cell_type: Optional[List[str]] = None,
min_samples: int = 2,
min_cells: int = 5,
ref: str = "healthy",
subset: Optional[List[str]] = None,
layer: str = "counts",
outliers_removal: bool = False,
):
"""
    Generates an AnnData object with raw counts for a given cell type subset, e.g. as input for diffxpy.
Args:
adata: AnnData object
hue: Value to color by
cell_type_label: Label containing cell types
cell_type: Cells type to generate counts for
min_samples: Minimum samples for outlier removal with DBScan
min_cells: Minimal number of cells
        ref: Reference category of `hue`; it is ordered first, e.g. the control condition
        subset: List of `grouping` values to subset the AnnData object to
        layer: Layer containing the raw counts
outliers_removal: Whether to remove outliers or not
Returns:
AnnData object containing counts
Example Call:
subset = ['3d PI-KO', '3d PI-WT']
        raw_counts = generate_count_object(adata,
                                           hue = "grouping",
                                           cell_type_label = "celltype_refined", cell_type = ["AT2"],
                                           ref = "3d PI-WT",
                                           subset = subset)
"""
adata_subset = adata[adata.obs.grouping.isin(subset)]
cells = [
True if (adata_subset.obs[cell_type_label][i] in cell_type) else False # type: ignore
for i in range(adata_subset.n_obs)
]
# Raw count data for diffxpy
obs = adata_subset[cells].obs.copy()
var = adata_subset.var_names.copy()
adata_raw = sc.AnnData(X=adata_subset[cells].layers[layer].copy())
adata_raw.obs = obs
adata_raw.var.index = var
adata_raw.obsm = adata_subset[cells].obsm.copy()
# Also automate tidy up with DBScan :)
if outliers_removal:
adata_raw.obs["dcluster"] = remove_outliers(adata_raw.obsm["X_umap"], min_samples=min_samples)
sc.pl.umap(adata_raw, color=[hue, "dcluster"])
adata_raw = adata_raw[adata_raw.obs.dcluster == "0"].copy()
sc.pp.filter_genes(adata_raw, min_cells=min_cells)
# Set reference as first column
adata_raw.obs.loc[:, hue].cat.reorder_categories([ref, np.setdiff1d(subset, ref)[0]], inplace=True) # type: ignore
pal = adata_subset.uns[f"{hue}_colors"]
sc.pl.umap(adata_raw, color=[hue], palette=list(pal))
return adata_raw
def tidy_de_table(de_test, adata, cells, ids=None, qval_thresh: float = 0.9, group_by: str = "treatment", cols=None):
"""
Sorts diffxpy de table and adds percentages of expression per group
Args:
de_test: diffxpy de test
adata: AnnData object
        cells: Cells to subset the AnnData object to
        ids: Identifiers to add expression percentages for
        qval_thresh: Maximum q-value for a gene to be kept in the table
        group_by: Label to group by when calculating the percentages
        cols: Columns of the diffxpy summary to keep
Returns:
Pandas Dataframe of diffxpy table with percentages
"""
result = de_test.summary().sort_values(by=["qval"], ascending=True)
result = result[result.qval < qval_thresh].loc[:, cols].copy()
# Add percentages
result = add_percentages(adata[cells], result, ids=ids, group_by=group_by)
return result
def correlate_means_to_gene(means: pd.DataFrame, corr_gene: str = "EOMES"):
"""
Calculate gene to gene correlation based on a mean expression table
Args:
        means: Data frame of mean expression values with genes as columns
        corr_gene: Gene to correlate all other genes to
Returns:
Pandas DataFrame of correlations
"""
import scipy.stats
genes = means.columns.values
cors = pd.DataFrame(index=genes, columns=["spearman_corr", "pvalue"])
# tab = sc.get.obs_df(sub, keys = [corr_gene], layer = None, use_raw = True)
table = means.loc[:, [corr_gene]].values
# Loop over all genes.
for gene in genes:
tmp = scipy.stats.spearmanr(table, means.loc[:, [gene]]) # Spearman's rho
cors.loc[gene, :] = tmp[0:2]
cors.dropna(axis=0, inplace=True)
cors.sort_values("spearman_corr", ascending=False, inplace=True)
return cors
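# Example usage (illustrative sketch). `means` is assumed to be a samples-by-genes
# table of mean expression values containing an "EOMES" column:
#
#   eomes_correlations = correlate_means_to_gene(means, corr_gene="EOMES")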
def extended_marker_table(
adata: AnnData,
qval_thresh: float = 0.05,
cell_type_label: str = "cell_type",
gene_ranks_key: str = "rank_genes_groups",
):
"""
Generates an extended marker table with cell types and percentages of expressed cell types per cluster.
Run scanpy.tl.rank_genes_groups before using this function.
Args:
adata: AnnData object containing ranked genes
qval_thresh: Threshold to filter the log fold change for
cell_type_label: Label containing all cell types
gene_ranks_key: Key for the ranked gene groups (generated by sc.tl.rank_genes_groups)
Returns:
A Pandas DataFrame
"""
result = adata.uns[gene_ranks_key]
all_markers = []
for cluster in result["names"].dtype.names:
current = pd.DataFrame(
{
"gene": result["names"][cluster],
"score": result["scores"][cluster],
"log_FC": result["logfoldchanges"][cluster],
"pval": result["pvals"][cluster],
"pval_adj": result["pvals_adj"][cluster],
"cell_type": cluster,
}
)
# Add percentage expressed per cell type
adata.obs["group"] = ["within" if ct == cluster else "outside" for ct in adata.obs.loc[:, cell_type_label]]
current = add_percentages(adata, table=current, group_by="group", gene_label="gene", ids=["within", "outside"])
all_markers.append(current)
all_markers_df = pd.concat(all_markers)
all_markers_df = all_markers_df[all_markers_df.pval_adj < qval_thresh].copy()
return all_markers_df
def generate_pseudobulk(
adata: AnnData, group_key: str = "identifier", sep="\t", save: Optional[str] = None
) -> pd.DataFrame:
"""
Generates a pseudobulk for a given key of groups in the AnnData object.
Looks like:
+------------+------------------+------------------+
| Genes | Group Member 1 | Group Member 2 |
+============+==================+==================+
| Gene 1 | Value 1 | Value 2 |
+------------+------------------+------------------+
| Gene 2 | Value 2 | Value 3 |
+------------+------------------+------------------+
Args:
adata: AnnData object
group_key: The key to group by. E.g. by mice, by condition, ... (default: 'identifier')
sep: Separator to use when saving the pseudobulk table (default: '\t')
save: Path to save the pseudobulk table to (default: None)
Returns:
A Pandas DataFrame containing the pseudobulk table
"""
pseudobulk = pd.DataFrame(data=adata.var_names.values, columns=["Genes"])
for i in adata.obs.loc[:, group_key].cat.categories:
temp = adata.obs.loc[:, group_key] == i
pseudobulk[i] = adata[temp].X.sum(0, dtype=int) # column sums (genes)
if save:
pseudobulk.to_csv(save, sep=sep, index=False)
return pseudobulk
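# Example usage (illustrative sketch; the group key is a placeholder):
#
#   pseudobulk_df = generate_pseudobulk(adata, group_key="identifier",
#                                       save="pseudobulk.tsv", sep="\t")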
def automated_marker_annotation(
adata: AnnData,
organism: str,
tissue: str,
marker_file: str,
key: str = "rank_genes_groups",
normalize: Optional[Literal["reference", "data"]] = "reference",
p_value: float = 0.05,
log_fold_change: float = 2,
):
"""Calculates a marker gene overlap based on pre-existing annotations.
Currently supported marker files:
+------------+------------+------------------------------+
| Organism | Tissue | Marker File |
+============+============+==============================+
| Mouse | Lung | lung_particle_markers.txt |
+------------+------------+------------------------------+
| Human | NA | |
+------------+------------+------------------------------+
Args:
adata: AnnData object containing ranked genes
organism: Currently supported: 'mouse'
tissue: Currently supported: 'lung'
marker_file: Name of the marker file to be used - refer to table
key: Key of ranked genes in adata (default: 'rank_genes_groups')
normalize: Normalization option for the marker gene overlap output (default: 'reference')
p_value: p-value threshold for existing marker genes (default: 0.05)
log_fold_change: log fold change threshold for existing marker genes (default: 2)
Returns:
Pandas DataFrame of overlapping genes. Visualize with a Seaborn Heatmap
"""
supported_organisms = {"mouse"}
supported_tissues = {"lung"}
supported_marker_files = {"lung_particle_markers.txt"}
if organism not in supported_organisms:
print(f"[bold red]Unfortunately organism {organism} is not yet supported.")
return
if tissue not in supported_tissues:
print(f"[bold red]Unfortunately tissue {tissue} is not yet supported.")
return
if marker_file not in supported_marker_files:
print(f"[bold red]Unfortunately marker file {marker_file} could not be found. Please check your spelling.")
return
marker_table = pd.read_csv(f"{WORKING_DIRECTORY}/markers/{marker_file}", sep="\t", index_col=None)
marker_table = marker_table[
(marker_table.logfoldchange > log_fold_change) & (marker_table.pval_adj < p_value)
].copy()
marker = dict()
for ct in marker_table["cell_type"].unique():
tmp = marker_table[marker_table["cell_type"] == ct]
marker[ct] = tmp.gene.values
return sc.tl.marker_gene_overlap(adata, marker, key=key, normalize=normalize)
def de_res_to_anndata(
adata: AnnData,
de_res: pd.DataFrame,
*,
groupby: str,
gene_id_col: str = "gene_symbol",
score_col: str = "score",
pval_col: str = "pvalue",
pval_adj_col: Optional[str] = None,
lfc_col: str = "lfc",
key_added: str = "rank_genes_groups",
) -> None:
"""Add a tabular differential expression result to AnnData as if it was produced by scanpy.tl.rank_genes_groups.
Args:
adata: Annotated data matrix
de_res: Tablular DE result as Pandas DataFrame
groupby: Column in `de_res` that indicates the group. This column must also exist in `adata.obs`.
gene_id_col: Column in `de_res` that holds the gene identifiers
score_col: Column in `de_res` that holds the score (results will be ordered by score).
pval_col: Column in `de_res` that holds the unadjusted pvalue
pval_adj_col: Column in `de_res` that holds the adjusted pvalue.
If not specified, the unadjusted p values will be FDR-adjusted.
lfc_col: Column in `de_res` that holds the log fold change
key_added: Key under which the results will be stored in adata.uns
"""
if groupby not in adata.obs.columns or groupby not in de_res.columns:
raise ValueError("groupby column must exist in both adata and de_res. ")
res_dict = {
"params": {
"groupby": groupby,
"reference": "rest",
"method": "other",
"use_raw": True,
"layer": None,
"corr_method": "other",
},
"names": [],
"scores": [],
"pvals": [],
"pvals_adj": [],
"logfoldchanges": [],
}
df_groupby = de_res.groupby(groupby)
for _, tmp_df in df_groupby:
tmp_df = tmp_df.sort_values(score_col, ascending=False)
res_dict["names"].append(tmp_df[gene_id_col].values) # type: ignore
res_dict["scores"].append(tmp_df[score_col].values) # type: ignore
res_dict["pvals"].append(tmp_df[pval_col].values) # type: ignore
if pval_adj_col is not None:
res_dict["pvals_adj"].append(tmp_df[pval_adj_col].values) # type: ignore
else:
res_dict["pvals_adj"].append(fdrcorrection(tmp_df[pval_col].values)[1]) # type: ignore
res_dict["logfoldchanges"].append(tmp_df[lfc_col].values) # type: ignore
for key in ["names", "scores", "pvals", "pvals_adj", "logfoldchanges"]:
res_dict[key] = pd.DataFrame(
np.vstack(res_dict[key]).T, # type: ignore
columns=list(df_groupby.groups.keys()),
).to_records(index=False, column_dtypes="O")
adata.uns[key_added] = res_dict
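# Example usage (illustrative sketch; the column names are placeholders matching the
# defaults above). After this call the stored result can be inspected with
# scanpy.get.rank_genes_groups_df or plotted with scanpy.pl.rank_genes_groups:
#
#   de_res_to_anndata(
#       adata, de_res, groupby="cell_type", gene_id_col="gene_symbol",
#       score_col="score", pval_col="pvalue", lfc_col="lfc",
#       key_added="rank_genes_groups",
#   )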
|
/sc_toolbox-0.12.3-py3-none-any.whl/sc_toolbox/tools/__init__.py
| 0.875893 | 0.500305 |
__init__.py
|
pypi
|
import importlib.resources
import pickle
from enum import Enum
from typing import Dict, List, Tuple
from scipy import interpolate
from .data_vortex import temperature_03, temperature_05, temperature_08
from .vortex_instance import VortexInstance
class _DictKey(str, Enum):
DELTA_KEY = "delta"
class _FileName(str, Enum):
delta = "delta.pkl"
spectra = "spectra.pkl"
u = "u.pkl"
v = "v.pkl"
class GeneralParameters(Enum):
"""GeneralParameters
Parameters independent of temperature.
Attributes:
        KF_XI: Value of the Fermi wave number multiplied by the Pippard length.
        This is equivalent to twice the Fermi energy divided by the
        zero-temperature bulk gap (k_F * xi = 2 * E_F / Delta_0).
        MAX_ANGULAR_MOMENTUM: Max value of angular momentum quantum number.
        MIN_ANGULAR_MOMENTUM: Min value of angular momentum quantum number.
        SIZE_KF: System size scaled by the Fermi wave number; this is used in
        the calculation of the self-consistent (BdG) equation.
"""
KF_XI = 50
MAX_ANGULAR_MOMENTUM = 99
MIN_ANGULAR_MOMENTUM = -100
SIZE_KF = 400
class VortexInstanceT03(VortexInstance):
class Parameters(Enum):
"""Parameters
        Parameters, including those that depend on temperature.
        Attributes:
            KF_XI: Value of the Fermi wave number multiplied by the Pippard length.
            This is equivalent to twice the Fermi energy divided by the
            zero-temperature bulk gap (k_F * xi = 2 * E_F / Delta_0).
            DELTA_OVER_CDGM: Value of the zero-temperature bulk gap divided by the
            level spacing of the CdGM mode.
            T_OVER_TC: Temperature scaled by the transition temperature T_c.
            MAX_ANGULAR_MOMENTUM: Max value of angular momentum quantum number.
            MIN_ANGULAR_MOMENTUM: Min value of angular momentum quantum number.
            SIZE_KF: System size scaled by the Fermi wave number; this is used in
            the calculation of the self-consistent (BdG) equation.
"""
KF_XI = GeneralParameters.KF_XI.value
DELTA_OVER_CDGM = 31.363654447926976
T_OVER_TC = 0.3
MAX_ANGULAR_MOMENTUM = GeneralParameters.MAX_ANGULAR_MOMENTUM.value
MIN_ANGULAR_MOMENTUM = GeneralParameters.MIN_ANGULAR_MOMENTUM.value
SIZE_KF = GeneralParameters.SIZE_KF.value
def __init__(self) -> None:
self.pair_potential: interpolate.CubicSpline
self.spectra_dict: Dict[int, float] = dict()
self.u_dict: Dict[int, interpolate.CubicSpline] = dict()
self.v_dict: Dict[int, interpolate.CubicSpline] = dict()
self._construct()
def _construct(self) -> None:
self.pair_potential, self.spectra_dict, self.u_dict, self.v_dict = _construct(
temperature_03
)
def get_pair_potential(self) -> interpolate.CubicSpline:
"""get_pair_potential()
        This method returns the pair potential at T = 0.3 T_c.
        The radial coordinate is scaled by the inverse of the Fermi wave number.
        The value of the pair potential is scaled by the zero-temperature bulk gap.
Returns:
scipy.interpolate.CubicSpline: pair potential at T = 0.3 T_c
"""
return self.pair_potential
def get_ith_eigen_func(
self, i: int
) -> Tuple[interpolate.CubicSpline, interpolate.CubicSpline]:
"""get_ith_eigen_func()
        This method returns the ith eigenfunctions (u, v) at T = 0.3 T_c.
        The radial coordinate is scaled by the inverse of the Fermi wave number.
        As u and v have dimensions of an inverse area (1 / L^2), their values
        are scaled by the square of the Fermi wave number (k_F * k_F).
Args:
i (int): Angular momentum quantum number you want to get.
Returns:
Tuple[scipy.interpolate.CubicSpline, scipy.interpolate.CubicSpline]:
ith eigen functions (u_i, v_i)
"""
return self.u_dict[i], self.v_dict[i]
def get_ith_eigen_energy(self, i: int) -> float:
"""get_ith_eigen_energy()
This method returns ith eigen energy e_i at T = 0.3 T_c.
Args:
i (int): Angular momentum quantum number you want to get.
Returns:
float: Value of ith eigen energy e_i scaled by level spacing of CdGM mode.
"""
return self.spectra_dict[i]
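# Example usage (illustrative sketch): evaluate the T = 0.3 T_c vortex solution and
# query the CdGM spectrum. The variable names are placeholders.
#
#   instance = VortexInstanceT03()
#   delta = instance.get_pair_potential()    # CubicSpline over k_F * r in [0, 400]
#   u0, v0 = instance.get_ith_eigen_func(0)  # eigenfunctions for angular momentum 0
#   e0 = instance.get_ith_eigen_energy(0)    # energy in units of the CdGM level spacing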
class VortexInstanceT05(VortexInstance):
class Parameters(Enum):
"""Parameters
        Parameters, including those that depend on temperature.
        Attributes:
            KF_XI: Value of the Fermi wave number multiplied by the Pippard length.
            This is equivalent to twice the Fermi energy divided by the
            zero-temperature bulk gap (k_F * xi = 2 * E_F / Delta_0).
            DELTA_OVER_CDGM: Value of the zero-temperature bulk gap divided by the
            level spacing of the CdGM mode.
            T_OVER_TC: Temperature scaled by the transition temperature T_c.
            MAX_ANGULAR_MOMENTUM: Max value of angular momentum quantum number.
            MIN_ANGULAR_MOMENTUM: Min value of angular momentum quantum number.
            SIZE_KF: System size scaled by the Fermi wave number; this is used in
            the calculation of the self-consistent (BdG) equation.
"""
KF_XI = GeneralParameters.KF_XI.value
DELTA_OVER_CDGM = 43.66058913995896
T_OVER_TC = 0.5
MAX_ANGULAR_MOMENTUM = GeneralParameters.MAX_ANGULAR_MOMENTUM.value
MIN_ANGULAR_MOMENTUM = GeneralParameters.MIN_ANGULAR_MOMENTUM.value
SIZE_KF = GeneralParameters.SIZE_KF.value
def __init__(self) -> None:
self.pair_potential: interpolate.CubicSpline
self.spectra_dict: Dict[int, float] = dict()
self.u_dict: Dict[int, interpolate.CubicSpline] = dict()
self.v_dict: Dict[int, interpolate.CubicSpline] = dict()
self._construct()
def _construct(self) -> None:
self.pair_potential, self.spectra_dict, self.u_dict, self.v_dict = _construct(
temperature_05
)
def get_pair_potential(self) -> interpolate.CubicSpline:
"""get_pair_potential()
        This method returns the pair potential at T = 0.5 T_c.
        The radial coordinate is scaled by the inverse of the Fermi wave number.
        The value of the pair potential is scaled by the zero-temperature bulk gap.
Returns:
scipy.interpolate.CubicSpline: pair potential at T = 0.5 T_c
"""
return self.pair_potential
def get_ith_eigen_func(
self, i: int
) -> Tuple[interpolate.CubicSpline, interpolate.CubicSpline]:
"""get_ith_eigen_func()
        This method returns the ith eigenfunctions (u, v) at T = 0.5 T_c.
        The radial coordinate is scaled by the inverse of the Fermi wave number.
        As u and v have dimensions of an inverse area (1 / L^2), their values
        are scaled by the square of the Fermi wave number (k_F * k_F).
Args:
i (int): Angular momentum quantum number you want to get.
Returns:
Tuple[scipy.interpolate.CubicSpline, scipy.interpolate.CubicSpline]:
ith eigen functions (u_i, v_i)
"""
return self.u_dict[i], self.v_dict[i]
def get_ith_eigen_energy(self, i: int):
"""get_ith_eigen_energy()
This method returns ith eigen energy e_i at T = 0.5 T_c.
Args:
i (int): Angular momentum quantum number you want to get.
Returns:
float: Value of ith eigen energy e_i scaled by level spacing of CdGM mode.
"""
return self.spectra_dict[i]
class VortexInstanceT08(VortexInstance):
class Parameters(Enum):
"""Parameters
        Parameters, including those that depend on temperature.
        Attributes:
            KF_XI: Value of the Fermi wave number multiplied by the Pippard length.
            This is equivalent to twice the Fermi energy divided by the
            zero-temperature bulk gap (k_F * xi = 2 * E_F / Delta_0).
            DELTA_OVER_CDGM: Value of the zero-temperature bulk gap divided by the
            level spacing of the CdGM mode.
            T_OVER_TC: Temperature scaled by the transition temperature T_c.
            MAX_ANGULAR_MOMENTUM: Max value of angular momentum quantum number.
            MIN_ANGULAR_MOMENTUM: Min value of angular momentum quantum number.
            SIZE_KF: System size scaled by the Fermi wave number; this is used in
            the calculation of the self-consistent (BdG) equation.
"""
KF_XI = GeneralParameters.KF_XI.value
DELTA_OVER_CDGM = 64.11317362045985
T_OVER_TC = 0.8
MAX_ANGULAR_MOMENTUM = GeneralParameters.MAX_ANGULAR_MOMENTUM.value
MIN_ANGULAR_MOMENTUM = GeneralParameters.MIN_ANGULAR_MOMENTUM.value
SIZE_KF = GeneralParameters.SIZE_KF.value
def __init__(self) -> None:
self.pair_potential: interpolate.CubicSpline
self.spectra_dict: Dict[int, float] = dict()
self.u_dict: Dict[int, interpolate.CubicSpline] = dict()
self.v_dict: Dict[int, interpolate.CubicSpline] = dict()
self._construct()
def _construct(self) -> None:
self.pair_potential, self.spectra_dict, self.u_dict, self.v_dict = _construct(
temperature_08
)
def get_pair_potential(self) -> interpolate.CubicSpline:
"""get_pair_potential()
        This method returns the pair potential at T = 0.8 T_c.
        The radial coordinate is scaled by the inverse of the Fermi wave number.
        The value of the pair potential is scaled by the zero-temperature bulk gap.
Returns:
scipy.interpolate.CubicSpline: pair potential at T = 0.8 T_c.
"""
return self.pair_potential
def get_ith_eigen_func(
self, i: int
) -> Tuple[interpolate.CubicSpline, interpolate.CubicSpline]:
"""get_ith_eigen_func()
        This method returns the ith eigenfunctions (u, v) at T = 0.8 T_c.
        The radial coordinate is scaled by the inverse of the Fermi wave number.
        As u and v have dimensions of an inverse area (1 / L^2), their values
        are scaled by the square of the Fermi wave number (k_F * k_F).
Args:
i (int): Angular momentum quantum number you want to get.
Returns:
Tuple[scipy.interpolate.CubicSpline, scipy.interpolate.CubicSpline]:
ith eigen functions (u_i, v_i)
"""
return self.u_dict[i], self.v_dict[i]
def get_ith_eigen_energy(self, i: int):
"""get_ith_eigen_energy()
This method returns ith eigen energy e_i at T = 0.8 T_c.
Args:
i (int): Angular momentum quantum number you want to get.
Returns:
float: Value of ith eigen energy e_i scaled by level spacing of CdGM mode.
"""
return self.spectra_dict[i]
def _make_spline(x_vector: List[float], f: List[float]) -> interpolate.CubicSpline:
return interpolate.CubicSpline(x_vector, f)
def _construct(my_package):
delta_dict: Dict[str, List[float]] = dict()
u_dict: Dict[int, interpolate.CubicSpline] = dict()
v_dict: Dict[int, interpolate.CubicSpline] = dict()
spectra_dict: Dict[int, float] = dict()
kfr: List[float] = [
i * 0.1
for i in range(
0, GeneralParameters.SIZE_KF.value * 10 + 1
) # [0.0, 0.1, 0.2, ..., 400]
]
with importlib.resources.open_binary(
my_package, _FileName.delta.value
) as delta, importlib.resources.open_binary(
my_package, _FileName.spectra.value
) as spc, importlib.resources.open_binary(
my_package, _FileName.u.value
) as u, importlib.resources.open_binary(
my_package, _FileName.v.value
) as v:
delta_dict: Dict[str, List[float]] = pickle.load(delta)
pair_potential = _make_spline(kfr, delta_dict[_DictKey.DELTA_KEY.value])
spectra_dict = pickle.load(spc)
u_load = pickle.load(u)
v_load = pickle.load(v)
for i in range(
GeneralParameters.MIN_ANGULAR_MOMENTUM.value,
GeneralParameters.MAX_ANGULAR_MOMENTUM.value + 1,
):
u_dict[i] = _make_spline(kfr, u_load[i])
v_dict[i] = _make_spline(kfr, v_load[i])
return pair_potential, spectra_dict, u_dict, v_dict
|
/sc_vortex_2d-1.0.2-py3-none-any.whl/sc_vortex_2d/vortex.py
| 0.952802 | 0.375535 |
vortex.py
|
pypi
|
from plone.app.vocabularies.catalog import QuerySearchableTextSourceView
from plone.app.vocabularies.catalog import SearchableTextSource
from plone.app.vocabularies.terms import BrowsableTerm
from Products.CMFCore.interfaces._content import IFolderish
from Products.CMFCore.utils import getToolByName
from sc.contentrules.groupbydate.config import RELPATHVOC
from zope.component import queryUtility
from zope.interface import implements
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleVocabulary
import logging
logger = logging.getLogger('sc.contentrules.groupbydate')
class RelPathSearchableTextSource(SearchableTextSource):
""" A special case of a SearchableTextSource where we always support
relative paths
"""
def __contains__(self, value):
"""Return whether the value is available in this source
"""
if not (value[0] == '.'):
result = super(RelPathSearchableTextSource,
self).__contains__(value)
else:
result = True
return result
def search(self, query_string):
""" Add relative paths to vocabulary
"""
results = super(RelPathSearchableTextSource,
self).search(query_string)
relPaths = RELPATHVOC.keys()
results = relPaths + list(results)
return (r for r in results)
class RelPathQSTSourceView(QuerySearchableTextSourceView):
""" A special case of a QuerySearchableTextSourceView where we
always support relative paths
"""
def getTerm(self, value):
if not (value[0] == '.'):
return super(RelPathQSTSourceView, self).getTerm(value)
terms = RELPATHVOC
token = value
title = terms.get(value, value)
browse_token = parent_token = None
return BrowsableTerm(value, token=token, title=title,
description=value,
browse_token=browse_token,
parent_token=parent_token)
class ContainerSearcher(object):
    """ Check for all available/allowed-addable folderish
        content types in the site.
"""
implements(IVocabularyFactory)
def __call__(self, context):
context = getattr(context, 'context', context)
portal_url = getToolByName(context, 'portal_url')
site = portal_url.getPortalObject()
pt = getToolByName(site, 'portal_types')
# Use only Friendly Types
util = queryUtility(IVocabularyFactory, 'plone.app.vocabularies.ReallyUserFriendlyTypes')
types = util(context)
types_ids = types.by_token.keys()
folderish = []
for type_id in types_ids:
site_type = pt[type_id]
if (site_type.global_allow) and (site_type.isConstructionAllowed(site)):
term = types.by_token[type_id]
site.invokeFactory(type_id, 'item')
item = site['item']
if IFolderish.providedBy(item):
folderish.append(term)
del site['item']
return SimpleVocabulary(folderish)
ContainerSearcherFactory = ContainerSearcher()
|
/sc.contentrules.groupbydate-2.0.1.zip/sc.contentrules.groupbydate-2.0.1/src/sc/contentrules/groupbydate/vocabulary.py
| 0.654453 | 0.22564 |
vocabulary.py
|
pypi
|
from Acquisition import aq_parent
from plone.app.contentrules.conditions.portaltype import IPortalTypeCondition
from plone.contentrules.rule.interfaces import IRule
from Products.CMFCore.utils import getToolByName
from sc.contentrules.layout import MessageFactory as _
from zope.browsermenu.interfaces import IBrowserMenu
from zope.component import getUtility
from zope.component import queryMultiAdapter
from zope.interface import implements
from zope.interface import Interface
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
import logging
logger = logging.getLogger('sc.contentrules.layout')
class ViewsVocabulary(object):
"""Vocabulary factory listing available views
"""
implements(IVocabularyFactory)
def _get_rule(self, context):
''' Return rule that contains the action '''
rule = None
if IRule.providedBy(context):
rule = context
else:
rule = aq_parent(context)
return rule
def _get_portal_types(self, rule):
        ''' Return the portal types checked by a portal type
            condition of the given rule, if one exists
'''
conditions = rule.conditions
for condition in conditions:
if IPortalTypeCondition.providedBy(condition):
types = condition.check_types
return types
return []
def _get_views_titles(self, views):
result = []
for mid in views:
view = queryMultiAdapter((self.context, self.REQUEST),
Interface, name=mid)
if view is not None:
menu = getUtility(IBrowserMenu, 'plone_displayviews')
item = menu.getMenuItemByAction(self, self.REQUEST, mid)
title = item and item.title or mid
result.append((mid, title))
else:
method = getattr(self.context, mid, None)
if method is not None:
# a method might be a template, script or method
try:
title = method.aq_inner.aq_explicit.title_or_id()
except AttributeError:
title = mid
else:
title = mid
result.append((mid, title))
return result
def _get_views(self, context):
        ''' List the view methods shared by the portal types of this rule
'''
views = set()
portal_types = getToolByName(context, 'portal_types')
rule = self._get_rule(context)
if rule:
types = self._get_portal_types(rule)
for type_name in types:
pt = portal_types[type_name]
pt_views = pt.getAvailableViewMethods(context)
if not views:
views = set(pt_views)
else:
views = views.intersection(pt_views)
return self._get_views_titles(views)
def __call__(self, context):
self.context = context
self.REQUEST = context.REQUEST
terms = [SimpleTerm('_default_view',
title=_('Default Content View'))]
views = self._get_views(context)
for key, title in views:
terms.append(
SimpleTerm(
key,
title=_(title)))
return SimpleVocabulary(terms)
ViewsVocabularyFactory = ViewsVocabulary()
|
/sc.contentrules.layout-1.0.1.zip/sc.contentrules.layout-1.0.1/src/sc/contentrules/layout/vocabulary.py
| 0.550366 | 0.162081 |
vocabulary.py
|
pypi
|
from Acquisition import aq_inner
from OFS.SimpleItem import SimpleItem
from plone.app.contentrules import PloneMessageFactory as _
from plone.app.contentrules.browser.formhelper import AddForm
from plone.app.contentrules.browser.formhelper import EditForm
from plone.contentrules.rule.interfaces import IExecutable
from plone.contentrules.rule.interfaces import IRuleElementData
from sc.contentrules.metadata import utils
from zope.component import adapts
from zope.formlib import form
from zope.interface import implements
from zope.interface import Interface
from zope.schema import Choice
from zope.schema import Set
VOCAB = 'plone.app.vocabularies.Keywords'
FORM_NAME = _(u"Configure condition")
FORM_DESC = _(u'A tag condition makes the rule apply only to contents with '
u'any of the selected tags. If no tag is selected the rule '
u'will apply to contents without tags applied to them.')
class ISubjectCondition(Interface):
'''Interface for the configurable aspects of a Tag condition.
This is also used to create add and edit forms, below.
'''
subject = Set(title=_(u'Tags'),
description=_(u'Tags to check for. Leave it blank '
u'to check for contents without any '
u'tag set'),
required=False,
value_type=Choice(vocabulary=VOCAB))
class SubjectCondition(SimpleItem):
'''The actual persistent implementation of the Tag condition element.
'''
implements(ISubjectCondition, IRuleElementData)
subject = []
element = "sc.contentrules.conditions.Subject"
@property
def summary(self):
subject = self.subject
if not subject:
msg = _(u"No tags selected")
else:
msg = _(u"Tags contains ${tags}",
mapping=dict(tags=" or ".join(subject)))
return msg
class SubjectConditionExecutor(object):
"""The executor for this condition.
This is registered as an adapter in configure.zcml
"""
implements(IExecutable)
adapts(Interface, ISubjectCondition, Interface)
def __init__(self, context, element, event):
self.context = context
self.element = element
self.event = event
def __call__(self):
expected = self.element.subject
obj = aq_inner(self.event.object)
if not (utils.subject_available(obj)):
return False
subject = utils.subject_for_object(obj)
if not expected:
return not (expected or subject)
intersection = set(expected).intersection(subject)
return intersection and True or False
class SubjectAddForm(AddForm):
"""An add form for Tag conditions.
"""
form_fields = form.FormFields(ISubjectCondition)
label = _(u'Add Tag Condition')
description = FORM_DESC
form_name = FORM_NAME
def create(self, data):
c = SubjectCondition()
form.applyChanges(c, self.form_fields, data)
return c
class SubjectEditForm(EditForm):
"""An edit form for Tag conditions
"""
form_fields = form.FormFields(ISubjectCondition)
label = _(u"Edit Tag Condition")
description = FORM_DESC
form_name = FORM_NAME
|
/sc.contentrules.metadata-1.0.1.zip/sc.contentrules.metadata-1.0.1/src/sc/contentrules/metadata/conditions/subject.py
| 0.711932 | 0.161949 |
subject.py
|
pypi
|
from Acquisition import aq_parent
from OFS.SimpleItem import SimpleItem
from plone.app.contentrules.browser.formhelper import AddForm
from plone.app.contentrules.browser.formhelper import EditForm
from plone.contentrules.rule.interfaces import IExecutable
from plone.contentrules.rule.interfaces import IRuleElementData
from sc.contentrules.metadata import MessageFactory as _
from sc.contentrules.metadata import utils
from zope.component import adapts
from zope.formlib import form
from zope.interface import implements
from zope.interface import Interface
from zope.schema import Bool
from zope.schema import Choice
from zope.schema import Set
VOCAB = 'plone.app.vocabularies.Keywords'
FORM_NAME = _(u"Configure action")
FORM_DESC = _(u'An action that applies Tags to a content.')
class ISubjectAction(Interface):
'''Interface for the configurable aspects of a set Tag action.
This is also used to create add and edit forms, below.
'''
same_as_parent = Bool(title=_(u"Use Tags from parent object"),
description=_(u"Select this to use Tags as defined "
u"in the parent object. If this "
u"option is selected this action "
u"ignores the following field."))
subject = Set(title=_(u'Tags'),
                  description=_(u'Tags to apply to the content. This field '
                                u'is ignored if the option above is '
                                u'selected.'),
required=False,
value_type=Choice(vocabulary=VOCAB))
class SubjectAction(SimpleItem):
""" Stores action settings
"""
implements(ISubjectAction, IRuleElementData)
element = 'sc.contentrules.actions.Subject'
same_as_parent = False
subject = []
@property
def summary(self):
same_as_parent = self.same_as_parent
subject = self.subject
if same_as_parent:
msg = _(u"Apply tags from parent object.")
else:
msg = _(u"Apply tags ${tags}",
mapping=dict(tags=", ".join(subject)))
return msg
class SubjectActionExecutor(object):
""" Execute an action
"""
implements(IExecutable)
adapts(Interface, ISubjectAction, Interface)
def __init__(self, context, element, event):
self.context = context
self.element = element
self.event = event
def __call__(self):
''' Apply selected layout to a content item
'''
obj = self.event.object
same_as_parent = self.element.same_as_parent
subject = self.element.subject
if not (utils.subject_available(obj)):
return False
if same_as_parent:
parent = aq_parent(obj)
if not (utils.subject_available(parent)):
return False
subject = utils.subject_for_object(parent)
return utils.set_subject(obj, subject)
class SubjectAddForm(AddForm):
"""
An add form for the Tags contentrules action
"""
form_fields = form.FormFields(ISubjectAction)
label = _(u"Add set Tags content rules action")
description = FORM_DESC
form_name = FORM_NAME
def create(self, data):
a = SubjectAction()
form.applyChanges(a, self.form_fields, data)
return a
class SubjectEditForm(EditForm):
"""
An edit form for the set Tags contentrules action
"""
form_fields = form.FormFields(ISubjectAction)
label = _(u"Edit the set Tags content rules action")
description = FORM_DESC
form_name = FORM_NAME
|
/sc.contentrules.metadata-1.0.1.zip/sc.contentrules.metadata-1.0.1/src/sc/contentrules/metadata/actions/subject.py
| 0.699768 | 0.17094 |
subject.py
|
pypi
|
import itertools
from zope.interface import implements
try:
from zope.schema.interfaces import IVocabularyFactory
except ImportError:
from zope.app.schema.vocabulary import IVocabularyFactory
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from plone.app.vocabularies.catalog import SearchableTextSource
from plone.app.vocabularies.catalog import QuerySearchableTextSourceView
from plone.app.vocabularies.terms import BrowsableTerm
from zope.app.form.browser.interfaces import ISourceQueryView, ITerms
from sc.contentrules.movebyattribute import MessageFactory as _
RELPATHVOC = {'../': _(u" One level up"),
'./': _(u' Same folder of content')}
class RelPathSearchableTextSource(SearchableTextSource):
""" A special case of a SearchableTextSource where we always support
relative paths
"""
def __contains__(self, value):
"""Return whether the value is available in this source
"""
if not (value[0] == '.'):
result = super(RelPathSearchableTextSource,self).__contains__(value)
else:
result = True
return result
def search(self, query_string):
""" Add relative paths to vocabulary
"""
results = super(RelPathSearchableTextSource,self).search(query_string)
relPaths = RELPATHVOC.keys()
results = relPaths + list(results)
return (r for r in results)
class RelPathQSTSourceView(QuerySearchableTextSourceView):
""" A special case of a QuerySearchableTextSourceView where we always support
relative paths
"""
def getTerm(self, value):
if not (value[0] == '.'):
return super(RelPathQSTSourceView,self).getTerm(value)
terms = RELPATHVOC
token = value
title = terms.get(value,value)
browse_token = parent_token = None
return BrowsableTerm(value, token=token, title=title,
description=value,
browse_token=browse_token,
parent_token=parent_token)
|
/sc.contentrules.movebyattribute-0.5.tar.gz/sc.contentrules.movebyattribute-0.5/sc/contentrules/movebyattribute/vocabulary.py
| 0.613815 | 0.196537 |
vocabulary.py
|
pypi
|
from zope.interface import Interface
from zope import schema
from z3c.form import field
from z3c.form import group
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
import logging
logger = logging.getLogger('sc.galleria.support')
# dependencies
# Thanks: collective.gallery
try:
#plone4
from plone.app.folder.folder import IATUnifiedFolder as IFolder
from Products.ATContentTypes.interfaces.link import IATLink as ILink
from Products.ATContentTypes.interfaces.topic import IATTopic as ITopic
from Products.ATContentTypes.interfaces.image import IATImage as IImage
except ImportError, e:
logger.info('switch to plone3 %s' % e)
#plone3
from Products.ATContentTypes.interface import IATFolder as IFolder
from Products.ATContentTypes.interface import IATLink as ILink
from Products.ATContentTypes.interface import IATTopic as ITopic
from Products.ATContentTypes.interface import IATImage as IImage
from sc.galleria.support import MessageFactory as _
transitionsvoc = SimpleVocabulary(
[SimpleTerm(value='fade', title=_(u'Fade')),
SimpleTerm(value='flash', title=_(u'Flash')),
SimpleTerm(value='pulse', title=_(u'Pulse')),
SimpleTerm(value='slide', title=_(u'Slide')),
SimpleTerm(value='fadeslide', title=_(u'FadeSlide')), ]
)
thumbnailsvoc = SimpleVocabulary(
[SimpleTerm(value='show', title=_(u"Show thumbnails")),
SimpleTerm(value='empty', title=_(u"Don't show thumbnails")), ]
)
class IGalleriaLayer(Interface):
"""
Marker Default browser layer this product.
"""
class IGalleria(Interface):
"""
"""
def __init__(self, context, request, *args, **kwargs):
""" """
def galleriajs(self):
""" """
def getThumbnails(self):
""" """
def get_theme(self):
""" """
def portal_url(self):
""" """
def galleria_flickrid(self):
""" """
def galleria_picasauserandid(self):
""" """
class IGeneralSettings(Interface):
"""Some general settings.
These fields will appear on the 'Default' tab.
Option informations: http://galleria.io/docs/1.2/options/
"""
autoplay = schema.Bool(title=_(u"Auto Play."),
description=_(u"Sets Galleria to play slidehow when initialized."),
default=True,
required=True,)
gallery_wait = schema.Int(title=_(u"Gallery Wait"),
description=_(u"Sets how long Galleria should wait when trying to extract measurements."),
default=5000,
required=True,)
showInf = schema.Bool(title=_(u"Show informations"),
description=_(u"Toggles the caption."),
default=True,
required=True,)
gallery_width = schema.Int(title=_(u"Gallery width"),
description=_(u"Manually set a gallery width."),
default=500,
required=True,)
gallery_height = schema.Int(title=_(u"Gallery height"),
description=_(u"Manually set a gallery height."),
default=500,
required=True,)
imagePosition = schema.TextLine(title=_(u"Image css position"),
description=_(u"Eg. 'top right' or '20% 100%'"),
default=_(u'center'),
required=True,)
lightbox = schema.Bool(title=_(u"Enable lightbox"),
default=False,
required=True,)
showCounting = schema.Bool(title=_(u"Show counting"),
description=_(u"Toggles the counter."),
default=True,
required=True,)
transitions = schema.Choice(title=_(u"Transitions"),
description=_(u"Defines what transition to use."),
default=_(u'fade'),
vocabulary=transitionsvoc,
required=True,)
transitionSpeed = schema.Int(title=_(u"Transition Speed"),
description=_(u"Defines the speed of the transition."),
default=400,
required=True,)
showimagenav = schema.Bool(title=_(u"show image navigation"),
description=_(u"toggles the image navigation arrows."),
default=True,
required=True,)
swipe = schema.Bool(title=_(u"swipe"),
description=_(u"Enables a swipe movement for flicking through images on touch devices."),
default=True,
required=True,)
selector = schema.TextLine(title=_(u"Selector jQuery"),
description=_(u"Eg. '#content-core' or '#content' or '.galleria'. Do not change if you do not know what I mean."),
default=u"#content-galleria",
required=True,)
thumbnails = schema.Choice(title=_(u"Show Thumbnails"),
description=_(u"Sets the creation of thumbnails"),
default=_(u'show'),
vocabulary=thumbnailsvoc,
required=True,)
imagecrop = schema.Bool(title=_(u"Enable image crop"),
description=_(u"Defines how the main image will be cropped inside it is container."),
default=True,
required=True,)
responsive = schema.Bool(title=_(u"Sets Gallery in responsive mode"),
description=_(u"Means that it will resize the entire container in dynamic proportions added in your CSS."),
default=True,
required=True,)
debug = schema.Bool(title=_(u"Enable debug mode"),
description=_(u"Set this to false to prevent debug messages."),
default=False,
required=True,)
class IFaceBookPlugin(Interface):
""" Enable/Disable FaceBook plugin
"""
facebook = schema.Bool(title=_(u"Enable facebook plugin"),
description=_(u""),
default=False,)
facebook_max = schema.Int(title=_(u"Maximum number of photos."),
description=_(u"Maximum number of photos to return (maximum value 100)."),
default=20,
required=True,)
facebook_desc = schema.Bool(title=_(u"Show Description"),
description=_(u"The plugin fetches the title per default. If you also wish to fetch the description, set this option to true."),
default=False)
class IFlickrPlugin(Interface):
""" Enable/Disable Flickr plugin
http://galleria.io/docs/1.2/plugins/flickr/
"""
flickr = schema.Bool(title=_(u"Enable flickr plugin"),
description=_(u""),
default=False,)
flickr_max = schema.Int(title=_(u"Maximum number of photos."),
description=_(u"Maximum number of photos to return (maximum value 100)."),
default=20,
required=True,)
flickr_desc = schema.Bool(title=_(u"Show Description"),
description=_(u"The plugin fetches the title per default. If you also wish to fetch the description, set this option to true."),
default=False)
class IPicasaPlugin(Interface):
""" Enable/Disable Picasa plugin
http://galleria.io/docs/1.2/plugins/picasa/
"""
picasa = schema.Bool(title=_(u"Enable picasa plugin"),
description=_(u""),
default=False,)
picasa_max = schema.Int(title=_(u"Maximum number of photos."),
description=_(u"Maximum number of photos to return (maximum value 100)."),
default=20,
required=True,)
picasa_desc = schema.Bool(title=_(u"Show Description"),
description=_(u"The plugin fetches the title per default. If you also wish to fetch the description, set this option to true."),
default=False)
class IHistoryPlugin(Interface):
""" Enable/Disable History plugin
http://galleria.io/docs/1.2/plugins/picasa/
"""
history = schema.Bool(title=_(u"Enable history plugin"),
description=_(u""),
default=False,)
class IGalleriaSettings(IGeneralSettings, IFlickrPlugin, IPicasaPlugin,
IHistoryPlugin, IFaceBookPlugin):
"""The form schema contains all settings."""
class FormGroup1(group.Group):
label = _(u"Flickr Plugin")
fields = field.Fields(IFlickrPlugin)
class FormGroup2(group.Group):
label = _(u"Picasa Plugin")
fields = field.Fields(IPicasaPlugin)
class FormGroup3(group.Group):
label = _(u"History Plugin")
fields = field.Fields(IHistoryPlugin)
class FormGroup4(group.Group):
label = _(u"FaceBook Plugin")
fields = field.Fields(IFaceBookPlugin)
|
/sc.galleria.support-1.0.1.tar.gz/sc.galleria.support-1.0.1/src/sc/galleria/support/interfaces.py
| 0.68763 | 0.181336 |
interfaces.py
|
pypi
|
from plone import api
from plone.dexterity.browser.view import DefaultView
from plone.memoize import forever
from plone.memoize.instance import memoizedproperty
from sc.photogallery.config import HAS_ZIPEXPORT
from sc.photogallery.interfaces import IPhotoGallerySettings
from sc.photogallery.utils import human_readable_size
from sc.photogallery.utils import last_modified
from sc.photogallery.utils import PhotoGalleryMixin
from zope.component import getMultiAdapter
import os
if HAS_ZIPEXPORT:
from ftw.zipexport.generation import ZipGenerator
from ftw.zipexport.interfaces import IZipRepresentation
class View(DefaultView, PhotoGalleryMixin):
"""Slideshow view for Photo Gallery content type."""
def id(self):
return id(self)
@memoizedproperty
def results(self):
return self.context.listFolderContents()
@property
def is_empty(self):
return len(self.results) == 0
def image(self, obj, scale='large'):
"""Return an image scale if the item has an image field.
:param obj: [required]
:type obj: content type object
:param scale: the scale to be used
:type scale: string
"""
scales = obj.restrictedTraverse('@@images')
return scales.scale('image', scale)
def localized_time(self, obj, long_format=False):
"""Return the object time in a user-friendly way.
:param item: [required]
:type item: content type object
:param long_format: show long date format if True
:type scale: string
"""
return api.portal.get_localized_time(obj.Date(), long_format)
@property
def can_download(self):
"""Check if original images can be explicitly downloaded, that is,
if downloading is enabled globally and the current object allows it.
"""
record = IPhotoGallerySettings.__identifier__ + '.enable_download'
enabled_globally = api.portal.get_registry_record(record)
allow_download = self.context.allow_download
return enabled_globally and allow_download
def img_size(self, item):
try:
size = item.size() # Archetypes
except AttributeError:
size = item.image.size # Dexterity
return human_readable_size(size)
@property
def can_zipexport(self):
"""Check if original images can be downloaded as a ZIP file,
that is, if ftw.zipexport is installed and downloading is
allowed in the current object.
"""
return HAS_ZIPEXPORT and self.can_download
@property
def last_modified(self):
return last_modified(self.context)
def zip_url(self):
base_url = self.context.absolute_url()
url = '{0}/@@zip/{1}/{2}.zip'.format(
base_url, str(self.last_modified), self.context.getId())
return url
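        # Editor's note (illustrative, not part of the original module): for a
        # hypothetical gallery at http://example.org/plone/my-gallery the URL
        # built above would look like
        # http://example.org/plone/my-gallery/@@zip/1500000000.0/my-gallery.zip
        # The timestamp segment changes whenever the gallery is modified, so a
        # previously cached copy of the ZIP is never served for new content.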
@forever.memoize
def _zip_size(self, last_modified=None):
if not HAS_ZIPEXPORT:
return
with ZipGenerator() as generator:
for obj in [self.context]:
repre = getMultiAdapter(
(obj, self.request), interface=IZipRepresentation)
for path, pointer in repre.get_files():
generator.add_file(path, pointer)
zip_file = generator.generate()
size = os.stat(zip_file.name).st_size
return human_readable_size(size)
def zip_size(self):
return self._zip_size(self.last_modified)
|
/sc.photogallery-1.0b3.zip/sc.photogallery-1.0b3/src/sc/photogallery/browser/view.py
| 0.776835 | 0.209591 |
view.py
|
pypi
|
from collective.cover.tiles.base import IPersistentCoverTile
from collective.cover.tiles.base import PersistentCoverTile
from plone.app.uuid.utils import uuidToObject
from plone.memoize import view
from plone.tiles.interfaces import ITileDataManager
from plone.uuid.interfaces import IUUID
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from sc.photogallery import _
from sc.photogallery.utils import PhotoGalleryMixin
from zope import schema
from zope.interface import implementer
class IPhotoGalleryTile(IPersistentCoverTile):
"""A tile that shows a photo gallery."""
uuid = schema.TextLine(
title=_(u'UUID'),
required=False,
readonly=True,
)
@implementer(IPhotoGalleryTile)
class PhotoGalleryTile(PersistentCoverTile, PhotoGalleryMixin):
"""A tile that shows a photo gallery."""
index = ViewPageTemplateFile('photogallery.pt')
is_configurable = True
is_editable = False
is_droppable = True
short_name = _(u'msg_short_name_photogallery', u'Photo Gallery')
def accepted_ct(self):
"""Accept only Photo Gallery objects."""
return ['Photo Gallery']
def populate_with_object(self, obj):
super(PhotoGalleryTile, self).populate_with_object(obj) # check permissions
if obj.portal_type in self.accepted_ct():
uuid = IUUID(obj)
data_mgr = ITileDataManager(self)
data_mgr.set(dict(uuid=uuid))
def is_empty(self):
return (self.data.get('uuid', None) is None or
uuidToObject(self.data.get('uuid')) is None)
@view.memoize
def gallery(self):
return uuidToObject(self.data.get('uuid'))
@view.memoize
def results(self):
gallery = self.gallery()
return gallery.listFolderContents()
def image(self, obj, scale='large'):
"""Return an image scale if the item has an image field.
:param obj: [required]
:type obj: content type object
:param scale: the scale to be used
:type scale: string
"""
scales = obj.restrictedTraverse('@@images')
return scales.scale('image', scale)
|
/sc.photogallery-1.0b3.zip/sc.photogallery-1.0b3/src/sc/photogallery/tiles/photogallery.py
| 0.756447 | 0.217691 |
photogallery.py
|
pypi
|
import logging
import re
from string import Template
from Acquisition import aq_inner
from Acquisition import Explicit
from Products.CMFCore.utils import getToolByName
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets import ViewletBase
from plone.memoize.view import memoize
from plone.registry.interfaces import IRegistry
from zope.component import adapts
from zope.component import getUtility
from zope.contentprovider.interfaces import IContentProvider
from zope.interface import Interface
from zope.interface import implements
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IBrowserView
from ..controlpanel.bookmarks import IProvidersSchema
logger = logging.getLogger(__name__)
class SocialBookmarksBase(object):
"""Abstract Base class for social bookmarks.
"""
def _registry(self):
return getUtility(IRegistry)
def _all_providers(self):
""" Return a dict with all providers """
reg = self._registry()
providers = [reg[k] for k in reg.records.keys()
if k.startswith('sc.social.bookmarks.providers')]
all_providers = dict([(p.get('id'), p) for p in providers])
return all_providers
def settings(self):
reg = self._registry()
controlpanel = reg.forInterface(IProvidersSchema,
prefix="sc.social.bookmarks")
return controlpanel
def _availableProviders(self):
all_providers = self._all_providers()
bookmark_providers = self.settings().bookmark_providers or []
providers = []
for bookmark_id in bookmark_providers:
provider = all_providers.get(bookmark_id, None)
if not provider:
continue
providers.append(provider)
return providers
def providers(self):
"""Returns a list of dicts with providers already
filtered and populated.
"""
context = aq_inner(self.context)
portal_url = getToolByName(context, 'portal_url')()
available = self._availableProviders()
providers = []
# Attributes available to be substituted in the URL
param = {
'title': context.Title(),
'description': context.Description(),
'url': context.absolute_url()
}
# BBB: Instead of using string formatting we moved to string Templates
pattern = re.compile("\%\(([a-zA-Z]*)\)s")
for provider in available:
rendered_provider = provider.copy()
url_tmpl = provider.get('url', '').strip()
logo = provider.get('logo', '')
if not url_tmpl or not logo:
# A provider must have a logo and a share URL
                logger.error('Provider %s has no URL or logo specified', provider['id'])
continue
url_tmpl = re.sub(pattern, r'${\1}', url_tmpl)
rendered_provider['url'] = Template(url_tmpl).safe_substitute(param)
resource_name = provider.get('resource', 'sb_images')
logo = provider.get('logo', '')
rendered_provider['icon_url'] = '%s/++resource++%s/%s' % (
portal_url,
resource_name,
logo
)
providers.append(rendered_provider)
return providers
@property
def icons_only(self):
"""Flag whether to show icons only.
"""
return self.settings().show_icons_only or False
@property
def action_enabled(self):
"""Validates if social bookmarks should be enabled
for this context using an action.
"""
return self.settings().use_as_action or False
@property
def enabled(self):
"""Validates if social bookmarks should be enabled
for this context.
"""
context = aq_inner(self.context)
enabled_portal_types = self.settings().enabled_portal_types or []
return context.portal_type in enabled_portal_types
class SocialBookmarksProvider(Explicit, SocialBookmarksBase):
"""Social Bookmarks Viewlet content provider
"""
implements(IContentProvider)
adapts(Interface, IBrowserRequest, IBrowserView)
template = ViewPageTemplateFile(u'templates/bookmarks.pt')
def __init__(self, context, request, view):
self.__parent__ = view
self.context = context
self.request = request
def update(self):
pass
def render(self):
return self.template(self)
class SocialBookmarksView(BrowserView, SocialBookmarksBase):
"""Social Bookmarks View
"""
class SocialBookmarksViewlet(ViewletBase, SocialBookmarksBase):
"""Social Bookmarks Viewlet
"""
template = ViewPageTemplateFile('templates/bookmarks_viewlet.pt')
def render(self):
return self.template(self)
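# Editor's note: a minimal, self-contained sketch (not part of the original
# module) of the "%(name)s" -> "${name}" conversion performed in providers()
# above, using only the standard library; the URL template is a made-up
# example value:
#
#     import re
#     from string import Template
#
#     url_tmpl = 'http://example.org/share?u=%(url)s&t=%(title)s'
#     pattern = re.compile(r"\%\(([a-zA-Z]*)\)s")
#     url_tmpl = re.sub(pattern, r'${\1}', url_tmpl)
#     # -> 'http://example.org/share?u=${url}&t=${title}'
#     Template(url_tmpl).safe_substitute({'url': 'http://site', 'title': 'Hi'})
#     # -> 'http://example.org/share?u=http://site&t=Hi'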
|
/sc.social.bookmarks-1.3.2.tar.gz/sc.social.bookmarks-1.3.2/src/sc/social/bookmarks/browser/common.py
| 0.598664 | 0.15393 |
common.py
|
pypi
|
from plone.autoform import directives as form
from plone.formwidget.namedfile.widget import NamedImageFieldWidget
from plone.supermodel import model
from sc.social.like import LikeMessageFactory as _
from sc.social.like.config import DEFAULT_ENABLED_CONTENT_TYPES
from sc.social.like.config import DEFAULT_PLUGINS_ENABLED
from sc.social.like.utils import validate_canonical_domain
from sc.social.like.utils import validate_og_fallback_image
from sc.social.like.vocabularies import FacebookButtonsVocabulary
from sc.social.like.vocabularies import FacebookVerbsVocabulary
from sc.social.like.vocabularies import TypeButtonVocabulary
from zope import schema
from zope.interface import Interface
# BBB: for compatibility with installations made before 2.5.0
import sys
sys.modules['sc.social.like.interfaces.socialikes'] = sys.modules[__name__]
class ISocialLikeLayer(Interface):
"""A layer specific for this add-on product."""
class ISocialLikes(Interface):
"""
"""
class IHelperView(Interface):
"""Social Like configuration helpers."""
def configs():
"""Social Like configuration."""
def enabled_portal_types():
"""Portal Types that will display our viewlet."""
def plugins_enabled():
"""List of plugins enabled."""
def typebutton():
"""Button to be used."""
def enabled(view):
"""Validates if the viewlet should be enabled for this context."""
def available_plugins():
"""Return available plugins."""
def plugins():
"""Return enabled plugins."""
def view_template_id():
"""View or template id for this context."""
class ISocialLikeSettings(model.Schema):
"""Schema for the control panel form."""
enabled_portal_types = schema.Tuple(
title=_(u'Content types'),
description=_(
u'help_portal_types',
default=u'Please select content types in which the '
u'viewlet will be applied.',
),
required=True,
default=DEFAULT_ENABLED_CONTENT_TYPES,
value_type=schema.Choice(
vocabulary='plone.app.vocabularies.ReallyUserFriendlyTypes')
)
plugins_enabled = schema.Tuple(
title=_(u'Plugins'),
description=_(
u'help_enabled_plugins',
default=u'Please select which plugins will be used',
),
required=False,
default=DEFAULT_PLUGINS_ENABLED,
value_type=schema.Choice(vocabulary='sc.social.likes.plugins')
)
validation_enabled = schema.Bool(
title=_(u'Enable content validation?'),
description=_(
u'help_validation_enabled',
default=u'Enables validation to check if content follows social networks sharing best practices. '
u'The validation includes title, description and lead image fields. '
u'This feature is only available for Dexterity-based content types.'
),
default=True,
)
typebutton = schema.Choice(
title=_(u'Button style'),
description=_(
u'help_selected_buttons',
default=u'Choose your button style.',
),
required=True,
default=u'horizontal',
vocabulary=TypeButtonVocabulary,
)
do_not_track = schema.Bool(
title=_(u'Do not track users'),
description=_(
u'help_do_not_track',
default=u'If enabled, the site will not provide advanced sharing '
                    u'widgets; instead, simple links will be used.\n'
                    u'This will limit user experience and features '
u'(like the share count) but will enhance users privacy: '
u'no 3rd party cookies will be sent to users.'
),
default=False,
)
model.fieldset(
'open_graph',
label=u'Open Graph',
fields=[
'canonical_domain',
'fallback_image',
],
)
canonical_domain = schema.URI(
title=_(u'Canonical domain'),
description=_(
u'help_canonical_domain',
default=u'The canonical domain will be used to construct the canonical URL (<code>og:url</code> property) of portal objects. '
u'Use the domain name of your site (e.g. <strong>http://www.example.org</strong> or <strong>https://www.example.org</strong>). '
u'Facebook will use the canonical URL to ensure that all actions such as likes and shares aggregate at the same URL rather than spreading across multiple versions of a page. '
u'Check <a href="https://pypi.python.org/pypi/sc.social.like">package documentation</a> for more information on how to use this feature.'
),
required=True,
constraint=validate_canonical_domain,
)
form.widget('fallback_image', NamedImageFieldWidget)
fallback_image = schema.ASCII(
title=_(u'Fallback image'),
description=_(
u'help_fallback_image',
default=u'Content without a lead image will use this image as fallback (<code>og:image</code> property). '
u'There could be a delay of up to 2 minutes when replacing this image.'
),
required=False,
constraint=validate_og_fallback_image,
)
model.fieldset(
'facebook',
label=u'Facebook',
fields=[
'fbaction',
'facebook_username',
'facebook_app_id',
'fbbuttons',
'fbshowlikes',
'facebook_prefetch_enabled'
],
)
fbaction = schema.Choice(
title=_(u'Verb to display'),
description=_(
u'help_verb_display',
default=u'The verb to display in the Facebook button. '
u'Currently only "like" and "recommend" are '
u'supported.',
),
required=True,
default=u'like',
vocabulary=FacebookVerbsVocabulary,
)
facebook_username = schema.ASCIILine(
title=_(u'Admins'),
description=_(
u'help_admins',
            default=u'A comma-separated list of the Facebook IDs of '
                    u'page administrators.',
),
required=False,
default='',
)
facebook_app_id = schema.ASCIILine(
title=_(u'Application ID'),
description=_(
u'help_appid',
default=u'A Facebook Platform application ID.\n'
u'This is required when \"Do not track users\" option is enabled.',
),
required=False,
default='',
)
fbbuttons = schema.Tuple(
title=_(u'Facebook buttons'),
description=_(
u'help_fbbuttons',
default=u'Select buttons to be shown',
),
value_type=schema.Choice(vocabulary=FacebookButtonsVocabulary),
required=True,
default=(u'Like', ),
)
fbshowlikes = schema.Bool(
title=_(u'Show number of likes'),
description=_(
u'help_show_likes',
default=u'If enabled, the Facebook button will show the number of '
u'Facebook users who have already liked this page.'
),
default=True,
)
facebook_prefetch_enabled = schema.Bool(
title=_(u'Enable Facebook prefetch?'),
description=_(
u'help_facebook_prefetch_enabled',
default=u'If enabled, an event is triggered to make Facebook '
u'crawler scrape and cache metadata every time a new '
u'piece content is published and every time published '
u'content is edited. '
u'This will keep the metadata updated on Facebook always.'
),
default=False,
)
model.fieldset(
'twitter', label=u'Twitter', fields=['twitter_username'])
twitter_username = schema.ASCIILine(
title=_(u'Twitter nick'),
description=_(
u'help_your_twitter_nick',
default=u'Enter your twitter nick. eg. simplesconsultoria',
),
required=False,
default='',
)
|
/sc.social.like-2.13b3.zip/sc.social.like-2.13b3/sc/social/like/interfaces.py
| 0.620966 | 0.174938 |
interfaces.py
|
pypi
|
from DateTime import DateTime
from plone import api
from plone.supermodel import model
from sc.social.like import LikeMessageFactory as _
from sc.social.like.behaviors import ISocialMedia
from sc.social.like.interfaces import ISocialLikeSettings
from sc.social.like.logger import logger
from sc.social.like.utils import get_valid_objects
from sc.social.like.utils import validate_canonical_domain
from z3c.form import button
from z3c.form import field
from z3c.form import form
from zope import schema
class ICanonicalURLUpdater(model.Schema):
"""A form to update the canonical url of portal objects based on a date."""
old_canonical_domain = schema.URI(
title=_(u'Old canonical domain'),
description=_(
u'help_canonical_domain',
default=u'The canonical domain will be used to construct the canonical URL (<code>og:url</code> property) of portal objects. '
u'Use the domain name of your site (e.g. <strong>http://www.example.org</strong> or <strong>https://www.example.org</strong>). '
u'Facebook will use the canonical URL to ensure that all actions such as likes and shares aggregate at the same URL rather than spreading across multiple versions of a page. '
u'Check <a href="https://pypi.python.org/pypi/sc.social.like">package documentation</a> for more information on how to use this feature.'
),
required=True,
constraint=validate_canonical_domain,
)
published_before = schema.Date(
title=_(u'Date'),
description=_(
u'help_published_before',
default=u'Objects published before this date will be updated using the canonical domain defined in this form; '
u'objects published on or after this date will be updated using the canonical domain defined in the control panel configlet.'
),
required=True,
)
class CanonicalURLUpdater(form.Form):
"""A form to update the canonical url of portal objects based on a date."""
fields = field.Fields(ICanonicalURLUpdater)
label = _(u'Canonical URL updater form')
description = _(
u'This form will update the canonical URL of all Dexterity-based '
u'objects in the catalog providing the Social Media behavior.'
)
ignoreContext = True
@property
def canonical_domain(self):
return api.portal.get_registry_record(name='canonical_domain', interface=ISocialLikeSettings)
def update(self):
super(CanonicalURLUpdater, self).update()
# show error message if no canonical domain has been defined in the configlet
if not self.canonical_domain:
msg = _(u'Canonical domain has not been defined in the control panel configlet.')
api.portal.show_message(message=msg, request=self.request, type='error')
# disable the green bar and the portlet columns
self.request.set('disable_border', 1)
self.request.set('disable_plone.rightcolumn', 1)
self.request.set('disable_plone.leftcolumn', 1)
@property
def update_button_enabled(self):
"""Condition to be used to display the "Update" button."""
return self.canonical_domain is not None
@button.buttonAndHandler(_('Update'), name='update', condition=lambda form: form.update_button_enabled)
def handle_update(self, action):
data, errors = self.extractData()
if errors:
self.status = _(u'Please correct the errors.')
return
self.update_canonical_url(data)
@button.buttonAndHandler(_(u'label_cancel', default=u'Cancel'), name='cancel')
def handle_cancel(self, action):
self.request.response.redirect(self.context.absolute_url())
def update_canonical_url(self, data):
"""Update the canonical URL of all objects in the catalog
providing the ISocialMedia behavior.
Objects published before the specified date will be updated
using the canonical domain defined in this form; objects
published on or after that date will be updated using the
canonical domain defined in the control panel configlet.
"""
old_canonical_domain = data['old_canonical_domain']
new_canonical_domain = self.canonical_domain
published_before = data['published_before'].isoformat()
results = api.content.find(
object_provides=ISocialMedia.__identifier__,
review_state='published',
)
total = len(results)
logger.info(u'{0} objects will have their canonical URL updated'.format(total))
for obj in get_valid_objects(results):
# FIXME: we're currently ignoring the Plone site id
# https://github.com/collective/sc.social.like/issues/119
path = '/'.join(obj.getPhysicalPath()[2:])
if obj.effective_date < DateTime(published_before):
# use the canonical domain defined in this form
obj.canonical_url = '{0}/{1}'.format(old_canonical_domain, path)
elif not obj.canonical_url:
# use the canonical domain defined in the configlet
obj.canonical_url = '{0}/{1}'.format(new_canonical_domain, path)
logger.info(u'Done.')
self.status = u'Update complete; {0} items processed.'.format(total)
|
/sc.social.like-2.13b3.zip/sc.social.like-2.13b3/sc/social/like/browser/canonicalurl.py
| 0.855821 | 0.246567 |
canonicalurl.py
|
pypi
|
from Acquisition import aq_inner
from plone import api
from plone.app.layout.globals.interfaces import IViewView
from plone.formwidget.namedfile.converter import b64decode_file
from plone.memoize.view import memoize
from plone.memoize.view import memoize_contextless
from plone.namedfile.browser import Download
from plone.namedfile.file import NamedImage
from plone.registry.interfaces import IRegistry
from Products.Five import BrowserView
from sc.social.like.interfaces import IHelperView
from sc.social.like.interfaces import ISocialLikeSettings
from sc.social.like.plugins import IPlugin
from zope.component import getUtilitiesFor
from zope.component import getUtility
from zope.interface import implementer
@implementer(IHelperView)
class HelperView(BrowserView):
"""Social Like configuration helpers."""
def __init__(self, context, request):
self.context = aq_inner(context)
self.request = request
@memoize_contextless
def configs(self):
registry = getUtility(IRegistry)
# do not fail if the upgrade step has not being run
settings = registry.forInterface(ISocialLikeSettings, check=False)
return settings
@memoize_contextless
def enabled_portal_types(self):
configs = self.configs()
return configs.enabled_portal_types or []
@memoize_contextless
def plugins_enabled(self):
configs = self.configs()
return configs.plugins_enabled or []
@memoize
def enabled(self, view=None):
if view and not IViewView.providedBy(view):
return False
enabled_portal_types = self.enabled_portal_types()
return self.context.portal_type in enabled_portal_types
@memoize_contextless
def available_plugins(self):
registered = dict(getUtilitiesFor(IPlugin))
return registered
@memoize_contextless
def plugins(self):
available = self.available_plugins()
enabled = self.plugins_enabled()
plugins = []
for plugin_id in enabled:
plugin = available.get(plugin_id, None)
if plugin:
plugins.append(plugin)
return plugins
@memoize_contextless
def typebutton(self):
configs = self.configs()
return configs.typebutton
@memoize
def view_template_id(self):
context_state = api.content.get_view(
'plone_context_state', self.context, self.request)
return context_state.view_template_id()
class FallBackImageView(Download):
"""Helper view to return the fallback image."""
def __init__(self, context, request):
super(FallBackImageView, self).__init__(context, request)
record = ISocialLikeSettings.__identifier__ + '.fallback_image'
fallback_image = api.portal.get_registry_record(record, default=None)
if fallback_image is not None:
# set fallback image data for download
filename, data = b64decode_file(fallback_image)
data = NamedImage(data=data, filename=filename)
self.filename, self.data = filename, data
# enable image caching for 2 minutes
self.request.RESPONSE.setHeader('Cache-Control', 'max-age=120, public')
else:
# resource no longer available
self.data = NamedImage(data='')
self.request.RESPONSE.setStatus(410) # Gone
def _getFile(self):
return self.data
|
/sc.social.like-2.13b3.zip/sc.social.like-2.13b3/sc/social/like/browser/helper.py
| 0.733738 | 0.170111 |
helper.py
|
pypi
|
from zope import schema
from zope.component import getMultiAdapter
from zope.formlib import form
from zope.interface import implements
from plone.app.portlets.portlets import base
from plone.memoize import ram
from plone.memoize.compress import xhtml_compress
from plone.memoize.instance import memoize
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.cache import get_language
from Products.CMFCore.utils import getToolByName
from Acquisition import aq_inner
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from sc.social.viewcounter import MessageFactory as _
class IViewCounterPortlet(IPortletDataProvider):
name = schema.TextLine(title=_(u'Portlet title'),
description=_(u''),
required=True,
default=u'Most Accessed')
count = schema.Int(title=_(u'Number of items per list'),
description=_(u'How many items to list.'),
required=True,
default=5)
showLastHour = schema.Bool(
title=_(u"Display last hour's top contents."),
description=_(u"If selected the most accessed content from the last hour will be shown."),
default=False,
required=False)
showLastDay = schema.Bool(
title=_(u"Display last day's top contents."),
description=_(u"If selected the most accessed content from the last day will be shown."),
default=True,
required=False)
showLastWeek = schema.Bool(
title=_(u"Display last week's top contents."),
description=_(u"If selected the most accessed content from the last week will be shown."),
default=False,
required=False)
showLastMonth = schema.Bool(
title=_(u"Display last month's top contents."),
description=_(u"If selected the most accessed content from the last month will be shown."),
default=False,
required=False)
class Assignment(base.Assignment):
implements(IViewCounterPortlet)
def __init__(self,
name=u'Most Accessed',
count=5,
showLastHour=True,
showLastDay=False,
showLastWeek=False,
showLastMonth=False):
self.name = name
self.count = count
self.showLastHour = showLastHour
self.showLastDay = showLastDay
self.showLastWeek = showLastWeek
self.showLastMonth = showLastMonth
@property
def title(self):
return _(u'Most accessed')
def _render_cachekey(fun, self):
fingerprint = ''.join([d[0] for d in self._data()])
return "".join((
getToolByName(aq_inner(self.context), 'portal_url')(),
get_language(aq_inner(self.context), self.request),
str(self.anonymous),
self.manager.__name__,
self.data.__name__,
fingerprint))
class Renderer(base.Renderer):
render = ViewPageTemplateFile('portlet.pt')
def __init__(self, *args):
base.Renderer.__init__(self, *args)
context = aq_inner(self.context)
portal_state = getMultiAdapter((context, self.request), name=u'plone_portal_state')
self.anonymous = portal_state.anonymous()
self.portal_url = portal_state.portal_url()
self.reports = context.restrictedTraverse('@@vc_reports')
plone_tools = getMultiAdapter((context, self.request), name=u'plone_tools')
self.catalog = plone_tools.catalog()
@property
def available(self):
return 1
def options(self):
listOptions = []
data = self.viewcounter()
if self.data.showLastHour and data.get('lastHour',None):
listOptions.append(('lastHour',_(u"Last Hour")))
if self.data.showLastDay and data.get('lastDay',None):
listOptions.append(('lastDay',_(u"Last Day")))
if self.data.showLastWeek and data.get('lastWeek',None):
listOptions.append(('lastWeek',_(u"Last Week")))
if self.data.showLastMonth and data.get('lastMonth',None):
listOptions.append(('lastMonth',_(u"Last Month")))
return listOptions
def viewcounter(self):
return self._data()
@memoize
def _data(self):
count = self.data.count
lastHour = self.data.showLastHour and tuple(self.reports.viewsLastHour()[:count])
lastDay = self.data.showLastDay and tuple(self.reports.viewsLastDay()[:count])
lastWeek = self.data.showLastWeek and tuple(self.reports.viewsLastWeek()[:count])
lastMonth = self.data.showLastMonth and tuple(self.reports.viewsLastMonth()[:count])
return {'lastHour':lastHour,
'lastDay':lastDay,
'lastWeek':lastWeek,
'lastMonth':lastMonth,}
class AddForm(base.AddForm):
form_fields = form.Fields(IViewCounterPortlet)
label = _(u"Add a Most Accessed Contents Portlet")
description = _(u"This portlet displays the list of most accessed content.")
def create(self, data):
return Assignment(name=u'Most Accessed',
count=data.get('count', 5),
showLastHour=data.get('showLastHour', True),
showLastDay=data.get('showLastDay', False),
showLastWeek=data.get('showLastWeek', False),
showLastMonth=data.get('showLastMonth', False))
class EditForm(base.EditForm):
form_fields = form.Fields(IViewCounterPortlet)
label = _(u"Edit Most Accessed Contents Portlet")
description = _(u"This portlet displays the list of most accessed content.")
|
/sc.social.viewcounter-1.0.7.tar.gz/sc.social.viewcounter-1.0.7/sc/social/viewcounter/browser/portlet.py
| 0.705481 | 0.264258 |
portlet.py
|
pypi
|
__revision__ = "src/engine/SCons/Memoize.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Memoizer
A metaclass implementation to count hits and misses of the computed
values that various methods cache in memory.
Use of this modules assumes that wrapped methods be coded to cache their
values in a consistent way. Here is an example of wrapping a method
that returns a computed value, with no input parameters:
memoizer_counters = [] # Memoization
memoizer_counters.append(SCons.Memoize.CountValue('foo')) # Memoization
def foo(self):
try: # Memoization
return self._memo['foo'] # Memoization
except KeyError: # Memoization
pass # Memoization
result = self.compute_foo_value()
self._memo['foo'] = result # Memoization
return result
Here is an example of wrapping a method that will return different values
based on one or more input arguments:
def _bar_key(self, argument): # Memoization
return argument # Memoization
memoizer_counters.append(SCons.Memoize.CountDict('bar', _bar_key)) # Memoization
def bar(self, argument):
memo_key = argument # Memoization
try: # Memoization
memo_dict = self._memo['bar'] # Memoization
except KeyError: # Memoization
memo_dict = {} # Memoization
            self._memo['bar'] = memo_dict # Memoization
else: # Memoization
try: # Memoization
return memo_dict[memo_key] # Memoization
except KeyError: # Memoization
pass # Memoization
result = self.compute_bar_value(argument)
memo_dict[memo_key] = result # Memoization
return result
At one point we avoided replicating this sort of logic in all the methods
by putting it right into this module, but we've moved away from that at
present (see the "Historical Note," below.).
Deciding what to cache is tricky, because different configurations
can have radically different performance tradeoffs, and because the
tradeoffs involved are often so non-obvious. Consequently, deciding
whether or not to cache a given method will likely be more of an art than
a science, but should still be based on available data from this module.
Here are some VERY GENERAL guidelines about deciding whether or not to
cache return values from a method that's being called a lot:
-- The first question to ask is, "Can we change the calling code
so this method isn't called so often?" Sometimes this can be
done by changing the algorithm. Sometimes the *caller* should
be memoized, not the method you're looking at.
-- The memoized function should be timed with multiple configurations
to make sure it doesn't inadvertently slow down some other
configuration.
-- When memoizing values based on a dictionary key composed of
input arguments, you don't need to use all of the arguments
if some of them don't affect the return values.
Historical Note: The initial Memoizer implementation actually handled
the caching of values for the wrapped methods, based on a set of generic
algorithms for computing hashable values based on the method's arguments.
This collected caching logic nicely, but had two drawbacks:
Running arguments through a generic key-conversion mechanism is slower
(and less flexible) than just coding these things directly. Since the
methods that need memoized values are generally performance-critical,
slowing them down in order to collect the logic isn't the right
tradeoff.
Use of the memoizer really obscured what was being called, because
all the memoized methods were wrapped with re-used generic methods.
This made it more difficult, for example, to use the Python profiler
to figure out how to optimize the underlying methods.
"""
import types
# A flag controlling whether or not we actually use memoization.
use_memoizer = None
CounterList = []
class Counter(object):
"""
Base class for counting memoization hits and misses.
We expect that the metaclass initialization will have filled in
the .name attribute that represents the name of the function
being counted.
"""
def __init__(self, method_name):
"""
"""
self.method_name = method_name
self.hit = 0
self.miss = 0
CounterList.append(self)
def display(self):
fmt = " %7d hits %7d misses %s()"
print fmt % (self.hit, self.miss, self.name)
def __cmp__(self, other):
try:
return cmp(self.name, other.name)
except AttributeError:
return 0
class CountValue(Counter):
"""
A counter class for simple, atomic memoized values.
A CountValue object should be instantiated in a class for each of
the class's methods that memoizes its return value by simply storing
the return value in its _memo dictionary.
We expect that the metaclass initialization will fill in the
.underlying_method attribute with the method that we're wrapping.
We then call the underlying_method method after counting whether
its memoized value has already been set (a hit) or not (a miss).
"""
def __call__(self, *args, **kw):
obj = args[0]
if self.method_name in obj._memo:
self.hit = self.hit + 1
else:
self.miss = self.miss + 1
return self.underlying_method(*args, **kw)
class CountDict(Counter):
"""
A counter class for memoized values stored in a dictionary, with
keys based on the method's input arguments.
A CountDict object is instantiated in a class for each of the
class's methods that memoizes its return value in a dictionary,
indexed by some key that can be computed from one or more of
its input arguments.
We expect that the metaclass initialization will fill in the
.underlying_method attribute with the method that we're wrapping.
We then call the underlying_method method after counting whether the
computed key value is already present in the memoization dictionary
(a hit) or not (a miss).
"""
def __init__(self, method_name, keymaker):
"""
"""
Counter.__init__(self, method_name)
self.keymaker = keymaker
def __call__(self, *args, **kw):
obj = args[0]
try:
memo_dict = obj._memo[self.method_name]
except KeyError:
self.miss = self.miss + 1
else:
key = self.keymaker(*args, **kw)
if key in memo_dict:
self.hit = self.hit + 1
else:
self.miss = self.miss + 1
return self.underlying_method(*args, **kw)
class Memoizer(object):
"""Object which performs caching of method calls for its 'primary'
instance."""
def __init__(self):
pass
def Dump(title=None):
if title:
print title
CounterList.sort()
for counter in CounterList:
counter.display()
class Memoized_Metaclass(type):
def __init__(cls, name, bases, cls_dict):
super(Memoized_Metaclass, cls).__init__(name, bases, cls_dict)
for counter in cls_dict.get('memoizer_counters', []):
method_name = counter.method_name
counter.name = cls.__name__ + '.' + method_name
counter.underlying_method = cls_dict[method_name]
replacement_method = types.MethodType(counter, None, cls)
setattr(cls, method_name, replacement_method)
def EnableMemoization():
global use_memoizer
use_memoizer = 1
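# Editor's note: a minimal usage sketch (not part of the original file) of how
# a Python 2 class opts in to hit/miss counting with this module; the class,
# its method and the cached value are made-up names:
#
#     import SCons.Memoize
#
#     class Thing(object):
#         __metaclass__ = SCons.Memoize.Memoized_Metaclass
#
#         memoizer_counters = []                                # Memoization
#         memoizer_counters.append(SCons.Memoize.CountValue('answer'))
#
#         def __init__(self):
#             self._memo = {}
#
#         def answer(self):
#             try:                                              # Memoization
#                 return self._memo['answer']                   # Memoization
#             except KeyError:                                  # Memoization
#                 pass                                          # Memoization
#             result = 42               # the expensive computation goes here
#             self._memo['answer'] = result                     # Memoization
#             return result
#
#     t = Thing()
#     t.answer(); t.answer()            # first call is a miss, second a hit
#     SCons.Memoize.Dump('Memoizer counters:')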
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
/sc0ns-2.2.0-1.zip/sc0ns-2.2.0-1/SCons/Memoize.py
| 0.738292 | 0.359055 |
Memoize.py
|
pypi
|
__revision__ = "src/engine/SCons/Variables/EnumVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__all__ = ['EnumVariable',]
import SCons.Errors
def _validator(key, val, env, vals):
if not val in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s. Valid values are: %s' % (key, val, vals))
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0):
"""
    The input parameters describe an option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Variables.Add().
'key' and 'default' are the values to be passed on to Variables.Add().
    'help' will have the allowed values appended to it automatically.
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter)
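# Editor's note: a short usage sketch (not part of the original file); the
# variable name and its values are made-up examples, while Variables(),
# Environment() and Append() are the standard SCons construction API:
#
#     vars = Variables()
#     vars.Add(EnumVariable('debug', 'debug output and symbols', 'no',
#                           allowed_values=('yes', 'no', 'full'),
#                           map={'none': 'no'}, ignorecase=2))
#     env = Environment(variables=vars)
#     if env['debug'] == 'full':
#         env.Append(CCFLAGS=['-g3'])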
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
/sc0ns-2.2.0-1.zip/sc0ns-2.2.0-1/SCons/Variables/EnumVariable.py
| 0.650689 | 0.324396 |
EnumVariable.py
|
pypi
|
__doc__ = """
Textfile/Substfile builder for SCons.
Create file 'target' which typically is a textfile. The 'source'
may be any combination of strings, Nodes, or lists of same. A
'linesep' will be put between any part written and defaults to
os.linesep.
The only difference between the Textfile builder and the Substfile
builder is that strings are converted to Value() nodes for the
former and File() nodes for the latter. To insert files in the
former or strings in the latter, wrap them in a File() or Value(),
respectively.
The values of SUBST_DICT first have any construction variables
expanded (its keys are not expanded). If a value of SUBST_DICT is
a python callable function, it is called and the result is expanded
as the value. Values are substituted in a "random" order; if any
substitution could be further expanded by another subsitition, it
is unpredictible whether the expansion will occur.
"""
__revision__ = "src/engine/SCons/Tool/textfile.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons
import os
import re
from SCons.Node import Node
from SCons.Node.Python import Value
from SCons.Util import is_String, is_Sequence, is_Dict
def _do_subst(node, subs):
"""
Fetch the node contents and replace all instances of the keys with
their values. For example, if subs is
{'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
then all instances of %VERSION% in the file will be replaced with
1.2345 and so forth.
"""
contents = node.get_text_contents()
if not subs: return contents
for (k,v) in subs:
contents = re.sub(k, v, contents)
return contents
def _action(target, source, env):
# prepare the line separator
linesep = env['LINESEPARATOR']
if linesep is None:
linesep = os.linesep
elif is_String(linesep):
pass
elif isinstance(linesep, Value):
linesep = linesep.get_text_contents()
else:
raise SCons.Errors.UserError(
'unexpected type/class for LINESEPARATOR: %s'
% repr(linesep), None)
# create a dictionary to use for the substitutions
if 'SUBST_DICT' not in env:
subs = None # no substitutions
else:
d = env['SUBST_DICT']
if is_Dict(d):
d = list(d.items())
elif is_Sequence(d):
pass
else:
raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
subs = []
for (k,v) in d:
if callable(v):
v = v()
if is_String(v):
v = env.subst(v)
else:
v = str(v)
subs.append((k,v))
# write the file
try:
fd = open(target[0].get_path(), "wb")
except (OSError,IOError), e:
raise SCons.Errors.UserError("Can't write target file %s" % target[0])
# separate lines by 'linesep' only if linesep is not empty
lsep = None
for s in source:
if lsep: fd.write(lsep)
fd.write(_do_subst(s, subs))
lsep = linesep
fd.close()
def _strfunc(target, source, env):
return "Creating '%s'" % target[0]
def _convert_list_R(newlist, sources):
for elem in sources:
if is_Sequence(elem):
_convert_list_R(newlist, elem)
elif isinstance(elem, Node):
newlist.append(elem)
else:
newlist.append(Value(elem))
def _convert_list(target, source, env):
if len(target) != 1:
raise SCons.Errors.UserError("Only one target file allowed")
newlist = []
_convert_list_R(newlist, source)
return target, newlist
_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']
_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
_text_builder = SCons.Builder.Builder(
action = SCons.Action.Action(_action, _strfunc, varlist = _text_varlist),
source_factory = Value,
emitter = _convert_list,
prefix = '$TEXTFILEPREFIX',
suffix = '$TEXTFILESUFFIX',
)
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
_subst_builder = SCons.Builder.Builder(
action = SCons.Action.Action(_action, _strfunc, varlist = _subst_varlist),
source_factory = SCons.Node.FS.File,
emitter = _convert_list,
prefix = '$SUBSTFILEPREFIX',
suffix = '$SUBSTFILESUFFIX',
src_suffix = ['.in'],
)
def generate(env):
env['LINESEPARATOR'] = os.linesep
env['BUILDERS']['Textfile'] = _text_builder
env['TEXTFILEPREFIX'] = ''
env['TEXTFILESUFFIX'] = '.txt'
env['BUILDERS']['Substfile'] = _subst_builder
env['SUBSTFILEPREFIX'] = ''
env['SUBSTFILESUFFIX'] = ''
def exists(env):
return 1
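# Editor's note: a brief SConstruct-level usage sketch (not part of the
# original file); the file names and substitution keys are made-up examples:
#
#     env = Environment(tools=['textfile'])
#
#     # writes "hello" and "world", joined by LINESEPARATOR, to greeting.txt
#     env.Textfile(target='greeting', source=['hello', 'world'])
#
#     # copies version.h.in to version.h, replacing %VERSION% on the way
#     env.Substfile('version.h.in', SUBST_DICT={'%VERSION%': '1.2.3'})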
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
/sc0ns-2.2.0-1.zip/sc0ns-2.2.0-1/SCons/Tool/textfile.py
| 0.447943 | 0.314682 |
textfile.py
|
pypi
|