metadata | text
---|---|
{
"source": "6outtaTen/validateInput",
"score": 5
} |
#### File: 6outtaTen/validateInput/ValidateInput.py
```python
def validate_int(prompt_text="Please enter an integer: ",err_text="Please make sure that you entered an INTEGER"):
"""
    Prompts for input, validates that it is an integer, and returns it; otherwise it warns and re-prompts.
    Takes 2 arguments: prompt_text and err_text - the text shown as the prompt
    and the text displayed when the input is not valid. The defaults are:
    prompt_text = "Please enter an integer: ", err_text = "Please make sure that you entered an INTEGER"
"""
while True:
try:
user_input = int(input(f"{prompt_text}"))
except ValueError:
print(f"{err_text}")
else:
return user_input
def validate_str(prompt_text="Please enter a string: ",err_text="Please make sure that you entered a STRING"):
"""
    Prompts for input, validates that it contains no digits, and returns it; otherwise it warns and re-prompts.
    Takes 2 arguments: prompt_text and err_text - the text shown as the prompt
    and the text displayed when the input is not valid. The defaults are:
    prompt_text = "Please enter a string: ", err_text = "Please make sure that you entered a STRING"
"""
while True:
user_input = input(f"{prompt_text}")
if any(x.isdigit() for x in user_input):
print(f"{err_text}")
continue
else:
return user_input
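
# Minimal usage sketch (illustrative, not part of the original module): both helpers keep
# re-prompting until the input passes validation, so they can be called directly.
if __name__ == "__main__":
    age = validate_int("How old are you? ")
    name = validate_str("What is your name? ")
    print(f"{name} is {age} years old.")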
``` |
{
"source": "6paklata/tensorflow",
"score": 2
} |
#### File: keras/distribute/mnist_multi_worker.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras import utils
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
NUM_CLASSES = 10
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_enum('distribution_strategy', None, ['multi_worker_mirrored'],
'The Distribution Strategy to use.')
flags.DEFINE_string('model_dir', None, 'Directory for TensorBoard/Checkpoint.')
# TODO(rchao): Use multi_worker_util.maybe_shard_dataset() once that is provided
# there.
def maybe_shard_dataset(dataset):
"""Shard the dataset if running in multi-node environment."""
cluster_resolver = TFConfigClusterResolver()
cluster_spec = cluster_resolver.cluster_spec().as_dict()
if cluster_spec:
dataset = dataset.shard(
multi_worker_util.worker_count(cluster_spec,
cluster_resolver.task_type),
multi_worker_util.id_in_cluster(
cluster_spec, cluster_resolver.task_type, cluster_resolver.task_id))
return dataset
def get_data_shape():
# input image dimensions
img_rows, img_cols = 28, 28
if backend.image_data_format() == 'channels_first':
return 1, img_rows, img_cols
else:
return img_rows, img_cols, 1
def get_input_datasets(use_bfloat16=False):
"""Downloads the MNIST dataset and creates train and eval dataset objects.
Args:
use_bfloat16: Boolean to determine if input should be cast to bfloat16
Returns:
Train dataset and eval dataset. The dataset doesn't include batch dim.
"""
cast_dtype = dtypes.bfloat16 if use_bfloat16 else dtypes.float32
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
train_data_shape = (x_train.shape[0],) + get_data_shape()
test_data_shape = (x_test.shape[0],) + get_data_shape()
  # get_data_shape() already accounts for image_data_format, so a single reshape
  # covers both channels_first and channels_last.
  x_train = x_train.reshape(train_data_shape)
  x_test = x_test.reshape(test_data_shape)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = utils.to_categorical(y_train, NUM_CLASSES)
y_test = utils.to_categorical(y_test, NUM_CLASSES)
# train dataset
train_ds = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
# TODO(rchao): Remove maybe_shard_dataset() once auto-sharding is done.
train_ds = maybe_shard_dataset(train_ds)
train_ds = train_ds.repeat()
train_ds = train_ds.map(lambda x, y: (math_ops.cast(x, cast_dtype), y))
train_ds = train_ds.batch(64, drop_remainder=True)
# eval dataset
eval_ds = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
# TODO(rchao): Remove maybe_shard_dataset() once auto-sharding is done.
eval_ds = maybe_shard_dataset(eval_ds)
eval_ds = eval_ds.repeat()
eval_ds = eval_ds.map(lambda x, y: (math_ops.cast(x, cast_dtype), y))
eval_ds = eval_ds.batch(64, drop_remainder=True)
return train_ds, eval_ds
def get_model(index=0):
"""Builds a Sequential CNN model to recognize MNIST digits.
Args:
index: The worker index. Defaults to 0.
Returns:
a CNN Keras model used for MNIST
"""
# Define a CNN model to recognize MNIST digits.
model = keras.models.Sequential()
model.add(
keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
input_shape=get_data_shape()))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.25, name='dropout_worker%s_first' % index))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Dropout(0.5, name='dropout_worker%s_second' % index))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
return model
def main(_):
if flags.FLAGS.enable_eager:
ops.enable_eager_execution()
logging.info('Eager execution enabled for MNIST Multi-Worker.')
else:
logging.info('Eager execution not enabled for MNIST Multi-Worker.')
# Build the train and eval datasets from the MNIST data.
train_ds, eval_ds = get_input_datasets()
if flags.FLAGS.distribution_strategy == 'multi_worker_mirrored':
# MultiWorkerMirroredStrategy for multi-worker distributed MNIST training.
strategy = collective_strategy.CollectiveAllReduceStrategy()
else:
raise ValueError('Only `multi_worker_mirrored` is supported strategy '
'in Keras MNIST example at this time. Strategy passed '
'in is %s' % flags.FLAGS.distribution_strategy)
# Create and compile the model under Distribution strategy scope.
# `fit`, `evaluate` and `predict` will be distributed based on the strategy
# model was compiled with.
with strategy.scope():
model = get_model()
optimizer = rmsprop.RMSProp(learning_rate=0.001)
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=optimizer,
metrics=['accuracy'])
# Train the model with the train dataset.
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir=flags.FLAGS.model_dir)
model.fit(
x=train_ds,
epochs=20,
steps_per_epoch=468,
callbacks=[tensorboard_callback])
# Evaluate the model with the eval dataset.
score = model.evaluate(eval_ds, steps=10, verbose=0)
logging.info('Test loss:{}'.format(score[0]))
logging.info('Test accuracy:{}'.format(score[1]))
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run()
```
#### File: tools/compatibility/tf_upgrade_v2_safety_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
class TfUpgradeV2SafetyTest(test_util.TensorFlowTestCase):
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2_safety.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testContribWarning(self):
text = "tf.contrib.foo()"
_, report, _, _ = self._upgrade(text)
expected_info = "tf.contrib will not be distributed"
self.assertIn(expected_info, report)
if __name__ == "__main__":
test_lib.main()
``` |
{
"source": "6r6/dsi-autofill",
"score": 3
} |
#### File: 6r6/dsi-autofill/tasker.py
```python
from dsi import DsiParser
from opt import Opt
class WiresParser:
def __init__(self, xls_path, dsi_path):
self.xls_path = xls_path
self.dsi = DsiParser(dsi_path)
def wire_to_dict(self):
wire_list = self.dsi.get_string('wire').split('\n')
wire_lists = self.dsi.get_pure_list(wire_list)
del wire_list
wire_dict = {}
for item in wire_lists:
single_list = item.split(':')
circuit_nbr = single_list[0]
#circuit_option = single_list[1]
circuit_code = single_list[6]
wire_dict[str(circuit_nbr)] = circuit_code
return wire_dict
def main():
print('Harness Option Filter @.@')
xls_path = input('Please input XLS(X) file path:')
dsi_path = input('Please input DSI path:')
wire_instance = WiresParser(xls_path, dsi_path)
wire_dict = wire_instance.wire_to_dict()
opt_instance = Opt(xls_path)
for item in wire_dict:
option = wire_dict[item]
opt_instance.set_option(item,option)
print('Match Circuit {} to Option {} ...[OK]'.format(item, option))
opt_instance.output()
print('File Saved...[OK]')
if __name__ == '__main__':
main()
``` |
{
"source": "6r6/yz",
"score": 2
} |
#### File: 6r6/yz/index.py
```python
import os
import time
import random
import hmac
import hashlib
import binascii
import base64
import json
import logging
import re
import requests
# (*) Tencent Youtu OCR configuration
app_id = os.environ.get('app_id')
secret_id = os.environ.get('secret_id')
secret_key = os.environ.get('secret_key')
# ServerChan (Server酱) V3 configuration
sckey = os.environ.get('sckey')
logger = logging.getLogger()
class Youtu(object):
def __init__(self, app_id, secret_id, secret_key, qq=10000):
self.app_id = app_id
self.secret_id = secret_id
self.secret_key = secret_key
self.qq = qq
def cal_sig(self):
timestamp = int(time.time())
expired = str(timestamp + 2592000)
rdm = str(random.randint(0, 999999999))
plain_text = 'a={appid}&k={secret_id}&e={expired}&t={timestamp}&r={rdm}&u={qq}&f='
plain_text = plain_text.format(appid=self.app_id,
secret_id=self.secret_id,
timestamp=timestamp,
rdm=rdm, qq=self.qq,
expired=expired)
bin = hmac.new(self.secret_key.encode(), plain_text.encode(), hashlib.sha1).hexdigest()
s = binascii.unhexlify(bin)
s = s + plain_text.encode('ascii')
signature = base64.b64encode(s).rstrip().decode()
return signature
def get_text(self, image_raw):
signature = self.cal_sig()
headers = {'Host': 'api.youtu.qq.com', 'Content-Type': 'text/json', 'Authorization': signature}
data = {'app_id': self.app_id, 'image': ''}
data['image'] = base64.b64encode(image_raw).rstrip().decode('utf-8')
resp = requests.post('https://api.youtu.qq.com/youtu/ocrapi/generalocr',
data=json.dumps(data),
headers=headers)
if 'items' in resp.text:
return resp.content.decode('utf-8')
else:
return '0'
class ScoreQuery:
def __init__(self, xm, id, ksbh):
self.xm = xm
self.id = id
self.ksbh = ksbh
self.cookies = requests.cookies.RequestsCookieJar()
self.headers = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control':'max-age=0',
'Content-Type':'application/x-www-form-urlencoded',
'DNT':'1',
'Host':'yz.chsi.com.cn',
'Origin':'https://yz.chsi.com.cn',
'Referer':'https://yz.chsi.com.cn/apply/cjcx/',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.37 (KHTML, like Gecko) Chrome/70.0.3537.110 Safari/537.37'
}
def get_cookies(self):
base_url = 'https://yz.chsi.com.cn/apply/cjcx'
session = requests.session()
base_resp = session.get(base_url, headers=self.headers)
self.cookies = base_resp.cookies
def get_checkcode(self):
pic_url = 'https://yz.chsi.com.cn/apply/cjcx/image.do'
resp = requests.get(pic_url, headers=self.headers, cookies=self.cookies).content
ocr = Youtu(app_id, secret_id, secret_key)
try:
resp = ocr.get_text(resp)
resp = eval(resp)
return resp['items'][0]['itemstring']
except:
return '0'
def get_score_page(self):
self.get_cookies()
checkcode = self.get_checkcode().replace(' ','')
post_url = 'https://yz.chsi.com.cn/apply/cjcx/cjcx.do'
data = {
'xm': self.xm,
'zjhm':self.id,
'ksbh':self.ksbh,
'bkdwdm':None,
'checkcode':checkcode
}
post_resp = requests.post(post_url,data=data, headers=self.headers).text
return post_resp
@staticmethod
def get_mid_text(w1, w2, text):
pat = re.compile(w1+'(.*?)'+w2,re.S)
result_dict = pat.findall(text)
return result_dict
@staticmethod
def notice(key, title, desp):
url = 'https://sc.ftqq.com/{}.send'.format(key)
payload = {'text':title,'desp':desp}
r = requests.get(url,params=payload)
return r.text
def main_handler(event, context):
data = {
"isBase64Encoded": False,
"statusCode": 200,
"headers": {"Content-Type":"application/json"},
"body": ""}
try:
rid = context["request_id"]
xm = event['queryString']['xm']
id = event['queryString']['id']
kh = event['queryString']['kh']
query = ScoreQuery(xm,id,kh)
page = query.get_score_page()
if '无查询结果' in page:
            logging.info('Score not released yet')
data['body'] = json.dumps({"Code":101,"Msg":"Score not released yet","Request_id":rid})
return data
elif '总分' in page:
score_content = query.get_mid_text('<tbody>','</tbody>',page)[0]
            logging.info('Score query succeeded')
data['headers']['Content-Type'] = 'text/html'
data['body'] = score_content
            #query.notice(sckey, 'Scores are out', page)
            # When this code was written the scores had not been published yet and the page
            # structure was unknown, so the table contents are returned directly.
return data
else:
data['body'] = json.dumps({"Code":103,"Msg":"Unexpected page contents","Request_id":rid})
return data
except:
data['body'] = json.dumps({"Code":102,"Msg":"Unexpected url parameters","Request_id":rid})
return data
``` |
{
"source": "6rayWa1cher/banlabyrinth",
"score": 2
} |
#### File: src/banlabyrinth/bot.py
```python
import logging
from logging.handlers import RotatingFileHandler
from appdirs import *
from discord.ext import commands
from banlabyrinth import configmanager
from banlabyrinth.configmanager import CONFIG_PATH
def setup_logger():
logs_path = configmanager.get_logs_path()
if not os.path.exists(logs_path):
os.mkdir(logs_path)
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO)
handler = RotatingFileHandler(filename=logs_path + 'discord.log', encoding='utf-8', mode='w',
maxBytes=8 * 1024 * 1024)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
logger = logging.getLogger('banlab')
logger.setLevel(logging.INFO)
handler1 = RotatingFileHandler(filename=logs_path + 'banlab.log', encoding='utf-8', mode='w',
maxBytes=8 * 1024 * 1024)
handler1.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
handler2 = logging.StreamHandler(stream=sys.stdout)
handler2.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler1)
logger.addHandler(handler2)
def main():
if not os.path.exists(CONFIG_PATH):
configmanager.create_config()
setup_logger()
import banlabyrinth.dbmanager as dbmanager
dbmanager.setup()
token = configmanager.get_token()
bot = commands.Bot(command_prefix=configmanager.get_command_prefix())
bot.load_extension("banlabyrinth.cogs.roleregistrarcog")
bot.load_extension("banlabyrinth.cogs.databasecleanercog")
bot.load_extension("banlabyrinth.cogs.boxcog")
bot.load_extension("banlabyrinth.cogs.trappedcog")
logging.getLogger('banlab').info("Connecting...")
bot.run(token)
if __name__ == '__main__':
main()
```
#### File: banlabyrinth/entities/labyrinth.py
```python
from typing import Mapping, Optional
import discord
from banlabyrinth.lab import LabyrinthWalker, letters_reversed, ROAD, WALL
CENTER_ICON = "\u2718"
LEFT_ARROW = "\u21d0"
DOWN_ARROW = "\u21d3"
RIGHT_ARROW = "\u21d2"
UP_ARROW = "\u21d1"
CLOSED_ICON = "\u26d4"
class Labyrinth:
def __init__(self, lab: LabyrinthWalker, folder: discord.CategoryChannel,
previous_member_roles: Optional[Mapping], up, right, down, left, center):
self.lab = lab
self.folder = folder
self.up = up
self.right = right
self.down = down
self.left = left
self.center = center
self.channels = {up, right, center, down, left}
self.channel_to_direction = {
self.up: 'N',
self.right: 'E',
self.down: 'S',
self.left: 'W'
}
self.direction_to_channel = {
'N': self.up,
'E': self.right,
'S': self.down,
'W': self.left
}
self.channel_to_true_name = {
self.up: UP_ARROW,
self.right: RIGHT_ARROW,
self.down: DOWN_ARROW,
self.left: LEFT_ARROW
}
self.previous_member_roles = previous_member_roles
async def update_channels(self):
for channel in self.channels:
if channel == self.center:
continue
direction = self.channel_to_direction[channel]
wall = letters_reversed[direction]
if self.lab.curr.walls[wall] == ROAD and channel.name != self.channel_to_true_name[channel]:
await channel.edit(name=self.channel_to_true_name[channel])
elif self.lab.curr.walls[wall] == WALL and channel.name != CLOSED_ICON:
await channel.edit(name=CLOSED_ICON)
def is_wall(self, channel):
direction = self.channel_to_direction[channel]
wall = letters_reversed[direction]
return self.lab.curr.walls[wall] == WALL
```
#### File: src/banlabyrinth/utils.py
```python
import logging
import random
from itertools import filterfalse
from typing import Optional
import discord
from discord import PermissionOverwrite
logger = logging.getLogger("banlab")
def get_justice_quote() -> str:
quotes = ["Be nice and kind!", "Remember: my boxes and mazes are always ready! >:)",
"I think, you don't want to meet me again :)", "You got off easy, don't you think?",
"For your safety, do not do anything wrong!", "See you next time >:D"]
return random.choice(quotes)
async def get_member(ctx, member) -> Optional[discord.Member]:
if member is not None and ctx.guild.get_member_named(member) is not None:
member = ctx.guild.get_member_named(member)
elif member is not None:
await ctx.send("{0} not found.".format(member))
return None
else:
member = ctx.author
if member == ctx.guild.me:
await ctx.send("Nope :P You can't do this on me :D".format(member))
return None
return member
# noinspection PyUnresolvedReferences
async def trap(ctx, member, guild, exclude):
previous_member_roles = dict()
for channel in filterfalse(exclude.__contains__, guild.voice_channels):
try:
if member in channel.overwrites:
special_perms = channel.overwrites[member]
if not special_perms.is_empty():
previous_member_roles[channel] = special_perms.pair()
await channel.set_permissions(member, read_messages=False, connect=False)
logger.debug("changing {0.name} from guild {1.id} roles in channel {2.name} "
"to read_messages={3}, connect={4}".format(member, guild, channel, "False",
"False"))
except discord.errors.Forbidden:
await ctx.send("Warning! Something went wrong while trapping process! "
"Check if I have enough rights in {0.name}, especially "
"right to change permissions. Otherwise, the trap isn't"
" effective :(".format(channel))
return previous_member_roles
async def untrap(member, guild, prev_roles, exclude):
for channel in filterfalse(exclude.__contains__, guild.voice_channels):
try:
special_perms = PermissionOverwrite.from_pair(*prev_roles[channel]) if channel in prev_roles \
else PermissionOverwrite()
# noinspection PyUnresolvedReferences
await channel.set_permissions(member, overwrite=special_perms)
logger.debug("changing {0.name} from guild {1.id} roles in channel {2.name} "
"to allow={3}, deny={4}".format(member, guild, channel, special_perms.pair()[0].value,
special_perms.pair()[1].value))
except discord.errors.Forbidden:
pass
``` |
{
"source": "6rayWa1cher/bar-counter-discordbot",
"score": 2
} |
#### File: barcounter/cogs/helpers.py
```python
from typing import Optional
from discord import Guild
from discord.ext.commands import Context
from barcounter import confutils as conf, db, log
from barcounter.dbentities import Server, Person, Drink
def get_server_or_create(gid: int, preferred_locale: Optional[str]) -> Server:
    if preferred_locale is not None:
        preferred_locale = preferred_locale.replace("-", "_")
    if preferred_locale is not None and preferred_locale in conf.get_langs():
return Server.get_or_create(sid=gid, defaults={"lang": preferred_locale})[0]
else:
return Server.get_or_create(sid=gid, defaults={"lang": "en_US"})[0]
def get_server_from_context(ctx: Context) -> Server:
return get_server_or_create(ctx.guild.id, ctx.guild.preferred_locale)
def get_lang(gid: int, preferred_locale: Optional[str]):
return get_server_or_create(gid, preferred_locale).lang
def get_lang_from_context(ctx: Context):
return get_lang(ctx.guild.id, ctx.guild.preferred_locale)
def get_lang_from_guild(guild: Guild):
return get_lang(guild.id, guild.preferred_locale)
def get_person_or_create(gid: int, uid: int, preferred_locale: Optional[str]):
server = get_server_or_create(gid, preferred_locale)
return Person.get_or_create(server=server, uid=uid, defaults={"intoxication": 0})[0]
async def add_default_drinks(guild):
server = get_server_or_create(guild.id, guild.preferred_locale)
default_drinks = conf.lang_raw(server.lang, "default_drinks")
with db.atomic():
for default_drink in default_drinks:
Drink.create(server=server, name=default_drink.name, intoxication=default_drink.intoxication,
portion_size=default_drink.portion, portions_per_day=default_drink.portions_per_day,
portions_left=default_drink.portions_per_day)
log.info("Added drinks to {0}".format(guild.id))
```
#### File: barcounter/cogs/settingscog.py
```python
from discord.ext import commands
from discord.ext.commands import Bot, Context
from barcounter import confutils as conf, db
from barcounter import log
from barcounter.cogs.helpers import get_server_or_create, add_default_drinks, get_lang_from_context
from barcounter.confutils import get_langs
from barcounter.dbentities import Server, Drink
logger = log
class SettingsCog(commands.Cog):
def __init__(self, bot):
self.bot: Bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: Context, error):
if not hasattr(error, "bcdb_checked") or not error.bcdb_checked:
log.error("Error on command {0}".format(ctx.command), exc_info=error)
await ctx.send(conf.lang(get_lang_from_context(ctx), "on_error"))
return True
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def lang(self, ctx: Context, lang_code: str = None):
"""
If lang is provided, sets the language. Requires Manage Guild permission.
Return the list of available languages and quick reminder how to set it.
"""
if lang_code:
if lang_code not in get_langs():
await ctx.send(conf.international("incorrect_language"))
else:
logger.info("Updated lang on {0} to {1}".format(ctx.guild.id, lang_code))
with db.atomic():
server = get_server_or_create(ctx.guild.id, ctx.guild.preferred_locale)
Server.update(lang=lang_code).where(Server.sid == ctx.guild.id).execute()
Drink.delete().where(Drink.server == server.id).execute()
await add_default_drinks(ctx.guild)
await ctx.send(conf.lang(lang_code, "lang_selected"))
else:
langs = "\n".join(
"{0}, lang_code \"{1}\"".format(conf.lang(lang, "name"), lang) for lang in conf.get_langs())
await ctx.send(conf.international("lang_list").format(self.bot.command_prefix) + '\n' + langs)
@lang.error
async def lang_error(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send(conf.international("missing_permissions"))
async def globally_block_dms(ctx):
return ctx.guild is not None
def setup(bot):
bot.add_check(globally_block_dms)
bot.add_cog(SettingsCog(bot))
``` |
{
"source": "6RiverSystems/caddy-ansible",
"score": 2
} |
#### File: caddy-ansible/tests/test_default.py
```python
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner('.molecule/ansible_inventory').get_hosts('all')
def test_files(host):
dirs = [
"/etc/caddy",
"/var/log/caddy"
]
for dir in dirs:
d = host.file(dir)
assert d.exists
assert d.is_directory
def test_packages(host):
pkgs = [
"git"
]
for p in pkgs:
assert host.package(p).is_installed
def test_service(host):
s = host.service("caddy")
assert s.is_enabled
assert s.is_running
def test_socket(host):
sockets = [
"tcp://127.0.0.1:2020"
]
for socket in sockets:
s = host.socket(socket)
assert s.is_listening
``` |
{
"source": "6RiverSystems/catkin_tools",
"score": 2
} |
#### File: verbs/catkin_build/test_context.py
```python
import os
TEST_DIR = os.path.dirname(__file__)
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
BUILD = ['build', '--no-notify', '--no-status']
CLEAN = ['clean', '--all', '--yes'] # , '--no-notify', '--no-color', '--no-status']
def test_build_this():
"""Test package context awareness"""
pass # TODO: Implement this (both negative and positive results)
def test_start_with_this():
"""Test package context awareness for --start-with option"""
pass # TODO: Implement this (both negative and positive results)
``` |
{
"source": "6RiverSystems/pure_pursuit",
"score": 2
} |
#### File: pure_pursuit/scripts/straight_line.py
```python
import rospy
from std_msgs.msg import String, Header
from nav_msgs.msg import Odometry, Path
from geometry_msgs.msg import Twist, PoseStamped
import math
import sys
import time
import tf
from srslib_framework.msg import MsgUpdateToteLights
def messageCreation(message, cmd, startColor, endColor, startSegment, endSegment, freq):
message.lightCmd = cmd
message.startColor.r = startColor[0]
message.startColor.g = startColor[1]
message.startColor.b = startColor[2]
message.startColor.a = startColor[3]
message.endColor.r = endColor[0]
message.endColor.g = endColor[1]
message.endColor.b = endColor[2]
message.endColor.a = endColor[3]
message.startSegment.x = startSegment[0]
message.startSegment.y = startSegment[1]
message.startSegment.z = startSegment[2]
message.endSegment.x = endSegment[0]
message.endSegment.y = endSegment[1]
message.endSegment.z = endSegment[2]
message.frequency = freq
class StraightLine:
VEL_COM_TOPIC = "/sensors/odometry/velocity/cmd"
MAP_POSE_TOPIC = "/map_pose"
GOAL_TOPIC = "/path_segment"
LIGHT_TOPIC = "/drivers/brainstem/cmd/update_tote_lights"
global rospy
def __init__(self, endPoint):
self.endPoint = endPoint
self.map_pose_sub = rospy.Subscriber(self.MAP_POSE_TOPIC, PoseStamped, self.mapPoseCallback)
#self.vel_sub = rospy.Subscriber(self.VEL_COM_TOPIC, Twist, self.velocityCmdCallback)
self.goal_pub = rospy.Publisher(self.GOAL_TOPIC, Path, queue_size=2)
self.light_pub = rospy.Publisher(self.LIGHT_TOPIC, MsgUpdateToteLights, queue_size=5)
        self.timeChange = 1.0
self.redTop = True
self.changeLights = 0
def velocityCmdCallback(self, msg):
if(rospy.get_time() - self.changeLights > self.timeChange):
if(self.redTop):
lightMsg1 = MsgUpdateToteLights()
lightMsg2 = MsgUpdateToteLights()
messageCreation(lightMsg1,1,[255,0,0,0],[255,0,0,0],[0,0,0],[26,1,0],1)
self.light_pub.publish(lightMsg1)
messageCreation(lightMsg2,1,[0,255,0,0],[0,255,0,0],[0,0,1],[26,1,1],1)
self.light_pub.publish(lightMsg2)
else:
lightMsg1 = MsgUpdateToteLights()
lightMsg2 = MsgUpdateToteLights()
messageCreation(lightMsg1,1,[0,255,0,0],[0,255,0,0],[0,0,0],[26,1,0],1)
self.light_pub.publish(lightMsg1)
messageCreation(lightMsg2,1,[255,0,0,0],[255,0,0,0],[0,0,1],[26,1,1],1)
self.light_pub.publish(lightMsg2)
self.redTop = not self.redTop
self.changeLights = rospy.get_time()
def mapPoseCallback(self, msg):
self.currentPose = msg.pose
def sendGoalFunc(self):
path = Path()
path.header.frame_id = 'map'
for i in range(99):
newPose = PoseStamped()
newPose.header.seq = i
newPose.header.frame_id = 'map'
newPose.pose.position.x = self.currentPose.position.x + i*(self.endPoint[0] - self.currentPose.position.x)/100.0
newPose.pose.position.y = self.currentPose.position.y + i*(self.endPoint[1] - self.currentPose.position.y)/100.0
newPose.pose.position.z = 0
            newQuaternion = tf.transformations.quaternion_from_euler(0, 0, math.atan2(self.endPoint[1] - self.currentPose.position.y, self.endPoint[0] - self.currentPose.position.x))
newPose.pose.orientation.x = 0
newPose.pose.orientation.y = 0
newPose.pose.orientation.z = newQuaternion[2]
newPose.pose.orientation.w = newQuaternion[3]
path.poses.append(newPose)
self.goal_pub.publish(path)
if __name__ == '__main__':
rospy.init_node('straight_line', anonymous=True)
endPoint = []
endPoint.append(13) # x
endPoint.append(42) # y
hs = StraightLine(endPoint)
time.sleep(1.0)
    rate = rospy.Rate(20) # 20hz
rate.sleep()
print "Here we go"
hs.sendGoalFunc()
rospy.spin()
``` |
{
"source": "6RiverSystems/realsense",
"score": 2
} |
#### File: realsense2_camera/cfg/base_d400_params.py
```python
from dynamic_reconfigure.parameter_generator_catkin import *
def add_base_params(gen, prefix):
# Name Type Level Description Default Min Max
gen.add(str(prefix) + "depth_gain", int_t, 1, "Gain", 16, 16, 248)
gen.add(str(prefix) + "depth_enable_auto_exposure", bool_t, 2, "Enable Auto Exposure", False)
preset_enum = gen.enum([gen.const("Custom", int_t, 0, "Custom"),
gen.const("Default", int_t, 1, "Default Preset"),
gen.const("Hand", int_t, 2, "Hand Gesture"),
gen.const("HighAccuracy", int_t, 3, "High Accuracy"),
gen.const("HighDensity", int_t, 4, "High Density"),
gen.const("MediumDensity", int_t, 5, "Medium Density")], "D400 Visual Presets")
gen.add(str(prefix) + "depth_visual_preset", int_t, 3, "D400 Visual Presets", 3, 0, 5, edit_method=preset_enum)
gen.add(str(prefix) + "depth_frames_queue_size", int_t, 4, "Frames Queue Size", 16, 0, 32)
gen.add(str(prefix) + "depth_error_polling_enabled", bool_t, 5, "Error Polling Enabled", False)
gen.add(str(prefix) + "depth_output_trigger_enabled", bool_t, 6, "Output Trigger Enabled", False)
gen.add(str(prefix) + "depth_units", double_t, 7, "Depth Units", 0.001, 0.001, 0.001)
gen.add(str(prefix) + "JSON_file_path", str_t, 8, "JSON_file_path", "")
``` |
{
"source": "6rotoms/igdb-pdt",
"score": 3
} |
#### File: 6rotoms/igdb-pdt/populate_db.py
```python
import asyncio
import aiohttp
import itertools
import json
import argparse
from redisearch import Client, TextField
import os
from datetime import datetime
import time
BASE_URL = 'https://api.igdb.com/v4/games'
CLIENT_ID = ''
CLIENT_SECRET = ''
REDIS_HOSTNAME = 'localhost'
REDIS_PORT = 6379
IGDB_SRC = 'API'
if 'REDIS_HOSTNAME' in os.environ:
REDIS_HOSTNAME = os.environ['REDIS_HOSTNAME']
if 'REDIS_PORT' in os.environ:
REDIS_PORT = os.environ['REDIS_PORT']
if 'AUTHORIZATION' in os.environ:
AUTHORIZATION = os.environ['AUTHORIZATION']
if 'CLIENT_ID' in os.environ:
CLIENT_ID = os.environ['CLIENT_ID']
if 'CLIENT_SECRET' in os.environ:
CLIENT_SECRET = os.environ['CLIENT_SECRET']
if 'IGDB_SRC' in os.environ:
IGDB_SRC = os.environ['IGDB_SRC']
auth_headers = {'Client-ID': CLIENT_ID, 'Authorization': ''}
GAME_QUERY_STRING = b'fields id, summary, slug, name, alternative_names.name, cover.url; where (multiplayer_modes.onlinecoop=true | \
multiplayer_modes.offlinecoop=true | multiplayer_modes.lancoop=true | \
game_modes = (2, 6)) & category=(0,9);'
async def set_authorization(
session: aiohttp.ClientSession
) -> dict:
if not (CLIENT_ID and CLIENT_SECRET):
print('CLIENT_ID and CLIENT_SECRET environment variables not set!')
return
url = 'https://id.twitch.tv/oauth2/token?client_id=%s&client_secret=%s&grant_type=client_credentials' % (CLIENT_ID, CLIENT_SECRET)
resp = await session.post(url=url)
data = await resp.json()
access_token = data['access_token']
auth_headers['Authorization'] = 'Bearer %s' % access_token
async def get_count(
session: aiohttp.ClientSession
) -> dict:
url = BASE_URL + '/count'
resp = await session.post(url=url, headers=auth_headers,
data=GAME_QUERY_STRING)
data = await resp.json()
count = data['count']
return count
async def get_games(
session: aiohttp.ClientSession,
offset: int,
max_id: int
) -> dict:
url = BASE_URL
resp = await session.post(url=url, headers=auth_headers,
data=GAME_QUERY_STRING[:-1] +
b' & id>%d;limit 500;offset %d;' % (max_id, offset))
data = await resp.json()
return data
def get_cover(data: dict):
if 'cover' in data:
return data['cover']['url'].replace('t_thumb', 't_cover_big')
return ''
async def fetch_games():
async with aiohttp.ClientSession() as session:
await set_authorization(session=session)
if not auth_headers['Authorization']:
print('Failed to set Authorization!')
return json.dumps({}, indent=4)
count = await get_count(session=session)
max_id = -1
data = {}
last_time = 0
for _ in range(0, count, 2000):
new_time = time.time_ns()
while new_time - last_time < 1000000000:
time.sleep(5000/1000000000.0)
new_time = time.time_ns()
last_time = new_time
tasks = [get_games(session=session, offset=i, max_id=max_id) for i in range(0, 2000, 500)]
new_data = await asyncio.gather(*tasks, return_exceptions=True)
new_data = list(itertools.chain(*new_data))
max_entry = max(new_data, key=lambda d: d['id'])
max_id = int(max_entry['id'])
new_data = {
p['slug']: {
'name': p['name'],
'alt_names': [*map(lambda v: v.get('name', ''), p.get('alternative_names', []))],
'summary': p.get('summary', False) or '',
'thumb': p.get('cover', {}).get('url', '') or '',
'cover': get_cover(p),
}
for p in new_data}
data = {**data, **new_data}
return data
def load_mock_data():
with open('data.json') as f:
data = json.load(f)
return data
def cache_to_redis(data: dict):
if REDIS_HOSTNAME == '':
print('REDIS_HOSTNAME environment variable is not set')
return
client = Client('games', host=REDIS_HOSTNAME, port=REDIS_PORT)
indexCreated = False
maxAltNames = len(max(data.values(), key=lambda d: len(d['alt_names']))['alt_names'])
while not indexCreated:
try:
            client.create_index([TextField('name', weight=10),
                                 *[TextField('alt_name_%d' % i, weight=10) for i in range(maxAltNames)],
                                 TextField('summary', weight=1),
                                 TextField('cover', weight=0),
                                 TextField('thumb', weight=0)])
indexCreated = True
except Exception:
            print('Failed to create index, retrying...')
time.sleep(3)
for k, v in data.items():
client.add_document(k,
name=v['name'],
**{'alt_name_%d' % i: n for i, n in enumerate(v['alt_names'])},
cover=v['cover'],
thumb=v['thumb'],
summary=v['summary'])
print('done')
def main(args: dict):
if (not args.output):
print('igdb redis updating: ', datetime.now())
if IGDB_SRC == 'MOCK' or args.mock:
data = load_mock_data()
else:
data = asyncio.run(fetch_games())
if args.output:
print(json.dumps(data, indent=4))
else:
print('call complete. fetched %d games' % (len(data)))
if args.persist:
cache_to_redis(data=data)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='load redis db with game name list')
parser.add_argument('--mock', action='store_true')
parser.add_argument('--persist', action='store_true')
parser.add_argument('--output', action='store_true')
arguments = parser.parse_args()
main(args=arguments)
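
# Illustrative invocations (assumptions about typical usage, not part of the original script):
#   CLIENT_ID=... CLIENT_SECRET=... python populate_db.py --output    # fetch from IGDB and print as JSON
#   REDIS_HOSTNAME=localhost python populate_db.py --mock --persist   # load data.json and cache it in RediSearch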
``` |
{
"source": "6saiya/mpython",
"score": 2
} |
#### File: esp32/modules/mpython.py
```python
from machine import I2C, PWM, Pin, ADC, TouchPad, UART
from ssd1106 import SSD1106_I2C
import esp,math,time,network
import ustruct, array
from neopixel import NeoPixel
from esp import dht_readinto
from time import sleep_ms, sleep_us,sleep
i2c = I2C(scl=Pin(Pin.P19), sda=Pin(Pin.P20), freq=200000)
class Font(object):
def __init__(self, font_address=0x300000):
self.font_address = font_address
buffer = bytearray(18)
esp.flash_read(self.font_address, buffer)
self.header, \
self.height, \
self.width, \
self.baseline, \
self.x_height, \
self.Y_height, \
self.first_char,\
self.last_char = ustruct.unpack('4sHHHHHHH', buffer)
self.first_char_info_address = self.font_address + 18
def GetCharacterData(self, c):
uni = ord(c)
if uni not in range(self.first_char, self.last_char):
return None
char_info_address = self.first_char_info_address + \
(uni - self.first_char) * 6
buffer = bytearray(6)
esp.flash_read(char_info_address, buffer)
ptr_char_data, len = ustruct.unpack('IH', buffer)
if (ptr_char_data) == 0 or (len == 0):
return None
buffer = bytearray(len)
esp.flash_read(ptr_char_data + self.font_address, buffer)
return buffer
class TextMode():
normal = 1
rev = 2
trans = 3
xor = 4
class OLED(SSD1106_I2C):
""" 128x64 oled display """
def __init__(self):
super().__init__(128, 64, i2c)
self.f = Font()
if self.f is None:
raise Exception('font load failed')
def DispChar(self, s, x, y, mode=TextMode.normal):
if self.f is None:
return
for c in s:
data = self.f.GetCharacterData(c)
if data is None:
x = x + self.width
continue
width, bytes_per_line = ustruct.unpack('HH', data[:4])
# print('character [%d]: width = %d, bytes_per_line = %d' % (ord(c)
# , width, bytes_per_line))
for h in range(0, self.f.height):
w = 0
i = 0
while w < width:
mask = data[4 + h * bytes_per_line + i]
if (width - w) >= 8:
n = 8
else:
n = width - w
py = y + h
page = py >> 3
bit = 0x80 >> (py % 8)
for p in range(0, n):
px = x + w + p
c = 0
if (mask & 0x80) != 0:
if mode == TextMode.normal or \
mode == TextMode.trans:
c = 1
if mode == TextMode.rev:
c = 0
if mode == TextMode.xor:
c = self.buffer[page * 128 + px] & bit
if c != 0:
c = 0
else:
c = 1
# print("px = %d, py = %d, c = %d" % (px, py, c))
super().pixel(px, py, c)
else:
if mode == TextMode.normal:
c = 0
super().pixel(px, py, c)
if mode == TextMode.rev:
c = 1
super().pixel(px, py, c)
mask = mask << 1
w = w + 8
i = i + 1
x = x + width + 1
def circle(self, x0, y0, radius , c):
# Circle drawing function. Will draw a single pixel wide circle with
# center at x0, y0 and the specified radius.
f = 1 - radius
ddF_x = 1
ddF_y = -2 * radius
x = 0
y = radius
super().pixel(x0, y0 + radius, c)
super().pixel(x0, y0 - radius, c)
super().pixel(x0 + radius, y0, c)
super().pixel(x0 - radius, y0, c)
while x < y:
if f >= 0:
y -= 1
ddF_y += 2
f += ddF_y
x += 1
ddF_x += 2
f += ddF_x
super().pixel(x0 + x, y0 + y, c)
super().pixel(x0 - x, y0 + y, c)
super().pixel(x0 + x, y0 - y, c)
super().pixel(x0 - x, y0 - y, c)
super().pixel(x0 + y, y0 + x, c)
super().pixel(x0 - y, y0 + x, c)
super().pixel(x0 + y, y0 - x, c)
super().pixel(x0 - y, y0 - x, c)
def fill_circle(self, x0, y0, radius, c):
# Filled circle drawing function. Will draw a filled circule with
# center at x0, y0 and the specified radius.
super().vline(x0, y0 - radius, 2*radius + 1, c)
f = 1 - radius
ddF_x = 1
ddF_y = -2 * radius
x = 0
y = radius
while x < y:
if f >= 0:
y -= 1
ddF_y += 2
f += ddF_y
x += 1
ddF_x += 2
f += ddF_x
super().vline(x0 + x, y0 - y, 2*y + 1, c)
super().vline(x0 + y, y0 - x, 2*x + 1, c)
super().vline(x0 - x, y0 - y, 2*y + 1, c)
super().vline(x0 - y, y0 - x, 2*x + 1, c)
def triangle(self, x0, y0, x1, y1, x2, y2, c):
# Triangle drawing function. Will draw a single pixel wide triangle
# around the points (x0, y0), (x1, y1), and (x2, y2).
super().line(x0, y0, x1, y1, c)
super().line(x1, y1, x2, y2, c)
super().line(x2, y2, x0, y0, c)
def fill_triangle(self, x0, y0, x1, y1, x2, y2, c):
# Filled triangle drawing function. Will draw a filled triangle around
# the points (x0, y0), (x1, y1), and (x2, y2).
if y0 > y1:
y0, y1 = y1, y0
x0, x1 = x1, x0
if y1 > y2:
y2, y1 = y1, y2
x2, x1 = x1, x2
if y0 > y1:
y0, y1 = y1, y0
x0, x1 = x1, x0
a = 0
b = 0
y = 0
last = 0
if y0 == y2:
a = x0
b = x0
if x1 < a:
a = x1
elif x1 > b:
b = x1
if x2 < a:
a = x2
elif x2 > b:
b = x2
super().hline(a, y0, b-a+1, c)
return
dx01 = x1 - x0
dy01 = y1 - y0
dx02 = x2 - x0
dy02 = y2 - y0
dx12 = x2 - x1
dy12 = y2 - y1
if dy01 == 0:
dy01 = 1
if dy02 == 0:
dy02 = 1
if dy12 == 0:
dy12 = 1
sa = 0
sb = 0
if y1 == y2:
last = y1
elif y0 == y1:
last = y0
else:
last = y1-1
for y in range(y0, last+1):
a = x0 + sa // dy01
b = x0 + sb // dy02
sa += dx01
sb += dx02
if a > b:
a, b = b, a
super().hline(a, y, b-a+1, c)
sa = dx12 * (y - y1)
sb = dx02 * (y - y0)
while y <= y2:
a = x1 + sa // dy12
b = x0 + sb // dy02
sa += dx12
sb += dx02
if a > b:
a, b = b, a
super().hline(a, y, b-a+1, c)
y += 1
def Bitmap(self, x, y, bitmap, w, h,c):
byteWidth = int((w + 7) / 8)
for j in range(h):
for i in range(w):
if bitmap[int(j * byteWidth + i / 8)] & (128 >> (i & 7)):
super().pixel(x+i, y+j, c)
def drawCircleHelper(self, x0, y0, r, cornername, c):
f = 1 - r
ddF_x = 1
ddF_y = -2 * r
x = 0
y = r
while x < y:
if (f >= 0):
# y-- y -= 1 below
y -= 1
ddF_y += 2
f += ddF_y
ddF_x += 2
f += ddF_x
if (cornername & 0x4):
super().pixel(x0 + x, y0 + y, c)
super().pixel(x0 + y, y0 + x, c)
if (cornername & 0x2):
super().pixel(x0 + x, y0 - y, c)
super().pixel(x0 + y, y0 - x, c)
if (cornername & 0x8):
super().pixel(x0 - y, y0 + x, c)
super().pixel(x0 - x, y0 + y, c)
if (cornername & 0x1):
super().pixel(x0 - y, y0 - x, c)
super().pixel(x0 - x, y0 - y, c)
x += 1
def RoundRect( self, x, y, w, h, r, c):
self.hline(x + r , y , w - 2 * r , c)
self.hline(x + r , y + h - 1, w - 2 * r , c)
self.vline(x, y + r, h - 2 * r , c)
self.vline(x + w - 1, y + r , h - 2 * r , c)
self.drawCircleHelper(x + r , y + r , r , 1, c)
self.drawCircleHelper(x + w - r - 1, y + r , r , 2, c)
self.drawCircleHelper(x + w - r - 1, y + h - r - 1, r , 4, c)
self.drawCircleHelper(x + r , y + h - r - 1, r , 8, c)
class Accelerometer():
""" """
def __init__(self):
self.addr = 38
self.i2c = i2c
self.i2c.writeto(self.addr, b'\x0F\x08') # set resolution = 10bit
self.i2c.writeto(self.addr, b'\x11\x00') # set power mode = normal
def get_x(self):
retry = 0
        while (retry < 5):
try:
self.i2c.writeto(self.addr, b'\x02', False)
buf = self.i2c.readfrom(self.addr, 2)
x = ustruct.unpack('h', buf)[0]
return x / 4 / 4096
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def get_y(self):
retry = 0
        while (retry < 5):
try:
self.i2c.writeto(self.addr, b'\x04', False)
buf = self.i2c.readfrom(self.addr, 2)
y = ustruct.unpack('h', buf)[0]
return y / 4 / 4096
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def get_z(self):
retry = 0
        while (retry < 5):
try:
self.i2c.writeto(self.addr, b'\x06', False)
buf = self.i2c.readfrom(self.addr, 2)
z = ustruct.unpack('h', buf)[0]
return z / 4 / 4096
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
class BME280(object):
def __init__(self):
self.addr = 119
# The “ctrl_hum” register sets the humidity data acquisition options of the device
# 0x01 = [2:0]oversampling ×1
i2c.writeto(self.addr, b'\xF2\x01')
# The “ctrl_meas” register sets the pressure and temperature data acquisition options of the device.
# The register needs to be written after changing “ctrl_hum” for the changes to become effective.
# 0x27 = [7:5]Pressure oversampling ×1 | [4:2]Temperature oversampling ×4 | [1:0]Normal mode
i2c.writeto(self.addr, b'\xF4\x27')
# The “config” register sets the rate, filter and interface options of the device. Writes to the “config”
# register in normal mode may be ignored. In sleep mode writes are not ignored.
i2c.writeto(self.addr, b'\xF5\x00')
i2c.writeto(self.addr, b'\x88', False)
bytes = i2c.readfrom(self.addr, 6)
self.dig_T = ustruct.unpack('Hhh', bytes)
i2c.writeto(self.addr, b'\x8E', False)
bytes = i2c.readfrom(self.addr, 18)
self.dig_P = ustruct.unpack('Hhhhhhhhh', bytes)
i2c.writeto(self.addr, b'\xA1', False)
self.dig_H = array.array('h', [0, 0, 0, 0, 0, 0])
self.dig_H[0] = i2c.readfrom(self.addr, 1)[0]
i2c.writeto(self.addr, b'\xE1', False)
buff = i2c.readfrom(self.addr, 7)
self.dig_H[1] = ustruct.unpack('h', buff[0:2])[0]
self.dig_H[2] = buff[2]
self.dig_H[3] = (buff[3] << 4) | (buff[4] & 0x0F)
self.dig_H[4] = (buff[5] << 4) | (buff[4] >> 4 & 0x0F)
self.dig_H[5] = buff[6]
def temperature(self):
retry = 0
        while (retry < 5):
try:
i2c.writeto(self.addr, b'\xFA', False)
buff = i2c.readfrom(self.addr, 3)
T = (((buff[0] << 8) | buff[1]) << 4) | (buff[2] >> 4 & 0x0F)
c1 = (T / 16384.0 - self.dig_T[0] / 1024.0) * self.dig_T[1]
c2 = ((T / 131072.0 - self.dig_T[0] / 8192.0) * (T / 131072.0 - self.dig_T[0] / 8192.0)) * self.dig_T[2]
self.tFine = c1 + c2
return self.tFine / 5120.0
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def pressure(self):
retry = 0
        while (retry < 5):
try:
i2c.writeto(self.addr, b'\xF7', False)
buff = i2c.readfrom(self.addr, 3)
P = (((buff[0] << 8) | buff[1]) << 4) | (buff[2] >> 4 & 0x0F)
c1 = self.tFine / 2.0 - 64000.0
c2 = c1 * c1 * self.dig_P[5] / 32768.0
c2 = c2 + c1 * self.dig_P[4] * 2.0
c2 = c2 / 4.0 + self.dig_P[3] * 65536.0
c1 = (self.dig_P[2] * c1 * c1 / 524288.0 + self.dig_P[1] * c1) / 524288.0
c1 = (1.0 + c1 / 32768.0) * self.dig_P[0]
if c1 == 0.0:
return 0
p = 1048576.0 - P
p = (p - c2 / 4096.0) * 6250.0 / c1
c1 = self.dig_P[8] * p * p / 2147483648.0
c2 = p * self.dig_P[7] / 32768.0
p = p + (c1 + c2 + self.dig_P[6]) / 16.0
return p
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def humidity(self):
retry = 0
        while (retry < 5):
try:
self.temperature()
i2c.writeto(self.addr, b'\xFD', False)
buff = i2c.readfrom(self.addr, 2)
H = buff[0] << 8 | buff[1]
h = self.tFine - 76800.0
h = (H - (self.dig_H[3] * 64.0 + self.dig_H[4] / 16384.0 * h)) * \
(self.dig_H[1] / 65536.0 * (1.0 + self.dig_H[5] / 67108864.0 * h * \
(1.0 + self.dig_H[2] / 67108864.0 * h)))
h = h * (1.0 - self.dig_H[0] * h / 524288.0)
if h > 100.0:
return 100.0
elif h < 0.0:
return 0.0
else:
return h
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
class PinMode(object):
IN = 1
OUT = 2
PWM = 3
ANALOG = 4
pins_remap_esp32 = (33, 32, 35, 34, 39, 0, 16, 17, 26, 25,
36, 2, -1, 18, 19, 21, 5, -1, -1, 22, 23,
-1, -1,
27, 14, 12, 13, 15, 4)
class MPythonPin():
def __init__(self, pin, mode=PinMode.IN,pull=None):
if mode not in [PinMode.IN, PinMode.OUT, PinMode.PWM, PinMode.ANALOG]:
raise TypeError("mode must be 'IN, OUT, PWM, ANALOG'")
if pin == 4:
raise TypeError("P4 is used for light sensor")
if pin == 10:
raise TypeError("P10 is used for sound sensor")
try:
self.id = pins_remap_esp32[pin]
except IndexError:
raise IndexError("Out of Pin range")
if mode == PinMode.IN:
if pin in [3]:
raise TypeError('IN not supported on P%d' %pin)
self.Pin=Pin(self.id, Pin.IN, pull)
if mode == PinMode.OUT:
if pin in [2,3]:
raise TypeError('OUT not supported on P%d' %pin)
self.Pin=Pin(self.id, Pin.OUT,pull)
if mode == PinMode.PWM:
if pin not in [0,1,5,6,7,8,9,11,13,14,15,16,19,20,23,24,25,26,27,28]:
raise TypeError('PWM not supported on P%d' %pin)
self.pwm = PWM(Pin(self.id), duty=0)
if mode == PinMode.ANALOG:
if pin not in [0, 1, 2, 3, 4, 10]:
raise TypeError('ANALOG not supported on P%d' %pin)
self.adc= ADC(Pin(self.id))
self.adc.atten(ADC.ATTN_11DB)
self.mode = mode
def irq(self,handler=None, trigger=Pin.IRQ_RISING):
if not self.mode == PinMode.IN:
raise TypeError('the pin is not in IN mode')
return self.Pin.irq(handler,trigger)
def read_digital(self):
if not self.mode == PinMode.IN:
raise TypeError('the pin is not in IN mode')
return self.Pin.value()
def write_digital(self, value):
if not self.mode == PinMode.OUT:
raise TypeError('the pin is not in OUT mode')
self.Pin.value(value)
def read_analog(self):
if not self.mode == PinMode.ANALOG:
raise TypeError('the pin is not in ANALOG mode')
return self.adc.read()
def write_analog(self, duty, freq=1000):
if not self.mode == PinMode.PWM:
raise TypeError('the pin is not in PWM mode')
self.pwm.freq(freq)
self.pwm.duty(duty)
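
# Minimal usage sketch (illustrative, not part of the original module): MPythonPin maps the
# board's P0..P28 labels to ESP32 GPIOs via pins_remap_esp32 and enforces the mode chosen at
# construction time, e.g.:
#
#     led = MPythonPin(16, PinMode.OUT)
#     led.write_digital(1)
#     knob = MPythonPin(1, PinMode.ANALOG)
#     value = knob.read_analog()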
'''
# to be test
class LightSensor(ADC):
def __init__(self):
super().__init__(Pin(pins_remap_esp32[4]))
# super().atten(ADC.ATTN_11DB)
def value(self):
# lux * k * Rc = N * 3.9/ 4096
# k = 0.0011mA/Lux
# lux = N * 3.9/ 4096 / Rc / k
return super().read() * 1.1 / 4095 / 6.81 / 0.011
'''
class wifi:
def __init__(self):
self.sta=network.WLAN(network.STA_IF)
self.ap=network.WLAN(network.AP_IF)
def connectWiFi(self,ssid,passwd):
self.sta.active(True)
self.sta.connect(ssid,passwd)
while(self.sta.ifconfig()[0]=='0.0.0.0'):
sleep_ms(200)
print('Connecting to network...')
print('WiFi Connection Successful,Network Config:%s' %str(self.sta.ifconfig()))
def disconnectWiFi(self):
self.sta.disconnect()
self.sta.active(False)
print('disconnect WiFi...')
def enable_APWiFi(self,essid,channel):
self.ap.active(True)
self.ap.config(essid=essid,channel=channel)
def disable_APWiFi(self):
self.ap.active(False)
print('disable AP WiFi...')
# display
oled = OLED()
display = oled
# 3 axis accelerometer
accelerometer = Accelerometer()
# bm280
try:
bme280=BME280()
except:
pass
# 3 rgb leds
rgb = NeoPixel(Pin(17, Pin.OUT), 3, 3, 1)
rgb.write()
# light sensor
light = ADC(Pin(39))
# sound sensor
sound = ADC(Pin(36))
# buttons
button_a = Pin(0, Pin.IN, Pin.PULL_UP)
button_b = Pin(2, Pin.IN, Pin.PULL_UP)
# touchpad
touchPad_P = TouchPad(Pin(27))
touchPad_Y = TouchPad(Pin(14))
touchPad_T = TouchPad(Pin(12))
touchPad_H = TouchPad(Pin(13))
touchPad_O = TouchPad(Pin(15))
touchPad_N = TouchPad(Pin(4))
from gui import *
def numberMap(inputNum,bMin,bMax,cMin,cMax):
outputNum = 0
outputNum =((cMax - cMin) / (bMax - bMin))*(inputNum - bMin)+cMin
return outputNum
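
# Worked example (illustrative): mapping a 10-bit ADC reading onto a 0-100 scale,
# numberMap(512, 0, 1023, 0, 100) returns roughly 50.05.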
``` |
{
"source": "6shell/ralph",
"score": 3
} |
#### File: lib/polymorphic/models.py
```python
from collections import defaultdict
from itertools import groupby
from django.contrib.contenttypes.models import ContentType
from django.db import models, OperationalError
from ralph.lib.error_handling.exceptions import WrappedOperationalError
class PolymorphicQuerySet(models.QuerySet):
def __init__(self, *args, **kwargs):
self._polymorphic_select_related = {}
self._polymorphic_prefetch_related = {}
self._annotate_args = []
self._annotate_kwargs = {}
self._extra_args = []
self._extra_kwargs = {}
self._polymorphic_filter_args = []
self._polymorphic_filter_kwargs = {}
super().__init__(*args, **kwargs)
def iterator(self):
"""
Override iterator:
- Iterate for all objects and collected ID
- For each ContentType generates additional queryset
- Returns iterator with different models
"""
# if this is final-level model, don't check for descendants - just
# return original queryset result
if not getattr(self.model, '_polymorphic_descendants', []):
yield from super().iterator()
return
result = []
content_types_ids = set()
select_related = None
if self.query.select_related:
select_related = self.query.select_related
self.query.select_related = False
for obj in super().iterator():
content_types_ids.add(obj.content_type_id)
result.append((
obj.content_type_id, obj.pk)
)
# store original order of items by PK
pks_order = [r[1] for r in result]
# WARNING! sorting result (by content type) breaks original order of
# items - we need to restore it at the end of this function
result = groupby(sorted(result), lambda x: x[0])
content_type_model_map = {
ct.id: ct.model_class() for ct in ContentType.objects.filter(
pk__in=list(content_types_ids)
)
}
# NOTICE: there might be multiple objects with the same ct_id and
# pk!! (ex. because of filters causing joins - ex. for prefetch related,
# when one object is attached to many others). We need to group them
# and return all of them (order is rather irrelevant then, because
# it's the same object).
result_mapping = defaultdict(list)
for k, v in result:
model = content_type_model_map[k]
polymorphic_models = getattr(model, '_polymorphic_models', [])
if polymorphic_models and model not in polymorphic_models:
model_query = model.objects.filter(pk__in=[i[1] for i in v])
model_name = model._meta.object_name
# first check if select_related/prefetch_related is present for
# this model to not trigger selecting/prefetching all related
# or reset select_related accidentally
# see https://docs.djangoproject.com/en/1.8/ref/models/querysets/#select-related # noqa
# for details
if select_related:
model_query.query.select_related = select_related.copy()
if self._polymorphic_select_related.get(model_name):
model_query = model_query.select_related(
*self._polymorphic_select_related[model_name]
)
if self._polymorphic_prefetch_related.get(model_name):
model_query = model_query.prefetch_related(
*self._polymorphic_prefetch_related[model_name]
)
model_query = model_query.annotate(
*self._annotate_args, **self._annotate_kwargs
).extra(*self._extra_args, **self._extra_kwargs)
# rewrite filters to properly handle joins between tables
# TODO(mkurek): handle it better since it will produce
# additional (unnecessary) WHERE conditions. Consider for
# example extracting (somehow) joined tables from filter
# fields and put them into `select_related`
if (
self._polymorphic_filter_args or
self._polymorphic_filter_kwargs
):
model_query = model_query.filter(
*self._polymorphic_filter_args,
**self._polymorphic_filter_kwargs
)
try:
for obj in model_query:
result_mapping[obj.pk].append(obj)
# NOTE(pszulc): We try to catch OperationalError that randomly
# occurs (1052, "Column 'created' in field list is ambiguous")
except OperationalError as e:
raise WrappedOperationalError(
query=model_query.query, model=self, error_str=str(e)) \
from e
# yield objects in original order
for pk in pks_order:
# yield all objects with particular PK
# it might happen that there will be additional objects with
# particular PK comparing to original query. This might happen when
# "broad" polymorphic_filter is used with prefetch_related (and
# original model is filtered to get only subset of all objects)
# see test cases in `PolymorphicTestCase` for examples.
while result_mapping[pk]:
yield result_mapping[pk].pop()
def annotate(self, *args, **kwargs):
self._annotate_args.extend(args)
self._annotate_kwargs.update(kwargs)
return super().annotate(*args, **kwargs)
def extra(self, *args, **kwargs):
self._extra_args.extend(args)
self._extra_kwargs.update(kwargs)
return super().extra(*args, **kwargs)
def _clone(self, *args, **kwargs):
clone = super()._clone(*args, **kwargs)
clone._polymorphic_select_related = (
self._polymorphic_select_related.copy()
)
clone._polymorphic_prefetch_related = (
self._polymorphic_prefetch_related.copy()
)
clone._annotate_kwargs = (
self._annotate_kwargs.copy()
)
clone._annotate_args = (
self._annotate_args.copy()
)
clone._extra_args = self._extra_args.copy()
clone._extra_kwargs = self._extra_kwargs.copy()
clone._polymorphic_filter_args = self._polymorphic_filter_args.copy()
clone._polymorphic_filter_kwargs = (
self._polymorphic_filter_kwargs.copy()
)
return clone
def polymorphic_select_related(self, **kwargs):
"""
Apply select related on descendant model (passed as model name). Usage:
>>> MyBaseModel.objects.polymorphic_select_related(
MyDescendantModel=['related_field1', 'related_field2'],
MyDescendantModel2=['related_field3'],
)
"""
obj = self._clone()
obj._polymorphic_select_related = kwargs
return obj
def polymorphic_prefetch_related(self, **kwargs):
"""
Apply prefetch related on descendant model (passed as model name).
Usage:
>>> MyBaseModel.objects.polymorphic_prefetch_related(
MyDescendantModel=['related_field1', 'related_field2'],
MyDescendantModel2=['related_field3'],
)
"""
obj = self._clone()
obj._polymorphic_prefetch_related = kwargs
return obj
def polymorphic_filter(self, *args, **kwargs):
"""
Extra filter for descendat model
Might be useful (as a workaround) for forcing join on descendant model
in some cases with prefetch_related with queryset with polymorphic
objects.
"""
obj = self._clone()
obj._polymorphic_filter_args.extend(args)
obj._polymorphic_filter_kwargs.update(kwargs)
return obj
class PolymorphicBase(models.base.ModelBase):
"""
Looking for classes in all classes that inherit from class polymorphic.
Adding:
- polymorphic models to class as attributes
- polymorphic descendants to bases class as attributes
- set is_polymorphic flag to bases class if class is polymorphic
"""
def __new__(cls, name, bases, attrs):
full_mro = set(
tuple([mro for b in bases for mro in b.__mro__]) + bases
)
base_polymorphic = set(
[b for b in full_mro if issubclass(b, Polymorphic)]
)
attrs['_polymorphic_descendants'] = []
attrs['_polymorphic_models'] = base_polymorphic
new_class = super().__new__(cls, name, bases, attrs)
for polymorphic_class in base_polymorphic:
# Set is_polymorphic flag for classes that use polymorphic
polymorphic_class.is_polymorphic = True
if new_class._meta.proxy:
continue
try:
polymorphic_class._polymorphic_descendants.append(new_class)
except AttributeError:
# The exception is for class Polymorphic
pass
return new_class
class Polymorphic(models.Model):
"""
Polymorphic model.
Added content type field to model
Example:
>>> class Test(Polymorphic, models.Model, metaclass=PolymorphicBase):
pass
"""
content_type = models.ForeignKey(ContentType, blank=True, null=True)
polymorphic_objects = PolymorphicQuerySet.as_manager()
objects = models.Manager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
"""
Save object content type
"""
if not self.content_type:
self.content_type = ContentType.objects.get_for_model(self)
super().save(*args, **kwargs)
@property
def last_descendant(self):
return self.content_type.get_object_for_this_type(pk=self.pk)
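
# Illustrative usage sketch (assumption, not part of this module): a concrete hierarchy
# declares its base with this metaclass and queries it through `polymorphic_objects`,
# which yields instances of the most specific descendant model. The related-field name
# below is hypothetical:
#
#     class BaseObject(Polymorphic, models.Model, metaclass=PolymorphicBase):
#         pass
#
#     class Asset(BaseObject):
#         pass
#
#     BaseObject.polymorphic_objects.polymorphic_select_related(
#         Asset=['some_relation'],
#     )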
``` |
{
"source": "6sibilings/daniel-Allen",
"score": 2
} |
#### File: daniel-Allen/acp/cli.py
```python
import argparse
import logging
import os.path
import sys
import time
from collections import OrderedDict
from .basebinary import *
from .client import ACPClient
from .exception import *
from .property import ACPProperty
class _ArgParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write("error: {0}\n".format(message))
#self.print_help()
sys.exit(2)
def _cmd_not_implemented(*unused):
raise ACPCommandLineError("command handler not implemented")
def _cmd_listprop(unused):
print "\nSupported properties:\n"
prop_names = ACPProperty.get_supported_property_names()
for name in prop_names:
print "{0}: {1}".format(name, ACPProperty.get_property_info_string(name, "description"))
print
def _cmd_helpprop(args):
prop_name = args.pop()
description = ACPProperty.get_property_info_string(prop_name, "description")
prop_type = ACPProperty.get_property_info_string(prop_name, "type")
validation = ACPProperty.get_property_info_string(prop_name, "validation")
s = "{0} ({1}".format(description, prop_type)
if validation:
s += ", {0})".format(validation)
else:
s += ")"
print s
def _cmd_getprop(client, args):
prop_name = args.pop()
prop = client.get_properties([prop_name])
if len(prop):
print ACPProperty(prop_name, prop[0].value)
def _cmd_setprop(client, args):
prop_name, prop_value = args
prop_type = ACPProperty.get_property_info_string(prop_name, "type")
prop = ACPProperty()
if prop_type == "dec":
try:
prop = ACPProperty(prop_name, int(prop_value))
except ValueError:
logging.error("value for \"{0}\" has the wrong type, should be {0}".format(prop_name, prop_type))
elif prop_type == "hex":
try:
#XXX: this is not the right way to do exceptions
prop = ACPProperty(prop_name, int(prop_value, 16))
except ValueError:
logging.error("value for \"{0}\" has the wrong type, should be {0}".format(prop_name, prop_type))
elif prop_type == "mac":
#XXX: not catching our exception
prop = ACPProperty(prop_name, prop_value)
elif prop_type == "bin":
prop = ACPProperty(prop_name, prop_value.decode("hex"))
elif prop_type == "str":
prop = ACPProperty(prop_name, prop_value)
elif prop_type in ["cfb", "log"]:
logging.error("unsupported prop type: {0}".format(prop_type))
client.set_properties({prop_name : prop})
def _cmd_dumpprop(client, unused):
prop_names = ACPProperty.get_supported_property_names()
properties = client.get_properties(prop_names)
for prop in properties:
padded_description = ACPProperty.get_property_info_string(prop.name, "description").ljust(32, " ")
print "{0}: {1}".format(padded_description, prop)
def _cmd_acpprop(client, unused):
props_reply = client.get_properties(["prop"])
props_raw = props_reply[0].value
props = ""
for i in range(len(props_raw) / 4):
props += "{0}\n".format(props_raw[i*4:i*4+4])
print props
def _cmd_dump_syslog(client, unused):
print "{0}".format(client.get_properties(["logm"])[0])
def _cmd_reboot(client, unused):
print "Rebooting device"
client.set_properties({"acRB" : ACPProperty("acRB", 0)})
def _cmd_factory_reset(client, unused):
print "Performing factory reset"
client.set_properties(OrderedDict([("acRF",ACPProperty("acRF", 0)), ("acRB",ACPProperty("acRB", 0))]))
def _cmd_flash_primary(client, args):
fw_path = args.pop()
if os.path.exists(fw_path):
with open(fw_path, "rb") as fw_file:
fw_data = fw_file.read()
print "Flashing primary firmware partition"
client.flash_primary(fw_data)
else:
logging.error("Basebinary not readable at path: {0}".format(fw_path))
def _cmd_do_feat_command(client, unused):
print client.get_features()
def _cmd_decrypt(args):
(inpath, outpath) = args
with open(inpath, "rb") as infile:
indata = infile.read()
#XXX: lazy, fixme
try:
outdata = Basebinary.parse(indata)
except BasebinaryError:
raise
else:
with open(outpath, "wb") as outfile:
outfile.write(outdata)
def _cmd_extract(args):
(inpath, outpath) = args
with open(inpath, "rb") as infile:
indata = infile.read()
#XXX: lazy, fixme
try:
outdata = Basebinary.extract(indata)
except BasebinaryError:
raise
else:
with open(outpath, "wb") as outfile:
outfile.write(outdata)
def _cmd_srp_test(client, unused):
print "SRP testing"
client.authenticate_AppleSRP()
client.close()
def main():
#TODO: add CLI arg for verbosity
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
parser = _ArgParser()
parameters_group = parser.add_argument_group("AirPort client parameters")
parameters_group.add_argument("-t", "--target", metavar="address", help="IP address or hostname of the target router")
parameters_group.add_argument("-p", "--password", metavar="password", help="router admin password")
airport_client_group = parser.add_argument_group("AirPort client commands")
airport_client_group.add_argument("--listprop", action="store_const", const=True, help="list supported properties")
airport_client_group.add_argument("--helpprop", metavar="property", nargs=1, help="print the description of the specified property")
airport_client_group.add_argument("--getprop", metavar="property", nargs=1, help="get the value of the specified property")
airport_client_group.add_argument("--setprop", metavar=("property", "value"), nargs=2, help="set the value of the specified property")
airport_client_group.add_argument("--dumpprop", action="store_const", const=True, help="dump values of all supported properties")
airport_client_group.add_argument("--acpprop", action="store_const", const=True, help="get acp acpprop list")
airport_client_group.add_argument("--dump-syslog", action="store_const", const=True, help="dump the router system log")
airport_client_group.add_argument("--reboot", action="store_const", const=True, help="reboot device")
airport_client_group.add_argument("--factory-reset", action="store_const", const=True, help="RESET EVERYTHING and reboot; you have been warned!")
airport_client_group.add_argument("--flash-primary", metavar="firmware_path", nargs=1, help="flash primary partition firmware")
airport_client_group.add_argument("--do-feat-command", action="store_const", const=True, help="send 0x1b (feat) command")
basebinary_group = parser.add_argument_group("Basebinary commands")
basebinary_group.add_argument("--decrypt", metavar=("inpath", "outpath"), nargs=2, help="decrypt the basebinary")
basebinary_group.add_argument("--extract", metavar=("inpath", "outpath"), nargs=2, help="extract the gzimg contents")
test_group = parser.add_argument_group("Test arguments")
test_group.add_argument("--srp-test", action="store_const", const=True, help="SRP (requires OS X)")
args_dict = vars(parser.parse_args())
#TODO: give each element a dict containing parameter requirements/argparse infos, then generate parser based on this
commands = {
"listprop": "local",
"helpprop": "local",
"getprop": "remote_admin",
"setprop": "remote_admin",
"dumpprop": "remote_admin",
"acpprop": "remote_admin",
"dump_syslog": "remote_admin",
"reboot": "remote_admin",
"factory_reset": "remote_admin",
"flash_primary": "remote_admin",
"do_feat_command": "remote_noauth",
"decrypt": "local",
"extract": "local",
"srp_test": "remote_admin",
}
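    # Example invocations (sketch; the module entry point is an assumption):
    #   python -m acp.cli --listprop                        # local command, no target needed
    #   python -m acp.cli -t 10.0.1.1 -p secret --dumpprop  # remote_admin command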
target = args_dict["target"]
password = args_dict["password"]
command_args = {k: v for k, v in args_dict.items() if k in commands and v is not None}
if len(command_args) == 0:
logging.error("must specify a command")
elif len(command_args) == 1:
#TODO: clean this up a bit
cmd, arg = command_args.popitem()
assert commands[cmd] in ["local", "remote_noauth", "remote_admin"], "unknown command type \"{0}\"".format(commands[cmd])
cmd_handler_name = "_cmd_{0}".format(cmd)
cmd_handler = globals().get(cmd_handler_name, _cmd_not_implemented)
if commands[cmd] == "local":
cmd_handler(arg)
if commands[cmd] == "remote_noauth":
if target is not None:
c = ACPClient(target)
c.connect()
cmd_handler(c, arg)
c.close()
else:
logging.error("must specify a target")
if commands[cmd] == "remote_admin":
if target is not None and password is not None:
c = ACPClient(target, password)
c.connect()
cmd_handler(c, arg)
c.close()
else:
logging.error("must specify a target and administrator password")
else:
logging.error("multiple commands not supported, choose only one")
```
#### File: daniel-Allen/acp/keystream.py
```python
ACP_STATIC_KEY = "5b6faf5d9d5b0e1351f2da1de7e8d673".decode("hex")
def generate_acp_keystream(length):
"""Get key used to encrypt the header key (and some message data?)
Args:
length (int): length of keystream to generate
Returns:
String of requested length
Note:
Keystream repeats every 256 bytes
"""
key = ""
key_idx = 0
while (key_idx < length):
key += chr((key_idx + 0x55 & 0xFF) ^ ord(ACP_STATIC_KEY[key_idx % len(ACP_STATIC_KEY)]))
key_idx += 1
return key
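# Usage sketch (Python 2, matching this module): the keystream is XORed
# byte-for-byte with the data it protects, e.g.
#   ks = generate_acp_keystream(len(data))
#   plain = "".join(chr(ord(c) ^ ord(k)) for c, k in zip(data, ks))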
``` |
{
"source": "6sixteen/VMPE",
"score": 3
} |
#### File: backend/dataProcess/imgProcess.py
```python
import numpy as np
import cv2
def getVHistogram(img, number=256):
'''
    Compute the histogram of the V channel in the image's HSV colour space.
    :param img: BGR image (as read by OpenCV)
    :param number: number of histogram bins
:return:
'''
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_img)
hist = cv2.calcHist([v], [0], None, [number], [0.0, 255.0])
res = []
for i in range(number):
res.append([i, int(hist[i, 0])])
return res
def getGrayHistogram(img, bin=256):
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hist = cv2.calcHist([img_gray], [0], None, [bin], [0.0, 255.0])
res = []
for i in range(bin):
res.append([i, int(hist[i, 0])])
return res
if __name__ == '__main__':
img = cv2.imread("D:\codeTest\parameterExp\\backend-flask\data\img\Image_20210812150343338.bmp")
res = getVHistogram(img)
print(res)
Writer = "plh"
```
#### File: backend/dataProcess/parameterCluster.py
```python
import sys
import os
root = os.path.abspath("./")
sys.path.append(os.path.join(root, "dataProcess/graphCluster"))
import numpy as np
from combination import getIntersectionFromMatrixs, getUnionFromMatrces
from mcode_weighted import mcode
from tool import formatChangeGraph3
import json
from matrices import *
from spectralClustering import SpectralClustering
from tool import readTxtPeremeter, formatChangeParameter
def calEdge(p1, p2):
'''
    Exists purely so the interface matches the other edge-weight functions!!
    :param p1: size of the union (∪)
    :param p2: size of the intersection (∩)
:return:
'''
return p2
def calJac(p1, p2):
'''
    :param p1: size of the union (∪)
    :param p2: size of the intersection (∩)
:return:
'''
if p1 == 0:
return 0
return p2 / p1
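# e.g. calJac(10, 4) == 0.4: two parameter sets whose union has 10 elements
# and whose intersection has 4 have a Jaccard similarity of 0.4.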
def calMy(union_matrix, insection_matrix, have_parameter_image_number, parameter_image):
'''
    :param union_matrix: the union (∪) of the two parameter sets
    :param insection_matrix: the intersection (∩) of the two parameter sets
:return:
'''
u = 0.0
n = 0.0
for union in union_matrix:
a = str(union[4])
str1 = str(union[0]) + '_' + str(union[1]) + '_' + str(union[2]) + '_' + str(union[3]) + '_' + a.split('.')[0]
u += (float(parameter_image[str1]) / float(have_parameter_image_number))
for insection in insection_matrix:
a = str(insection[4])
str2 = str(insection[0]) + '_' + str(insection[1]) + '_' + str(insection[2]) + '_' + str(insection[3]) + '_' + a.split('.')[0]
n += (float(parameter_image[str2]) / float(have_parameter_image_number))
if u == 0:
return 0
else:
return n / u
# def getGraph(parameter_matrices, edge):
# '''
# 得到参数集合的图结构数据
# :param parameter_matrices:
# :return:
# res [[key, key1, w, key_m, key1_m, union_len, insection_len, insection_matrix],...]
# key: 图片1序号
# key2: 图片2序号
# w:边的权重
# key_m: 图片1参数集合的数量
# key1_m: 图片2参数集合的数量
# union_len: 图片1和图片2参数集合并集的数量
# insection_len; 图片1和图片2参数集合交集的数量
# '''
#
# print(parameter_matrices)
#
# # 边的数值选择
# if edge == "jac":
# claw = calJac
# if edge == "parameter_number":
# claw = calEdge
#
# res = []
# img_indices = parameter_matrices.keys()
# com = {}
# link_num = 0
# for key in img_indices:
# for key1 in img_indices:
# comkey = str(key) + "_" + str(key1)
# comkey2 = str(key1) + "_" + str(key)
# if key != key1:
# if comkey not in com.keys() and comkey2 not in com.keys():
# insection_matrix = getIntersectionFromMatrixs([parameter_matrices[key], parameter_matrices[key1]])
# insection_len = len(insection_matrix)
# union_matrix = getUnionFromMatrces([parameter_matrices[key], parameter_matrices[key1]])
# union_len = len(union_matrix)
# key_m = np.shape(parameter_matrices[key])[0]
# key1_m = np.shape(parameter_matrices[key1])[0]
# w = claw(union_len, insection_len)
# res.append([key, key1, w, key_m, key1_m, union_len, insection_len, insection_matrix])
#
# com[comkey] = link_num
# link_num += 1
# if comkey in com.keys() and comkey2 not in com.keys(): # 为了减少计算
# temp = res[com[comkey]]
# res.append([temp[1], temp[0], temp[2], temp[4], temp[3], temp[5], temp[6], temp[7]])
# com[comkey2] = link_num
# link_num += 1
#
# if comkey not in com.keys() and comkey2 in com.keys(): # 为了减少计算
# temp = res[com[comkey2]]
# res.append([temp[1], temp[0], temp[2], temp[4], temp[3], temp[5], temp[6], temp[7]])
# com[comkey] = link_num
# link_num += 1
# return res
def getGraph(parameter_matrices, edge, have_parameter_image_number, filename11):
'''
    Build the graph-structured data for the parameter sets.
    :param parameter_matrices:
    :return:
    res [[key, key1, w, key_m, key1_m, union_len, insection_len, insection_matrix],...]
    key: index of image 1
    key1: index of image 2
    w: edge weight
    key_m: number of parameter sets of image 1
    key1_m: number of parameter sets of image 2
    union_len: size of the union of the two images' parameter sets
    insection_len: size of the intersection of the two images' parameter sets
'''
# print(parameter_matrices)
    # choose the edge-weight function
if edge == "jac":
claw = calJac
if edge == "parameter_number":
claw = calEdge
parameter_image = readTxtPeremeter(filename11, formatChangeParameter)
res = []
img_indices = parameter_matrices.keys()
com = {}
link_num = 0
for key in img_indices:
for key1 in img_indices:
comkey = str(key) + "_" + str(key1)
comkey2 = str(key1) + "_" + str(key)
if key != key1:
if comkey not in com.keys() and comkey2 not in com.keys():
insection_matrix = getIntersectionFromMatrixs([parameter_matrices[key], parameter_matrices[key1]])
insection_len = len(insection_matrix)
union_matrix = getUnionFromMatrces([parameter_matrices[key], parameter_matrices[key1]])
union_len = len(union_matrix)
key_m = np.shape(parameter_matrices[key])[0]
key1_m = np.shape(parameter_matrices[key1])[0]
w = calMy(union_matrix, insection_matrix, have_parameter_image_number, parameter_image)
res.append([key, key1, w, key_m, key1_m, union_len, insection_len, insection_matrix])
com[comkey] = link_num
link_num += 1
                if comkey in com.keys() and comkey2 not in com.keys():  # reuse the earlier result to avoid recomputing
temp = res[com[comkey]]
res.append([temp[1], temp[0], temp[2], temp[4], temp[3], temp[5], temp[6], temp[7]])
com[comkey2] = link_num
link_num += 1
                if comkey not in com.keys() and comkey2 in com.keys():  # reuse the earlier result to avoid recomputing
temp = res[com[comkey2]]
res.append([temp[1], temp[0], temp[2], temp[4], temp[3], temp[5], temp[6], temp[7]])
com[comkey] = link_num
link_num += 1
return res
def writeGraph(data, filename):
'''
    Write the graph data to both a txt file and a json file.
:param data:
:param filename:
:return:
'''
with open(filename, "w") as f:
for item in data:
line = str(item[0]) + " " + str(item[1]) + " " + str(item[2]) + " " + str(item[3]) + " " + str(
item[4]) + " " + str(item[5]) + " " + str(item[6])
f.write(line)
f.write("\n")
graph_json = formatChangeGraph3(data)
filename_json = filename.replace(".txt", ".json")
with open(filename_json, "w") as f:
json.dump(graph_json, f, indent=4)
def processEdge(data, p):
'''
    Reduce the number of links.
    :param data: the data returned by getGraph
    :param p: fraction of edges to keep for each node
    :return:
    same structure as data, just with some of the edges removed
'''
links = {}
res = []
for i, item in enumerate(data):
if item[0] not in links.keys():
links[item[0]] = [item]
else:
links[item[0]].append(item)
# for key in links.keys():
# link = links[key]
# for item in link:
# print(item[0:-2])
for key in links.keys():
link = links[key]
# print("link",link)
reserve_link_num = int(p * len(link))
if reserve_link_num == 0:
reserve_link_num = 1
link.sort(key=lambda x: x[2], reverse=True)
res.extend(link[0:reserve_link_num])
link.sort(key=lambda x: x[6], reverse=True)
for i, item in enumerate(link):
if i == reserve_link_num:
break
if item not in res:
res.append(item)
# print("res", res)
return res
def getAdjacencyMatrix(graph):
'''
    Build the adjacency matrix of the graph.
:param graph:
:return:
'''
node_source = [item[0] for item in graph]
node_target = [item[1] for item in graph]
node_source.extend(node_target)
node = list(set(node_source))
node.sort(key=lambda x: int(x))
print(node)
n_node = len(node)
    adj_matrix = np.zeros((n_node, n_node))  # diagonal stays 0; connected node pairs are set to 1
change_img2i = {}
change_i2img = {}
for i, index in enumerate(node):
change_img2i[index] = i
change_i2img[i] = index
for item in graph:
adj_matrix[change_img2i[item[0]], change_img2i[item[1]]] = 1
adj_matrix[change_img2i[item[1]], change_img2i[item[0]]] = 1
return adj_matrix, change_i2img
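# e.g. for edges ("3", "5") and ("5", "7") the nodes sort to ["3", "5", "7"],
# giving a symmetric 3x3 0/1 matrix with adj[0, 1] = adj[1, 2] = 1 and the
# index-to-image map {0: "3", 1: "5", 2: "7"}.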
def graphCluster(graph_data, method, n_clusters, matrix_name):
'''
:param graph_data: [[],[],[]....] list
:param method:
:param n_clusters:
:param matrix_name:
:return:
'''
res = {}
if method == "mcode":
res = mcode(graph_data)
if method == "spectralClustering":
adjacency_matrix, change = getAdjacencyMatrix(graph_data)
# print(adjacency_matrix)
if matrix_name == "BetheHessian":
matrix_fun = BetheHessian
spectral_labels, eigvals, eigvects, W = SpectralClustering(n_clusters, matrix_fun(adjacency_matrix),
matrix_name)
spectral_labels_np = np.array(spectral_labels)
for i in range(n_clusters):
temp = [change[item] for item in np.where(spectral_labels_np == i)[0]]
res[i] = temp
else:
res = mcode(graph_data)
return res
if __name__ == '__main__':
# test_data = {"1":[[1,2,3],[1,2,4]],"2":[[1,2,3],[1,2,5]]}
# getGraph(test_data)
Writer = "plh"
```
#### File: backend/dataProcess/tool.py
```python
import pandas as pd
import os.path as osp
import os
import numpy as np
import json
import time
import glob
import yaml
def bit():
import blocksmith
key = '<KEY>'
address = blocksmith.EthereumWallet.generate_address(key)
print(address)
# 0x1269645a46a3e86c1a3c3de8447092d90f6f04ed
checksum_address = blocksmith.EthereumWallet.checksum_address(address)
print(checksum_address)
# 0x1269645a46A3e86c1a3C3De8447092D90f6F04ED
def data2number(data, splitStr="/"):
"""
    Extract the parameter value of every variable.
    Entries that cannot be parsed as numbers (failed results) become -1.
:param data: ['1', 'Threshold0/InputControl/minGray/20', 'Threshold0/InputControl/maxGray/40', 'OpeningCircle0/InputControl/2', 'SelectShape1/InputControl/min/0.8', 'FinalResult0/0', 'FinalResult/Error']
2074,Threshold0/InputControl/minGray/22,Threshold0/InputControl/maxGray/44,OpeningCircle0/InputControl/2,SelectShape1/InputControl/min/0.83,FinalResult0/2,FinalResult1/12459.9271205567,FinalResult2/18642.1
:param splitStr:
:return:
[1.0, 20.0, 40.0, 2.0, 0.8, 0.0, -1]
"""
returnData = []
for item in data:
itemSplitDot = item.split(splitStr)
param = itemSplitDot[len(itemSplitDot) - 1]
try:
paramF = float(param)
except Exception as ex:
paramF = -1
returnData.append(paramF)
return returnData
# file-related helpers
def list2csv(fileName, list):
    test = pd.DataFrame(data=list)  # wrap the list in a DataFrame so it can be written out as CSV
test.to_csv(fileName, encoding='gbk')
def getMarixFromCsv(file):
'''
read matrix P from csv
:param file:csv filePath of matrix P
:return: numpy
'''
data = pd.read_csv(file)
# row = list(data.index.values)
# column = list(data.columns.values)
dataNp = data.values
    dataNp = dataNp[:, 1:]  # drop the index column
return dataNp
def addJson(file, key, value):
'''
    Add a new key-value pair to a JSON file; the existing data must be a dict.
:param file:
:param key:
:param value:
:return:
'''
with open(file, "r") as f:
data = json.load(f)
data[key] = value
with open(file, "w") as f:
json.dump(data, f, indent=4)
def addJson1(file, key1, key2, value):
with open(file, "r") as f:
data = json.load(f)
if key1 not in data.keys():
data[key1] = {}
data[key1][key2] = value
with open(file, "w") as f:
json.dump(data, f, indent=4)
def judgeParametersCal(img_names, json_file):
'''
    Check whether the parameter sets for these images have already been computed; if so, return the matching entry.
:param img_names:
:param json_file:
:return:
'''
img_names_l = len(img_names)
with open(json_file, "r") as f:
data = json.load(f)
for key, item in data.items():
        # check whether img_names is a subset of item["imgNames"]
        # 1. len(A ∩ B) == len(A)
intersection = list(set(img_names).intersection(set(item["imgNames"])))
if img_names_l == len(intersection):
return key, item
return None, None
def imageIndices(img_file, json_file, suffix="*.bmp"):
img_paths = glob.glob(osp.join(img_file, suffix))
res = {}
for i, img_path in enumerate(img_paths):
img_name = img_path.split("\\")[-1]
res[img_name] = i
with open(json_file, "w", ) as f:
json.dump(res, f, indent=4)
return res
def getParameters(img_indices, filter_config, json_file):
with open(json_file, "r") as f:
data = json.load(f)
if filter_config in data.keys() and img_indices in data[filter_config].keys():
return data[filter_config][img_indices]
else:
return None
def getParameterSetsIntersectionFromCsv(img_names, filter_config, filter_method):
    '''Currently fails because of file-import problems; creating a new file might solve it.'''
with open(yaml_file, 'r') as f:
cfg = yaml.safe_load(f)
print(cfg)
matrices = []
for img_name in img_names:
img_file = osp.join(cfg["caseBaseFile"], "parameter/splitData", "suc_" + img_name + ".csv")
matrix = getMarixFromCsv(img_file)
matrix = filter_method(matrix, filter_config)
matrix = matrix[:, 0:-2]
matrices.append(matrix)
insection_matrix = getIntersectionFromMatrixs(matrices)
return insection_matrix
# end of file-related helpers
def imgs2Indices(img_names, img_indices):
'''
    Convert image names to their indices.
:param img_names:
:param img_indices:
:return:
'''
res = ""
for img in img_names:
num = img_indices[img]
res = res + "_" + str(num)
return res[1:]
def searchCombination(file, value, keyCompare):
'''
    Search the index file to check whether a combination file has already been generated.
    :param file: string
    :param value: {}
    :param keyCompare: [string,...]
    :return:
    flag: True if the data already exists
    item["fileName"]: the file name
'''
with open(file, "r") as f:
data = json.load(f)
for key, item in data.items():
flag = True
for key2 in keyCompare:
difference = list(set(value[key2]).difference(set(item[key2])))
difference2 = list(set(item[key2]).difference(set(value[key2])))
if len(difference) != 0 or len(difference2) != 0:
flag = False
break
if flag:
return flag, key
return False, []
def mkdir(file):
'''
    Create a directory.
    :param file:
    A relative path may be used (the current run location is prepended automatically),
    or an absolute path.
:return:
'''
if not osp.exists(file):
os.mkdir(file)
def getImgNames(file):
'''
    Walk the image directory and collect all image file names.
:param file:
:return:
'''
for root, dirs, files in os.walk(file):
imgNames = files
return imgNames
# combinatorics helpers
def nFactorial(start, final, step=1):
'''
    Product of the integers from start to final, i.e. a partial factorial.
:param start:
:param final:
:param step:
:return:
'''
sum = 1
for i in range(start, final + 1, step):
sum = sum * i
return sum
def calCnm(n, m):
'''
    Binomial coefficient C(n, m).
    :param n: the larger of the two numbers
:param m:
:return:
'''
a = nFactorial(n - m + 1, n)
b = nFactorial(1, m)
return int(a / b)
def calCnmSum(n, m):
'''
sum of Cn1+Cn2+...+Cnm
    :param n: the larger of the two numbers
:param m:
:return:
'''
sum = 0
for i in range(1, m + 1):
sum = sum + calCnm(n, i)
return sum
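# e.g. calCnm(5, 2) == 10, and calCnmSum(3, 2) == C(3,1) + C(3,2) == 3 + 3 == 6.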
# end of combinatorics helpers
# format-conversion helpers
def formatChange(data):
'''
    Convert the parameter-set part from list to np.array, then drop the result columns.
:param data: dict
{
"imgNames":
[[],[],...],
...
}
:return: res: dict
{
"imgNames":
[[],[],...] :np.array,
...
}
'''
res = data.copy()
for key, item in res.items():
if len(item) != 0:
itemN = np.array(item)
itemN = itemN[:, :-2]
res[key] = itemN
else:
res[key] = np.array(item)
return res
def readTxt(filename, formatChange=None):
'''
    Read a txt file and return [[row],[row]...].
:param filename:
:return:
'''
res = []
with open(filename, "r") as f:
for line in f.readlines():
line = line.strip("\n").split(" ")
if formatChange is not None:
line = formatChange(line)
res.append(line)
return res
def readTxtPeremeter(filename, formatChange=None):
'''
    Read a txt file and return a dict of rows keyed by the name extracted by formatChange.
:param filename:
:return:
'''
res = {}
with open(filename, "r") as f:
for line in f.readlines():
line = line.strip("\n").split(" ")
if formatChange is not None:
name, line = formatChange(line)
res[name] = line
return res
# end of format-conversion helpers
def formatChangeParameter(line):
newline = line[0] + '_' + line[1] + '_' + line[2] + '_' + line[3] + '_' + line[4]
return newline, line[5]
def formatChangeGraph(line):
newline = []
newline.append(line[0])
newline.append(line[1])
newline.append(float(line[2]))
return newline
def formatChangeGraph2(graph):
'''
    The data returned by getGraph is fairly detailed; process it further to make the next clustering step easier.
:param graph:
:return:
'''
newgraph = []
for item in graph:
newitem = []
newitem.append(item[0])
newitem.append(item[1])
newitem.append(item[2])
newgraph.append(newitem)
return newgraph
def formatChangeGraph3(graph, cluster_matrix=None):
'''
    Convert the data returned by getGraph into the format expected by the force-directed graph.
:param graph:
:return:
'''
res = {}
nodestemp = {}
links = []
for item in graph:
if item[0] not in nodestemp.keys():
nodestemp[item[0]] = item[3]
if item[1] not in nodestemp.keys():
nodestemp[item[1]] = item[4]
if item[2] != 0:
link = {}
link["source"] = item[0]
link["target"] = item[1]
link["unionDec"] = item[2]
links.append(link)
res["links"] = links
nodes = []
for key in nodestemp.keys():
node = {}
node["id"] = key
node["unionNum"] = nodestemp[key]
if cluster_matrix is not None:
node["cluster_matrix"] = cluster_matrix[key].tolist()
nodes.append(node)
res["nodes"] = nodes
return res
def formatChangeGraph4(graph, cluster_res,add_info):
'''
    Convert the getGraph output into the force-directed graph format and merge the extra node info from add_info into the result.
:param graph:
:return:
'''
res = {}
nodestemp = {}
links = []
for item in graph:
if item[0] not in nodestemp.keys():
nodestemp[item[0]] = item[3]
if item[1] not in nodestemp.keys():
nodestemp[item[1]] = item[4]
if item[2] != 0:
link = {}
link["source"] = item[0]
link["target"] = item[1]
link["unionDec"] = item[2]
links.append(link)
res["links"] = links
nodes = []
for key in cluster_res:
node = {}
node["id"] = key
node["unionNum"] = add_info[key]["n_success_filter"]
for key1 in add_info[key].keys():
node[key1] = add_info[key][key1]
nodes.append(node)
res["nodes"] = nodes
return res
# file-naming helpers
def randomFileName(prefix="combination", suffix=".json"):
'''
    Return a randomly generated file name.
:return:
'''
t = time.time()
fileName = prefix + str(round(t)) + suffix
return fileName
def createFilename(prefix, suffix, connector, parameters):
filename = prefix
for key in parameters.keys():
filename = filename + connector + str(key) + connector + str(parameters[key])
filename = filename + suffix
return filename
# others
def calTotalParameterSets(cfg):
n = 1
temp = 1000
parameterConfig = cfg["parameterConfig"]
for key in parameterConfig.keys():
if parameterConfig[key]["use"]:
            n = n * (parameterConfig[key]["maxValue"] * temp - parameterConfig[key]["minValue"] * temp) / (parameterConfig[key]["step"] * temp) + n  # +n because both endpoints of the range are counted
return int(n)
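# e.g. one enabled parameter with minValue 0, maxValue 10, step 2 yields n = 6
# (values 0, 2, 4, 6, 8, 10); a second one with minValue 0, maxValue 1, step 0.5
# multiplies that by 3, giving 18 parameter sets in total.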
def removeBracketList(l):
'''
    Flatten the list, removing all nested brackets.
:param l:
:return:
'''
if not isinstance(l,list):
return l
new_l = []
for item in l:
if isinstance(item, list):
new_l.extend(removeBracketList(item))
else:
new_l.append(item)
return new_l
def list2str(l,connector='_',format=None):
'''
    Join the contents of a list into a single string.
:param l:
:param connector:
:param format:
:return:
'''
if not isinstance(l,list):
return str(l)
res = ""
for item in l:
if format is not None:
res = res+format(item)+connector
else:
res = res + str(item) + connector
return res[:-1]
def getIds(l,sort=True):
'''
    Build the combined image id string.
:param l: [1,2,3,4]
:param sort:
:return: "1_2_3_4"
'''
if len(l)==0:
return []
res = ""
l_c = l.copy()
if sort:
l_c.sort()
for id in l_c:
res = res + "_" + str(id)
return res[1:]
if __name__ == '__main__':
l=[15,[2,4]]
a = removeBracketList(l)
print(a)
input()
# l = 15
# res = list2str(l)
# print(res)
# input()
# l = [1,2,3]
# l = [[1,2],3]
# l = [[1],2,3]
# new_l = reBracketList(l)
# print(new_l)
# input()
# yaml_file = "../config.yml"
# with open(yaml_file, 'r') as f:
# cfg = yaml.safe_load(f)
# n = calTotalParameterSets(cfg)
# print(n)
# input()
# img_names = ["Image_20210812150340363.bmp",
# "Image_20210812150343338.bmp",
# "Image_20210812150345651.bmp"]
# filter_config ={"minValue":18000,"maxValue":25000}
#
# matrix = getParameterSetsIntersectionFromCsv(img_names, filter_config, fileterSingleMatrix_case1)
# print(matrix)
# input()
filename = "D:\codeTest\parameterExp\data\case1\graph\graph_imgs_0_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19_20_21_edge_jac_filter_23000_25000_process_1_0.3.txt"
res = readTxt(filename, formatChangeGraph)
# print(res)
# imgNames = getImgNames("D:\codeTest\parameterExp\data\case1\img")
# print("imgNames",imgNames)
# a = np.array([[]])
# print(np.shape(a))
# print(a)
#
# img_file = "D:\codeTest\parameterExp\data\case1\img"
# json_file = "D:\codeTest\parameterExp\data\case1\combination\img_indices.json"
# res = imageIndices(img_file, json_file)
# a = np.array([[1,2,3],[4,5,6]])
# b = a.tolist()
# bit()
# img_names = ["Image_20210812150340363.bmp",
# "Image_20210812150343338.bmp",
# "Image_20210812150345651.bmp"]
# img_indices = {
# "Image_20210812150340363.bmp": 0,
# "Image_20210812150343338.bmp": 1,
# "Image_20210812150345651.bmp": 2,
# "Image_20210812150348106.bmp": 3,
# "Image_20210812150439515.bmp": 4,
# "Image_20210812150442099.bmp": 5,
# "Image_20210812150446018.bmp": 6,
# "Image_20210812150449667.bmp": 7,
# "Image_20210812150507378.bmp": 8,
# "Image_20210812150735634.bmp": 9,
# "Image_20210812150738139.bmp": 10,
# "Image_20210812150742075.bmp": 11,
# "Image_20210812150745340.bmp": 12,
# "Image_20210812150748010.bmp": 13,
# "Image_20210812150752110.bmp": 14,
# "Image_20210812150754923.bmp": 15,
# "Image_20210812150757138.bmp": 16,
# "Image_20210812150800770.bmp": 17,
# "Image_20210812150954922.bmp": 18,
# "Image_20210812151017347.bmp": 19,
# "Image_20210812151053418.bmp": 20,
# "Image_20210812151121185.bmp": 21
# }
# res = imgs2Indices(img_names, img_indices)
# print(res)
Writer = "plh"
``` |
{
"source": "6tail/lunar-python",
"score": 3
} |
#### File: lunar-python/lunar_python/LunarTime.py
```python
from . import NineStar
from .util import LunarUtil
class LunarTime:
"""
    Shichen (two-hour period of the traditional Chinese day)
"""
def __init__(self, lunar_year, lunar_month, lunar_day, hour, minute, second):
from . import Lunar
self.__lunar = Lunar.fromYmdHms(lunar_year, lunar_month, lunar_day, hour, minute, second)
self.__zhiIndex = LunarUtil.getTimeZhiIndex("%02d:%02d" % (hour, minute))
self.__ganIndex = (self.__lunar.getDayGanIndexExact() % 5 * 2 + self.__zhiIndex) % 10
@staticmethod
def fromYmdHms(lunar_year, lunar_month, lunar_day, hour, minute, second):
return LunarTime(lunar_year, lunar_month, lunar_day, hour, minute, second)
def getGan(self):
return LunarUtil.GAN[self.__ganIndex + 1]
def getZhi(self):
return LunarUtil.ZHI[self.__zhiIndex + 1]
def getGanZhi(self):
return self.getGan() + self.getZhi()
def getShengXiao(self):
return LunarUtil.SHENGXIAO[self.__zhiIndex + 1]
def getPositionXi(self):
return LunarUtil.POSITION_XI[self.__ganIndex + 1]
def getPositionXiDesc(self):
return LunarUtil.POSITION_DESC[self.getPositionXi()]
def getPositionYangGui(self):
return LunarUtil.POSITION_YANG_GUI[self.__ganIndex + 1]
def getPositionYangGuiDesc(self):
return LunarUtil.POSITION_DESC[self.getPositionYangGui()]
def getPositionYinGui(self):
return LunarUtil.POSITION_YIN_GUI[self.__ganIndex + 1]
def getPositionYinGuiDesc(self):
return LunarUtil.POSITION_DESC[self.getPositionYinGui()]
def getPositionFu(self):
return LunarUtil.POSITION_FU[self.__ganIndex + 1]
def getPositionFuDesc(self):
return LunarUtil.POSITION_DESC[self.getPositionFu()]
def getPositionCai(self):
return LunarUtil.POSITION_CAI[self.__ganIndex + 1]
def getPositionCaiDesc(self):
return LunarUtil.POSITION_DESC[self.getPositionCai()]
def getChong(self):
return LunarUtil.CHONG[self.__zhiIndex + 1]
def getChongGan(self):
return LunarUtil.CHONG_GAN[self.__ganIndex + 1]
def getChongGanTie(self):
return LunarUtil.CHONG_GAN_TIE[self.getGan()]
def getChongShengXiao(self):
chong = self.getChong()
for i in range(0, len(LunarUtil.ZHI)):
if LunarUtil.ZHI[i] == chong:
return LunarUtil.SHENGXIAO[i]
return ""
def getChongDesc(self):
return "(" + self.getChongGan() + self.getChong() + ")" + self.getChongShengXiao()
def getSha(self):
return LunarUtil.SHA[self.getZhi()]
def getNaYin(self):
return LunarUtil.NAYIN[self.getGanZhi()]
def getTianShen(self):
offset = LunarUtil.ZHI_TIAN_SHEN_OFFSET[self.__lunar.getDayZhiExact()]
return LunarUtil.TIAN_SHEN[(self.__zhiIndex + offset) % 12 + 1]
def getTianShenType(self):
return LunarUtil.TIAN_SHEN_TYPE[self.getTianShen()]
def getTianShenLuck(self):
return LunarUtil.TIAN_SHEN_TYPE_LUCK[self.getTianShenType()]
def getYi(self):
"""
        Get the auspicious activities (Yi) for this hour.
        :return: auspicious activities
"""
return LunarUtil.getTimeYi(self.__lunar.getDayInGanZhiExact(), self.getGanZhi())
def getJi(self):
"""
        Get the inauspicious activities (Ji) for this hour.
        :return: inauspicious activities
"""
return LunarUtil.getTimeJi(self.__lunar.getDayInGanZhiExact(), self.getGanZhi())
def getNineStar(self):
solar_ymd = self.__lunar.getSolar().toYmd()
jie_qi = self.__lunar.getJieQiTable()
asc = False
if jie_qi["冬至"] <= solar_ymd < jie_qi["夏至"]:
asc = True
start = 7 if asc else 3
day_zhi = self.__lunar.getDayZhi()
if day_zhi in "子午卯酉":
start = 1 if asc else 9
elif day_zhi in "辰戌丑未":
start = 4 if asc else 6
index = start + self.__zhiIndex - 1 if asc else start - self.__zhiIndex - 1
if index > 8:
index -= 9
if index < 0:
index += 9
return NineStar.fromIndex(index)
def getGanIndex(self):
return self.__ganIndex
def getZhiIndex(self):
return self.__zhiIndex
def __str__(self):
return self.toString()
def toString(self):
return self.getGanZhi()
def getXun(self):
"""
        Get the Xun (decade of the sexagenary cycle) that this hour's ganzhi belongs to.
        :return: Xun
"""
return LunarUtil.getXun(self.getGanZhi())
def getXunKong(self):
"""
        Get the XunKong (void branches) for this hour.
        :return: XunKong (void)
"""
return LunarUtil.getXunKong(self.getGanZhi())
def getMinHm(self):
hour = self.__lunar.getHour()
if hour < 1:
return "00:00"
elif hour > 22:
return "23:00"
return "%02d:00", hour - 1 if hour % 2 == 0 else hour
def getMaxHm(self):
hour = self.__lunar.getHour()
if hour < 1:
return "00:59"
elif hour > 22:
return "23:59"
return "%02d:59", hour + 1 if hour % 2 != 0 else hour
```
#### File: lunar-python/lunar_python/LunarYear.py
```python
from . import Solar
from .util import ShouXingUtil, LunarUtil
class LunarYear:
"""
    Lunar (Chinese calendar) year
"""
__LEAP_11 = (75, 94, 170, 238, 265, 322, 389, 469, 553, 583, 610, 678, 735, 754, 773, 849, 887, 936, 1050, 1069, 1126, 1145, 1164, 1183, 1259, 1278, 1308, 1373, 1403, 1441, 1460, 1498, 1555, 1593, 1612, 1631, 1642, 2033, 2128, 2147, 2242, 2614, 2728, 2910, 3062, 3244, 3339, 3616, 3711, 3730, 3825, 4007, 4159, 4197, 4322, 4341, 4379, 4417, 4531, 4599, 4694, 4713, 4789, 4808, 4971, 5085, 5104, 5161, 5180, 5199, 5294, 5305, 5476, 5677, 5696, 5772, 5791, 5848, 5886, 6049, 6068, 6144, 6163, 6258, 6402, 6440, 6497, 6516, 6630, 6641, 6660, 6679, 6736, 6774, 6850, 6869, 6899, 6918, 6994, 7013, 7032, 7051, 7070, 7089, 7108, 7127, 7146, 7222, 7271, 7290, 7309, 7366, 7385, 7404, 7442, 7461, 7480, 7491, 7499, 7594, 7624, 7643, 7662, 7681, 7719, 7738, 7814, 7863, 7882, 7901, 7939, 7958, 7977, 7996,
8034, 8053, 8072, 8091, 8121, 8159, 8186, 8216, 8235, 8254, 8273, 8311, 8330, 8341, 8349, 8368, 8444, 8463, 8474, 8493, 8531, 8569, 8588, 8626, 8664, 8683, 8694, 8702, 8713, 8721, 8751, 8789, 8808, 8816, 8827, 8846, 8884, 8903, 8922, 8941, 8971, 9036, 9066, 9085, 9104, 9123, 9142, 9161, 9180, 9199, 9218, 9256, 9294, 9313, 9324, 9343, 9362, 9381, 9419, 9438, 9476, 9514, 9533, 9544, 9552, 9563, 9571, 9582, 9601, 9639, 9658, 9666, 9677, 9696, 9734, 9753, 9772, 9791, 9802, 9821, 9886, 9897, 9916, 9935, 9954, 9973, 9992)
__LEAP_12 = (37, 56, 113, 132, 151, 189, 208, 227, 246, 284, 303, 341, 360, 379, 417, 436, 458, 477, 496, 515, 534, 572, 591, 629, 648, 667, 697, 716, 792, 811, 830, 868, 906, 925, 944, 963, 982, 1001, 1020, 1039, 1058, 1088, 1153, 1202, 1221, 1240, 1297, 1335, 1392, 1411, 1422, 1430, 1517, 1525, 1536, 1574, 3358, 3472, 3806, 3988, 4751, 4941, 5066, 5123, 5275, 5343, 5438, 5457, 5495, 5533, 5552, 5715, 5810, 5829, 5905, 5924, 6421, 6535, 6793, 6812, 6888, 6907, 7002, 7184, 7260, 7279, 7374, 7556, 7746, 7757, 7776, 7833, 7852, 7871, 7966, 8015, 8110, 8129, 8148, 8224, 8243, 8338, 8406, 8425, 8482, 8501, 8520, 8558, 8596, 8607, 8615, 8645, 8740, 8778, 8835, 8865, 8930, 8960, 8979, 8998, 9017, 9055, 9074, 9093, 9112, 9150, 9188, 9237, 9275, 9332, 9351, 9370, 9408, 9427, 9446, 9457, 9465,
9495, 9560, 9590, 9628, 9647, 9685, 9715, 9742, 9780, 9810, 9818, 9829, 9848, 9867, 9905, 9924, 9943, 9962, 10000)
__LEAP = {}
__CACHE = {}
for i in range(0, len(__LEAP_11)):
__LEAP[__LEAP_11[i]] = 13
for i in range(0, len(__LEAP_12)):
__LEAP[__LEAP_12[i]] = 14
def __init__(self, lunar_year):
self.__year = lunar_year
self.__months = []
self.__jieQiJulianDays = []
self.compute()
@staticmethod
def fromYear(lunar_year):
if lunar_year not in LunarYear.__CACHE:
obj = LunarYear(lunar_year)
LunarYear.__CACHE[lunar_year] = obj
else:
obj = LunarYear.__CACHE[lunar_year]
return obj
def compute(self):
from . import Lunar, Solar, LunarMonth
        # solar terms (taken at 12:00 noon), length 27
        jq = []
        # new moons, i.e. the 1st day of each month (at 12:00 noon), length 16
        hs = []
        # number of days in each month, length 15
day_counts = []
current_year = self.__year
year = current_year - 2000
        # from last year's Major Snow (大雪) to next year's Start of Spring (立春)
for i in range(0, len(Lunar.JIE_QI_IN_USE)):
            # exact time of the solar term
t = 36525 * ShouXingUtil.saLonT((year + (17 + i) * 15.0 / 360) * ShouXingUtil.PI_2)
t += ShouXingUtil.ONE_THIRD - ShouXingUtil.dtT(t)
self.__jieQiJulianDays.append(t + Solar.J2000)
            # the solar term reckoned at 12:00 noon
if 0 < i < 26:
jq.append(round(t))
        # the new moon (1st day) on or before the Winter Solstice (冬至)
w = ShouXingUtil.calcShuo(jq[0])
if w > jq[0]:
w -= 29.5306
        # step through each month's new moon (1st day)
for i in range(0, 16):
hs.append(ShouXingUtil.calcShuo(w + 29.5306 * i))
        # days in each month
for i in range(0, 15):
day_counts.append(int(hs[i + 1] - hs[i]))
current_year_leap = -1
if current_year in LunarYear.__LEAP:
current_year_leap = LunarYear.__LEAP[current_year]
else:
if hs[13] <= jq[24]:
i = 1
while hs[i + 1] > jq[2 * i] and i < 13:
i += 1
current_year_leap = i
prev_year = current_year - 1
prev_year_leap = -1
if prev_year in LunarYear.__LEAP:
prev_year_leap = LunarYear.__LEAP[prev_year] - 12
y = prev_year
m = 11
for i in range(0, 15):
cm = m
is_next_leap = False
if y == current_year and i == current_year_leap:
cm = -m
elif y == prev_year and i == prev_year_leap:
cm = -m
if y == current_year and i + 1 == current_year_leap:
is_next_leap = True
elif y == prev_year and i + 1 == prev_year_leap:
is_next_leap = True
self.__months.append(LunarMonth(y, cm, day_counts[i], hs[i] + Solar.J2000))
if not is_next_leap:
m += 1
if m == 13:
m = 1
y += 1
def getYear(self):
return self.__year
def toString(self):
return str(self.__year) + ""
def toFullString(self):
return "%d年" % self.__year
def __str__(self):
return self.toString()
def getMonths(self):
return self.__months
def getJieQiJulianDays(self):
return self.__jieQiJulianDays
def getLeapMonth(self):
"""
        Get the leap month.
        :return: leap month number, where 1 means a leap 1st month and 0 means no leap month
"""
for i in range(0, len(self.__months)):
m = self.__months[i]
if m.getYear() == self.__year and m.isLeap():
return abs(m.getMonth())
return 0
def getMonth(self, lunar_month):
"""
        Get a lunar month of this year.
        :param lunar_month: month number; negative values denote a leap month (e.g. -4 for leap 4th month)
        :return: the lunar month
"""
for i in range(0, len(self.__months)):
m = self.__months[i]
if m.getYear() == self.__year and m.getMonth() == lunar_month:
return m
return None
def getZhiShui(self):
"""
        Get the "dragons governing water" (determined by which day of the first month the first Chen (辰) day falls on).
        :return: e.g. 二龙治水 (two dragons govern the water)
"""
offset = 4 - Solar.fromJulianDay(self.getMonth(1).getFirstJulianDay()).getLunar().getDayZhiIndex()
if offset < 0:
offset += 12
return LunarUtil.NUMBER[offset + 1] + "龙治水"
def getFenBing(self):
"""
        Get the "people sharing the cake" (determined by which day of the first month the first Bing (丙) day falls on).
        :return: e.g. 六人分饼 (six people share the cake)
"""
offset = 2 - Solar.fromJulianDay(self.getMonth(1).getFirstJulianDay()).getLunar().getDayGanIndex()
if offset < 0:
offset += 10
return LunarUtil.NUMBER[offset + 1] + "人分饼"
def getGengTian(self):
"""
        Get the "oxen ploughing the field" (determined by which day of the first month the first Chou (丑) day falls on).
        :return: e.g. 六牛耕田 (six oxen plough the field)
"""
offset = 1 - Solar.fromJulianDay(self.getMonth(1).getFirstJulianDay()).getLunar().getDayZhiIndex()
if offset < 0:
offset += 12
return LunarUtil.NUMBER[offset + 1] + "牛耕田"
def getDeJin(self):
"""
        Get the "days to obtain gold" (determined by which day of the first month the first Xin (辛) day falls on).
        :return: e.g. 一日得金 (gold obtained in one day)
"""
offset = 7 - Solar.fromJulianDay(self.getMonth(1).getFirstJulianDay()).getLunar().getDayGanIndex()
if offset < 0:
offset += 10
return LunarUtil.NUMBER[offset + 1] + "日得金"
```
#### File: lunar-python/test/JieQiTest.py
```python
import unittest
from lunar_python import Lunar
class JieQiTest(unittest.TestCase):
def test7(self):
lunar = Lunar.fromYmd(2012, 9, 1)
self.assertEqual("2012-09-07 13:29:00", lunar.getJieQiTable()["白露"].toYmdHms())
def test8(self):
lunar = Lunar.fromYmd(2050, 12, 1)
self.assertEqual("2050-12-07 06:41:00", lunar.getJieQiTable()["大雪"].toYmdHms())
```
#### File: lunar-python/test/LunarTest.py
```python
import unittest
from lunar_python import Lunar, Solar
class LunarTest(unittest.TestCase):
def test(self):
date = Lunar.fromYmdHms(2019, 3, 27, 0, 0, 0)
self.assertEqual("二〇一九年三月廿七", date.toString())
self.assertEqual("二〇一九年三月廿七 己亥(猪)年 戊辰(龙)月 戊戌(狗)日 子(鼠)时 纳音[平地木 大林木 平地木 桑柘木] 星期三 (七殿泰山王诞) 西方白虎 星宿[参水猿](吉) 彭祖百忌[戊不受田田主不祥 戌不吃犬作怪上床] 喜神方位[巽](东南) 阳贵神方位[艮](东北) 阴贵神方位[坤](西南) 福神方位[坎](正北) 财神方位[坎](正北) 冲[(壬辰)龙] 煞[北]", date.toFullString())
self.assertEqual("2019-05-01", date.getSolar().toString())
self.assertEqual("2019-05-01 00:00:00 星期三 (劳动节) 金牛座", date.getSolar().toFullString())
def test1(self):
solar = Solar.fromYmdHms(100, 1, 1, 12, 0, 0)
self.assertEqual("九九年腊月初二", solar.getLunar().toString())
def test2(self):
solar = Solar.fromYmdHms(3218, 12, 31, 12, 0, 0)
self.assertEqual("三二一八年冬月廿二", solar.getLunar().toString())
def test3(self):
lunar = Lunar.fromYmdHms(5, 1, 6, 12, 0, 0)
self.assertEqual("0005-02-03", lunar.getSolar().toString())
def test4(self):
lunar = Lunar.fromYmdHms(9998, 12, 2, 12, 0, 0)
self.assertEqual("9999-01-11", lunar.getSolar().toString())
def test5(self):
lunar = Lunar.fromYmdHms(1905, 1, 1, 12, 0, 0)
self.assertEqual("1905-02-04", lunar.getSolar().toString())
def test6(self):
lunar = Lunar.fromYmdHms(2038, 12, 29, 12, 0, 0)
self.assertEqual("2039-01-23", lunar.getSolar().toString())
def test7(self):
lunar = Lunar.fromYmdHms(2020, -4, 2, 13, 0, 0)
self.assertEqual("二〇二〇年闰四月初二", lunar.toString())
self.assertEqual("2020-05-24", lunar.getSolar().toString())
def test8(self):
lunar = Lunar.fromYmdHms(2020, 12, 10, 13, 0, 0)
self.assertEqual("二〇二〇年腊月初十", lunar.toString())
self.assertEqual("2021-01-22", lunar.getSolar().toString())
def test9(self):
lunar = Lunar.fromYmdHms(1500, 1, 1, 12, 0, 0)
self.assertEqual("1500-01-31", lunar.getSolar().toString())
def test10(self):
lunar = Lunar.fromYmdHms(1500, 12, 29, 12, 0, 0)
self.assertEqual("1501-01-18", lunar.getSolar().toString())
def test11(self):
solar = Solar.fromYmdHms(1500, 1, 1, 12, 0, 0)
self.assertEqual("一四九九年腊月初一", solar.getLunar().toString())
def test12(self):
solar = Solar.fromYmdHms(1500, 12, 31, 12, 0, 0)
self.assertEqual("一五〇〇年腊月十一", solar.getLunar().toString())
def test13(self):
solar = Solar.fromYmdHms(1582, 10, 4, 12, 0, 0)
self.assertEqual("一五八二年九月十八", solar.getLunar().toString())
def test14(self):
solar = Solar.fromYmdHms(1582, 10, 15, 12, 0, 0)
self.assertEqual("一五八二年九月十九", solar.getLunar().toString())
def test15(self):
lunar = Lunar.fromYmdHms(1582, 9, 18, 12, 0, 0)
self.assertEqual("1582-10-04", lunar.getSolar().toString())
def test16(self):
lunar = Lunar.fromYmdHms(1582, 9, 19, 12, 0, 0)
self.assertEqual("1582-10-15", lunar.getSolar().toString())
def test17(self):
lunar = Lunar.fromYmdHms(2019, 12, 12, 11, 22, 0)
self.assertEqual("2020-01-06", lunar.getSolar().toString())
def test18(self):
lunar = Lunar.fromYmd(2021, 12, 29)
self.assertEqual("除夕", lunar.getFestivals()[0])
def test19(self):
lunar = Lunar.fromYmd(2020, 12, 30)
self.assertEqual("除夕", lunar.getFestivals()[0])
def test20(self):
lunar = Lunar.fromYmd(2020, 12, 29)
self.assertEqual(0, len(lunar.getFestivals()))
def test21(self):
solar = Solar.fromYmd(2022, 1, 31)
lunar = solar.getLunar()
self.assertEqual("除夕", lunar.getFestivals()[0])
def test22(self):
lunar = Lunar.fromYmd(2033, -11, 1)
self.assertEqual('2033-12-22', lunar.getSolar().toYmd())
def test23(self):
lunar = Lunar.fromYmd(2022, 1, 1)
self.assertEqual('五黄土玉衡', lunar.getYearNineStar().toString())
def test24(self):
lunar = Lunar.fromYmd(2033, 1, 1)
self.assertEqual('三碧木天玑', lunar.getYearNineStar().toString())
def test25(self):
solar = Solar.fromYmdHms(2021, 6, 7, 21, 18, 0)
self.assertEqual('二〇二一年四月廿七', solar.getLunar().toString())
def test26(self):
lunar = Lunar.fromYmdHms(2021, 6, 7, 21, 18, 0)
self.assertEqual('2021-07-16', lunar.getSolar().toString())
def testNext(self):
solar = Solar.fromYmdHms(2020, 1, 10, 12, 0, 0)
lunar = solar.getLunar()
for i in range(-1, 1):
self.assertEqual(solar.next(i).getLunar().toFullString(), lunar.next(i).toFullString())
def test27(self):
solar = Solar.fromYmd(1989, 4, 28)
self.assertEqual(23, solar.getLunar().getDay())
def test28(self):
solar = Solar.fromYmd(1990, 10, 8)
self.assertEqual("乙酉", solar.getLunar().getMonthInGanZhiExact())
def test29(self):
solar = Solar.fromYmd(1990, 10, 9)
self.assertEqual("丙戌", solar.getLunar().getMonthInGanZhiExact())
def test30(self):
solar = Solar.fromYmd(1990, 10, 8)
self.assertEqual("丙戌", solar.getLunar().getMonthInGanZhi())
def test31(self):
solar = Solar.fromYmdHms(1987, 4, 17, 9, 0, 0)
self.assertEqual("一九八七年三月二十", solar.getLunar().toString())
def test32(self):
lunar = Lunar.fromYmd(2034, 1, 1)
self.assertEqual("2034-02-19", lunar.getSolar().toYmd())
def test33(self):
lunar = Lunar.fromYmd(2033, 12, 1)
self.assertEqual("2034-01-20", lunar.getSolar().toYmd())
def test34(self):
lunar = Lunar.fromYmd(37, -12, 1)
self.assertEqual("闰腊", lunar.getMonthInChinese())
def test36(self):
solar = Solar.fromYmd(5553, 1, 22)
self.assertEqual("五五五二年闰腊月初二", solar.getLunar().toString())
def test37(self):
solar = Solar.fromYmd(7013, 12, 24)
self.assertEqual("七〇一三年闰冬月初四", solar.getLunar().toString())
def test38(self):
lunar = Lunar.fromYmd(7013, -11, 4)
self.assertEqual("7013-12-24", lunar.getSolar().toString())
def test39(self):
solar = Solar.fromYmd(1987, 4, 12)
lunar = solar.getLunar()
self.assertEqual("一九八七年三月十五", lunar.toString())
def test40(self):
solar = Solar.fromYmd(1987, 4, 13)
lunar = solar.getLunar()
self.assertEqual("一九八七年三月十六", lunar.toString())
def test41(self):
solar = Solar.fromYmd(4, 2, 10)
lunar = solar.getLunar()
self.assertEqual("鼠", lunar.getYearShengXiao())
def test42(self):
solar = Solar.fromYmd(4, 2, 9)
lunar = solar.getLunar()
self.assertEqual("猪", lunar.getYearShengXiao())
def test43(self):
solar = Solar.fromYmd(2017, 2, 15)
lunar = solar.getLunar()
self.assertEqual("子命互禄 辛命进禄", lunar.getDayLu())
def test44(self):
solar = Solar.fromYmd(2017, 2, 16)
lunar = solar.getLunar()
self.assertEqual("寅命互禄", lunar.getDayLu())
```
#### File: lunar-python/test/WuHouTest.py
```python
import unittest
from lunar_python import Solar
class WuHouTest(unittest.TestCase):
def test1(self):
solar = Solar.fromYmd(2020, 4, 23)
lunar = solar.getLunar()
self.assertEqual("萍始生", lunar.getWuHou())
def test2(self):
solar = Solar.fromYmd(2021, 1, 15)
lunar = solar.getLunar()
self.assertEqual("雉始雊", lunar.getWuHou())
def test3(self):
solar = Solar.fromYmd(2017, 1, 5)
lunar = solar.getLunar()
self.assertEqual("雁北乡", lunar.getWuHou())
def test4(self):
solar = Solar.fromYmd(2020, 4, 10)
lunar = solar.getLunar()
self.assertEqual("田鼠化为鴽", lunar.getWuHou())
def test5(self):
solar = Solar.fromYmd(2020, 6, 11)
lunar = solar.getLunar()
self.assertEqual("鵙始鸣", lunar.getWuHou())
def test6(self):
solar = Solar.fromYmd(2020, 6, 1)
lunar = solar.getLunar()
self.assertEqual("麦秋至", lunar.getWuHou())
def test7(self):
solar = Solar.fromYmd(2020, 12, 8)
lunar = solar.getLunar()
self.assertEqual("鹖鴠不鸣", lunar.getWuHou())
def test8(self):
solar = Solar.fromYmd(2020, 12, 11)
lunar = solar.getLunar()
self.assertEqual("鹖鴠不鸣", lunar.getWuHou())
def test9(self):
solar = Solar.fromYmd(1982,12,22)
lunar = solar.getLunar()
self.assertEqual("蚯蚓结", lunar.getWuHou())
``` |
{
"source": "6tennis/dl",
"score": 2
} |
#### File: dl/adv/aldred.py
```python
from core.advbase import *
from slot.a import *
def module():
return Aldred
class Aldred(Adv):
comment = 'maintain dragondrive'
conf = {}
conf['slots.a'] = Heralds_of_Hinomoto()+Dear_Diary()
conf['slots.poison.a'] = Heralds_of_Hinomoto()+The_Plaguebringer()
conf['acl'] = """
`s3, not self.s3_buff
`s2
`dragon, not self.dragondrive_buff.get()
`s1, x=5
"""
coab = ['Wand','Berserker','Curran']
def prerun(self):
self.dragondrive_buff = Selfbuff('dragondrive', 0.30, -1, 's', 'passive')
self.dragonform.set_dragondrive(self.dragondrive_buff)
self.a3_str = Modifier('a3', 'att', 'passive', 0.20)
self.s2_str = Selfbuff('s2', 0.20, -1, 'att', 'buff') # doesnt proc doublebuff reeeee
self.s2_tick = Timer(self.s2_degen, 2.9, 1)
self.s2_stuff_timer = Timer(self.s2_stuff_off)
self.s2_on = False
self.hp = 100
self.conf.x1.utp = 120
self.conf.x2.utp = 120
self.conf.x3.utp = 120
self.conf.x4.utp = 180
self.conf.x5.utp = 180
def d_slots(self):
if self.duration <= 60:
self.conf['slots.a'] = The_Chocolatiers()+TL()
self.conf['slots.poison.a'] = The_Chocolatiers()+The_Plaguebringer()
def x_proc(self, e):
if self.dragondrive_buff.get():
try:
utp = self.conf[e.name].utp
self.dragonform.charge_gauge(utp, utp=True)
except:
pass
def s1_proc(self, e):
if self.dragondrive_buff.get():
with CrisisModifier('s1', 1.00, self.hp):
self.dmg_make('s1', 2.42*4)
self.dragonform.add_drive_gauge_time(self.s1.ac.getstartup()+self.s1.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(-750, utp=True)
self.s1.charge(self.sp_convert(0.50, self.conf.s1.sp))
else:
self.dmg_make('s1', 2.42*4)
# 242 * 4 mod, 4 hits, 2.4s
# 242 * 4 w/ 2x crisis
# -750 dd points
# +50% skill gauge
# 2.1666667461395264
def s2_proc(self, e):
if self.dragondrive_buff.get():
self.s2_stuff_on()
self.s2_stuff_timer.on(40 * self.mod('bt'))
self.dragonform.add_drive_gauge_time(self.s2.ac.getstartup()+self.s2.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(3000, utp=True)
else:
self.dragonform.charge_gauge(1200, utp=True)
# 1 hp loss = 1 gauge gain, will assume 3000 max hp here
if self.hp > 30:
self.dragonform.charge_gauge(3000 * (self.hp-30)/100, utp=True)
self.hp = 30
# +1200 dd points
# 1.3333333730697632s
def s2_stuff_on(self):
self.a3_str.on()
self.s2_str.on()
self.s2_tick.on()
def s2_stuff_off(self, t):
self.a3_str.off()
self.s2_str.off()
self.s2_tick.off()
def s2_degen(self, t):
self.hp = max(self.hp-6, 0)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/celliera.py
```python
from core.advbase import *
from slot.a import *
def module():
return Celliera
class Celliera(Adv):
a3 = ('a',0.08,'hp70')
conf = {}
conf['slots.a'] = RR()+Breakfast_at_Valerios()
conf['slots.frostbite.a'] = Primal_Crisis()+His_Clever_Brother()
conf['acl'] = """
`dragon.act('c3 s end'), not self.afflics.frostbite.get()
`s1
`s2, seq=5
`s3
"""
coab = ['Dagger', 'Xander', 'Pipple']
def d_coabs(self):
if 'sim_afflict' in self.conf and self.conf.sim_afflict.efficiency > 0:
self.coab = ['Xander','Dagger','Summer_Estelle']
def prerun(self):
self.s2buff = Selfbuff("s2_shapshifts1",1, 10,'ss','ss')
self.s2str = Selfbuff("s2_str",0.25,10)
def s1_proc(self, e):
if self.s2buff.get():
self.s2buff.buff_end_timer.timing += 2.5
self.s2str.buff_end_timer.timing += 2.5
def s2_proc(self, e):
self.s2buff.on()
self.s2str.on()
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/delphi.py
```python
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Delphi
class Delphi(Adv):
a1 = ('a',-0.55)
conf = {}
conf['slots.a'] = Mega_Friends()+The_Fires_of_Hate()
conf['slots.d'] = Fatalis()
conf['acl'] = """
`s3, not self.s3_buff
`s1
`s2, self.s1fscharge == 0 and (s1.charged <= ((s1.sp/13)*9))
`fs, x=2
"""
coab = ['Blade','Gala_Alex','Heinwald']
conf['afflict_res.poison'] = 0
def prerun(self):
self.flurry_poison = 70
self.s1defdown = self.condition('s1 defdown for 10s')
self.s1.autocharge_init(80000).on()
self.s2.autocharge_init(50000).on()
self.s1fscharge = 0
def s1_proc(self, e):
if self.s1defdown:
Debuff('s1defdown',0.20,10,1).on()
self.s1fscharge = 1
def s2_proc(self, e):
self.afflics.poison('s2',120+self.flurry_poison*(self.hits>=15),3.00,27)
def fs_proc(self, e):
if self.s1fscharge > 0:
self.s1fscharge -= 1
self.dmg_make("o_fs_boost",0.21*3)
self.afflics.poison('fs',120+self.flurry_poison*(self.hits>=15),3.00,27)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/fritz.py
```python
from core.advbase import *
def module():
return Fritz
class Fritz(Adv):
conf = {}
conf['acl'] = """
`dragon
`s1, x=5 and cancel or fsc
`s2
`s3
`fs, x=5
"""
coab = ['Blade','Halloween_Elisanne','Peony']
def prerun(self):
self.stance = 0
self.s2fscharge = 0
def s2_proc(self, e):
self.s2fscharge = 3
def fs_proc(self, e):
if self.s2fscharge > 0:
self.s2fscharge -= 1
self.dmg_make("o_fs_boost",0.57*3+0.29)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/g_sarisse.py.rollfs.py
```python
from slot.a import *
from slot.d import *
import adv.g_sarisse
def module():
return Gala_Sarisse
class Gala_Sarisse(adv.g_sarisse.Gala_Sarisse):
comment = 'roll fs'
conf = {}
conf['slots.d'] = Dreadking_Rathalos()
conf['acl'] = """
`s3, fsc and not self.s3_buff
`s1, fsc
`s2, fsc
`dodge, fsc
`fs
"""
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(Gala_Sarisse, *sys.argv)
```
#### File: dl/adv/heinwald.py
```python
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Heinwald
class Heinwald(Adv):
a1 = ('s',0.4,'hp70')
a3 = [('prep',1.00), ('scharge_all', 0.05)]
conf = {}
conf['slots.d'] = Fatalis()
conf['acl'] = """
`s3, not self.s3_buff
`s2, s=3 or cancel
`s1, cancel
"""
coab = ['Blade','Wand','Dagger']
def init(self):
if self.condition("buff all teammates"):
self.s2_proc = self.c_s2_proc
def prerun(self):
self.s2ssbuff = Selfbuff("s2_shapshifts1",1, 10,'ss','ss')
def c_s2_proc(self, e):
self.s2ssbuff.on()
Teambuff('s2team',0.15,10).on()
Selfbuff('s2self',0.10,10).on()
def s2_proc(self, e):
self.s2ssbuff.on()
Selfbuff('s2',0.25,10).on()
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/linyou.py
```python
from core.advbase import *
from slot.a import *
from module.x_alt import Fs_alt
def module():
return Lin_You
class Lin_You(Adv):
comment = '2in1 '
a1 = [('cc',0.10,'hp70'), ('cc',0.20,'hit25'), ('cc',0.20,'hit50')]
a3 = ('sp',0.08)
conf = {}
conf['slots.a'] = The_Wyrmclan_Duo()+Primal_Crisis()
conf['acl'] = """
`dragon.act("c3 s end")
`s3, not self.s3_buff
`s2, s1.check()
`s1
`fs, self.hits <= 44 and self.fs_alt.uses > 0 and x=4
"""
coab = ['Blade','Dragonyule_Xainfried','Axe2']
def prerun(self):
conf_fs_alt = {'fs.dmg': 2.59, 'fs.hit': 6}
self.fs_alt = Fs_alt(self, Conf(conf_fs_alt))
self.s2_buff = Spdbuff('s2_spd',0.20, 15)
def s1_proc(self, e):
if self.s2_buff.get():
self.dmg_make('s1_powerup', 1.86*3)
self.s2_buff.buff_end_timer.add(self.s1.ac.getstartup()+self.s1.ac.getrecovery())
self.hits += 3
self.afflics.sleep('s1', 150)
self.fs_alt.on(3)
def s2_proc(self, e):
self.s2_buff.on()
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/luca.py
```python
from core.advbase import *
from slot.a import *
def module():
return Luca
class Luca(Adv):
a1 = ('a',0.13,'hp100')
conf = {}
conf['slots.a'] = Resounding_Rendition()+Spirit_of_the_Season()
conf['acl'] = """
`dragon
`s1
`s2, fsc
`s3
`fs, seq=4
"""
coab = ['Blade','Halloween_Elisanne','Peony']
conf['afflict_res.paralysis'] = 0
def s1_proc(self, e):
self.afflics.paralysis('s1',110,0.883)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/luther.py
```python
from core.advbase import *
from slot.d import *
def module():
return Luther
class Luther(Adv):
a1 = ('cc',0.10,'hit15')
conf = {}
conf ['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s1
`s2, seq=5 and cancel
`s3, seq=5 and cancel or fsc
`fs, seq=5
"""
coab = ['Blade', 'Xander', 'Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/rex.py
```python
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Rex
class Rex(Adv):
conf = {}
conf['slots.a'] = Summer_Paladyns()+Primal_Crisis()
conf['slots.frostbite.a'] = KFM()+His_Clever_Brother()
conf['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s1
`s2,seq=4
`s3,seq=4
`fs,seq=5
"""
coab = ['Blade', 'Xander', 'Dagger']
def d_slots(self):
if self.duration <= 120:
self.conf['slots.a'] = Resounding_Rendition() + Breakfast_at_Valerios()
if self.duration <= 120 and self.duration > 60:
self.conf['slots.frostbite.a'] = Primal_Crisis()+His_Clever_Brother()
def d_coabs(self):
if 'sim_afflict' in self.conf and self.conf.sim_afflict.efficiency > 0 and (self.duration > 120 or self.duration <= 60):
self.coab = ['Blade', 'Xander','Pipple']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/veronica.py.1hp.py
```python
import adv.veronica
import slot
from slot import *
def module():
return Veronica
class Veronica(adv.veronica.Veronica):
conf = adv.veronica.Veronica.conf.copy()
conf['acl'] = """
`dragon.act("c3 s end")
`s3, not self.s3_buff
`s1
"""
def prerun(self):
super().prerun()
self.hp = 0
self.a1_buff.on()
if __name__ == '__main__':
import sys
from core.simulate import test_with_argv
test_with_argv(Veronica, *sys.argv)
```
#### File: dl/adv/yachiyo.py
```python
from core.advbase import *
from slot.a import *
from slot.w import *
class w530(WeaponBase):
ele = ['water','light']
wt = 'blade'
att = 468
def module():
return Yachiyo
class Yachiyo(Adv):
a1 = ('affself_paralysis', 0.15, 10, 5)
a3 = ('k_paralysis', 0.2)
conf = {}
conf['slots.a'] = MF()+SotS()
conf['acl'] = """
`dragon
`fs, self.fsa_charge and seq=5
`s2
`s1
`s3, fsc
"""
coab = ['Malora','Dagger','Peony']
conf['afflict_res.paralysis'] = 0
def d_coabs(self):
if 'sim_afflict' in self.conf and self.conf.sim_afflict.efficiency > 0:
self.coab = ['Sharena','Dagger','Peony']
def prerun(self):
self.fsa_charge = 0
def s1_proc(self, e):
self.dmg_make('s1',4.32)
self.afflics.paralysis('s1',100,0.66)
self.dmg_make('s1',4.32)
def s2_proc(self, e):
# self.fso_dmg = self.conf.fs.dmg
self.fso_sp = self.conf.fs.sp
# self.conf.fs.dmg = 7.82
self.conf.fs.sp = 200
self.fsa_charge = 1
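    # Added note (not in the original source): s2 appears to arm a charged
    # force strike by raising conf.fs.sp to 200 and setting fsa_charge; the
    # next fs_proc consumes the charge, adds the o_fs_boost hit, then restores
    # the saved fs sp cost.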
def fs_proc(self, e):
if self.fsa_charge:
# self.conf.fs.dmg = self.fso_dmg
self.dmg_make("o_fs_boost",6.90)
self.conf.fs.sp = self.fso_sp
self.fsa_charge = 0
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/adv/yue.py
```python
from core.advbase import *
from slot.a import *
def module():
return Yue
class Yue(Adv):
conf = {}
conf['slots.a'] = Resounding_Rendition()+Breakfast_at_Valerios()
conf['slots.burn.a'] = Kung_Fu_Masters()+Elegant_Escort()
conf['acl'] = """
`dragon, s=2
`s3, not self.s3_buff
`s1
`s2, x=4
`fs, x=5
"""
coab = ['Blade', 'Marth', 'Halloween_Mym']
def d_coabs(self):
if self.duration <= 60:
self.coab = ['Blade','Marth','Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
```
#### File: dl/core/condition.py
```python
from typing import Union
class Condition(dict):
def __init__(self, cond: Union[bool, dict]):
self.global_cond = True
self.base_cond = {}
self.min_hp_cond = (None, 100)
super().__init__({})
if isinstance(cond, dict):
self.base_cond = cond
elif isinstance(cond, bool):
self.global_cond = cond
def cond_str(self):
return ' & '.join([k for k, v in self.items() if v])
def cond_set(self, key, cond=True):
if key in self.base_cond:
self[key] = self.base_cond[key]
elif key not in self:
self[key] = cond
return self[key] and self.global_cond
def hp_cond_set(self, hp, cond=True):
key = f'hp={hp}%' if hp > 0 else 'hp=1'
min_key, min_hp = self.min_hp_cond
if hp < min_hp:
if min_key is not None:
del self[min_key]
self.min_hp_cond = (key, hp)
return self.cond_set(key, cond)
else:
return self[min_key] and self.global_cond
def exist(self):
return any(self.values()) and self.global_cond
def __call__(self, key, cond=True):
return self.cond_set(key, cond)
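    # Illustrative usage sketch (added; the condition names are hypothetical):
    #   c = Condition({'buffed': True})
    #   c('buffed')        # True - looked up in base_cond
    #   c('adhoc', False)  # False - stored as given
    #   c.hp_cond_set(70)  # True - tracks the lowest hp threshold seen so far
    #   c.cond_str()       # 'buffed & hp=70%' - all satisfied conditions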
``` |
{
"source": "6tudent/pwntools",
"score": 3
} |
#### File: pwnlib/adb/adb.py
```python
from __future__ import absolute_import
import functools
import glob
import logging
import os
import platform
import re
import shutil
import stat
import tempfile
import time
import dateutil.parser
from pwnlib import atexit
from pwnlib import tubes
from pwnlib.context import LocalContext
from pwnlib.context import context
from pwnlib.device import Device
from pwnlib.log import getLogger
from pwnlib.protocols.adb import AdbClient
from pwnlib.util import misc
log = getLogger(__name__)
def adb(argv, *a, **kw):
r"""Returns the output of an ADB subcommand.
>>> adb.adb(['get-serialno'])
'emulator-5554\n'
"""
if isinstance(argv, (str, unicode)):
argv = [argv]
log.debug("$ " + ' '.join(context.adb + argv))
# All "adb shell" incantations should go through adb.process()
if argv[0] == 'shell':
return process(argv[1:], *a, **kw).recvall()
return tubes.process.process(context.adb + argv, *a, **kw).recvall()
@context.quietfunc
def devices(serial=None):
"""Returns a list of ``Device`` objects corresponding to the connected devices."""
with AdbClient() as c:
lines = c.devices(long=True)
result = []
for line in lines.splitlines():
# Skip the first 'List of devices attached' line, and the final empty line.
if 'List of devices' in line or not line.strip():
continue
device = AdbDevice.from_adb_output(line)
if device.serial == serial:
return device
result.append(device)
return tuple(result)
def current_device(any=False):
"""Returns an ``AdbDevice`` instance for the currently-selected device
(via ``context.device``).
Example:
>>> device = adb.current_device(any=True)
>>> device
AdbDevice(serial='emulator-5554', type='device', port='emulator', product='sdk_phone_armv7', model='sdk phone armv7', device='generic')
>>> device.port
'emulator'
"""
all_devices = devices()
for device in all_devices:
if any or device == context.device:
return device
def with_device(f):
@functools.wraps(f)
def wrapper(*a,**kw):
if not context.device:
device = current_device(any=True)
if device:
log.warn_once('Automatically selecting device %s' % device)
context.device = device
if not context.device:
log.error('No devices connected, cannot invoke %s.%s' % (f.__module__, f.__name__))
return f(*a,**kw)
return wrapper
@with_device
def root():
"""Restarts adbd as root.
>>> adb.root()
"""
log.info("Enabling root on %s" % context.device)
with context.quiet:
with AdbClient() as c:
reply = c.root()
if 'already running as root' in reply:
return
elif not reply or 'restarting adbd as root' in reply:
with context.quiet:
wait_for_device()
else:
log.error("Could not run as root:\n%s" % reply)
def no_emulator(f):
@functools.wraps(f)
def wrapper(*a,**kw):
c = current_device()
if c and c.port == 'emulator':
log.error("Cannot invoke %s.%s on an emulator." % (f.__module__, f.__name__))
return f(*a,**kw)
return wrapper
@no_emulator
@with_device
def reboot(wait=True):
"""Reboots the device.
"""
log.info('Rebooting device %s' % context.device)
with AdbClient() as c:
c.reboot()
if wait:
wait_for_device()
@no_emulator
@with_device
def reboot_bootloader():
"""Reboots the device to the bootloader.
"""
log.info('Rebooting %s to bootloader' % context.device)
with AdbClient() as c:
c.reboot_bootloader()
@with_device
def uptime():
"""uptime() -> float
Returns:
Uptime of the device, in seconds
"""
up, idle = map(float, read('/proc/uptime').split())
return up
@with_device
def boot_time():
"""boot_time() -> int
Returns:
Boot time of the device, in Unix time, rounded to the
nearest second.
"""
for line in read('/proc/stat').splitlines():
name, value = line.split(None, 1)
if name == 'btime':
return int(value)
class AdbDevice(Device):
"""Encapsulates information about a connected device.
Example:
>>> device = adb.wait_for_device()
>>> device.arch
'arm'
>>> device.bits
32
>>> device.os
'android'
>>> device.product
'sdk_phone_armv7'
>>> device.serial
'emulator-5554'
"""
def __init__(self, serial, type, port=None, product='unknown', model='unknown', device='unknown', features=None, **kw):
self.serial = serial
self.type = type
self.port = port
self.product = product
self.model = model.replace('_', ' ')
self.device = device
self.os = 'android'
if product == 'unknown':
return
# Deferred fields
self._initialized = False
self._arch = None
self._bits = None
self._endian = None
self._avd = None
@property
def arch(self):
self.__do_deferred_initialization()
return self._arch
@property
def avd(self):
self.__do_deferred_initialization()
return self._avd
@property
def bits(self):
self.__do_deferred_initialization()
return self._bits
@property
def endian(self):
self.__do_deferred_initialization()
return self._endian
def __do_deferred_initialization(self):
if self._initialized:
return
with context.local(device=self.serial):
abi = str(properties.ro.product.cpu.abi)
context.clear()
context.arch = str(abi)
self._arch = context.arch
self._bits = context.bits
self._endian = context.endian
if self.port == 'emulator':
emulator, port = self.serial.split('-')
port = int(port)
try:
with remote('localhost', port, level='error') as r:
r.recvuntil('OK')
r.recvline() # Rest of the line
r.sendline('avd name')
self._avd = r.recvline().strip()
except:
pass
self._initialized = True
def __str__(self):
return self.serial
def __repr__(self):
fields = ['serial', 'type', 'port', 'product', 'model', 'device']
return '%s(%s)' % (self.__class__.__name__,
', '.join(('%s=%r' % (field, getattr(self, field)) for field in fields)))
@staticmethod
def from_adb_output(line):
"""
Example output:
ZX1G22LM7G device usb:336789504X product:shamu model:Nexus_6 device:shamu features:cmd,shell_v2
84B5T15A29020449 device usb:336855040X product:angler model:Nexus_6P device:angler
0062741b0e54b353 unauthorized usb:337641472X
emulator-5554 offline
emulator-5554 device product:sdk_phone_armv7 model:sdk_phone_armv7 device:generic
"""
fields = line.split()
serial = fields[0]
type = fields[1]
kwargs = {}
if serial.startswith('emulator-'):
kwargs['port'] = 'emulator'
for field in fields[2:]:
k,v = field.split(':')
kwargs[k] = v
return AdbDevice(serial, type, **kwargs)
def __wrapped(self, function):
"""Wrapps a callable in a scope which selects the current device."""
@functools.wraps(function)
def wrapper(*a, **kw):
with context.local(device=self):
return function(*a,**kw)
return wrapper
def __getattr__(self, name):
"""Provides scoped access to ``adb`` module propertise, in the context
of this device.
>>> property = 'ro.build.fingerprint'
>>> device = adb.wait_for_device()
>>> adb.getprop(property) == device.getprop(property)
True
"""
with context.local(device=self):
g = globals()
if name not in g:
raise AttributeError('%r object has no attribute %r' % (type(self).__name__,name))
value = g[name]
if not hasattr(value, '__call__'):
return value
return self.__wrapped(value)
@LocalContext
def wait_for_device(kick=False):
"""Waits for a device to be connected.
By default, waits for the currently-selected device (via ``context.device``).
To wait for a specific device, set ``context.device``.
To wait for *any* device, clear ``context.device``.
Return:
An ``AdbDevice`` instance for the device.
Examples:
>>> device = adb.wait_for_device()
"""
with log.waitfor("Waiting for device to come online") as w:
with AdbClient() as c:
if kick:
try:
c.reconnect()
except Exception:
pass
serial = ''
if context.device:
serial = str(context.device)
with AdbClient() as c:
c.wait_for_device(serial)
for device in devices():
if context.device == device:
return device
if not serial:
break
else:
log.error("Could not find any devices")
with context.local(device=device):
# There may be multiple devices, so context.device is
# insufficient. Pick the first device reported.
w.success('%s (%s %s %s)' % (device,
product(),
build(),
_build_date()))
return context.device
@with_device
def disable_verity():
"""Disables dm-verity on the device."""
with log.waitfor("Disabling dm-verity on %s" % context.device) as w:
root()
with AdbClient() as c:
reply = c.disable_verity()
if 'Verity already disabled' in reply:
return
elif 'Now reboot your device' in reply:
reboot(wait=True)
elif '0006closed' in reply:
return # Emulator doesn't support Verity?
else:
log.error("Could not disable verity:\n%s" % reply)
@with_device
def remount():
"""Remounts the filesystem as writable."""
with log.waitfor("Remounting filesystem on %s" % context.device) as w:
disable_verity()
root()
with AdbClient() as c:
reply = c.remount()
if 'remount succeeded' not in reply:
log.error("Could not remount filesystem:\n%s" % reply)
@with_device
def unroot():
"""Restarts adbd as AID_SHELL."""
log.info("Unrooting %s" % context.device)
with context.quiet:
with AdbClient() as c:
reply = c.unroot()
if '0006closed' == reply:
return # Emulator doesn't care
if 'restarting adbd as non root' not in reply:
log.error("Could not unroot:\n%s" % reply)
def _create_adb_push_pull_callback(w):
def callback(filename, data, size, chunk, chunk_size):
have = len(data) + len(chunk)
if size == 0:
size = '???'
percent = '???'
else:
percent = int(100 * have // size)
size = misc.size(size)
have = misc.size(have)
w.status('%s/%s (%s%%)' % (have, size, percent))
return True
return callback
@with_device
def pull(remote_path, local_path=None):
"""Download a file from the device.
Arguments:
remote_path(str): Path or directory of the file on the device.
local_path(str): Path to save the file to.
Uses the file's name by default.
Return:
The contents of the file.
Example:
>>> _=adb.pull('/proc/version', './proc-version')
>>> print read('./proc-version') # doctest: +ELLIPSIS
Linux version ...
"""
if local_path is None:
local_path = os.path.basename(remote_path)
msg = "Pulling %r to %r" % (remote_path, local_path)
if log.isEnabledFor(logging.DEBUG):
msg += ' (%s)' % context.device
with log.waitfor(msg) as w:
data = read(remote_path, callback=_create_adb_push_pull_callback(w))
misc.write(local_path, data)
return data
@with_device
def push(local_path, remote_path):
"""Upload a file to the device.
Arguments:
local_path(str): Path to the local file to push.
remote_path(str): Path or directory to store the file on the device.
Returns:
Remote path of the file.
Example:
>>> write('./filename', 'contents')
>>> adb.push('./filename', '/data/local/tmp')
'/data/local/tmp/filename'
>>> adb.read('/data/local/tmp/filename')
'contents'
>>> adb.push('./filename', '/does/not/exist')
Traceback (most recent call last):
...
PwnlibException: Could not stat '/does/not/exist'
"""
msg = "Pushing %r to %r" % (local_path, remote_path)
remote_filename = os.path.basename(local_path)
if log.isEnabledFor(logging.DEBUG):
msg += ' (%s)' % context.device
with log.waitfor(msg) as w:
with AdbClient() as c:
# We need to discover whether remote_path is a directory or not.
# If we cannot stat the full path, assume it's a path-plus-filename,
# where the filename does not exist.
stat_ = c.stat(remote_path)
if not stat_:
remote_filename = os.path.basename(remote_path)
remote_path = os.path.dirname(remote_path)
stat_ = c.stat(remote_path)
# If we can't find the exact path, or its parent directory, bail!
if not stat_:
log.error('Could not stat %r' % remote_path)
# If we found the parent directory, append the filename
mode = stat_['mode']
if stat.S_ISDIR(mode):
remote_path = os.path.join(remote_path, remote_filename)
c.write(remote_path,
misc.read(local_path),
callback=_create_adb_push_pull_callback(w))
return remote_path
@context.quietfunc
@with_device
def read(path, target=None, callback=None):
"""Download a file from the device, and extract its contents.
Arguments:
path(str): Path to the file on the device.
target(str): Optional, location to store the file.
Uses a temporary file by default.
callback(callable): See the documentation for
``adb.protocol.AdbClient.read``.
Examples:
>>> print adb.read('/proc/version') # doctest: +ELLIPSIS
Linux version ...
>>> adb.read('/does/not/exist')
Traceback (most recent call last):
...
PwnlibException: Could not stat '/does/not/exist'
"""
with AdbClient() as c:
stat = c.stat(path)
if not stat:
log.error('Could not stat %r' % path)
data = c.read(path, stat['size'], callback=callback)
if target:
misc.write(target, data)
return data
@context.quietfunc
@with_device
def write(path, data=''):
"""Create a file on the device with the provided contents.
Arguments:
path(str): Path to the file on the device
data(str): Contents to store in the file
Examples:
>>> adb.write('/dev/null', 'data')
>>> adb.write('/data/local/tmp/')
"""
with tempfile.NamedTemporaryFile() as temp:
misc.write(temp.name, data)
push(temp.name, path)
@context.quietfunc
@with_device
def mkdir(path):
"""Create a directory on the target device.
Note:
Silently succeeds if the directory already exists.
Arguments:
path(str): Directory to create.
Examples:
>>> adb.mkdir('/')
>>> path = '/data/local/tmp/mkdir_test'
>>> adb.exists(path)
False
>>> adb.mkdir(path)
>>> adb.exists(path)
True
>>> adb.mkdir('/init')
Traceback (most recent call last):
...
PwnlibException: mkdir failed for /init, File exists
"""
if not path.startswith('/'):
log.error("Must provide an absolute path: %r" % path)
with AdbClient() as c:
st = c.stat(path)
# Don't re-create existing directories
if st and stat.S_ISDIR(st['mode']):
return
result = process(['mkdir', path]).recvall()
# Any output at all is an error
if result:
log.error(result)
@context.quietfunc
@with_device
def makedirs(path):
"""Create a directory and all parent directories on the target device.
Note:
Silently succeeds if the directory already exists.
Examples:
>>> adb.makedirs('/data/local/tmp/this/is/a/directory/hierarchy')
>>> adb.listdir('/data/local/tmp/this/is/a/directory')
['hierarchy']
"""
if path != '/':
makedirs(os.path.dirname(path))
mkdir(path)
@context.quietfunc
@with_device
def exists(path):
"""Return :const:`True` if ``path`` exists on the target device.
Examples:
>>> adb.exists('/')
True
>>> adb.exists('/init')
True
>>> adb.exists('/does/not/exist')
False
"""
with AdbClient() as c:
return bool(c.stat(path))
@context.quietfunc
@with_device
def isdir(path):
"""Return :const:`True` if ``path`` is a on the target device.
Examples:
>>> adb.isdir('/')
True
>>> adb.isdir('/init')
False
>>> adb.isdir('/does/not/exist')
False
"""
with AdbClient() as c:
st = c.stat(path)
return bool(st and stat.S_ISDIR(st['mode']))
@context.quietfunc
@with_device
def unlink(path, recursive=False):
"""Unlinks a file or directory on the target device.
Examples:
>>> adb.unlink("/does/not/exist")
Traceback (most recent call last):
...
PwnlibException: Could not unlink '/does/not/exist': Does not exist
>>> filename = '/data/local/tmp/unlink-test'
>>> adb.write(filename, 'hello')
>>> adb.exists(filename)
True
>>> adb.unlink(filename)
>>> adb.exists(filename)
False
>>> adb.mkdir(filename)
>>> adb.write(filename + '/contents', 'hello')
>>> adb.unlink(filename)
Traceback (most recent call last):
...
PwnlibException: Cannot delete non-empty directory '/data/local/tmp/unlink-test' without recursive=True
>>> adb.unlink(filename, recursive=True)
>>> adb.exists(filename)
False
"""
with AdbClient() as c:
st = c.stat(path)
if not st:
log.error("Could not unlink %r: Does not exist" % path)
# If the directory is not empty, do not delete it
if isdir(path) and c.list(path) and not recursive:
log.error("Cannot delete non-empty directory %r without recursive=True" % path)
flags = '-rf' if recursive else '-r'
output = c.execute(['rm', flags, path]).recvall()
if output:
log.error(output)
@with_device
def process(argv, *a, **kw):
"""Execute a process on the device.
See :class:`pwnlib.tubes.process.process` documentation for more info.
Returns:
A :class:`pwnlib.tubes.process.process` tube.
Examples:
>>> adb.root()
>>> print adb.process(['cat','/proc/version']).recvall() # doctest: +ELLIPSIS
Linux version ...
"""
if isinstance(argv, (str, unicode)):
argv = [argv]
message = "Starting %s process %r" % ('Android', argv[0])
if log.isEnabledFor(logging.DEBUG):
if argv != [argv[0]]: message += ' argv=%r ' % argv
with log.progress(message) as p:
return AdbClient().execute(argv)
@with_device
def interactive(**kw):
"""Spawns an interactive shell."""
return shell(**kw).interactive()
@with_device
def shell(**kw):
"""Returns an interactive shell."""
return process(['sh', '-i'], **kw)
@with_device
def which(name, all = False, *a, **kw):
"""Retrieves the full path to a binary in ``$PATH`` on the device
Arguments:
name(str): Binary name
all(bool): Whether to return all paths, or just the first
*a: Additional arguments for :func:`.adb.process`
**kw: Additional arguments for :func:`.adb.process`
Returns:
Either a path, or list of paths
Example:
>>> adb.which('sh')
'/system/bin/sh'
>>> adb.which('sh', all=True)
['/system/bin/sh']
>>> adb.which('foobar') is None
True
>>> adb.which('foobar', all=True)
[]
"""
# Unfortunately, there is no native 'which' on many phones.
which_cmd = '''
echo $PATH | while read -d: directory; do
[ -x "$directory/{name}" ] || continue;
echo -n "$directory/{name}\\x00";
done
'''.format(name=name)
which_cmd = which_cmd.strip()
data = process(['sh','-c', which_cmd], *a, **kw).recvall()
result = []
for path in data.split('\x00'):
# Skip empty entries
if not path:
continue
# Return the first entry if all=False
if not all:
return path
# Accumulate all entries if all=True
result.append(path)
if all:
return result
return None
@with_device
def whoami():
return process(['sh','-ic','echo $USER']).recvall().strip()
@with_device
def forward(port):
"""Sets up a port to forward to the device."""
tcp_port = 'tcp:%s' % port
start_forwarding = adb(['forward', tcp_port, tcp_port])
atexit.register(lambda: adb(['forward', '--remove', tcp_port]))
@context.quietfunc
@with_device
def logcat(stream=False):
"""Reads the system log file.
By default, causes logcat to exit after reading the file.
Arguments:
stream(bool): If :const:`True`, the contents are streamed rather than
read in a one-shot manner. Default is :const:`False`.
Returns:
If ``stream`` is :const:`False`, returns a string containing the log data.
Otherwise, it returns a :class:`pwnlib.tubes.tube.tube` connected to the log output.
"""
if stream:
return process(['logcat'])
else:
return process(['logcat', '-d']).recvall()
@with_device
def pidof(name):
"""Returns a list of PIDs for the named process."""
with context.quiet:
io = process(['pidof', name])
data = io.recvall().split()
return list(map(int, data))
@with_device
def proc_exe(pid):
"""Returns the full path of the executable for the provided PID."""
with context.quiet:
io = process(['realpath','/proc/%d/exe' % pid])
data = io.recvall().strip()
return data
@with_device
def getprop(name=None):
"""Reads a properties from the system property store.
Arguments:
name(str): Optional, read a single property.
Returns:
If ``name`` is not specified, a ``dict`` of all properties is returned.
Otherwise, a string is returned with the contents of the named property.
"""
with context.quiet:
if name:
return process(['getprop', name]).recvall().strip()
result = process(['getprop']).recvall()
expr = r'\[([^\]]+)\]: \[(.*)\]'
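    # Added note: each getprop line has the form '[name]: [value]', e.g.
    #   [ro.build.version.sdk]: [23]
    # which is what the regular expression above captures.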
props = {}
for line in result.splitlines():
if not line.startswith('['):
continue
name, value = re.search(expr, line).groups()
if value.isdigit():
value = int(value)
props[name] = value
return props
@with_device
def setprop(name, value):
"""Writes a property to the system property store."""
return process(['setprop', name, value]).recvall().strip()
@with_device
def listdir(directory='/'):
"""Returns a list containing the entries in the provided directory.
Note:
This uses the SYNC LIST functionality, which runs in the adbd
SELinux context. If adbd is running in the su domain ('adb root'),
this behaves as expected.
Otherwise, fewer files may be returned due to restrictive SELinux
policies on adbd.
"""
return list(sorted(AdbClient().list(directory)))
@with_device
def fastboot(args, *a, **kw):
"""Executes a fastboot command.
Returns:
The command output.
"""
argv = ['fastboot', '-s', str(context.device)] + list(args)
return tubes.process.process(argv, *a, **kw).recvall()
@with_device
def fingerprint():
"""Returns the device build fingerprint."""
return str(properties.ro.build.fingerprint)
@with_device
def product():
"""Returns the device product identifier."""
return str(properties.ro.build.product)
@with_device
def build():
"""Returns the Build ID of the device."""
return str(properties.ro.build.id)
@with_device
@no_emulator
def unlock_bootloader():
"""Unlocks the bootloader of the device.
Note:
This requires physical interaction with the device.
"""
AdbClient().reboot_bootloader()
fastboot(['oem', 'unlock'])
fastboot(['continue'])
class Kernel(object):
_kallsyms = None
@property
def address(self):
return self.symbols['_text']
@property
@context.quietfunc
def symbols(self):
"""Returns a dictionary of kernel symbols"""
result = {}
for line in self.kallsyms.splitlines():
fields = line.split()
address = int(fields[0], 16)
name = fields[-1]
result[name] = address
return result
@property
@context.quietfunc
def kallsyms(self):
"""Returns the raw output of kallsyms"""
if not self._kallsyms:
self._kallsyms = {}
root()
write('/proc/sys/kernel/kptr_restrict', '1')
self._kallsyms = read('/proc/kallsyms')
return self._kallsyms
@property
@context.quietfunc
def version(self):
"""Returns the kernel version of the device."""
root()
return read('/proc/version').strip()
@property
@context.quietfunc
def cmdline(self):
root()
return read('/proc/cmdline').strip()
@property
@context.quietfunc
def lastmsg(self):
root()
if 'last_kmsg' in listdir('/proc'):
return read('/proc/last_kmsg')
if 'console-ramoops' in listdir('/sys/fs/pstore/'):
return read('/sys/fs/pstore/console-ramoops')
def enable_uart(self):
"""Reboots the device with kernel logging to the UART enabled."""
model = str(properties.ro.product.model)
known_commands = {
'Nexus 4': None,
'Nexus 5': None,
'Nexus 6': 'oem config console enable',
'Nexus 5X': None,
'Nexus 6P': 'oem uart enable',
'Nexus 7': 'oem uart-on',
}
with log.waitfor('Enabling kernel UART') as w:
if model not in known_commands:
log.error("Device UART is unsupported.")
command = known_commands[model]
if command is None:
w.success('Always enabled')
return
# Check the current commandline, it may already be enabled.
if any(s.startswith('console=tty') for s in self.cmdline.split()):
w.success("Already enabled")
return
# Need to be root
with context.local(device=context.device):
# Save off the command line before rebooting to the bootloader
cmdline = kernel.cmdline
reboot_bootloader()
# Wait for device to come online
while context.device not in fastboot(['devices', '-l']):
time.sleep(0.5)
# Try the 'new' way
fastboot(command.split())
fastboot(['continue'])
wait_for_device()
kernel = Kernel()
class Property(object):
def __init__(self, name=None):
self.__dict__['_name'] = name
def __str__(self):
return getprop(self._name).strip()
def __repr__(self):
return repr(str(self))
def __getattr__(self, attr):
if self._name:
attr = '%s.%s' % (self._name, attr)
return Property(attr)
def __setattr__(self, attr, value):
if attr in self.__dict__:
return super(Property, self).__setattr__(attr, value)
if self._name:
attr = '%s.%s' % (self._name, attr)
setprop(attr, value)
properties = Property()
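# Added note: attribute access on ``properties`` builds dotted property names
# lazily, so str(properties.ro.build.fingerprint) is equivalent to
# getprop('ro.build.fingerprint'), and assignment routes through setprop().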
def _build_date():
"""Returns the build date in the form YYYY-MM-DD as a string"""
as_string = getprop('ro.build.date')
as_datetime = dateutil.parser.parse(as_string)
return as_datetime.strftime('%Y-%b-%d')
def find_ndk_project_root(source):
'''Given a directory path, find the topmost project root.
tl;dr "foo/bar/jni/baz.cpp" ==> "foo/bar"
'''
ndk_directory = os.path.abspath(source)
while ndk_directory != '/':
if os.path.exists(os.path.join(ndk_directory, 'jni')):
break
ndk_directory = os.path.dirname(ndk_directory)
else:
return None
return ndk_directory
_android_mk_template = '''
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := %(local_module)s
LOCAL_SRC_FILES := %(local_src_files)s
include $(BUILD_EXECUTABLE)
'''.lstrip()
_application_mk_template = '''
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
APP_ABI:= %(app_abi)s
APP_PLATFORM:=%(app_platform)s
'''.lstrip()
def _generate_ndk_project(file_list, abi='arm-v7a', platform_version=21):
# Create our project root
root = tempfile.mkdtemp()
if not isinstance(file_list, (list, tuple)):
file_list = [file_list]
# Copy over the source file(s)
jni_directory = os.path.join(root, 'jni')
os.mkdir(jni_directory)
for file in file_list:
shutil.copy(file, jni_directory)
# Create the directories
# Populate Android.mk
local_module = os.path.basename(file_list[0])
local_module, _ = os.path.splitext(local_module)
local_src_files = ' '.join(list(map(os.path.basename, file_list)))
Android_mk = os.path.join(jni_directory, 'Android.mk')
with open(Android_mk, 'w+') as f:
f.write(_android_mk_template % locals())
# Populate Application.mk
app_abi = abi
app_platform = 'android-%s' % platform_version
Application_mk = os.path.join(jni_directory, 'Application.mk')
with open(Application_mk, 'w+') as f:
f.write(_application_mk_template % locals())
return root
def compile(source):
"""Compile a source file or project with the Android NDK."""
ndk_build = misc.which('ndk-build')
if not ndk_build:
# Ensure that we can find the NDK.
ndk = os.environ.get('NDK', None)
if ndk is None:
log.error('$NDK must be set to the Android NDK directory')
ndk_build = os.path.join(ndk, 'ndk-build')
# Determine whether the source is an NDK project or a single source file.
project = find_ndk_project_root(source)
if not project:
# Realistically this should inherit from context.arch, but
# this works for now.
abi = 'armeabi-v7a'
sdk = '21'
# If we have an attached device, use its settings.
if context.device:
abi = str(properties.ro.product.cpu.abi)
sdk = str(properties.ro.build.version.sdk)
project = _generate_ndk_project(source, abi, sdk)
# Remove any output files
lib = os.path.join(project, 'libs')
if os.path.exists(lib):
shutil.rmtree(lib)
# Build the project
io = tubes.process.process(ndk_build, cwd=os.path.join(project, 'jni'))
result = io.recvall()
if 0 != io.poll():
log.error("Build failed:\n%s" % result)
# Find all of the output files
output = glob.glob(os.path.join(lib, '*', '*'))
return output[0]
class Partition(object):
def __init__(self, path, name, blocks=0):
self.path = path
self.name = name
self.blocks = blocks
self.size = blocks * 1024
@property
def data(self):
with log.waitfor('Fetching %r partition (%s)' % (self.name, self.path)):
return read(self.path)
@with_device
def walk(top, topdown=True):
join = os.path.join
isdir = lambda x: stat.S_ISDIR(x['mode'])
client = AdbClient()
names = client.list(top)
dirs, nondirs = [], []
for name, metadata in names.items():
if isdir(metadata):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
for x in walk(new_path, topdown):
yield x
if not topdown:
yield top, dirs, nondirs
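# Usage sketch (added comment): walk() mirrors os.walk on the device, e.g.
#   for root, dirs, files in walk('/data/local/tmp'):
#       ...
# yields each directory once, top-down unless topdown=False.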
@with_device
def find(top, name):
for root, dirs, files in walk(top):
if name in files or name in dirs:
yield os.path.join(root, name)
@with_device
def readlink(path):
path = process(['readlink', path]).recvall()
# Readlink will emit a single newline
# We can't use the '-n' flag since old versions don't support it
if path.endswith('\n'):
path = path[:-1]
return path
class Partitions(object):
@property
@context.quietfunc
def by_name_dir(self):
return next(find('/dev/block/platform','by-name'))
@context.quietfunc
def __dir__(self):
return list(self)
@context.quietfunc
@with_device
def __iter__(self):
root()
# Find all named partitions
for name in listdir(self.by_name_dir):
yield name
@context.quietfunc
@with_device
def __getattr__(self, attr):
for name in self:
if name == attr:
break
else:
raise AttributeError("No partition %r" % attr)
path = os.path.join(self.by_name_dir, name)
# Find the actual path of the device
devpath = readlink(path)
devname = os.path.basename(devpath)
# Get the size of the partition
for line in read('/proc/partitions').splitlines():
if not line.strip():
continue
major, minor, blocks, name = line.split(None, 4)
if devname == name:
break
else:
log.error("Could not find size of partition %r" % name)
return Partition(devpath, attr, int(blocks))
partitions = Partitions()
def install(apk, *arguments):
"""Install an APK onto the device.
This is a wrapper around 'pm install', which backs 'adb install'.
Arguments:
apk(str): Path to the APK to install (e.g. ``'foo.apk'``)
arguments: Supplementary arguments to 'pm install',
e.g. ``'-l', '-g'``.
"""
if not apk.endswith('.apk'):
log.error("APK must have .apk extension")
basename = os.path.basename(apk)
target_path = '/data/local/tmp/{}.apk'.format(basename)
with log.progress("Installing APK {}".format(basename)) as p:
with context.quiet:
p.status('Copying APK to device')
push(apk, target_path)
p.status('Installing')
result = process(['pm', 'install-create', target_path] + list(arguments)).recvall()
status = result.splitlines()[-1]
if 'Success' not in status:
log.error(status)
def uninstall(package, *arguments):
"""Uninstall an APK from the device.
This is a wrapper around 'pm uninstall', which backs 'adb uninstall'.
Arguments:
package(str): Name of the package to uninstall (e.g. ``'com.foo.MyPackage'``)
arguments: Supplementary arguments to ``'pm uninstall'``, e.g. ``'-k'``.
"""
with log.progress("Uninstalling package {}".format(package)):
with context.quiet:
return process(['pm','uninstall',package] + list(arguments)).recvall()
@context.quietfunc
def packages():
"""Returns a list of packages installed on the system"""
packages = process(['pm', 'list', 'packages']).recvall()
return [line.split('package:', 1)[-1] for line in packages.splitlines()]
``` |
{
"source": "6tudent/pyemf",
"score": 3
} |
#### File: pyemf/tests/test--run-all.py
```python
from __future__ import print_function
import os,sys
import glob
import filecmp
import pyemf
is_py3 = sys.version_info[0] == 3
if is_py3:
bchr = lambda x: bytes((x,))
else:
bchr = chr
def dump(fh, length=8):
""""Return a hex dump of the file."""
N=0; result=''
s=fh.read(length)
while len(s)>0:
hexa = ' '.join(["%02X"%ord(s[i:i+1]) for i in range(len(s))])
FILTER=b''.join([ bchr(x) if 32 <= x < 127 else b'.'
for x in range(256) ])
s = s.translate(FILTER)
result += "%04X %-*s %s\n" % (N, length*3, hexa, s.decode('ascii'))
N+=length
s=fh.read(length)
return result
def dumpfile(filename):
fh=open(filename, "rb")
if fh:
result=dump(fh)
fh=open(filename+".hex", "w")
fh.write(result)
class Comparison:
def __init__(self,verbose=False):
self.verbose=verbose
self.total=0
self.passed=[]
self.failed=[]
def show(self,char):
sys.stdout.write(char)
sys.stdout.flush()
def comparepy(self,pyfilename):
self.total+=1
filename=pyfilename[:-3]+".emf"
outputfile=filename+".out.emf"
try:
exec(open(pyfilename).read())
e=pyemf.EMF(verbose=self.verbose)
e.load(filename)
if os.path.exists(outputfile): os.remove(outputfile)
ret=e.save(outputfile)
if ret:
if filecmp.cmp(filename,outputfile,shallow=False):
self.show(".")
self.passed.append(filename)
else:
self.show("F")
self.failed.append(filename)
dumpfile(filename)
dumpfile(outputfile)
else:
self.failed.append(filename)
self.show("0")
except Exception as e:
print(e)
self.failed.append(filename)
self.show("E")
dumpfile(filename)
dumpfile(outputfile)
def test(self,tests):
for filename in tests:
self.comparepy(filename)
self.stats()
def stats(self):
print()
print("%d passed out of %d" % (len(self.passed),self.total))
print("passed: %s" % self.passed)
print("failed: %s" % self.failed)
def test(tests,options):
total=0
verbose=False
passed=[]
failed=[]
tests.sort()
for test in tests:
print("Running %s" % test)
total+=1
try:
exec(open(test).read())
filename=test[:-3]+".emf"
try:
e=pyemf.EMF(verbose=options.verbose)
e.load(filename)
outputfile=filename+".out.emf"
if os.path.exists(outputfile): os.remove(outputfile)
ret=e.save(outputfile)
if ret:
if filecmp.cmp(filename,outputfile,shallow=False):
print(".",)
passed.append(filename)
else:
print("F",)
failed.append(filename)
dumpfile(filename)
dumpfile(outputfile)
else:
failed.append(filename)
print("0",)
except Exception as e:
print(e)
failed.append(filename)
print("E")
dumpfile(filename)
dumpfile(outputfile)
except:
failed.append(test)
print("** test %s failed" % test)
print("%d passed out of %d" % (len(passed),total))
print("passed: %s" % passed)
print("failed: %s" % failed)
if __name__=="__main__":
from optparse import OptionParser
parser=OptionParser(usage="usage: %prog [options] emf-files...")
parser.add_option("-v", action="store_true", dest="verbose", default=False)
(options, args) = parser.parse_args()
if len(args)>0:
tests=args
else:
tests=glob.glob("test-[a-z0-9]*py")
comp=Comparison(options.verbose)
comp.test(tests)
``` |
{
"source": "6uayf/6uayf.github.io",
"score": 3
} |
#### File: media/caffeine/tojson.py
```python
import sys
import json
import datetime
from collections import namedtuple, defaultdict
import re
Coffee = namedtuple('Coffee', 'dt desc')
SHOPS = [
'Herkimer',
'Trabant',
'Neptune',
]
OTHER_SEATTLE_SHOPS = [
'Caffe Fiore',
'Caffe Ladro',
'Makeda',
'Milstead',
'Diva',
'Uptown',
'Green Bean',
'Commons', # Microsoft
'Armory', # Seattle Center
'Sip and Ship',
'Solstice',
]
def classify(desc):
"""Get a category string given a coffee description.
"""
if 'at home' in desc or 'Distant Lands' in desc:
return 'home'
elif re.search(r'Seven (Brazil|coffee|Ethiopia)', desc):
return 'work'
elif re.search(r'Seven (Mexico)', desc):
return 'home'
elif "Tony's" in desc:
return 'home'
elif re.search(r'\dg (coffee|water)', desc):
return 'home'
elif re.search(r'Herkimer (Honduras|Guatemala|El Salvador)', desc):
return 'work'
elif 'conference' in desc.lower() or 'hotel' in desc.lower():
return 'travel'
elif re.search(r', (LA|San Diego|MI|Sunnyvale|London|CA)$', desc):
return 'travel'
elif desc.endswith(' CA') or ', CA' in desc or 'Pacahamama' in desc:
return 'travel'
elif 'Cambridge' in desc:
return 'travel'
elif re.search(r'in (SAN)$', desc):
return 'travel'
elif 'Sea-Tac' in desc:
return 'travel'
elif 'wedding' in desc.lower() or 'cupertino' in desc.lower():
return 'travel'
elif 'cse latte' in desc.lower():
return 'espresso room'
elif re.search(r'\b[A-Z][a-z]+\'s', desc):
return 'friend'
else:
for shop in SHOPS:
if shop.lower() in desc.lower():
return shop
for shop in OTHER_SEATTLE_SHOPS:
if shop.lower() in desc.lower():
return 'other Seattle caf\xe9'
return 'other'
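# Illustrative (added) examples of how classify appears to behave:
#   classify('espresso at home')  -> 'home'
#   classify('cortado, Herkimer') -> 'Herkimer'
#   classify('drip, Milstead')    -> 'other Seattle caf\xe9'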
def dump_json(note_fn):
# Read coffees.
coffees = []
with open(note_fn) as f:
for line in f:
timestamp, desc = line.strip().split(None, 1)
dt = datetime.datetime.strptime(timestamp, '%Y-%m-%d-%H-%M-%S')
coffees.append(Coffee(dt, desc))
# Bucket the coffees by day.
by_date = defaultdict(list)
for coffee in coffees:
by_date[coffee.dt.date()].append(coffee)
# Emit a row for every date in the range. This captures those dates
# with zero coffees.
days = []
cur_date = min(by_date.keys())
end_date = max(by_date.keys())
while cur_date <= end_date:
days.append({
'date': str(cur_date),
'count': len(by_date[cur_date]),
})
cur_date += datetime.timedelta(days=1)
# Dump the individual coffees and their attributes.
flat_coffees = []
for coffee in coffees:
flat_coffees.append({
'datetime': str(coffee.dt),
'date': str(coffee.dt.date()),
'desc': coffee.desc,
'tod': '2001-01-01T{:%H:%M:%S}'.format(coffee.dt),
'mins': coffee.dt.hour * 60 + coffee.dt.minute,
'kind': classify(coffee.desc),
})
# Dump JSON object.
json.dump({
'days': days,
'coffees': flat_coffees,
}, sys.stdout, indent=2, sort_keys=True)
if __name__ == '__main__':
dump_json(sys.argv[1])
``` |
{
"source": "6Ulm/POT",
"score": 2
} |
#### File: POT/test/test_optim.py
```python
import numpy as np
import ot
def test_conditional_gradient():
n_bins = 100 # nb bins
np.random.seed(0)
# bin positions
x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
M /= M.max()
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
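    # Added note: f and df above are the 0.5 * ||G||_F**2 regularizer and its
    # gradient; ot.optim.cg below solves min <G, M> + reg * f(G) subject to
    # the marginal constraints checked by the assertions.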
reg = 1e-1
G, log = ot.optim.cg(a, b, M, reg, f, df, verbose=True, log=True)
np.testing.assert_allclose(a, G.sum(1))
np.testing.assert_allclose(b, G.sum(0))
def test_conditional_gradient2():
n = 1000 # nb samples
mu_s = np.array([0, 0])
cov_s = np.array([[1, 0], [0, 1]])
mu_t = np.array([4, 4])
cov_t = np.array([[1, -.8], [-.8, 1]])
xs = ot.datasets.make_2D_samples_gauss(n, mu_s, cov_s)
xt = ot.datasets.make_2D_samples_gauss(n, mu_t, cov_t)
a, b = np.ones((n,)) / n, np.ones((n,)) / n
# loss matrix
M = ot.dist(xs, xt)
M /= M.max()
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg = 1e-1
G, log = ot.optim.cg(a, b, M, reg, f, df, numItermaxEmd=200000,
verbose=True, log=True)
np.testing.assert_allclose(a, G.sum(1))
np.testing.assert_allclose(b, G.sum(0))
def test_generalized_conditional_gradient():
n_bins = 100 # nb bins
np.random.seed(0)
# bin positions
x = np.arange(n_bins, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.make_1D_gauss(n_bins, m=20, s=5) # m= mean, s= std
b = ot.datasets.make_1D_gauss(n_bins, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n_bins, 1)), x.reshape((n_bins, 1)))
M /= M.max()
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg1 = 1e-3
reg2 = 1e-1
G, log = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True, log=True)
np.testing.assert_allclose(a, G.sum(1), atol=1e-05)
np.testing.assert_allclose(b, G.sum(0), atol=1e-05)
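# Added note on the next test: solve_1d_linesearch_quad(a, b, c) is expected to
# return the argmin over t in [0, 1] of a*t**2 + b*t + c, so t**2 - t gives
# t = 0.5 and the concave cases land on an endpoint (0 or 1).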
def test_solve_1d_linesearch_quad_funct():
np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(1, -1, 0), 0.5)
np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 5, 0), 0)
np.testing.assert_allclose(ot.optim.solve_1d_linesearch_quad(-1, 0.5, 0), 1)
def test_line_search_armijo():
xk = np.array([[0.25, 0.25], [0.25, 0.25]])
pk = np.array([[-0.25, 0.25], [0.25, -0.25]])
gfk = np.array([[23.04273441, 23.0449082], [23.04273441, 23.0449082]])
old_fval = -123
# Should not throw an exception and return None for alpha
alpha, _, _ = ot.optim.line_search_armijo(lambda x: 1, xk, pk, gfk, old_fval)
assert alpha is None
``` |
{
"source": "6Ulm/unbalanced_gromov_wasserstein",
"score": 2
} |
#### File: unbalanced_gromov_wasserstein/examples/plot_matching_imbalanced_classes_3d.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import torch
from sklearn.cluster import KMeans
from solver.utils import euclid_dist
from ot.gromov import gromov_wasserstein
from unbalancedgw.vanilla_ugw_solver import log_ugw_sinkhorn
path = os.getcwd() + "/output"
if not os.path.isdir(path):
os.mkdir(path)
path = path + "/plots"
if not os.path.isdir(path):
os.mkdir(path)
def generate_data(nsample, ratio):
# Generate first ellipse
s = np.random.uniform(size=(nsample, 3))
x1 = np.zeros_like(s)
x1[:, 0] = (
np.sqrt(s[:, 0])
* np.cos(2 * np.pi * s[:, 1])
* np.cos(2 * np.pi * s[:, 2])
)
x1[:, 1] = 2 * np.sqrt(s[:, 0]) * np.sin(2 * np.pi * s[:, 1])
x1[:, 2] = (
np.sqrt(s[:, 0])
* np.cos(2 * np.pi * s[:, 1])
* np.sin(2 * np.pi * s[:, 2])
)
rot = 0.5 * np.sqrt(2) * np.array([[1, -1, 0], [1, 1, 0], [0, 0, 1]])
x1 = x1.dot(rot)
# Generate second circle
s = np.random.uniform(size=(nsample, 3))
x2 = np.zeros_like(s)
x2[:, 0] = (
np.sqrt(s[:, 0])
* np.cos(2 * np.pi * s[:, 1])
* np.cos(2 * np.pi * s[:, 2])
)
x2[:, 1] = np.sqrt(s[:, 0]) * np.sin(2 * np.pi * s[:, 1])
x2[:, 2] = (
np.sqrt(s[:, 0])
* np.cos(2 * np.pi * s[:, 1])
* np.sin(2 * np.pi * s[:, 2])
)
x2 = x2 + np.array([5.0, 0.0, 0.0])
x = np.concatenate((x1, x2)) + np.array([0.0, 0.0, 5.0])
# Generate second dataset from translation
y = np.concatenate((x1[:, :2], s[:, :2] + np.array([4.0, 0.0])))
angle = -np.pi / 4
x[:nsample] = x[:nsample].dot(
np.array(
[
[np.cos(angle), np.sin(angle), 0],
[-np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
)
y[nsample:] = (y[nsample:] - np.mean(y[nsample:], axis=0)).dot(
np.array(
[[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]
)
) + np.mean(y[nsample:], axis=0)
# Generate weights
a, b = np.ones(x.shape[0]) / x.shape[0], np.ones(y.shape[0]) / y.shape[0]
b[:n1], b[n1:] = (1 - ratio) * b[:n1], ratio * b[n1:]
b = b / np.sum(b)
return a, x, b, y
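# Added summary (not in the original): x holds 2*nsample 3-D points (two point
# clouds, one rotated and shifted), y holds 2*nsample 2-D points, a is uniform
# and b is reweighted by `ratio` so the two classes are imbalanced.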
def plot_density_matching(pi, a, x, b, y, idx, alpha, linewidth):
cmap1 = get_cmap("Blues")
cmap2 = get_cmap("Reds")
plt.figure(figsize=(6.0, 6.0))
ax = plt.axes(projection="3d")
ax.set_xlim(-2, 5)
ax.set_ylim(-3.5, 3.5)
ax.set_zlim(-1, 6)
ax.scatter(
x[:, 0],
x[:, 1],
x[:, 2],
c=cmap1(0.3 * (a - np.amin(b)) / np.amin(b) + 0.4),
s=10 * (a / a) ** 2,
zorder=1,
)
ax.scatter(
y[:, 0],
y[:, 1],
0.0,
c=cmap2(0.3 * (b - np.amin(b)) / np.amin(b) + 0.4),
s=10 * (b / a) ** 2,
zorder=1,
)
# Plot argmax of coupling
for i in idx:
m = np.sum(pi[i, :])
ids = (-pi[i, :]).argsort()[:30]
for j in ids:
w = pi[i, j] / m
t = [x[i][0], y[j][0]]
u = [x[i][1], y[j][1]]
v = [x[i][2], 0.0]
ax.plot(
t, u, v, c="k", alpha=w * alpha, linewidth=linewidth, zorder=0
)
# plt.xticks([])
# plt.yticks([])
plt.tight_layout()
if __name__ == "__main__":
n1 = 1000
dim = 2
rho = 0.5
eps = 0.01
n_clust = 20
ratio = 0.7
compute_balanced = True
# Generate gaussian mixtures translated from each other
a, x, b, y = generate_data(n1, ratio)
clf = KMeans(n_clusters=n_clust)
clf.fit(x)
idx = np.zeros(n_clust)
for i in range(n_clust):
d = clf.transform(x)[:, i]
idx[i] = np.argmin(d)
idx = idx.astype(int)
# Generate costs and transport plan
dx, dy = euclid_dist(x, x), euclid_dist(y, y)
if compute_balanced:
pi_b = gromov_wasserstein(dx, dy, a, b, loss_fun="square_loss")
plot_density_matching(pi_b, a, x, b, y, idx, alpha=1.0, linewidth=0.5)
plt.legend()
plt.savefig(path + f"/fig_matching_plan_balanced_ratio{ratio}.png")
plt.show()
dx, dy = torch.from_numpy(dx), torch.from_numpy(dy)
rho_list = [0.1]
peps_list = [2, 1, 0, -1, -2, -3]
for rho in rho_list:
pi = None
for p in peps_list:
eps = 10 ** p
print(f"Params = {rho, eps}")
a, b = torch.from_numpy(a), torch.from_numpy(b)
pi = log_ugw_sinkhorn(
a,
dx,
b,
dy,
init=pi,
eps=eps,
rho=rho,
rho2=rho,
nits_plan=1000,
tol_plan=1e-5,
nits_sinkhorn=1000,
tol_sinkhorn=1e-5,
)
print(f"Sum of transport plans = {pi.sum().item()}")
# Plot matchings between measures
a, b = a.data.numpy(), b.data.numpy()
pi_ = pi.data.numpy()
plot_density_matching(
pi_, a, x, b, y, idx, alpha=1.0, linewidth=1.0
)
plt.legend()
plt.savefig(
path + f"/fig_matching_plan_ugw_"
f"rho{rho}_eps{eps}_ratio{ratio}.png"
)
plt.show()
```
#### File: unbalanced_gromov_wasserstein/experiments_pu/compute_prediction.py
```python
import os
import numpy as np
from joblib import Parallel, delayed
import torch
import ot
from unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn
from unbalancedgw._batch_utils import compute_batch_flb_plan
import utils
from partial_gw import compute_cost_matrices
folder = "marginals_without_rescaling"
path = os.getcwd() + "/saved_plans"
if not os.path.isdir(path):
os.mkdir(path)
path = path + "/" + folder
if not os.path.isdir(path):
os.mkdir(path)
def euclid_dist(x, y):
"""
Computes the euclidean distance between two pointclouds, returning a
matrix whose coordinates are the distance between two points.
Parameters
----------
x: torch.Tensor of size [size_X, dim]
coordinates of the first group of vectors of R^d.
y: torch.Tensor of size [size_Y, dim]
coordinates of the second group of vectors of R^d.
Returns
-------
torch.Tensor of size [size_X, size_Y]
Matrix of all pairwise distances.
"""
return (x[:, None, :] - y[None, :, :]).norm(p=2, dim=2)
def prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl, prior, nb_try):
"""
Compute the tensor used as initialization for UGW.
The init is obtained by solving partial EMD as in Chapel et al. when the
domains are the same.
Parameters
----------
dataset_p: string
name of the dataset used for positive data
dataset_u: string
name of the dataset used for unlabeled data
n_pos: int
number of positives samples
n_unl: int
number of unlabeled samples
prior: float
proportion of positive samples in the unlabeled dataset
nb_try: int
number of folds to perform PU learning
Returns
-------
init_plan: torch.Tensor of size [nb_try, n_pos, n_unl]
Set of initialization plans used to init UGW.
"""
init_plan = torch.zeros([nb_try, n_pos, n_unl])
for i in range(nb_try):
# Draw dataset
P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
n_unl, prior, seed_nb=i)
Ctot, C1, C2, mu, nu = compute_cost_matrices(P, U, prior,
nb_dummies=10)
# Compute init
init_plan[i] = torch.tensor(ot.emd(mu, nu, Ctot)[:n_pos, :])
return init_plan
def compute_plan_ugw(dataset_p, dataset_u, n_pos, n_unl, prior, eps, rho, rho2,
nb_try, device=0):
# Set default type and GPU device
torch.cuda.set_device(device)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# keep constant to normalize cost, uniform over folds by taking first batch
# P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos, n_unl,
# prior, 0)
# U = torch.tensor(U.values,dtype=torch.float) # Convert to torch
# cst_norm = euclid_dist(U, U).max()
# Draw cost for all seeds as batch
Cx = torch.zeros([nb_try, n_pos, n_pos])
Cy = torch.zeros([nb_try, n_unl, n_unl])
for i in range(nb_try):
P, U, y_u = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
n_unl, prior, seed_nb=i)
P, U = torch.tensor(P.values, dtype=torch.float), \
torch.tensor(U.values, dtype=torch.float)
cx, cy = euclid_dist(P, P), euclid_dist(U, U)
Cx[i], Cy[i] = cx, cy
# Cx[i], Cy[i] = cx / cst_norm, cy / cst_norm
del cx, cy
# Compute init and weights
mu = (torch.ones([n_pos]) / n_pos).expand(nb_try, -1)
nu = (torch.ones([n_unl]) / n_unl).expand(nb_try, -1)
if P.shape[1] == U.shape[1]: # If domains are the same
init_plan = prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl,
prior, nb_try)
else:
_, _, init_plan = compute_batch_flb_plan(
mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,
nits_sinkhorn=50000, tol_sinkhorn=1e-5)
# Compute the marginal of init and save as file
pi_numpy = init_plan.sum(dim=1).cpu().data.numpy()
fname = f'/ugw_init_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
np.save(path + fname, pi_numpy)
# Set params and start the grid wrt entropic param eps
pi = log_batch_ugw_sinkhorn(mu, Cx, nu, Cy, init=init_plan,
eps=eps, rho=rho, rho2=rho2,
nits_plan=3000, tol_plan=1e-5,
nits_sinkhorn=3000, tol_sinkhorn=1e-6)
if torch.any(torch.isnan(pi)):
raise Exception(f"Solver got NaN plan with params (eps, rho) = "
f"{dataset_p, dataset_u, nb_try, eps, rho, rho2}")
# Compute the marginal and save as file
pi_numpy = pi.sum(dim=1).cpu().data.numpy()
fname = f'/ugw_plan_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
np.save(path + fname, pi_numpy)
print(
f"DONE = Dataset {dataset_p, dataset_u}, eps = {eps}, "
f"rho = {rho, rho2}, reps = {nb_try}")
return
if __name__ == '__main__':
parallel_gpu = True
# epsilon set to 2**-9 but can be optimized via grid-search
grid_eps = [2. ** k for k in range(-9, -8, 1)]
grid_rho = [2. ** k for k in range(-10, -4, 1)]
nb_try = 40
# List all tasks for the Caltech datasets
list_tasks = []
# # Matching similar features - prior set to 10%
n_pos, n_unl, prior = 100, 100, 0.1
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
'decaf_dslr']
list_data = [('surf_Caltech', d) for d in list_surf] + [
('decaf_caltech', d) for d in list_decaf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# # Matching similar features - prior set to 20%
n_pos, n_unl, prior = 100, 100, 0.2
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
list_data = [('surf_Caltech', d) for d in list_surf] + [
('decaf_caltech', d) for d in list_decaf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# Matching different features - prior set to 10%
n_pos, n_unl, prior = 100, 100, 0.1
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
'decaf_dslr']
list_data = [('surf_Caltech', d) for d in list_decaf] + [
('decaf_caltech', d) for d in list_surf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# # Matching different features - prior set to 20%
n_pos, n_unl, prior = 100, 100, 0.2
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
list_data = [('surf_Caltech', d) for d in list_decaf] + [
('decaf_caltech', d) for d in list_surf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
if parallel_gpu:
assert torch.cuda.is_available()
list_device = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
total_devices = torch.cuda.device_count()
print(
f"Parallel computation // Total GPUs available = {total_devices}")
pll = Parallel(n_jobs=total_devices)
iterator = (
delayed(compute_plan_ugw)(data_pos, data_unl, n_pos, n_unl, prior,
eps, rho, rho2, nb_try,
device=list_device[k % total_devices])
for
k, (
data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
nb_try) in
enumerate(list_tasks))
pll(iterator)
else:
print("Not Parallel")
for (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
nb_try) in list_tasks:
compute_plan_ugw(data_pos, data_unl, n_pos, n_unl, prior, eps, rho,
rho2, nb_try)
print(f'{data_pos, data_unl} done.')
``` |
{
"source": "6un9-h0-Dan/abcd",
"score": 2
} |
#### File: 6un9-h0-Dan/abcd/abcgraph.py
```python
import os
import sys
import cgi
import json
import hashlib
import binascii
import argparse
from abcd import ABCParser
from abcd.ABCParser import ABCdException as ABCdException
from swf.movie import SWF
import signal
import traceback
signal.signal(signal.SIGUSR1, lambda sig, stack: traceback.print_stack(stack))
def b2a_printable(s):
result = ''
for c in map(ord, s):
if c >= 0x20 and c <= 0x7e:
result += chr(c)
else:
result += '.'
return result
def hexdump(data):
result = ''
for i in range(0, len(data), 16):
hexstring = ' '.join([binascii.hexlify(a) for a in data[i:i+16]])
asciistring = b2a_printable(data[i:i+16])
result += cgi.escape("%07x: %-48s |%-16s|\n" % (i,
hexstring,
asciistring))
return result
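# Added note: each emitted line looks roughly like
#   0000000: 46 57 53 09 ... |FWS.............|
# i.e. a 7-digit hex offset, the hex bytes, then a printable-ASCII column.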
def disassembly_to_dict(body):
result = []
for instr in body.disassemble():
result.append({'name': instr.name,
'opcode': instr.opcode,
'operands': instr.operands})
return result
def create_method_node(parser,
body,
nodes,
edges,
bodies,
relate_to,
color,
label,
level):
if body == None:
opc_hash = "NO BODY"
disassembly = []
dump = ''
else:
#opc_hash = hashlib.md5(body.strip_operands()).hexdigest()
opc_hash = hashlib.md5(body.code).hexdigest()
disassembly = disassembly_to_dict(body)
dump = hexdump(body.code)
if opc_hash in bodies:
id_ = bodies[opc_hash]
node = nodes[id_]
if 'aka' in node:
node['aka'].append(label)
else:
node['aka'] = [label]
print " [-] Duplicate method body: %s (%s) (node: %s)" % (opc_hash,
label,
id_)
# Don't duplicate edges...
edge = {'from': id_, 'to': relate_to}
if edge not in edges:
edges.append(edge)
else:
id_ = len(nodes)
bodies[opc_hash] = id_
nodes.append({'label': label,
'id': id_,
'color': color,
'default_color': color,
'dump': dump,
'disassembly': disassembly,
'level': level})
edges.append({'from': id_, 'to': relate_to})
print " [-] New method body: %s (%s) (node: %s)" % (opc_hash,
label,
id_)
def add_method(parser,
meth_index,
nodes,
edges,
bodies,
relate_to,
color,
label,
level=5):
# Walk all bodies looking for one that references the provided method
# index. If found, add a node and edge.
for body in parser.method_bodies:
if body.method != meth_index:
continue
create_method_node(parser,
body,
nodes,
edges,
bodies,
relate_to,
color,
label,
level)
# Got a body for this one, return.
return
# Not every method has a body. In this case, create an empty body node.
create_method_node(parser,
None,
nodes,
edges,
bodies,
relate_to,
color,
label,
level)
def add_method_nodes(parser, obj, index, nodes, edges, bodies):
# Walk all traits for this object, looking for methods.
for trait in obj.traits:
if (trait.kind & 0x0F) != parser.TRAIT_METHOD:
continue
meth_name = parser.resolve_multiname(trait.name)
meth_index = parser.resolve_trait(trait)['method_index']
add_method(parser,
meth_index,
nodes,
edges,
bodies,
index,
'#CCBBAA',
meth_name)
def get_traits(parser, traits):
results = []
for trait in traits:
t = {}
t['name'] = parser.resolve_multiname(trait.name)
t['type'] = parser.TRAIT_KIND[trait.kind & 0x0F]
results.append(t)
return results
# Return a list of node indexes this file relates to...
def dump_graph(parser,
nodes,
edges,
args,
bodies={},
classes={},
instances={}):
indexes = []
for i, script in enumerate(parser.scripts):
#sname = "script_%s" % i
# Make a node for this script. Every script is unique...
#id_ = len(nodes)
#nodes.append({'label': sname,
# 'id': id_,
# 'color': 'magenta',
# 'default_color': 'magenta',
# 'level': 2})
#indexes.append(id_)
#script_index = id_
#print " [+] Found script: %s" % sname
for trait in script.traits:
if (trait.kind & 0x0F) != parser.TRAIT_CLASS:
continue
cname = parser.resolve_multiname(trait.name)
# If filtering and not a match, skip...
if args.class_names and cname not in args.class_names:
print " [-] Skipping class due to filter (%s)" % cname
continue
# If we have this class already, just use the node index.
# Otherwise, make a new node. Relate node to script node.
if cname in classes:
class_index = classes[cname]
print " [-] Duplicate class: %s (node: %s)!" % (cname,
class_index)
else:
id_ = len(nodes)
nodes.append({'label': "class: %s" % cname,
'id': id_,
'color': '#00CC00',
'default_color': '#00CC00',
'level': 3})
classes[cname] = id_
class_index = id_
print " [-] New class: %s (node: %s)!" % (cname, class_index)
#edges.append({'from': script_index, 'to': class_index})
indexes.append(class_index)
# Handle method for script init...
#add_method(parser,
# script.init,
# nodes,
# edges,
# bodies,
# class_index,
# '#00FFFF',
# "script init %s" % cname,
# level=5)
if not args.full:
continue
# Make instance node for this class and handle init and method nodes.
for instance in parser.instances:
iname = parser.resolve_multiname(instance.name)
if iname != cname:
continue
# Make a node (or use existing one) for this instance.
if iname in instances:
instance_index = instances[iname]
print " [-] Duplicate instance: %s (node: %s)" % (iname,
instance_index)
else:
id_ = len(nodes)
traits = get_traits(parser, instance.traits)
nodes.append({'label': "instance: %s" % iname,
'id': id_,
'color': 'grey',
'default_color': 'grey',
'traits': traits,
'level': 4})
edges.append({'from': class_index, 'to': id_})
instances[iname] = id_
instance_index = id_
print " [-] New instance: %s (node: %s)" % (iname,
instance_index)
# Handle methods and init for this instance.
add_method_nodes(parser,
instance,
instance_index,
nodes,
edges,
bodies)
# Add instance init method too...
add_method(parser,
instance.iinit,
nodes,
edges,
bodies,
instance_index,
'orange',
"instance init %s" % iname,
level=5)
# Got one instance, move along...
break
# Make class node for this script and handle init and method nodes.
for trait in script.traits:
if (trait.kind & 0x0F) != parser.TRAIT_CLASS:
continue
class_index = parser.resolve_trait(trait)['class_index']
klass = parser.classes[class_index]
# Add method for class init.
add_method(parser,
klass.cinit,
nodes,
edges,
bodies,
instance_index,
'yellow',
"class init %s" % cname,
level=5)
add_method_nodes(parser,
klass,
class_index,
nodes,
edges,
bodies)
break
return indexes
def __main__():
parser = argparse.ArgumentParser(description='Dump actionscript stuff.')
parser.add_argument('-s', '--class_names', action='append',
metavar='class', help='class name to dump')
parser.add_argument('-f', '--full', action='store_true',
help='full graph including methods and inits')
parser.add_argument('-m', '--metadata', action='store_true',
help='enable SWF metadata tags')
parser.add_argument('-b', '--binaries', action='store_true',
help='enable SWF binary tags')
parser.add_argument('files', metavar='file', nargs='+',
help='file to parse')
args = parser.parse_args()
if not args.files:
print "[!] Must provide a filename..."
return
nodes = []
edges = []
binaries = {}
metadata = {}
bodies = {}
classes = {}
instances = {}
for file_ in args.files:
print "[+] Opening file: %s" % file_
try:
f = open(file_, 'rb')
except Exception as e:
print "[!] %s" % str(e)
continue
try:
swiff = SWF(f)
except Exception as e:
print "[!] pyswf failure: %s" % str(e)
f.close()
continue
f.close()
parser = None
indexes = []
# Metadata and binary tags are stored until we have nodes returned
# for ABC elements. This ensures that we don't create nodes for these
# tags without also having something else meaningful.
metadata_tags = []
binary_tags = []
for tag in swiff.tags:
#print "Tag: %s" % tag.name
if tag.name == "Metadata" and args.metadata:
metadata_tags.append(tag)
if tag.name == "TagDefineBinaryData" and args.binaries:
binary_tags.append(tag)
elif tag.name in ["DoABC", "DoABCDefine"]:
if hasattr(tag, 'abcName'):
print " [-] ABCName: %s" % tag.abcName
parser = ABCParser.ABCParser(tag.bytes)
try:
parser.parse()
except ABCdException as e:
print "[!] Parsing error: %s" % str(e)
continue
indexes += dump_graph(parser,
nodes,
edges,
args,
bodies=bodies,
classes=classes,
instances=instances)
if indexes:
new_id = len(nodes)
nodes.append({'id': new_id,
'label': os.path.basename(file_),
'color': 'purple',
'default_color': 'purple',
'level': 0})
# Create edge between this new node and all returned indexes
for index in indexes:
edges.append({'from': new_id, 'to': index})
for tag in metadata_tags:
# Create a node for metadata blobs.
md_hash = hashlib.md5(tag.xmlString).hexdigest()
if md_hash in metadata:
                    md_id = metadata[md_hash]
else:
md_id = len(nodes)
metadata[md_hash] = md_id
nodes.append({'id': md_id,
'label': md_hash,
'details': tag.xmlString,
'color': 'blue',
'default_color': 'blue',
'level': 1})
edges.append({'from': new_id, 'to': md_id})
print " [-] Metadata: %s" % md_hash
for tag in binary_tags:
# Add a node for binary data blobs.
bin_hash = hashlib.md5(tag.data).hexdigest()
if bin_hash in binaries:
bin_id = binaries[bin_hash]
else:
bin_id = len(nodes)
binaries[bin_hash] = bin_id
# Include hexdump of first 512 bytes...
nodes.append({'id': bin_id,
'label': bin_hash,
'details': "Length: %s" % len(tag.data),
'color': 'pink',
'default_color': 'pink',
'dump': hexdump(tag.data[:512]),
'level': 1})
edges.append({'from': new_id, 'to': bin_id})
print " [-] Binary: %s" % bin_hash
else:
print "[!] No nodes created..."
print "[-] Nodes: %s" % len(nodes)
f = open("nodes.json", 'w')
f.write(json.dumps(nodes))
f.close()
print "[-] Edges: %s" % len(edges)
f = open("edges.json", 'w')
f.write(json.dumps(edges))
f.close()
if __name__ == '__main__':
__main__()
``` |
{
"source": "6un9-h0-Dan/bulk",
"score": 2
} |
#### File: bulk/scripts/get_attachments.py
```python
import argparse
from bulk import message
from bulk.helpers import *
def get_message(filename):
"""
Load message from file.
Keyword arguments:
filename -- path to message
"""
try:
with open(filename) as f:
lines = f.readlines()
except IOError:
print 'Cannot open email file %s, exiting!' % filename
raise
# Filter a list of strings by removing all strings starting with 'BULKMSG:'
# Then concatenate all the remaining strings into one string
# and return it
return ''.join([line for line in lines if not line.startswith('BULKMSG:')])
def save(name, contents):
"""
Write contents to file.
Keyword arguments:
name -- file to write to
contents -- contents to write to file
"""
try:
with open(name, 'wb') as f:
f.write(contents)
except IOError:
print 'Cannot write file %s to disk, skipping!' % name
if __name__ == '__main__':
"""
Main
"""
parser = argparse.ArgumentParser(description='A simple tool to pull \
attachments out of an email')
parser.add_argument(
'--infile',
type=str,
required=True,
help='Email file to pull attachments out of'
)
parser.add_argument(
'--output_path',
default='./',
type=str,
help='Optional path to write attachments to. \
Default is current directory.'
)
args = parser.parse_args()
print 'Reading email from file %s' % args.infile
msg = message.Message(None, None, None, get_message(args.infile))
(names, contents) = msg.get_attachments()
for i, name in enumerate(names):
print 'Writing attachment %s to disk' % name
save(directory_name(args.output_path) + name, contents[i])
```
#### File: bulk/scripts/smtp_server.py
```python
import smtpd
import email
import asyncore
import argparse
class CustomSMTPServer(smtpd.SMTPServer):
"""
A simple SMTP server.
"""
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Process each message as it arrives
"""
print 'Receiving message from:', peer
print 'Message addressed from:', mailfrom
print 'Message addressed to :', rcpttos
print 'Message length :', len(data)
filenames = []
attachments = []
msg = email.message_from_string(data)
for k, v in msg.items():
print k + " -- " + v
for part in msg.walk():
#help(part)
fn = part.get_filename()
if fn:
filenames.append(fn)
attachments.append(part.get_payload(decode=True))
if __name__ == '__main__':
"""
Main
"""
parser = argparse.ArgumentParser(description='A simple SMTP Server')
parser.add_argument(
'--bind_address',
default='127.0.0.1',
help='Address to bind to and listen on for incoming mail. \
Default is 127.0.0.1'
)
parser.add_argument(
'--bind_port',
default=1025,
type=int,
help='Port to bind to and to listen on for incoming mail. \
Default is 1025'
)
args = parser.parse_args()
server = CustomSMTPServer((args.bind_address, args.bind_port), None)
try:
print 'Starting Server'
asyncore.loop()
except KeyboardInterrupt:
print 'Stopping Server'
``` |
{
"source": "6un9-h0-Dan/CIRTKit",
"score": 3
} |
#### File: lib/core/session.py
```python
import time
import datetime
from lib.common.out import *
from lib.common.objects import File
from lib.core.database import Database
from lib.core.investigation import __project__
class Session(object):
def __init__(self):
self.id = None
# This will be assigned with the File object of the file currently
# being analyzed.
self.file = None
# Timestamp of the creation of the session.
self.created_at = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
# MISP event associated to the object
self.misp_event = None
class Sessions(object):
def __init__(self):
self.current = None
self.sessions = []
# Store the results of the last "find" command.
self.find = None
def close(self):
self.current = None
def is_set(self):
# Check if the session has been opened or not.
if self.current:
return True
else:
return False
def switch(self, session):
self.current = session
print_info("Switched to session #{0} on {1}".format(self.current.id, self.current.file.path))
def new(self, path=None, misp_event=None):
if path is None and misp_event is None:
print_error("You have to open a session on a path or on a misp event.")
return
if __project__.name:
pass
else:
print_error("You must open an investigation to store files")
return
session = Session()
total = len(self.sessions)
session.id = total + 1
if path is not None:
if self.is_set() and self.current.misp_event:
session.misp_event = self.current.misp_event
# Open a section on the given file.
session.file = File(path)
            # Try to look up the file in the database. If it is already
            # present, we reuse its stored name and tags.
row = Database().find(key='sha256', value=session.file.sha256)
if row:
session.file.name = row[0].name
session.file.tags = ', '.join(tag.to_dict()['tag'] for tag in row[0].tag)
print_info("Session opened on {0}".format(path))
if misp_event is not None:
if self.is_set() and self.current.file:
session.file = self.current.file
refresh = False
if self.current is not None and self.current.misp_event is not None \
and self.current.misp_event.event_id == misp_event.event_id:
refresh = True
session.misp_event = misp_event
if refresh:
print_info("Session on MISP event {0} refreshed.".format(misp_event.event_id))
else:
print_info("Session opened on MISP event {0}.".format(misp_event.event_id))
if session.file is not None:
# Loop through all existing sessions and check whether there's another
# session open on the same file and delete it. This is to avoid
# duplicates in sessions.
# NOTE: in the future we might want to remove this if sessions have
# unique attributes (for example, an history just for each of them).
for entry in self.sessions:
if entry.file is not None and entry.file.sha256 == session.file.sha256:
self.sessions.remove(entry)
# Add new session to the list.
self.sessions.append(session)
# Mark the new session as the current one.
self.current = session
__sessions__ = Sessions()
```
#### File: viper/pehash/pehasher.py
```python
from __future__ import division
import sys
import bz2
import string
import hashlib
try:
import pefile
HAVE_PEFILE = True
except ImportError:
HAVE_PEFILE = False
try:
import bitstring
HAVE_BITSTRING = True
except ImportError:
HAVE_BITSTRING = False
from lib.common.out import *
def calculate_pehash(file_path=None):
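    """
    Compute the peHash of the PE at file_path. Selected header fields
    (image characteristics, machine, stack/heap commit sizes) and per-section
    metadata, plus a per-section compressibility byte, are packed into a
    bitstring and hashed with SHA-1. Returns the hex digest, an empty string
    when dependencies or the path are missing, or "ERROR not PE" on failure.
    """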
if not HAVE_PEFILE:
        print_error("Missing dependency, install pefile (`pip install pefile`)")
return ''
if not HAVE_BITSTRING:
        print_error("Missing dependency, install bitstring (`pip install bitstring`)")
return ''
if not file_path:
return ''
try:
exe = pefile.PE(file_path)
#image characteristics
img_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Characteristics))
#pad to 16 bits
img_chars = bitstring.BitArray(bytes=img_chars.tobytes())
img_chars_xor = img_chars[0:8] ^ img_chars[8:16]
#start to build pehash
pehash_bin = bitstring.BitArray(img_chars_xor)
#subsystem -
sub_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Machine))
#pad to 16 bits
sub_chars = bitstring.BitArray(bytes=sub_chars.tobytes())
sub_chars_xor = sub_chars[0:8] ^ sub_chars[8:16]
pehash_bin.append(sub_chars_xor)
#Stack Commit Size
stk_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfStackCommit))
stk_size_bits = string.zfill(stk_size.bin, 32)
#now xor the bits
stk_size = bitstring.BitArray(bin=stk_size_bits)
stk_size_xor = stk_size[8:16] ^ stk_size[16:24] ^ stk_size[24:32]
#pad to 8 bits
stk_size_xor = bitstring.BitArray(bytes=stk_size_xor.tobytes())
pehash_bin.append(stk_size_xor)
#Heap Commit Size
hp_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfHeapCommit))
hp_size_bits = string.zfill(hp_size.bin, 32)
#now xor the bits
hp_size = bitstring.BitArray(bin=hp_size_bits)
hp_size_xor = hp_size[8:16] ^ hp_size[16:24] ^ hp_size[24:32]
#pad to 8 bits
hp_size_xor = bitstring.BitArray(bytes=hp_size_xor.tobytes())
pehash_bin.append(hp_size_xor)
#Section chars
for section in exe.sections:
#virutal address
sect_va = bitstring.BitArray(hex(section.VirtualAddress))
sect_va = bitstring.BitArray(bytes=sect_va.tobytes())
sect_va_bits = sect_va[8:32]
pehash_bin.append(sect_va_bits)
#rawsize
sect_rs = bitstring.BitArray(hex(section.SizeOfRawData))
sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
sect_rs_bits = string.zfill(sect_rs.bin, 32)
sect_rs = bitstring.BitArray(bin=sect_rs_bits)
sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
sect_rs_bits = sect_rs[8:32]
pehash_bin.append(sect_rs_bits)
#section chars
sect_chars = bitstring.BitArray(hex(section.Characteristics))
sect_chars = bitstring.BitArray(bytes=sect_chars.tobytes())
sect_chars_xor = sect_chars[16:24] ^ sect_chars[24:32]
pehash_bin.append(sect_chars_xor)
#entropy calulation
address = section.VirtualAddress
size = section.SizeOfRawData
raw = exe.write()[address+size:]
if size == 0:
kolmog = bitstring.BitArray(float=1, length=32)
pehash_bin.append(kolmog[0:8])
continue
bz2_raw = bz2.compress(raw)
bz2_size = len(bz2_raw)
#k = round(bz2_size / size, 5)
k = bz2_size / size
kolmog = bitstring.BitArray(float=k, length=32)
pehash_bin.append(kolmog[0:8])
m = hashlib.sha1()
m.update(pehash_bin.tobytes())
return m.hexdigest()
except:
return "ERROR not PE"
``` |
{
"source": "6un9-h0-Dan/crits",
"score": 2
} |
#### File: management/commands/create_default_collections.py
```python
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from optparse import make_option
from create_indexes import create_indexes
from create_locations import add_location_objects
from setconfig import create_config_if_not_exist
from create_default_dashboard import create_dashboard
from crits.core.crits_mongoengine import Action
from crits.core.user_role import UserRole
from crits.domains.domain import TLD
from crits.raw_data.raw_data import RawDataType
from crits.signatures.signature import SignatureType
class Command(BaseCommand):
"""
Script Class.
"""
option_list = BaseCommand.option_list + (
make_option('--drop',
'-d',
dest='drop',
action="store_true",
default=False,
help='Drop existing content before adding.'),
)
help = 'Creates default CRITs collections in MongoDB.'
def handle(self, *args, **options):
"""
Script Execution.
"""
drop = options.get('drop')
if drop:
print "Dropping enabled. Will drop content before adding!"
else:
print "Drop protection enabled. Will not drop existing content!"
populate_user_roles(drop)
populate_actions(drop)
populate_raw_data_types(drop)
populate_signature_types(drop)
# The following will always occur with every run of this script:
# - tlds are based off of a Mozilla TLD list so it should never
# contain entries outside of the ones provided.
populate_tlds(drop)
add_location_objects(drop)
create_dashboard(drop)
create_config_if_not_exist()
create_indexes()
def populate_user_roles(drop):
"""
Populate default set of user roles into the system.
:param drop: Drop the existing collection before trying to populate.
:type: boolean
"""
# define your user roles here
# note: you MUST have Administrator, Read Only, and a third option
# available!
user_roles = ['Administrator', 'Analyst', 'Read Only']
if drop:
UserRole.drop_collection()
if len(UserRole.objects()) < 1:
for role in user_roles:
ur = UserRole()
ur.name = role
ur.save()
print "User Roles: added %s roles!" % len(user_roles)
else:
print "User Roles: existing documents detected. skipping!"
def populate_actions(drop):
"""
Populate default set of Actions into the system.
:param drop: Drop the existing collection before trying to populate.
:type: boolean
"""
# define your Actions here
actions = ['Blocked Outbound At Firewall', 'Blocked Outbound At Desktop Firewall']
if drop:
Action.drop_collection()
if len(Action.objects()) < 1:
for action in actions:
ia = Action()
ia.name = action
ia.save()
print "Actions: added %s actions!" % len(actions)
else:
print "Actions: existing documents detected. skipping!"
def populate_raw_data_types(drop):
"""
Populate default set of raw data types into the system.
:param drop: Drop the existing collection before trying to populate.
:type: boolean
"""
# define your raw data types here
data_types = ['Text', 'JSON']
if drop:
RawDataType.drop_collection()
if len(RawDataType.objects()) < 1:
for data_type in data_types:
dt = RawDataType()
dt.name = data_type
dt.save()
print "Raw Data Types: added %s types!" % len(data_types)
else:
print "Raw Data Types: existing documents detected. skipping!"
def populate_signature_types(drop):
"""
Populate default set of signature types into the system.
:param drop: Drop the existing collection before trying to populate.
:type: boolean
"""
# define your signature types here
data_types = ['Bro', 'Snort', 'Yara']
if drop:
SignatureType.drop_collection()
if len(SignatureType.objects()) < 1:
for data_type in data_types:
dt = SignatureType()
dt.name = data_type
dt.save()
print "Signature Types: added %s types!" % len(data_types)
else:
print "Signature Types: existing documents detected. skipping!"
def populate_tlds(drop):
"""
Populate default set of TLDs into the system.
:param drop: Drop the existing collection before trying to populate.
:type: boolean
"""
if not drop:
print "Drop protection does not apply to effective TLDs"
TLD.drop_collection()
f = os.path.join(settings.SITE_ROOT, '..', 'extras', 'effective_tld_names.dat')
count = 0
for line in open(f, 'r').readlines():
line = line.strip()
if line and not line.startswith('//'):
line = line.replace("*.", "")
TLD.objects(tld=line).update_one(set__tld=line, upsert=True)
count += 1
print "Effective TLDs: added %s TLDs!" % count
``` |
{
"source": "6un9-h0-Dan/cti-python-stix2",
"score": 2
} |
#### File: test/v21/test_deterministic_ids.py
```python
from collections import OrderedDict
import datetime
import uuid
import pytest
import six
import stix2.base
import stix2.canonicalization.Canonicalize
import stix2.exceptions
from stix2.properties import (
BooleanProperty, DictionaryProperty, EmbeddedObjectProperty,
ExtensionsProperty, FloatProperty, HashesProperty, IDProperty,
IntegerProperty, ListProperty, StringProperty, TimestampProperty,
TypeProperty,
)
import stix2.v21
SCO_DET_ID_NAMESPACE = uuid.UUID("00abedb4-aa42-466c-9c01-fed23315a9b7")
def _uuid_from_id(id_):
dd_idx = id_.index("--")
uuid_str = id_[dd_idx+2:]
uuid_ = uuid.UUID(uuid_str)
return uuid_
def _make_uuid5(name):
"""
Make a STIX 2.1+ compliant UUIDv5 from a "name".
"""
if six.PY3:
uuid_ = uuid.uuid5(SCO_DET_ID_NAMESPACE, name)
else:
uuid_ = uuid.uuid5(
SCO_DET_ID_NAMESPACE, name.encode("utf-8"),
)
return uuid_
def test_no_contrib_props_defined():
class SomeSCO(stix2.v21._Observable):
_type = "some-sco"
_properties = OrderedDict((
('type', TypeProperty(_type, spec_version='2.1')),
('id', IDProperty(_type, spec_version='2.1')),
(
'extensions', ExtensionsProperty(
spec_version='2.1', enclosing_type=_type,
),
),
))
_id_contributing_properties = []
sco = SomeSCO()
uuid_ = _uuid_from_id(sco["id"])
assert uuid_.variant == uuid.RFC_4122
assert uuid_.version == 4
def test_json_compatible_prop_values():
class SomeSCO(stix2.v21._Observable):
_type = "some-sco"
_properties = OrderedDict((
('type', TypeProperty(_type, spec_version='2.1')),
('id', IDProperty(_type, spec_version='2.1')),
(
'extensions', ExtensionsProperty(
spec_version='2.1', enclosing_type=_type,
),
),
('string', StringProperty()),
('int', IntegerProperty()),
('float', FloatProperty()),
('bool', BooleanProperty()),
('list', ListProperty(IntegerProperty())),
('dict', DictionaryProperty(spec_version="2.1")),
))
_id_contributing_properties = [
'string', 'int', 'float', 'bool', 'list', 'dict',
]
obj = {
"string": "abc",
"int": 1,
"float": 1.5,
"bool": True,
"list": [1, 2, 3],
"dict": {"a": 1, "b": [2], "c": "three"},
}
sco = SomeSCO(**obj)
can_json = stix2.canonicalization.Canonicalize.canonicalize(obj, utf8=False)
expected_uuid5 = _make_uuid5(can_json)
actual_uuid5 = _uuid_from_id(sco["id"])
assert actual_uuid5 == expected_uuid5
def test_json_incompatible_timestamp_value():
class SomeSCO(stix2.v21._Observable):
_type = "some-sco"
_properties = OrderedDict((
('type', TypeProperty(_type, spec_version='2.1')),
('id', IDProperty(_type, spec_version='2.1')),
(
'extensions', ExtensionsProperty(
spec_version='2.1', enclosing_type=_type,
),
),
('timestamp', TimestampProperty()),
))
_id_contributing_properties = ['timestamp']
ts = datetime.datetime(1987, 1, 2, 3, 4, 5, 678900)
sco = SomeSCO(timestamp=ts)
obj = {
"timestamp": "1987-01-02T03:04:05.6789Z",
}
can_json = stix2.canonicalization.Canonicalize.canonicalize(obj, utf8=False)
expected_uuid5 = _make_uuid5(can_json)
actual_uuid5 = _uuid_from_id(sco["id"])
assert actual_uuid5 == expected_uuid5
def test_embedded_object():
class SubObj(stix2.base._STIXBase):
_type = "sub-object"
_properties = OrderedDict((
('value', StringProperty()),
))
class SomeSCO(stix2.v21._Observable):
_type = "some-sco"
_properties = OrderedDict((
('type', TypeProperty(_type, spec_version='2.1')),
('id', IDProperty(_type, spec_version='2.1')),
(
'extensions', ExtensionsProperty(
spec_version='2.1', enclosing_type=_type,
),
),
('sub_obj', EmbeddedObjectProperty(type=SubObj)),
))
_id_contributing_properties = ['sub_obj']
sub_obj = SubObj(value="foo")
sco = SomeSCO(sub_obj=sub_obj)
obj = {
"sub_obj": {
"value": "foo",
},
}
can_json = stix2.canonicalization.Canonicalize.canonicalize(obj, utf8=False)
expected_uuid5 = _make_uuid5(can_json)
actual_uuid5 = _uuid_from_id(sco["id"])
assert actual_uuid5 == expected_uuid5
def test_empty_hash():
class SomeSCO(stix2.v21._Observable):
_type = "some-sco"
_properties = OrderedDict((
('type', TypeProperty(_type, spec_version='2.1')),
('id', IDProperty(_type, spec_version='2.1')),
(
'extensions', ExtensionsProperty(
spec_version='2.1', enclosing_type=_type,
),
),
('hashes', HashesProperty()),
))
_id_contributing_properties = ['hashes']
with pytest.raises(stix2.exceptions.InvalidValueError):
SomeSCO(hashes={})
@pytest.mark.parametrize(
"json_escaped, expected_unescaped", [
("", ""),
("a", "a"),
(r"\n", "\n"),
(r"\n\r\b\t\\\/\"", "\n\r\b\t\\/\""),
(r"\\n", r"\n"),
(r"\\\n", "\\\n"),
],
)
def test_json_unescaping(json_escaped, expected_unescaped):
actual_unescaped = stix2.base._un_json_escape(json_escaped)
assert actual_unescaped == expected_unescaped
def test_json_unescaping_bad_escape():
with pytest.raises(ValueError):
stix2.base._un_json_escape(r"\x")
def test_deterministic_id_same_extra_prop_vals():
email_addr_1 = stix2.v21.EmailAddress(
value="<EMAIL>",
display_name="<NAME>",
)
email_addr_2 = stix2.v21.EmailAddress(
value="<EMAIL>",
display_name="<NAME>",
)
assert email_addr_1.id == email_addr_2.id
uuid_obj_1 = uuid.UUID(email_addr_1.id[-36:])
assert uuid_obj_1.variant == uuid.RFC_4122
assert uuid_obj_1.version == 5
uuid_obj_2 = uuid.UUID(email_addr_2.id[-36:])
assert uuid_obj_2.variant == uuid.RFC_4122
assert uuid_obj_2.version == 5
def test_deterministic_id_diff_extra_prop_vals():
email_addr_1 = stix2.v21.EmailAddress(
value="<EMAIL>",
display_name="<NAME>",
)
email_addr_2 = stix2.v21.EmailAddress(
value="<EMAIL>",
display_name="<NAME>",
)
assert email_addr_1.id == email_addr_2.id
uuid_obj_1 = uuid.UUID(email_addr_1.id[-36:])
assert uuid_obj_1.variant == uuid.RFC_4122
assert uuid_obj_1.version == 5
uuid_obj_2 = uuid.UUID(email_addr_2.id[-36:])
assert uuid_obj_2.variant == uuid.RFC_4122
assert uuid_obj_2.version == 5
def test_deterministic_id_diff_contributing_prop_vals():
email_addr_1 = stix2.v21.EmailAddress(
value="<EMAIL>",
display_name="<NAME>",
)
email_addr_2 = stix2.v21.EmailAddress(
value="<EMAIL>",
display_name="<NAME>",
)
assert email_addr_1.id != email_addr_2.id
uuid_obj_1 = uuid.UUID(email_addr_1.id[-36:])
assert uuid_obj_1.variant == uuid.RFC_4122
assert uuid_obj_1.version == 5
uuid_obj_2 = uuid.UUID(email_addr_2.id[-36:])
assert uuid_obj_2.variant == uuid.RFC_4122
assert uuid_obj_2.version == 5
def test_deterministic_id_no_contributing_props():
email_msg_1 = stix2.v21.EmailMessage(
is_multipart=False,
)
email_msg_2 = stix2.v21.EmailMessage(
is_multipart=False,
)
assert email_msg_1.id != email_msg_2.id
uuid_obj_1 = uuid.UUID(email_msg_1.id[-36:])
assert uuid_obj_1.variant == uuid.RFC_4122
assert uuid_obj_1.version == 4
uuid_obj_2 = uuid.UUID(email_msg_2.id[-36:])
assert uuid_obj_2.variant == uuid.RFC_4122
assert uuid_obj_2.version == 4
def test_id_gen_recursive_dict_conversion_1():
file_observable = stix2.v21.File(
name="example.exe",
size=68 * 1000,
magic_number_hex="50000000",
hashes={
"SHA-256": "841a8921140aba50671ebb0770fecc4ee308c4952cfeff8de154ab14eeef4649",
},
extensions={
"windows-pebinary-ext": stix2.v21.WindowsPEBinaryExt(
pe_type="exe",
machine_hex="014c",
sections=[
stix2.v21.WindowsPESection(
name=".data",
size=4096,
entropy=7.980693,
hashes={"SHA-256": "6e3b6f3978e5cd96ba7abee35c24e867b7e64072e2ecb22d0ee7a6e6af6894d0"},
),
],
),
},
)
assert file_observable.id == "file--ced31cd4-bdcb-537d-aefa-92d291bfc11d"
def test_id_gen_recursive_dict_conversion_2():
wrko = stix2.v21.WindowsRegistryKey(
values=[
stix2.v21.WindowsRegistryValueType(
name="Foo",
data="qwerty",
),
stix2.v21.WindowsRegistryValueType(
name="Bar",
data="42",
),
],
)
assert wrko.id == "windows-registry-key--36594eba-bcc7-5014-9835-0e154264e588"
``` |
{
"source": "6un9-h0-Dan/f5-common-python",
"score": 2
} |
#### File: tm/ltm/cipher.py
```python
from f5.bigip.resource import Collection
from f5.bigip.resource import OrganizingCollection
from f5.bigip.resource import Resource
class Cipher(OrganizingCollection):
"""BIG-IP® LTM cipher collection"""
def __init__(self, ltm):
super(Cipher, self).__init__(ltm)
self._meta_data['allowed_lazy_attributes'] = [
Rules,
Groups
]
class Rules(Collection):
"""BIG-IP® cipher rule sub-collection"""
def __init__(self, cipher):
super(Rules, self).__init__(cipher)
self._meta_data['allowed_lazy_attributes'] = [Rule]
self._meta_data['attribute_registry'] =\
{'tm:ltm:cipher:rule:rulestate': Rule}
class Rule(Resource):
"""BIG-IP® cipher rule sub-collection resource"""
def __init__(self, rule_s):
super(Rule, self).__init__(rule_s)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:ltm:cipher:rule:rulestate'
class Groups(Collection):
"""BIG-IP® cipher group sub-collection"""
def __init__(self, cipher):
super(Groups, self).__init__(cipher)
self._meta_data['allowed_lazy_attributes'] = [Group]
self._meta_data['attribute_registry'] =\
{'tm:ltm:cipher:group:groupstate': Group}
class Group(Resource):
"""BIG-IP® cipher group sub-collection resource"""
def __init__(self, group_s):
super(Group, self).__init__(group_s)
self._meta_data['required_creation_parameters'].update(('partition',))
self._meta_data['required_json_kind'] =\
'tm:ltm:cipher:group:groupstate'
```
#### File: test/unit/test_cipher.py
```python
import mock
import pytest
from f5.bigip import ManagementRoot
from f5.bigip.tm.ltm.cipher import Group
from f5.bigip.tm.ltm.cipher import Rule
from f5.sdk_exception import MissingRequiredCreationParameter
@pytest.fixture
def FakeCipherRule():
fake_rule_s = mock.MagicMock()
fake_rule = Rule(fake_rule_s)
return fake_rule
@pytest.fixture
def FakeCipherGroup():
fake_group_s = mock.MagicMock()
fake_group = Group(fake_group_s)
return fake_group
class TestCipherRuleCreate(object):
def test_create_two(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
r1 = b.tm.ltm.cipher.rules.rule
r2 = b.tm.ltm.cipher.rules.rule
assert r1 is not r2
def test_create_no_args(self, FakeCipherRule):
with pytest.raises(MissingRequiredCreationParameter):
FakeCipherRule.create()
class TestCipherGroupCreate(object):
def test_create_two(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
g1 = b.tm.ltm.cipher.groups.group
g2 = b.tm.ltm.cipher.groups.group
assert g1 is not g2
def test_create_no_args(self, FakeCipherGroup):
with pytest.raises(MissingRequiredCreationParameter):
FakeCipherGroup.create()
``` |
{
"source": "6un9-h0-Dan/malchive",
"score": 2
} |
#### File: malchive/active_discovery/meterpreter_reverse_shell.py
```python
import logging
import struct
import hashlib
import requests
from malchive.helpers import discovery
from string import ascii_letters, digits
from random import sample, random
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
MAX_PAYLOAD_SIZE = 200000
# tested using metasploit v4.17.15-dev-
class MeterpreterReverseShell(discovery.Discover):
def tickle_socket(self, co, timeout):
"""
Probe the server for data and make a determination based on
send/recv traffic.
"""
payload = bytearray()
s = self.connect(co, timeout)
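        # A Meterpreter reverse_tcp handler stages its payload by first
        # sending a 4-byte payload length, then the stage itself; both are
        # read and sanity-checked below.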
# size of incoming payload
data = s.recv(4)
size = struct.unpack('I', data)[0]
log.info('Response received! Waiting for %s byte payload'
'...' % size)
if size > MAX_PAYLOAD_SIZE:
log.info('Payload size exceeded maximum.')
return co
while len(payload) < size:
data = s.recv(size)
if not data:
break
payload += data
        if self.check_payload(payload):
co.details['payload'] = payload
co.success = True
return co
def tickle_http(self, co, timeout):
location = self.gen_msf_uri()
url = f"{co.protocol}://{co.ip}:{co.port}/{location}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/79.0.3945.117 Safari/537.36'
}
log.info('Making attempt using: %s' % url)
resp = requests.get(url, allow_redirects=True,
headers=headers, timeout=timeout)
if resp.status_code == 200:
payload = resp.content
if self.check_payload(payload):
co.details['payload'] = payload
co.success = True
return co
def checksum8(self, string: str) -> int:
"""
Calculate the 8-bit checksum for the given string. Taken from:
https://www.veil-framework.com/veil-framework-2-4-0-reverse-http/
"""
return sum([ord(char) for char in string]) % 0x100
def gen_msf_uri(self) -> str:
"""
Generate a MSF compatible URI. Taken from:
https://www.veil-framework.com/veil-framework-2-4-0-reverse-http/
"""
charset = ascii_letters + digits
msf_uri = ''
for x in range(64):
uri = ''.join(sample(charset, 3))
r = ''.join(sorted(list(charset), key=lambda *args: random()))
for char in r:
# URI_CHECKSUM_INITW (Windows)
if self.checksum8(uri + char) == 92:
msf_uri = uri + char
return msf_uri
def check_payload(self, payload):
parameters = [
b'core_channel_open',
b'core_channel_write',
b'core_channel_close',
b'core_channel_read',
b'core_channel_seek',
]
if payload.startswith(b'MZ') and \
all(p in payload for p in parameters):
return True
else:
return False
def write_payload(self, payload, directory='.'):
"""
Write the retrieved payload to disk.
"""
md5 = hashlib.md5(payload).hexdigest()
fname = '%s/%s.msf' % (directory, md5)
with open(fname, 'wb') as f:
f.write(payload)
log.info('%s written to disk!' % fname)
def initialize_parser():
parser = discovery.generic_args()
parser.add_argument('-w', '--write', action='store_true',
default=False,
help='Write retrieved meterpreter payloads to disk '
'using [MD5.msf] as the filename.')
parser.add_argument('-t', '--target-directory', type=str, default='.',
help='Target directory to write files. Defaults to '
'executing directory.')
return parser
def main():
import os
import sys
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
directory = args.target_directory
if directory != '.':
if not os.path.isdir(directory):
            log.error('Target directory %s does not exist. Exiting...'
                      % directory)
sys.exit(2)
d = MeterpreterReverseShell(
ips=args.ipaddress,
ports=args.port,
domains=args.domain,
timeout=args.timeout,
protocols=args.protocol,
)
d.run()
for co in d.results:
if co.success:
print('Successfully discovered candidate! [%s] %s:%s' %
(co.protocol, co.ip, co.port))
if args.write and 'payload' in co.details.keys():
d.write_payload(co.details['payload'], directory)
if __name__ == '__main__':
main()
```
#### File: malchive/active_discovery/spivy.py
```python
import logging
import struct
import camellia
import argparse
from malchive.helpers import discovery
from random import choice, randint
from string import digits
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
class SecurePoisonIvy(discovery.Discover):
def __init__(self,
key: str = None,
*args,
**kwargs):
self.key: str = ''
self.parse_key(key)
log.debug('Using supplied key: \'%s\'' % key)
self.challenge_response = bytearray()
super().__init__(*args, **kwargs)
def parse_key(self, key):
key_size = 32
if len(key) > key_size:
raise argparse.ArgumentError(
None,
'The supplied key is too long!'
)
if len(key) == 0:
raise argparse.ArgumentError(
None,
'Need to supply a key!'
)
if len(key) < key_size:
key += '\x00' * (key_size - len(key))
self.key = key
def craft_payload(self):
"""
Craft an SPIVY packet mimicking the beginning of the challenge
request/response chain. Save the expected response.
"""
junk_size = randint(1, 16)
junk_data = bytearray(
[
choice([i for i in range(0, 256)])
for i in range(0, junk_size)
])
challenge_request = bytes(
[
choice([i for i in range(0, 256)])
for i in range(0, 256)
])
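        # Wire layout assembled below:
        #   [1 byte: junk length][junk bytes][1 byte: junk length * 2 & 0xff]
        #   [256 random challenge bytes]
        # The expected reply (the challenge encrypted with Camellia-ECB under
        # the supplied key) is computed here and saved so validate_response()
        # can compare against it.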
payload = \
struct.pack('B', junk_size) + \
junk_data + \
struct.pack('B', (junk_size*2 & 0xff)) + \
challenge_request
c = camellia.CamelliaCipher(bytes(self.key.encode('utf-8')),
mode=camellia.MODE_ECB)
self.challenge_response = c.encrypt(challenge_request)
return payload
def validate_response(self, response):
"""
We can just take the last 0x100 bytes from the challenge
response and compare rather than parsing the packet.
"""
return self.challenge_response == response[-0x100:]
def tickle_http(self, co, timeout=5):
"""
Probe the server for data and make a determination based on
request/response.
"""
payload = self.craft_payload()
# informed by observations of traffic, we fuzz here
cookie_id = ''.join(
[
choice(digits)
for i in range(0, 17)
])
# informed by observations of traffic, we fuzz here again
uri_string = ''.join(
[
choice(digits)
for i in range(0, 16)
])
url = f"{co.protocol}://{co.ip}:{co.port}/{uri_string}"
http_data = f"POST {url} HTTP/1.1\r\n" \
f"Cookie: id={cookie_id}\r\n" \
f"Content-Length: {str(len(payload))}\r\n" \
f"\r\n"
http_request = http_data.encode('utf-8') + payload
# SPIVY 'technically' just uses HTTP over a TCP socket and
# does not have an 'Accept-Encoding' header. Components of
# the requests library force this downstream...
s = self.connect(co, timeout)
s.sendall(http_request)
response = s.recv(512)
if len(response) > 0x100:
if self.validate_response(response):
co.success = True
log.info('Positive match for SPIVY controller!')
else:
log.info('Retrieved data not an SPIVY challenge response.')
else:
log.info('Retrieved data too short for SPIVY response.')
return co
def tickle_socket(self, co, timeout=5):
"""
Probe the server for data and make a determination based on
send/recv traffic.
"""
s = self.connect(co, timeout)
payload = self.craft_payload()
s.send(payload)
response = s.recv(512)
if len(response) > 0x100:
if self.validate_response(response):
co.success = True
log.info('Positive match for SPIVY controller!')
else:
log.info('Retrieved data not an SPIVY challenge response.')
else:
log.info('Retrieved data too short for SPIVY response.')
return co
def initialize_parser():
parser = discovery.generic_args()
parser.add_argument('-k', '--key', default='admin',
help='The key used to crypt traffic. Default: admin')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
d = SecurePoisonIvy(
key=args.key,
ips=args.ipaddress,
ports=args.port,
domains=args.domain,
timeout=args.timeout,
protocols=args.protocol,
)
d.run()
for co in d.results:
if co.success:
print('Successfully discovered candidate! [%s] %s:%s' %
(co.protocol, co.ip, co.port))
if __name__ == '__main__':
main()
```
#### File: malchive/extras/active_discovery_template.py
```python
import logging
import requests
from malchive.helpers import discovery
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
class ExampleTemplate(discovery.Discover):
def tickle_http(self, co, timeout):
"""
Probe the server for data and make a determination based on
request/response.
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/34.0.1847.116 Safari/537.36'
}
url = f"{co.protocol}://{co.ip}:{co.port}"
r = requests.get(url, headers=headers, timeout=timeout)
if r.status_code == 200:
co.success = True
return co
def tickle_socket(self, co, timeout=5):
"""
Probe the server for data and make a determination based on
send/recv traffic.
"""
s = self.connect(co, timeout)
data = s.recv(0x100)
if len(data):
# if we validated the response, great set success to true
co.success = True
# details is a dictionary where we can capture
# any additional items of interest if desired
co.details['size'] = len(data)
else:
log.info('Retrieved data did not match desired parameters.')
return co
def initialize_parser():
parser = discovery.generic_args()
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
d = ExampleTemplate(
ips=args.ipaddress,
ports=args.port,
domains=args.domain,
timeout=args.timeout,
protocols=args.protocol,
)
d.run()
for co in d.results:
if co.success:
print('Successfully discovered candidate! [%s] %s:%s' %
(co.protocol, co.ip, co.port))
if __name__ == '__main__':
main()
```
#### File: malchive/utilities/apihash.py
```python
import os
import sys
import sqlite3
import logging
import pefile
import argparse
import binascii
import datetime
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
def ror(data, key):
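    # 32-bit rotate-right of 'data' by 'key' bit positions.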
return (data >> key | data << (32 - key)) & 0xFFFFFFFF
def gen_standard_hash(func, key):
"""
Perform standard hashing algorithm commonly observed in practice.
:param bytearray func: Name of the function to hash.
:param int key: Number for rotations to perform.
:return: hash
:rtype: int
"""
h = 0
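    # The loop below implements the classic additive ROR hash used by
    # position-independent shellcode, e.g. gen_standard_hash(b'LoadLibraryA', 0xd)
    # for the common ROR-13 variant (resulting value not asserted here).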
for c in func:
h = c + ror(h, key) & 0xFFFFFFFF
return h
# Ref:
# https://github.com/rapid7/metasploit-framework/blob/master/external/source/shellcode/windows/x86/src/hash.py
def convert_unicode(string, uppercase=True):
result = ""
if uppercase:
string = string.upper()
for c in string:
result += c + "\x00"
return result
# Ref:
# https://github.com/rapid7/metasploit-framework/blob/master/external/source/shellcode/windows/x86/src/hash.py
def gen_metasploit_hash(lib, func, key):
"""
Perform Metasploit's hashing algorithm.
:param bytearray lib: Name of the library associated with function.
Used in hash calculation.
:param bytearray func: Name of the function to hash.
:param int key: Number for rotations to perform.
:return: hash
:rtype: int
"""
module_hash = 0
function_hash = 0
for c in convert_unicode(lib + "\x00"):
module_hash = ror(module_hash, key)
module_hash += ord(c)
for c in bytes(func + b'\x00'):
function_hash = ror(function_hash, key)
function_hash += c
h = module_hash + function_hash & 0xFFFFFFFF
return h
def gen_crc32_hash(func):
"""
Perform a simple CRC32 computation of the supplied function name.
:param str func: Name of the function to hash.
:return: hash
:rtype: int
"""
h = binascii.crc32(func)
if h > 0x7FFFFFFF:
        h -= 0x100000000
return h
def gen_js_hash(func):
"""
Perform JSHash computation of the supplied function name.
:param bytearray func: Name of the function to hash.
:return: hash
:rtype: int
"""
h = 1315423911
for c in func:
h ^= ((h << 5) + c + (h >> 2) & 0xFFFFFFFF)
return h
def gen_carberp_hash(func):
"""
Perform hash computation of function name using Carberp algorithm.
:param bytearray func: Name of the function to hash.
:return: hash
:rtype: int
"""
h = 0
for c in func:
h = ((h << 7) & 0xFFFFFFFE) | (h >> (32 - 7))
h = h ^ c
return h
def initialize_parser():
parser = argparse.ArgumentParser(
description='Generate an sqlite database of API hashes using '
'algorithms commonly observed in shellcode. Input files'
' must be valid Windows DLLs.')
parser.add_argument('dll', metavar='FILE', nargs='*',
help='Full path to the DLL(s) to be processed.')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Output additional information when '
'processing (mostly for debugging purposes).')
parser.add_argument('-db', '--database-name', type=str,
default='apihashes.db',
help='Name of database file to be generated.'
' (default: apihashes.db)')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
if len(args.dll) == 0:
log.error('No files were supplied. Please provide a DLL for hash '
'generation.')
p.print_help()
sys.exit(2)
for filename in args.dll:
basename = os.path.basename(filename)
log.info('Generating hashes for %s...' % basename)
if not os.path.isfile(filename):
log.warning('Failed to find file %s, skipping...' % filename)
continue
f = open(filename, 'rb')
stream = f.read()
symbols = []
try:
pe = pefile.PE(data=stream)
symbols = pe.DIRECTORY_ENTRY_EXPORT.symbols
except pefile.PEFormatError:
log.error('%s not a pe, skipping...' % basename)
continue
# Generate hashes as a list of tuples
entries = []
for exp in symbols:
if exp.name is None:
continue
entries.append(('Standard ROR 0x7', basename,
bytes(exp.name).decode('ascii'),
gen_standard_hash(exp.name, 0x7)))
entries.append(('Standard ROR 0xd', basename,
bytes(exp.name).decode('ascii'),
gen_standard_hash(exp.name, 0xd)))
entries.append(('Metasploit ROR 0xd', basename,
bytes(exp.name).decode('ascii'),
gen_metasploit_hash(basename, exp.name, 0xd)))
entries.append(('CRC32', basename, bytes(exp.name).decode('ascii'),
gen_crc32_hash(exp.name)))
# I've seen this twist as well where the null byte
# is part of the computation for crc32
entries.append(('CRC32', basename,
(bytes(exp.name + b'\\x00')).decode('ascii'),
gen_crc32_hash(bytes(exp.name + b'\x00'))))
entries.append(('JSHash', basename,
bytes(exp.name).decode('ascii'),
gen_js_hash(exp.name)))
entries.append(('Carberp', basename,
bytes(exp.name).decode('ascii'),
gen_carberp_hash(exp.name)))
if len(entries) == 0:
log.info('No export entries were found')
continue
log.info('Found %s export entries...' % len(symbols))
log.info('Adding %s hashes to database...' % len(entries))
start = datetime.datetime.now()
conn = sqlite3.connect(args.database_name)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS apihashes
(Algorithm text, Module text, Function text, Hash int)
""")
cursor.executemany("INSERT INTO apihashes (Algorithm, Module, "
"Function, Hash) VALUES (?, ?, ?, ?)",
entries)
conn.commit()
cursor.close()
conn.close()
end = datetime.datetime.now()
log.info('Inserted %s new entries in %s...' %
(len(entries), end - start))
print('Complete! Any computed hashes saved to %s' % args.database_name)
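    # The resulting table can then be queried when resolving hashes seen in
    # shellcode, e.g.:
    #   SELECT Algorithm, Module, Function FROM apihashes WHERE Hash = ?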
if __name__ == '__main__':
main()
```
#### File: malchive/utilities/comguidtoyara.py
```python
import re
import os
import sys
import struct
import binascii
import logging
import argparse
import progressbar
from datetime import datetime
from Registry import Registry
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
def iid_text_to_bin(iid):
"""
Process an IID and convert to a YARA compliant search string.
Below describes the GUID structure used to describe an identifier
for a MAPI interface:
https://msdn.microsoft.com/en-us/library/office/cc815892.aspx
:param str iid: Name of the IID to convert
:return: bin_yara
:rtype: str
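    Example (illustrative, computed by hand and not verified against a live
    run): the IDispatch IID '{00020400-0000-0000-C000-000000000046}' should
    become '{ 00 04 02 00 00 00 00 00 C0 00 00 00 00 00 00 46 }', i.e. the
    GUID's little-endian in-memory layout rendered as a YARA hex string.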
"""
# remove begin and end brackets
guid = re.sub('[{}-]', '', iid)
# convert to binary representation
bin_struc = struct.unpack("IHH8B", binascii.a2b_hex(guid))
bin_str = '%.8X%.4X%.4X%s' % \
(bin_struc[0], bin_struc[1], bin_struc[2],
(''.join('{:02X}'.format(x) for x in bin_struc[3:])))
# create YARA compliant search string
bin_yara = '{ ' + ' '.join(a + b for a, b in
zip(bin_str[::2], bin_str[1::2])) + ' }'
return bin_yara
def enumerate_com_interfaces(reg_keys, show_bar=False):
"""
Iterate through registry keys and retrieve unique interface identifiers
and their name.
:param list reg_keys: List of registry key objects from python-registry
module.
    :param bool show_bar: Show a progressbar as interface entries are processed.
:return: com
:rtype: dict
"""
total_iters = 0
counter = 0
com = {}
for key in reg_keys:
total_iters += len(key.subkeys())
if show_bar:
print('Processing %s results...' % total_iters)
bar = progressbar.ProgressBar(redirect_stdout=True,
max_value=total_iters)
for key in reg_keys:
for subkey in key.subkeys():
for v in list(subkey.values()):
# Per MS documentation, Interface names must start with the
# 'I' prefix, so we limit our values here as well.
# Not doing so can lead to some crazy names and conflicting
# results!
# https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/names-of-classes-structs-and-interfaces
if v.value_type() == Registry.RegSZ \
and v.name() == '(default)' \
and v.value().startswith('I'):
bin_guid = iid_text_to_bin(subkey.name())
# Names with special characters/spaces are truncated
stop_chars = ['_', '<', '[', ' ']
index = min(v.value().find(i)
if i in v.value()
else
len(v.value())
for i in stop_chars)
value = v.value()[:index]
if value not in com:
com[value] = [bin_guid]
elif bin_guid not in com[value]:
com[value].append(bin_guid)
if show_bar:
bar.update(counter)
counter += 1
if show_bar:
bar.finish()
return com
def initialize_parser():
parser = argparse.ArgumentParser(
description="Crawls windows registry to hunt for and convert IIDs for "
"COM interfaces to binary YARA signatures. The submitted "
"hives must be from HKLM\\SOFTWARE. Make copies of "
"these files off an active Windows OS using the command "
"'reg save HKLM\\SOFTWARE hklm_sft.hiv' when running as "
"administrator.")
parser.add_argument('hive', metavar='FILE', nargs='*',
help='Full path to the registry hive to be processed.')
parser.add_argument('-o', '--output-filename', type=str,
default='com_interface_ids.yara',
help='Filename to write YARA signatures '
'to (default: com_interface_ids.yara)')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Output additional information when processing '
'(mostly for debugging purposes).')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
if len(args.hive) == 0:
p.print_help()
sys.exit(2)
keys = []
for hive in args.hive:
print('Collecting IIDs from %s...' % hive)
if not os.path.isfile(hive):
log.warning('Failed to find file %s. Skipping...' % hive)
continue
try:
reg = Registry.Registry(hive)
except Registry.RegistryParse.ParseException:
log.warning('Error parsing %s. Skipping...' % hive)
continue
try:
keys.append(reg.open("Classes\\Interface"))
except Registry.RegistryKeyNotFoundException:
log.warning("Couldn't find 'Classes\\Interface' key in %s." % hive)
try:
keys.append(reg.open("Classes\\Wow6432Node\\Interface"))
except Registry.RegistryKeyNotFoundException:
            log.warning("Couldn't find 'Classes\\Wow6432Node\\Interface' "
                        "key in %s." % hive)
com_signatures = enumerate_com_interfaces(keys, True)
counter = 0
total_rules = len(com_signatures)
print('Generating %s YARA signatures...' % total_rules)
bar = progressbar.ProgressBar(redirect_stdout=True, max_value=total_rules)
yara_rule = '// %s\n// COM IID YARA sig collection.\n// ' \
'Autogenerated on %s\n\n' % (__author__, datetime.now())
for name, rules in com_signatures.items():
yara_rule += 'rule %s\n{\n\t' \
'strings:' % name
if len(rules) > 1:
for i in range(0, len(rules)):
yara_rule += '\n\t\t$%s_%s = %s' % (name, i, rules[i])
else:
yara_rule += '\n\t\t$%s = %s' % (name, rules[0])
yara_rule += '\n\tcondition:\n\t\tany of them\n}\n'
bar.update(counter)
counter += 1
bar.finish()
print('Writing YARA rules to %s' % args.output_filename)
with open(args.output_filename, 'w') as f:
f.write(yara_rule)
f.close()
if __name__ == '__main__':
main()
```
#### File: malchive/utilities/gensig.py
```python
import os
import sys
import json
import logging
import argparse
import binascii
import pefile
import hashlib
import difflib
from pathlib import Path
from capstone import Cs, CS_ARCH_X86, CS_MODE_16, CS_MODE_32, CS_MODE_64
from capstone.x86 import X86_OP_IMM, X86_OP_MEM
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
# Reference:
# https://wiki.osdev.org/X86-64_Instruction_Encoding#Legacy_Prefixes
MAX_PREFIX_SIZE = 4
def _to_yara_hex_string(b, wildcards=None):
"""
Generate a YARA compliant hex string that is consumable by YARA.
"""
b = binascii.hexlify(b).decode('utf-8')
hex_string = ' '.join(a + b for a, b in zip(b[::2], b[1::2]))
if wildcards is None:
return hex_string
hex_list = hex_string.split(' ')
for offset, size in wildcards:
# get rid of unnecessary ??'s at the end
if offset + size == len(hex_list):
hex_list = hex_list[:offset]
break
for i in range(offset, offset + size):
if i == offset:
hex_list[i] = '[%s]' % size
else:
hex_list[i] = '??'
# remove ??s now that we've identified them
hex_list = list(filter('??'.__ne__, hex_list))
# aesthetically '??' instead of '[1]' seems like a better output
hex_list = ['??' if x == '[1]' else x for x in hex_list]
return ' '.join(hex_list)
def _get_opcode_length(op):
"""
Get length of operand bytes.
"""
while not op[-1]:
# encountered bug for the following
# 00 04 31 add byte ptr [rcx + rsi], al
if len(op) == 1:
break
op.pop()
return len(op)
def generate_yara_rule(rule, name):
"""
Print a compliant YARA rule using supplied data.
:param str rule: YARA compliant hex string sequence.
:param str name: Name of the rule.
:return: str yara_syntax: Created YARA rule.
:rtype: str
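    If present, ~/.gensig.json is read as flat key/value JSON and emitted as
    the rule's meta section; keys are up to the analyst (hypothetical
    example: {"author": "analyst", "reference": "tracking-id-1234"}).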
"""
meta = ''
meta_filename = '%s/.gensig.json' % Path.home()
if os.path.isfile(meta_filename):
try:
with open(meta_filename) as f:
meta_json = json.load(f)
meta = 'meta:\n'
for k in meta_json:
meta += ' %s = \"%s\"\n' % (k, meta_json[k])
meta += '\n '
except json.JSONDecodeError:
log.error('JSON in %s not valid!' % meta_filename)
yara_syntax = 'rule %s\n{\n ' \
'%s' \
'strings:\n ' \
'$code = %s\n\n ' \
'condition:\n ' \
'$code\n}\n' % (name, meta, rule)
return yara_syntax
def generate_mnemonic(buff, mode):
"""
Return a mnemonic only result of the byte sequence.
:param bytes buff: Complete data stream.
:param int mode: Capstone hardware mode.
:return: YARA compliant hex string sequence.
:rtype: str
"""
md = Cs(CS_ARCH_X86, mode)
md.detail = True
mnemonic_tracker = []
for insn in md.disasm(buff, 0x0):
op_len = _get_opcode_length(insn.opcode)
offset = insn.address + op_len
r_size = len(insn.bytes) - op_len
mnemonic_tracker.append((offset, r_size))
hex_bytes = '{ ' + _to_yara_hex_string(buff, mnemonic_tracker) + ' }'
return hex_bytes
def generate_pic(buff, mode):
"""
Return a position independent result of the byte sequence.
:param bytes buff: Complete data stream.
:param int mode: Capstone hardware mode.
:return: YARA compliant hex string sequence.
:rtype: str
"""
md = Cs(CS_ARCH_X86, mode)
md.detail = True
relative_tracker = []
relative = False
offset = 0
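    # For each instruction with an immediate or memory operand, record the
    # byte range following the opcode (accounting for prefix/REX/ModRM/SIB
    # bytes) so those operand bytes get wildcarded in the YARA string.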
for insn in md.disasm(buff, 0x0):
if relative:
r_size = insn.address - offset
relative_tracker.append((offset, r_size))
relative = False
if insn.op_count(X86_OP_IMM) == 1 or insn.op_count(X86_OP_MEM) == 1:
offset = insn.address + _get_opcode_length(insn.opcode)
relative = True
if insn.modrm > 0:
offset += 1
if insn.rex > 0:
offset += 1
if insn.sib > 0:
offset += 1
offset += MAX_PREFIX_SIZE - insn.prefix.count(0x0)
continue
if relative:
r_size = len(buff) - offset
relative_tracker.append((offset, r_size))
hex_bytes = '{ ' + _to_yara_hex_string(buff, relative_tracker) + ' }'
return hex_bytes
def gen_ndiff(a, b):
"""
Return a binary diff result of the byte sequence as a YARA signature.
:param list a: First buffer representing code block as hexlified bytes.
:param list b: Second buffer representing code block as hexlified bytes.
:return: YARA compliant hex string sequence.
:rtype: str
"""
diff = difflib.ndiff(a, b)
commons = []
start = 0
end = 0
start_code = None
end_code = None
for d in diff:
code = d[0:2]
seq = d[2:]
if code == ' ':
if start > 0 and start == end and len(commons) > 0:
commons.append('[%s]' % start)
if start > 0 and end == 0 and len(commons) > 0:
commons.append('[%s-%s]' % (end, start))
if 0 < start != end > 0 and len(commons) > 0:
commons.append('[%s-%s]' % (start, end))
start = 0
end = 0
commons.append(seq)
if code == '- ' and start == 0:
start_code = '- '
end_code = '+ '
elif code == '+ ' and start == 0:
start_code = '+ '
end_code = '- '
if code == start_code:
start += 1
if code == end_code:
end += 1
return '{ ' + ' '.join(commons) + ' }'
def show_asm(buff, mode, base):
"""
Return the given byte sequence as assembly under the given hardware mode.
:param bytes buff: Complete data stream.
:param int mode: Capstone hardware mode.
:param int base: Base address from which to start.
:return: Assembly code representation.
:rtype: str
"""
md = Cs(CS_ARCH_X86, mode)
md.detail = True
ret = ''
for insn in md.disasm(buff, base):
b = binascii.hexlify(insn.bytes).decode('utf-8')
b = ' '.join(a + b for a, b in zip(b[::2], b[1::2]))
if len(b) > 18:
b = b[:18] + '+'
ret += "{0:10} {1:20} {2:10} {3:10}\n".format(
'%08x:' % insn.address, b, insn.mnemonic, insn.op_str)
ret += '*/\n'
return ret
def generate_strict(buff):
"""
Return a literal interpretation of bytes as a YARA compliant hex string.
:param bytes buff: Complete data stream.
:return: YARA compliant hex string sequence.
:rtype: str
"""
hex_bytes = '{ ' + _to_yara_hex_string(buff) + ' }'
return hex_bytes
def autoint(x):
if x.startswith('0x'):
x = int(x, 16)
else:
x = int(x)
return x
def initialize_parser():
parser = argparse.ArgumentParser(
description='Generate YARA signature for x86 architectures based on '
'data passed. \nNumeric values may be provided as regular '
'integers or hexadecimal \nwith the \'0x\' prefix. \n\n'
'You can optionally create a file .gensig.json in your '
'\nhome directory with JSON data that can serve as a '
'template for \nrule \'meta\'.',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-f', '--files', nargs='+', required=True,
help='Name of file(s) to process.')
parser.add_argument('-s', '--start-offset', nargs='+', type=autoint,
default=[0],
help='Starting point within the supplied buffer to '
'begin processing. One or more offsets may\n'
'be provided, and are processed sequentially '
'with respect the list of files. For virtual\n'
'address offsets, please see the '
'--force-virtual-address flag.')
parser.add_argument('-e', '--end-offset', nargs='+', type=autoint,
default=[0],
help='End point to stop processing. Multiple offsets '
'are processed in sequence with\n the list of '
'files. If this is not provided, it will default '
'to the length of the data\n minus the start '
'offset.')
parser.add_argument('-g', '--generator', type=str,
choices=['strict', 'pic', 'mnem', 'bdiff'],
default='strict',
help='Choose how the signatures is generated from '
'the supplied bytes. Defaults to \'strict\'.\n'
'\n* Strict - Literal interpretation of bytes to '
'generate signature.'
'\n* PIC - Position Independent Code (PIC) mode '
'attempts to wildcard immediate and memory type '
'operands.'
'\n* Mnemonic (mnem) - Only show bytes that '
'reflect the represented mnemonic instruction.'
'\n* Bindiff (bdiff) - Compute a diff on two '
'binary streams and produce a YARA compliant '
'regex.')
parser.add_argument('-m', '--mode', type=int, choices=[16, 32, 64],
default=32,
help='The hardware mode to use when creating the '
'signature. Relevant in PIC and Mnemonic modes\n'
'(Default: 32-bit).')
parser.add_argument('-r', '--rule-name', type=str, default='',
help='The name of the rule you wish to create. '
'Default is [generator]_code_[yara_string_md5]')
parser.add_argument('-va', '--force-virtual-address', action='store_true',
default=False,
help='Force interpretation of the provided offset '
'to be the a virtual address.')
parser.add_argument('--suppress-asm', action='store_true', default=False,
help='Suppress automatically generated assembly code.')
parser.add_argument('-w', '--write', action='store_true', default=False,
help='Write to disk using the rule name along '
'with the .yara file extension.')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Output additional information when processing '
'(mostly for debugging purposes).')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
bindiffs = []
# some input checks for bindiffs
if args.generator == 'bdiff':
if len(args.start_offset) % 2 != 0 and args.start_offset[0] != 0 or \
len(args.end_offset) % 2 != 0 and args.end_offset[0] or \
len(args.files) % 2 != 0:
log.error('You must specify all file and offset arguments '
'for bdiff in pairs of two.')
sys.exit(2)
for i in range(0, len(args.files)):
filename = args.files[i]
log.info('Processing candidate %s...' % filename)
if not os.path.isfile(filename):
log.error('Failed to find file %s' % filename)
continue
f = open(filename, 'rb')
stream = f.read()
if args.start_offset[0] == 0:
start_offset = 0
else:
start_offset = args.start_offset[i]
if args.end_offset[0] == 0:
end_offset = len(stream) - start_offset
else:
end_offset = args.end_offset[i]
# get the base address before doing any necessary conversion
# to the offset
base_address = start_offset
if args.force_virtual_address:
try:
pe = pefile.PE(data=stream)
start_offset = pe.get_offset_from_rva(
start_offset - pe.OPTIONAL_HEADER.ImageBase)
end_offset = pe.get_offset_from_rva(
end_offset - pe.OPTIONAL_HEADER.ImageBase)
log.info('Processing using provided virtual address...')
except pefile.PEFormatError:
log.error('Invalid virtual address provided! Exiting...')
sys.exit(2)
else:
log.info('Processing as raw binary data...')
if start_offset >= end_offset or \
end_offset > len(stream) or \
start_offset > len(stream):
log.error('Invalid offset provided. '
'Please check your offset parameters.')
continue
buff = stream[start_offset:end_offset]
mode = 0
if args.mode == 16:
mode = CS_MODE_16
elif args.mode == 32:
mode = CS_MODE_32
elif args.mode == 64:
mode = CS_MODE_64
if not args.suppress_asm:
yara_syntax = '/*\nCandidate: %s\n---\n' % \
hashlib.md5(stream).hexdigest() \
+ show_asm(buff, mode, base_address)
else:
yara_syntax = '// Candidate: %s\n' % \
hashlib.md5(stream).hexdigest()
if args.generator == 'strict':
rule = generate_strict(buff)
elif args.generator == 'pic':
rule = generate_pic(buff, mode)
elif args.generator == 'bdiff':
s = binascii.hexlify(buff).decode('utf-8')
s = [s[i:i + 2] for i in range(0, len(s), 2)]
bindiffs.append((s, yara_syntax))
if len(bindiffs) == 2:
# bindiff based on pairs
rule = gen_ndiff(bindiffs[0][0], bindiffs[1][0])
# show asm from both pairs
yara_syntax = bindiffs[0][1] + bindiffs[1][1]
bindiffs = []
else:
continue
else:
rule = generate_mnemonic(buff, mode)
if len(args.rule_name) == 0:
name = '%s_code_%s' % \
(args.generator,
hashlib.md5(rule.encode('utf-8')).hexdigest())
else:
name = args.rule_name
yara_syntax += generate_yara_rule(rule, name)
print(yara_syntax)
if args.write:
fname = '%s.yara' % name
with open(fname, 'w+') as f:
f.write(yara_syntax)
log.info('%s written to disk.' % fname)
if __name__ == '__main__':
main()
```
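A minimal sketch of driving the parser above programmatically, handy for seeing how `autoint` and the generator flags resolve. The file name and offsets are illustrative placeholders, and the snippet assumes it runs inside (or imports) the module above.
```python
# Illustrative only: exercise initialize_parser() without touching real samples.
parser = initialize_parser()
args = parser.parse_args([
    '-f', 'dropper.bin',            # hypothetical input file
    '-s', '0x400', '-e', '0x450',   # autoint accepts hex or decimal offsets
    '-g', 'pic',                    # wildcard immediates/memory operands
    '-m', '32',                     # 32-bit hardware mode
])
print(args.generator, hex(args.start_offset[0]), hex(args.end_offset[0]))
# pic 0x400 0x450
```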
#### File: malchive/utilities/killaslr.py
```python
import os
import sys
import logging
import argparse
import pefile
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
# Reference:
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
dynamicbase_flag = 0x0040
def patch_aslr(pe):
"""
Disable ASLR protection from a binary.
:param pefile.PE pe: A pe object passed from the pefile project.
    :return: The PE object with ASLR (dynamic base) disabled.
:rtype: pefile.PE
"""
pe.OPTIONAL_HEADER.DllCharacteristics ^= dynamicbase_flag
return pe
def initialize_parser():
parser = argparse.ArgumentParser(
description='Patch provided PE to disable ASLR. '
'Write new PE with \'noaslr\' prefix.')
parser.add_argument('infile', metavar='FILE', nargs='*',
help='Full path to the file(s) to be processed.')
parser.add_argument('-o', '--overwrite', action='store_true',
default=False,
help='Patch existing file instead of creating a '
'new one.')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Output additional information when processing '
'(mostly for debugging purposes).')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
if len(args.infile) == 0:
p.print_help()
sys.exit(2)
for fname in args.infile:
basename = os.path.basename(fname)
log.info('Patching %s...' % basename)
if not os.path.isfile(fname):
log.warning('Failed to find file %s. Skipping...' % fname)
continue
with open(fname, 'rb') as f:
stream = f.read()
try:
pe = pefile.PE(data=stream)
except pefile.PEFormatError:
log.warning('%s not a pe, skipping...' % basename)
continue
if pe.OPTIONAL_HEADER.DllCharacteristics & dynamicbase_flag:
pe = patch_aslr(pe)
if args.overwrite:
outname = basename
else:
outname = basename + '.noaslr'
pe.write(outname)
print('Patched file written as %s...' % outname)
else:
print('%s was not found to have ASLR enabled...' % basename)
if __name__ == '__main__':
main()
```
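For context, the same flag test and toggle can be done directly with pefile; `sample.exe` is a placeholder path. Note the XOR in `patch_aslr` only clears the bit because the caller first confirms it is set; an unconditional clear would use `&= ~0x0040` instead.
```python
import pefile

pe = pefile.PE('sample.exe')  # placeholder path
if pe.OPTIONAL_HEADER.DllCharacteristics & 0x0040:   # IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
    pe.OPTIONAL_HEADER.DllCharacteristics ^= 0x0040  # bit is known set, so XOR clears it
    pe.write('sample.exe.noaslr')
```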
#### File: malchive/utilities/rotate.py
```python
import sys
import logging
from malchive.helpers import BinDataHelper
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
class GenericRotate(BinDataHelper.LiteCrypt):
def __init__(self,
count: int = 0,
right: bool = False,
*args,
**kwargs):
self.count: int = 0
self.right: bool = False
# Only rotating single bytes
count = count % 8
if right:
log.debug('Modifying count to align with right rotation...')
count = 8 - count
self.count = count
self.right = right
super().__init__(*args, **kwargs)
def run_crypt(self):
"""
Perform bitwise rotation of supplied BYTE N times.
:return: processed data
:rtype: bytearray
"""
rotated = bytearray()
for i in range(self.offset, self.total_size):
byte = self.buff[i]
rotated.append(
(byte << self.count | byte >> (8 - self.count))
& 0xff)
return rotated
def initialize_parser():
description = 'Process data stream and rotate each byte. ' \
'Numeric values may be provided ' \
'as regular integers or hexadecimal with the \'0x\' prefix.'
parser = BinDataHelper.generic_args(description)
parser.add_argument('count', type=BinDataHelper.autoint,
help='Number of times to perform rotation. '
'Defaults to the left.')
parser.add_argument('-r', '--right', action='store_true',
default=False,
help='Override default rotation direction, and '
'instead rotate bits to the right.')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
buff = args.infile.buffer.read()
s = GenericRotate(
count=args.count,
right=args.right,
buff=buff,
offset=args.offset,
size=args.size,
)
return_data = s.run_crypt()
sys.stdout.buffer.write(return_data)
if __name__ == '__main__':
main()
```
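The per-byte transform in `run_crypt` reduces to a classic 8-bit rotate-left; a right rotation by n is folded into a left rotation by 8 - n in `__init__`. A standalone sketch of that expression:
```python
def rol8(byte, count):
    """Rotate an 8-bit value left by `count` bits (same expression as run_crypt)."""
    count %= 8
    return (byte << count | byte >> (8 - count)) & 0xff

assert rol8(0b1000_0001, 1) == 0b0000_0011
assert rol8(0x12, 4) == 0x21          # left by 4 equals right by 4 for 8-bit values
assert rol8(0xAB, 8 - 3) == 0x75      # right-rotate by 3, expressed as left by 5
```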
#### File: malchive/utilities/vtinspect.py
```python
import re
import sys
import json
import struct
import socket
import os.path
import logging
import argparse
import hashlib
import requests
import progressbar
__version__ = "1.4.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
class VirusTotalAPI:
"""
    Queries the VirusTotal v2 API for file, network, and search report data.
:ivar dict params: Parameters to inform query to VT API.
:ivar str base: Base URL for queries to be made against.
"""
def __init__(self, apikey):
"""
Initialize VirusTotal interface.
:param str apikey: API key to use.
"""
self.params = {'apikey': apikey}
self.base = 'https://www.virustotal.com/vtapi/v2'
def search_file(self, resource, allinfo):
"""
Search for information based on a supplied hash.
:param str resource: Hash to search on.
:param bool allinfo: Show extra verbose information about the hash.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['allinfo'] = int(allinfo)
self.params['resource'] = resource
response = requests.get('%s/%s' % (self.base, 'file/report'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def get_submission_info(self, resource):
"""
Get submitter information for resource.
:param str resource: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['resource'] = resource
response = requests.get('%s/%s' % (self.base, 'file/submissions'),
params=self.params, verify=True)
response_json = response.json()
if len(response_json) == 0:
# Doesn't return anything if a match fails so
# we manufacture one here
return {"verbose_msg": "The requested resource is not among "
"the finished, queued or pending scans"}
return json.dumps(response_json, indent=4, sort_keys=False)
def download_file(self, hash):
"""
Download file corresponding to supplied hash value.
:param str hash: Hash of file to download.
:return: JSON dump of response data from VirusTotal
:rtype: response
"""
self.params['hash'] = hash
response = requests.get('%s/%s' % (self.base, 'file/download'),
params=self.params, verify=True)
return response
def download_pcap(self, hash):
"""
Download PCAP information gathered corresponding to the supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: response
"""
self.params['hash'] = hash
response = requests.get('%s/%s' % (self.base, 'file/network-traffic'),
params=self.params, verify=True)
return response
def rescan(self, hash):
"""
Search for information based on a supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['resource'] = hash
response = requests.post('%s/%s' % (self.base, 'file/rescan'),
params=self.params)
response_json = response.json()
return response_json
def behaviour(self, hash):
"""
Search for sandbox information corresponding to the supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['hash'] = hash
response = requests.get('%s/%s' % (self.base, 'file/behaviour'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def comments(self, hash):
"""
Search for user comments corresponding to the supplied hash.
:param str hash: Hash to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['resource'] = hash
response = requests.get('%s/%s' % (self.base, 'comments/get'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def search_url(self, resource, scan, allinfo):
"""
Search for reputation data corresponding to the supplied url.
:param str resource: URL to search on.
:param bool scan: Bool to force a scan for a URL not in the VirusTotal
database.
:param bool allinfo: Retrieve more verbose information about the
supplied URL.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['allinfo'] = int(allinfo)
self.params['resource'] = resource
self.params['scan'] = int(scan)
response = requests.post('%s/%s' % (self.base, 'url/report'),
params=self.params)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def search_domain(self, domain):
"""
Search for data corresponding to the submitted domain.
:param str domain: Domain to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['domain'] = domain
response = requests.get('%s/%s' % (self.base, 'domain/report'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def search_ip(self, ip):
"""
Search for data corresponding to the submitted IP address.
:param str ip: IP address to search on.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
self.params['ip'] = ip
response = requests.get('%s/%s' % (self.base, 'ip-address/report'),
params=self.params, verify=True)
response_json = response.json()
return json.dumps(response_json, indent=4, sort_keys=False)
def query_submissions(self, query, offset=None):
"""
Execute a search modifier compliant file search query against
VirusTotal.
:param str query: Search modifier to use.
:param str offset: Search offset given from VT API when a query has
more than 300 results.
:return: JSON dump of response data from VirusTotal
:rtype: dict
"""
if offset is not None:
self.params['offset'] = offset
self.params['query'] = query
response = requests.post('%s/%s' % (self.base, 'file/search'),
params=self.params, verify=True)
response_json = response.json()
return response_json
def process_hashes(opt, vt):
for h in opt.hashes:
# Verify hash is md5/sha1/sha256
if not (re.match(r'^[a-fA-F\d]{32}$', h) or
re.match(r'^[a-fA-F\d]{40}$', h) or
re.match(r'^[a-fA-F\d]{64}$', h)):
log.warning('Invalid hash supplied, skipping %s' % h)
continue
# Get detailed file information
if opt.info:
print(vt.search_file(h, opt.verbose))
# Get detailed info from submitters
if opt.submitter:
print(vt.get_submission_info(h))
# Download the specimen
if opt.download_file:
log.info('Attempting to retrieve file...')
result = vt.download_file(h)
if result.status_code == 404:
log.warning('404 returned, %s not found in database...' % h)
elif result.status_code == 200:
name = '%s.vt' % h.lower()
with open(name, 'wb') as f:
f.write(result.content)
print('Retrieved file from VT, saved locally as %s' % name)
check_payload_integrity(h, result.content)
else:
log.warning(
'Unable to retrieve file, response code was %s. '
'Try again in a few minutes...' % result.status_code)
# Download the pcap
if opt.download_pcap:
log.info('Attempting to retrieve pcap...')
result = vt.download_pcap(h)
if result.status_code == 404:
log.warning('404 returned, file not found in database...')
elif result.status_code == 200 and \
result.content.startswith(b'\xd4\xc3\xb2\xa1'):
name = '%s.pcap' % h.lower()
with open(name, 'wb') as f:
f.write(result.content)
print('Retrieved file from VT, saved locally as %s' % name)
else:
log.warning(
'Unable to retrieve PCAP for %s. PCAP data may not '
'exist for it. Response code: %s'
% (h, result.status_code))
# Rescan the supplied hash/file
if opt.rescan:
result = vt.rescan(h)
if result['response_code'] == 0:
log.error('There was an error rescanning. '
'The hash may not be in the database.')
elif result['response_code'] == 1:
print('Rescan request successful, please navigate to the '
'provided URL and await the results...\n%s' %
result['permalink'])
# Get behaviour of file
if opt.behaviour:
print(vt.behaviour(h))
# Get user submitted comments
if opt.comments:
print(vt.comments(h))
def check_payload_integrity(h, buff):
result_hash = ''
# Verify hash is md5/sha1/sha256
if re.match(r'^[a-fA-F\d]{32}$', h):
result_hash = hashlib.md5(buff).hexdigest()
if re.match(r'^[a-fA-F\d]{40}$', h):
result_hash = hashlib.sha1(buff).hexdigest()
if re.match(r'^[a-fA-F\d]{64}$', h):
result_hash = hashlib.sha256(buff).hexdigest()
if h.lower() != result_hash.lower():
log.error('The retrieved file does not match the provided '
'hash value! %s != %s' %
(h.lower(), result_hash.lower()))
def process_net_indicators(opt, vt):
if opt.url:
for url in opt.url:
# Verify url
if not re.match(r'^(http|https)://.*?\..*', url):
log.error(
'Invalid URL supplied, skipping %s.\nEnsure the '
'http:// or https:// prefix is at the beginning...'
% url)
continue
# Get detailed URL information
if opt.force_url:
# User coaching
while True:
ans = input(
'Forcing a URL scan will add a public record '
'to the VirusTotal database for the URL you '
'submitted if it\'s not found in the existing '
'dataset. \nAre you sure? (y/n) ')
if ans == 'y':
break
elif ans == 'n':
print('Aborting URL scan of %s...' % url)
continue
else:
print('Please provide either \'y\' or \'n\'')
print('Initiating a scan request on your behalf...')
print(vt.search_url(url, opt.force_url, opt.verbose))
if opt.domain_name:
for d in opt.domain_name:
# Verify domain
if len(d.split(".")) > 1 and d.startswith('.') is True:
log.warning(
'Invalid domain supplied, skipping %s.\nProvide a '
'valid domain, with a basename prefix \
(ex: google.com)...' % d)
continue
# Get detailed domain name information
print(vt.search_domain(d))
if opt.ip:
for ip_addr in opt.ip:
# Very simple verify IP, VT will do additional checks but
# this serves as an initial gate
try:
struct.unpack("!L", socket.inet_aton(ip_addr))[0]
except socket.error:
log.warning('Invalid IP address, %s, skipping...' % ip_addr)
continue
# Get detailed IP address information
print(vt.search_ip(ip_addr))
def process_bulk_query(opt, vt):
hash_list = []
target_dir = opt.target_directory
if target_dir != '.':
try:
log.info('Writing to directory: %s' % target_dir)
os.makedirs(target_dir)
except OSError:
if not os.path.isdir(target_dir):
log.error('Could not create %s' % target_dir)
return
if not opt.query:
log.error('A search modifier query must be provided.')
return
print('Compiling search results. This may take some time '
'depending on the search...')
# Execute search modifier compliant query
answer = vt.query_submissions(opt.query)
if 'hashes' not in list(answer.keys()):
print(json.dumps(answer, indent=4, sort_keys=False))
return
if len(answer['hashes']) >= opt.limit != 0:
hash_list.extend(answer['hashes'][:opt.limit])
log.warning('User defined threshold reached...')
else:
hash_list.extend(answer['hashes'])
while 'offset' in list(answer.keys()):
offset = answer['offset']
answer = vt.query_submissions(opt.query, offset)
new_size = len(hash_list) + len(answer['hashes'])
if new_size >= opt.limit != 0:
hash_list.extend(
answer['hashes'][:(opt.limit - len(hash_list))])
log.warning('User defined threshold reached...')
break
else:
hash_list.extend(answer['hashes'])
if opt.pull_files and len(hash_list) > 0:
while not opt.force_download:
ans = input(
'Query returned %s results. '
'Proceed with collection? (y/n) ' % len(hash_list))
if ans == 'y':
break
elif ans == 'n':
print('Aborting collection!')
return
else:
print('Please provide either \'y\' or \'n\'')
print('Attempting to pull files across %s query matches...'
% len(hash_list))
bar = progressbar.ProgressBar(redirect_stdout=True,
max_value=len(hash_list))
counter = 0
fail_count = 0
failed = []
for h in hash_list:
result = vt.download_file(h)
if result.status_code == 200:
name = '%s/%s.vt' % (target_dir, h)
with open(name, 'wb') as f:
f.write(result.content)
check_payload_integrity(h, result.content)
else:
fail_count += 1
failed.append(h)
bar.update(counter)
counter += 1
if fail_count > 0:
log.warning(
'Unable to retrieve files for %s candidates.\n'
'Failed list: \n%s'
% (fail_count, '\n'.join(failed)))
if opt.pull_pcaps and len(hash_list) > 0:
while not opt.force_download:
ans = input(
'Query returned %s results. '
'Proceed with collection? (y/n) ' % len(hash_list))
if ans == 'y':
break
elif ans == 'n':
print('Aborting collection!')
return
else:
print('Please provide either \'y\' or \'n\'')
print('Attempting to pull pcaps across %s query matches on files...'
% len(hash_list))
bar = progressbar.ProgressBar(redirect_stdout=True,
max_value=len(hash_list))
counter = 0
fail_count = 0
failed = []
for h in hash_list:
result = vt.download_pcap(h)
if result.status_code == 200 and \
result.content.startswith(b'\xd4\xc3\xb2\xa1'):
name = '%s/%s.pcap' % (target_dir, h)
with open(name, 'wb') as f:
f.write(result.content)
else:
fail_count += 1
failed.append(h)
bar.update(counter)
counter += 1
if fail_count > 0:
log.warning(
'Unable to retrieve PCAP for %s candidates. '
'PCAP data may not exist for them.\nFailed list: \n%s'
% (fail_count, '\n'.join(failed)))
if not opt.pull_files and not opt.pull_pcaps or len(hash_list) == 0:
print('Query returned %s unique results...' % len(hash_list))
def initialize_parser():
parser = argparse.ArgumentParser(
prog='vtinspect',
description='Query VirusTotal database based on supplied indicators '
'such as; md5, sha1, sha256, IP, domain, or URL. '
'At least one of either is required to use this script. '
'A \'.vt.key\' file must also be present in the user '
'home directory with the format '
'{ "key" : "API_KEY_HERE" }')
parser.add_argument('-v', '--verbose', action='store_true',
help='Get more verbose output if supported.')
subparsers = parser.add_subparsers(
dest='subparser_name',
help='Select from a variety of options on how to query the '
'VirusTotal database.')
file_parser = subparsers.add_parser(
'hash',
help='Issue VT queries based on multiple hashes from various files.')
file_parser.add_argument('hashes',
nargs='*',
help='List of hashes (md5/sha1/sha256) for '
'inspection.')
file_parser.add_argument('-i', '--info',
action='store_true',
help='Search VirusTotal to pull file '
'information.')
file_parser.add_argument('-s', '--submitter',
action='store_true',
help='Get information on the submission source.')
file_parser.add_argument('-b', '--behaviour',
action='store_true',
help='Search VirusTotal to pull behaviour '
'information from Cuckoo Sandbox instance '
'(only done on VT for PE files under 10MB).')
file_parser.add_argument('-c', '--comments',
action='store_true',
help='Retrieve comments from others associated '
'with the provided file.')
file_parser.add_argument('-df', '--download-file',
action='store_true',
help='Download file from VirusTotal and '
'save as \'[hash].vt\'.')
file_parser.add_argument('-dp', '--download-pcap',
action='store_true',
help='Download any relevant pcap traffic '
'and save as \'[hash].pcap\'.')
file_parser.add_argument('-r', '--rescan',
action='store_true',
help='Force VirusTotal to rescan the file with '
                                   'its current AV definitions.')
net_parser = subparsers.add_parser(
'net',
help='Issue VT queries various network indicators, such as; IPs, '
'domains, and URLs.')
net_parser.add_argument('-u', '--url',
nargs='*',
help='List of URLs for inspection.')
net_parser.add_argument('--force-url',
action='store_true',
help='Force a scan of the URL if it is not '
'in the database. Can only be used '
'with the --url command.')
net_parser.add_argument('-dn', '--domain-name',
nargs='*',
help='List of domain names for inspection.')
net_parser.add_argument('-ip', '--ip',
nargs='*',
help='List of IP addresses for inspection.')
bulk_parser = subparsers.add_parser(
'bulk',
help='Execute a search modifier compliant file search query against '
'VirusTotal. Returns the first 300 matching hits sorted '
'according to the last submission date in descending order. '
'Example: \'type:peexe size:90kb+ positives:5+ '
'behaviour:"taskkill"\'. '
'Reference: https://www.virustotal.com/intelligence/help/'
'file-search/#search-modifiers')
bulk_parser.add_argument(
'query',
type=str,
help='Search modifier compliant file search query. Enclosed in single '
'quotes.')
bulk_parser.add_argument('-pf',
'--pull-files',
action='store_true',
help='Pull all files returned from a '
'VirusTotal query.')
bulk_parser.add_argument('-pp',
'--pull-pcaps',
action='store_true',
help='Pull all pcaps returned from a '
'VirusTotal query.')
bulk_parser.add_argument('--force-download',
action='store_true',
help='Ignore user prompt indicating query '
'results and download all query file '
'or subordinate PCAP matches.')
bulk_parser.add_argument('-l', '--limit',
type=int,
default=0,
help='Enforce a maximum limit of returned '
'results. Useful for loose VT queries '
'that may return thousands of results '
'to parse. (default: unlimited)')
bulk_parser.add_argument('-t', '--target-directory',
type=str,
default='.',
help='Target directory to write files.')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
root.setLevel(logging.INFO)
vt_key = '%s/.vt.key' % os.path.expanduser('~')
api_key = ''
if os.path.exists(vt_key):
k = json.load(open(vt_key))
api_key = k['key']
else:
log.error('Could not find file %s for API key.' % vt_key)
log.error('Please create a JSON compliant file named \'.vt.key\' '
'in your home directory with \'key\' as the key name '
'and your VT API key as the value.')
sys.exit(2)
vt = VirusTotalAPI(api_key)
if args.subparser_name == 'hash':
process_hashes(args, vt)
if args.subparser_name == 'net':
process_net_indicators(args, vt)
if args.subparser_name == 'bulk':
process_bulk_query(args, vt)
if __name__ == '__main__':
main()
```
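A minimal usage sketch for the `VirusTotalAPI` wrapper above; the key and hash are placeholders, and a real call needs network access plus a valid API key (normally read from `~/.vt.key`).
```python
vt = VirusTotalAPI('YOUR_API_KEY')                            # placeholder key
report = vt.search_file('d41d8cd98f00b204e9800998ecf8427e',   # placeholder MD5
                        allinfo=False)
print(report)  # pretty-printed JSON report, or a "resource not found" message
```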
#### File: malchive/utilities/xor_pairwise.py
```python
import sys
import logging
from malchive.helpers import BinDataHelper
__version__ = "1.0.0"
__author__ = "<NAME>"
log = logging.getLogger(__name__)
class PairwiseXor(BinDataHelper.LiteCrypt):
def __init__(self,
key: int = 0,
incrementing: bool = False,
*args,
**kwargs):
self.key: int = 0
self.incrementing: bool = False
if 0x00 <= key <= 0xff:
self.key = key
else:
raise ValueError('Key must be between 0x00 and 0xff.')
self.incrementing = incrementing
super().__init__(*args, **kwargs)
def run_crypt(self):
"""
Run pairwise xor on supplied data.
:return: processed data
:rtype: bytearray
"""
data = bytearray(self.buff[self.offset:self.total_size])
if self.total_size % 2 != 0:
raise ValueError('Total size of processed bytes must '
'be even length for pairwise decode.')
if self.incrementing:
data[0] ^= self.key
for i in range(0, len(data) - 1, 1):
data[i + 1] ^= data[i]
else:
for i in range(len(data) - 1, 0, -1):
data[i] ^= data[i - 1]
data[0] ^= self.key
return data
def initialize_parser():
description = 'Instead of a standard single byte xor operation, ' \
'xor end byte with previous byte and continue in a ' \
'decrementing fashion until the final byte is ' \
'reached at the beginning.'
parser = BinDataHelper.generic_args(description)
parser.add_argument('-r', '--reverse',
action='store_true',
default=False,
help='Reverse the process, applying pairwise '
'at the beginning rather than the end.')
parser.add_argument('-k', '--pw-xor-key',
type=BinDataHelper.autoint,
default=0,
help='Key to use to start or end the XOR '
'(depending on if \'r\' is used). '
'Must be 0x00-0xff. Defaults to 0x00.')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
buff = args.infile.buffer.read()
s = PairwiseXor(
key=args.pw_xor_key,
incrementing=args.reverse,
buff=buff,
offset=args.offset,
size=args.size,
)
return_data = s.run_crypt()
sys.stdout.buffer.write(return_data)
if __name__ == '__main__':
main()
``` |
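A small standalone sketch of the pairwise transform implemented in `run_crypt` above, showing that the decrementing pass undoes the incrementing one for an even-length buffer (the key and bytes are arbitrary example values).
```python
def pairwise_xor(data, key, incrementing):
    data = bytearray(data)
    if incrementing:
        data[0] ^= key
        for i in range(len(data) - 1):
            data[i + 1] ^= data[i]
    else:
        for i in range(len(data) - 1, 0, -1):
            data[i] ^= data[i - 1]
        data[0] ^= key
    return data

plain = b'\x10\x20\x30\x40'
encoded = pairwise_xor(plain, key=0x41, incrementing=True)
assert bytes(pairwise_xor(encoded, key=0x41, incrementing=False)) == plain
```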
{
"source": "6un9-h0-Dan/mixbox",
"score": 3
} |
#### File: mixbox/mixbox/idgen.py
```python
import uuid
import contextlib
from .namespaces import Namespace
EXAMPLE_NAMESPACE = Namespace("http://example.com", "example", '')
# Don't register this namespace (yet, at least)
__all__ = ['InvalidMethodError', 'IDGenerator', 'set_id_namespace',
'set_id_method', 'create_id']
class InvalidMethodError(ValueError):
def __init__(self, method):
ValueError.__init__(self, "invalid method: %s" % method)
class IDGenerator(object):
"""Utility class for generating IDs for various entities"""
METHOD_UUID = 1
METHOD_INT = 2
METHODS = (METHOD_UUID, METHOD_INT,)
def __init__(self, namespace=EXAMPLE_NAMESPACE, method=METHOD_UUID):
self.namespace = namespace
self.method = method
self.reset()
def reset(self):
self.next_int = 1
@property
def namespace(self):
return self._namespace
@namespace.setter
def namespace(self, value):
if not isinstance(value, Namespace):
raise ValueError("Must be a Namespace object")
self._namespace = value
self.reset()
@property
def method(self):
return self._method
@method.setter
def method(self, value):
if value not in IDGenerator.METHODS:
raise InvalidMethodError("invalid method: %s" % value)
self._method = value
self.reset()
def create_id(self, prefix="guid"):
"""Create an ID.
Note that if `prefix` is not provided, it will be `guid`, even if the
`method` is `METHOD_INT`.
"""
if self.method == IDGenerator.METHOD_UUID:
id_ = str(uuid.uuid4())
elif self.method == IDGenerator.METHOD_INT:
id_ = self.next_int
self.next_int += 1
else:
raise InvalidMethodError(self.method)
return "%s:%s-%s" % (self.namespace.prefix, prefix, id_)
#: Singleton instance within this module. It is lazily instantiated, so simply
#: importing the utils module will not create the object.
__generator = None
def _get_generator():
"""Return the default IDGenerator object.
Only under rare circumstances should this function be called by external
code. More likely, external code should initialize its own IDGenerator or
use the `set_id_namespace`, `set_id_method`, or `create_id` functions.
"""
global __generator
if not __generator:
__generator = IDGenerator()
return __generator
def set_id_namespace(namespace):
""" Set the namespace for the module-level ID Generator"""
_get_generator().namespace = namespace
def set_id_method(method):
""" Set the method for the module-level ID Generator"""
_get_generator().method = method
def get_id_namespace():
"""Return the namespace associated with generated ids"""
return _get_generator().namespace.name
def get_id_namespace_prefix():
"""Returns the namespace prefix assoicated with generated ids"""
return _get_generator().namespace.prefix
# For backwards compatibility with old name
get_id_namespace_alias = get_id_namespace_prefix
def create_id(prefix=None):
""" Create an ID using the module-level ID Generator"""
if not prefix:
return _get_generator().create_id()
else:
return _get_generator().create_id(prefix)
@contextlib.contextmanager
def temp_id_namespace(namespace):
try:
        saved_id_namespace = _get_generator().namespace  # keep the Namespace object so it can be restored
set_id_namespace(namespace)
yield
finally:
set_id_namespace(saved_id_namespace)
```
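A short usage sketch for the module above; the namespace values are illustrative.
```python
from mixbox.idgen import IDGenerator, create_id, set_id_method
from mixbox.namespaces import Namespace

gen = IDGenerator(namespace=Namespace("http://example.com", "example", ''),
                  method=IDGenerator.METHOD_INT)
print(gen.create_id("indicator"))   # example:indicator-1
print(gen.create_id("indicator"))   # example:indicator-2

# The module-level helpers drive a shared singleton instead:
set_id_method(IDGenerator.METHOD_UUID)
print(create_id())                  # example:guid-<random uuid4>
```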
#### File: mixbox/mixbox/parser.py
```python
from abc import ABCMeta, abstractmethod
from distutils.version import StrictVersion
from .exceptions import ignored
from .xml import get_etree_root, get_etree, get_schemaloc_pairs
from .vendor.six import iteritems
class UnknownVersionError(Exception):
"""A parsed document contains no version information."""
pass
class UnsupportedVersionError(Exception):
"""A parsed document is a version unsupported by the parser."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedVersionError, self).__init__(message)
self.expected = expected
self.found = found
class UnsupportedRootElementError(Exception):
"""A parsed document contains an unsupported root element."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedRootElementError, self).__init__(message)
self.expected = expected
self.found = found
class EntityParser(object):
__metaclass__ = ABCMeta
@abstractmethod
def supported_tags(self):
"""Return an iterable of supported document root tags (strings)."""
@abstractmethod
def get_version(self, root):
"""Return as a string the schema version used by the document root."""
@abstractmethod
def supported_versions(self, tag):
"""Return all the supported versions for a given tag."""
@abstractmethod
def get_entity_class(self, tag):
"""Return the class to be returned as the result of parsing."""
def _get_version(self, root):
"""Return the version of the root element passed in.
Args:
root (etree.Element)
Returns:
distutils.StrictVersion
Raises:
UnknownVersionError
"""
# Note: STIX and MAEC use a "version" attribute. To support CybOX, a
# subclass will need to combine "cybox_major_version",
# "cybox_minor_version", and "cybox_update_version".
version = self.get_version(root)
if version:
return StrictVersion(version)
raise UnknownVersionError(
"Unable to determine the version of the input document. No "
"version information found on the root element."
)
def _check_version(self, root):
"""Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError
"""
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
raise UnsupportedVersionError(
message=error.format(version, supported),
expected=supported,
found=version
)
def _check_root_tag(self, root):
"""Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError
"""
supported = self.supported_tags()
if root.tag in supported:
return
error = "Document root element ({0}) not one of ({1})"
raise UnsupportedRootElementError(
message=error.format(root.tag, supported),
expected=supported,
found=root.tag,
)
def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a STIX binding object from the supplied xml file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
            .UnsupportedVersionError: If `check_version` is ``True`` and
                `xml_file` contains an unsupported STIX version.
            .UnsupportedRootElementError: If `check_root` is ``True`` and
                `xml_file` contains an invalid root element.
"""
root = get_etree_root(xml_file, encoding=encoding)
if check_root:
self._check_root_tag(root)
if check_version:
self._check_version(root)
entity_class = self.get_entity_class(root.tag)
entity_obj = entity_class._binding_class.factory()
entity_obj.build(root)
return entity_obj
def parse_xml(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a python-stix STIXPackage object from the supplied xml_file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`. If
``None``, an attempt will be made to determine the input
character encoding.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
            .UnsupportedVersionError: If `check_version` is ``True`` and
                `xml_file` contains an unsupported STIX version.
            .UnsupportedRootElementError: If `check_root` is ``True`` and
                `xml_file` contains an invalid root element.
"""
xml_etree = get_etree(xml_file, encoding=encoding)
entity_obj = self.parse_xml_to_obj(
xml_file=xml_etree,
check_version=check_version,
check_root=check_root
)
xml_root_node = xml_etree.getroot()
entity = self.get_entity_class(xml_root_node.tag).from_obj(entity_obj)
# Save the parsed nsmap and schemalocations onto the parsed Entity
entity.__input_namespaces__ = dict(iteritems(xml_root_node.nsmap))
with ignored(KeyError):
pairs = get_schemaloc_pairs(xml_root_node)
entity.__input_schemalocations__ = dict(pairs)
return entity
```
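A minimal sketch of a concrete parser built on `EntityParser` above; the tag, versions, and `ExampleEntity` binding are hypothetical stand-ins for whatever a real consumer (python-stix, python-cybox, etc.) would supply.
```python
from mixbox.parser import EntityParser

class ExampleParser(EntityParser):
    _TAG = "{http://example.com/ns}Package"   # hypothetical root tag

    def supported_tags(self):
        return [self._TAG]

    def get_version(self, root):
        return root.attrib.get("version")

    def supported_versions(self, tag):
        return ["1.0", "1.1"]

    def get_entity_class(self, tag):
        return ExampleEntity   # hypothetical Entity subclass with a _binding_class

# entity = ExampleParser().parse_xml("package.xml")  # raises on bad root/version
```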
#### File: mixbox/mixbox/typedlist.py
```python
import sys
from .compat import MutableSequence
from .datautils import is_sequence, resolve_class, needkwargs
from .entities import EntityList
from .vendor import six
class TypedList(MutableSequence):
"""A type-aware mutable sequence that performs input validation when
inserting new items.
Args:
type: The type of the items contained in this collection.
ignore_none: If True, do not insert None values.
castfunc: A callable that will convert non-valid items into
valid items.
*args: A variable-length list of items to add to the collection.
If an arg is a non-string, non-EntityList iterable type, each of
its contained items will be added.
"""
@needkwargs("type")
def __init__(self, *args, **kwargs):
self._inner = []
self._type = resolve_class(kwargs["type"])
self._castfunc = kwargs.get("castfunc", self._type)
self._ignore_none = kwargs.get("ignore_none", True)
for item in args:
if isinstance(item, EntityList):
self.append(item)
elif is_sequence(item):
self.extend(item)
else:
self.append(item)
def _is_valid(self, value):
"""Return True if the input value is valid for insertion into the
inner list.
Args:
value: An object about to be inserted.
"""
# Entities have an istypeof method that can perform more sophisticated
# type checking.
if hasattr(self._type, "istypeof"):
return self._type.istypeof(value)
else:
return isinstance(value, self._type)
def _fix_value(self, value):
"""Attempt to coerce value into the correct type.
Subclasses can override this function.
"""
try:
return self._castfunc(value)
except:
error = "Can't put '{0}' ({1}) into a {2}. Expected a {3} object."
error = error.format(
value, # Input value
type(value), # Type of input value
type(self), # Type of collection
self._type # Expected type of input value
)
six.reraise(TypeError, TypeError(error), sys.exc_info()[-1])
def _is_type_castable(self):
return getattr(self._type, "_try_cast", False)
def __nonzero__(self):
return bool(self._inner)
def __getitem__(self, key):
return self._inner.__getitem__(key)
def __setitem__(self, key, value):
"""Attempt to set the value at position `key` to the `value`.
If a value is not the correct type, an attempt will be made to
convert it to the correct type.
Args:
key: An index.
value: A value to set.
"""
if not self._is_valid(value):
value = self._fix_value(value)
self._inner.__setitem__(key, value)
def __delitem__(self, key):
self._inner.__delitem__(key)
def __len__(self):
return self._inner.__len__()
def insert(self, idx, value):
if value is None and self._ignore_none:
return
elif self._is_valid(value):
self._inner.insert(idx, value)
elif self._is_type_castable():
value = self._fix_value(value)
self._inner.insert(idx, value)
else:
err = "Cannot insert type (%s) into %s" % (type(value), type(self))
raise TypeError(err)
def __repr__(self):
return self._inner.__repr__()
def __str__(self):
return self._inner.__str__()
```
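A behaviour sketch for `TypedList` as implemented above: `__setitem__` always coerces through `castfunc`, `insert`/`append` only coerce when the element type opts in via `_try_cast`, and `None` values are dropped by default.
```python
from mixbox.typedlist import TypedList

numbers = TypedList(1, 2, 3, type=int)
numbers[0] = "7"        # __setitem__ coerces through castfunc (int), stores 7
numbers.append(None)    # silently ignored (ignore_none defaults to True)
try:
    numbers.append("8") # int has no _try_cast attribute, so no coercion on insert
except TypeError:
    pass
print(list(numbers))    # [7, 2, 3]
```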
#### File: mixbox/mixbox/xml.py
```python
from lxml import etree
# XML NAMESPACES
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
# XML TAGS
TAG_XSI_TYPE = "{%s}type" % NS_XSI
TAG_SCHEMALOCATION = "{%s}schemaLocation" % NS_XSI
# Acceptable values for XML booleans.
FALSE = (False, 'false', 0, '0')
TRUE = (True, 'true', 1, '1')
# CDATA constants
CDATA_START = "<![CDATA["
CDATA_END = "]]>"
def is_element(obj):
"""Returns ``True`` if `obj` is an lxml ``Element``."""
return isinstance(obj, etree._Element) # noqa
def is_etree(obj):
"""Returns ``True`` if `obj` is an lxml ``ElementTree``."""
return isinstance(obj, etree._ElementTree) # noqa
def get_xml_parser(encoding=None):
"""Returns an ``etree.ETCompatXMLParser`` instance."""
parser = etree.ETCompatXMLParser(
huge_tree=True,
remove_comments=True,
strip_cdata=False,
remove_blank_text=True,
resolve_entities=False,
encoding=encoding
)
return parser
def get_etree(doc, encoding=None):
if is_etree(doc):
return doc
elif is_element(doc):
return etree.ElementTree(doc)
else:
parser = get_xml_parser(encoding=encoding)
return etree.parse(doc, parser=parser)
def get_etree_root(doc, encoding=None):
"""Returns an instance of lxml.etree._Element for the given `doc` input.
Args:
doc: The input XML document. Can be an instance of
``lxml.etree._Element``, ``lxml.etree._ElementTree``, a file-like
object, or a string filename.
encoding: The character encoding of `doc`. If ``None``, an attempt
will be made to determine the character encoding by the XML
parser.
Returns:
An ``lxml.etree._Element`` instance for `doc`.
Raises:
IOError: If `doc` cannot be found.
lxml.ParseError: If `doc` is a malformed XML document.
"""
tree = get_etree(doc, encoding)
root = tree.getroot()
return root
def get_schemaloc_pairs(node):
"""Parses the xsi:schemaLocation attribute on `node`.
Returns:
A list of (ns, schemaLocation) tuples for the node.
Raises:
KeyError: If `node` does not have an xsi:schemaLocation attribute.
"""
schemalocs = node.attrib[TAG_SCHEMALOCATION]
l = schemalocs.split()
return zip(l[::2], l[1::2])
def is_cdata(text):
"""Returns ``True`` if `text` contains a CDATA block.
Example:
>>> is_cdata("<![CDATA[Foo]]>")
True
>>> is_cdata("NOPE")
False
"""
if not text:
return False
return CDATA_START in text
def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
        If the input text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
        text: A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text
def cdata(text):
"""Wraps the input `text` in a ``<![CDATA[ ]]>`` block.
If the text contains CDATA sections already, they are stripped and replaced
by the application of an outer-most CDATA block.
Args:
text: A string to wrap in a CDATA block.
Returns:
The `text` value wrapped in ``<![CDATA[]]>``
"""
if not text:
return text
if is_cdata(text):
text = strip_cdata(text)
escaped = "{0}{1}{2}".format(CDATA_START, text, CDATA_END)
return escaped
```
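A quick round trip through the CDATA helpers above (requires lxml to be installed, since `strip_cdata` parses the wrapped text).
```python
from mixbox.xml import cdata, is_cdata, strip_cdata

wrapped = cdata("if (a < b) { run(); }")
print(wrapped)                                   # <![CDATA[if (a < b) { run(); }]]>
assert is_cdata(wrapped)
assert strip_cdata(wrapped) == "if (a < b) { run(); }"
```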
#### File: mixbox/test/dates_tests.py
```python
import unittest
import datetime
from mixbox import dates
class DatesTests(unittest.TestCase):
def test_parse_date(self):
dstr = "2015-04-01"
parsed = dates.parse_date(dstr)
self.assertEqual(dstr, parsed.isoformat())
def test_serialize_datetime_as_date(self):
now = dates.now()
self.assertTrue(isinstance(now, datetime.datetime))
nowstr = dates.serialize_date(now)
        self.assertEqual(nowstr, now.date().isoformat())
def test_parse_datetime(self):
dtstr = '2015-04-02T16:44:30.423149+00:00'
parsed = dates.parse_datetime(dtstr)
self.assertEqual(dtstr, parsed.isoformat())
def test_parse_datetime_none(self):
parsed = dates.parse_datetime(None)
self.assertEqual(parsed, None)
def test_parse_date_none(self):
parsed = dates.parse_date(None)
self.assertEqual(parsed, None)
def test_now(self):
now = dates.now()
self.assertTrue(isinstance(now, datetime.datetime))
def test_serialize_date(self):
now = dates.now().date()
nowstr = now.isoformat()
self.assertEqual(nowstr, dates.serialize_date(now))
def test_serialize_datetime(self):
now = dates.now()
nowstr = now.isoformat()
self.assertEqual(nowstr, dates.serialize_datetime(now))
```
#### File: mixbox/test/signals_tests.py
```python
import unittest
from mixbox import signals
FOO_EMIT_VAL = 0xf00
FOO_SIGNAL = "foo.called"
class MockSender(object):
def send(self, value=None):
if not value:
value = FOO_EMIT_VAL
signals.emit(FOO_SIGNAL, value)
class MockReceiver(object):
static_value = None
class_value = None
def __init__(self):
self.value = None
signals.connect(FOO_SIGNAL, self.receive)
def receive(self, value):
self.value = value
def unbound(self):
pass
@staticmethod
def static_receive(value):
MockReceiver.static_value = value
@classmethod
def class_receive(cls, value):
MockReceiver.class_value = value
class SignalsTests(unittest.TestCase):
def test_func_receiver(self):
"""Test that signals are emitted and caught correctly."""
class NonLocal:
pass
@signals.receiver(FOO_SIGNAL)
def foo_handler(value):
NonLocal.to_check = value
m = MockSender()
m.send() # Should emit the signal and caught by foo_handler()
        self.assertEqual(NonLocal.to_check, FOO_EMIT_VAL)
def test_bound_receiver(self):
"""Tests that mixbox signals correctly invoke bound method handlers."""
receiver = MockReceiver()
sender = MockSender()
# Make sure that the receiver is initialized to None
self.assertEqual(receiver.value, None)
# Emit the signal
sender.send()
# Check that the receiver was updated correctly
self.assertEqual(receiver.value, FOO_EMIT_VAL)
def test_static_receiver(self):
"""Tests that a static method can be registerd as a receiver."""
self.assertEqual(MockReceiver.static_value, None)
signals.connect(FOO_SIGNAL, MockReceiver.static_receive)
sender = MockSender()
sender.send()
self.assertEqual(MockReceiver.static_value, FOO_EMIT_VAL)
def test_disconnect(self):
"""Tests that receiver disconnection returned True on success and False
on failure (to map the receiver to the signal) and that the receiver
is not called after disconnection.
"""
sender = MockSender()
receiver = MockReceiver()
# Test that disconnecting a valid receiver returns True
disconnected = signals.disconnect(FOO_SIGNAL, receiver.receive)
self.assertTrue(disconnected)
# Test that disconnecting an invalid receiver returns False
disconnected = signals.disconnect(FOO_SIGNAL, receiver.static_receive)
self.assertEqual(disconnected, False)
# Test that the previously connected receiver is disconnected.
expected = "THIS SHOULD NOT CHANGE"
receiver.value = expected
sender.send("IT CHANGED")
self.assertEqual(expected, receiver.value)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "6un9-h0-Dan/osv",
"score": 2
} |
#### File: docker/worker/worker.py
```python
import argparse
import datetime
import json
import logging
import os
import math
import re
import resource
import shutil
import subprocess
import sys
import threading
import time
import traceback
import tempfile
import yaml
from google.cloud import ndb
from google.cloud import pubsub_v1
import pygit2
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import osv
DEFAULT_WORK_DIR = '/work'
OSS_FUZZ_GIT_URL = 'https://github.com/google/oss-fuzz.git'
TASK_SUBSCRIPTION = 'tasks'
MAX_LEASE_DURATION = 6 * 60 * 60  # 6 hours.
OSS_FUZZ_ISSUE_URL = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id='
OSS_FUZZ_SOURCE_PREFIX = 'oss-fuzz:'
# Large projects which take way too long to build.
# TODO(ochang): Don't hardcode this.
PROJECT_DENYLIST = {
'ffmpeg',
'imagemagick',
'libreoffice',
}
REPO_DENYLIST = {
'https://github.com/google/AFL.git',
}
_state = threading.local()
_ndb_client = ndb.Client()
class LogFilter(logging.Filter):
"""Log filter."""
def filter(self, record):
"""Add metadata to record."""
source_id = getattr(_state, 'source_id', None)
if source_id:
record.extras = {
'source_id': source_id,
}
return True
class GkeLogHandler(logging.StreamHandler):
"""GKE log handler."""
def format_stackdriver_json(self, record, message):
"""Helper to format a LogRecord in in Stackdriver fluentd format."""
subsecond, second = math.modf(record.created)
payload = {
'message': message,
'timestamp': {
'seconds': int(second),
'nanos': int(subsecond * 1e9)
},
'thread': record.thread,
'severity': record.levelname,
}
extras = getattr(record, 'extras', None)
if extras:
payload.update(extras)
return json.dumps(payload)
def format(self, record):
"""Format the message into JSON expected by fluentd."""
message = super().format(record)
return self.format_stackdriver_json(record, message)
class _PubSubLeaserThread(threading.Thread):
"""Thread that continuously renews the lease for a message."""
EXTENSION_TIME_SECONDS = 10 * 60 # 10 minutes.
def __init__(self, subscriber_client, subscription, ack_id, done_event,
max_lease_seconds):
super().__init__()
self.daemon = True
self._subscriber = subscriber_client
self._subscription = subscription
self._ack_id = ack_id
self._done_event = done_event
self._max_lease_seconds = max_lease_seconds
def run(self):
"""Run the leaser thread."""
latest_end_time = time.time() + self._max_lease_seconds
while True:
try:
time_left = latest_end_time - time.time()
if time_left <= 0:
logging.warning(
'Lease reached maximum lease time of %d seconds, '
'stopping renewal.', self._max_lease_seconds)
break
extension_seconds = min(self.EXTENSION_TIME_SECONDS, time_left)
logging.info('Renewing lease for task by %d seconds.',
extension_seconds)
self._subscriber.modify_ack_deadline(
subscription=self._subscription,
ack_ids=[self._ack_id],
ack_deadline_seconds=extension_seconds)
# Schedule renewals earlier than the extension to avoid race conditions
# and performing the next extension too late.
wait_seconds = min(time_left, self.EXTENSION_TIME_SECONDS // 2)
# Wait until the next scheduled renewal, or if the task is complete.
if self._done_event.wait(wait_seconds):
logging.info('Task complete, stopping renewal.')
break
except Exception as e:
logging.error('Leaser thread failed: %s', str(e))
def ensure_updated_checkout(git_url, checkout_dir):
"""Ensure a Git repo is checked out to the latest master revision."""
if os.path.exists(checkout_dir):
repo = pygit2.Repository(checkout_dir)
else:
os.makedirs(checkout_dir)
repo = pygit2.clone_repository(git_url, checkout_dir)
for remote in repo.remotes:
remote.fetch()
repo.reset(repo.head.peel().oid, pygit2.GIT_RESET_HARD)
repo.checkout('refs/remotes/origin/master')
logging.info('OSS-Fuzz repo now at: %s', repo.head.peel().message)
def clean_artifacts(oss_fuzz_dir):
"""Clean build artifact from previous runs."""
build_dir = os.path.join(oss_fuzz_dir, 'build')
if os.path.exists(build_dir):
shutil.rmtree(build_dir, ignore_errors=True)
def format_commit_range(old_commit, new_commit):
"""Format a commit range."""
if old_commit == new_commit:
return old_commit
return (old_commit or osv.UNKNOWN_COMMIT) + ':' + new_commit
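# Example with made-up hashes: format_commit_range('abc123', 'def456') returns
# 'abc123:def456'; identical commits collapse to the single hash, and a falsy
# old_commit falls back to osv.UNKNOWN_COMMIT as the range start.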
def do_bisect(bisect_type, source_id, project_name, engine, sanitizer,
architecture, fuzz_target, old_commit, new_commit, testcase):
"""Do the actual bisect."""
import bisector
import build_specified_commit
with tempfile.NamedTemporaryFile() as f:
f.write(testcase)
f.flush()
build_data = build_specified_commit.BuildData(
project_name=project_name,
engine=engine,
sanitizer=sanitizer,
architecture=architecture)
try:
result = bisector.bisect(bisect_type, old_commit, new_commit, f.name,
fuzz_target, build_data)
except bisector.BisectError as e:
logging.error('Bisect failed with exception:\n%s', traceback.format_exc())
return bisector.Result(e.repo_url, None)
except Exception:
logging.error('Bisect failed with unexpected exception:\n%s',
traceback.format_exc())
return None
if result.commit == old_commit:
logging.error('Bisect failed for testcase %s, bisected to old_commit',
source_id)
result = None
return result
def get_oss_fuzz_summary(crash_type, crash_state):
"""Generate a summary from OSS-Fuzz crash type and crash state."""
crash_type = crash_type.splitlines()[0]
state_lines = crash_state.splitlines()
if crash_type in ('ASSERT', 'CHECK failure', 'Security CHECK failure',
'Security DCHECK failure'):
return crash_type + ': ' + state_lines[0]
if crash_type == 'Bad-cast':
return state_lines[0]
if not crash_state or crash_state == 'NULL':
return crash_type
return crash_type + ' in ' + state_lines[0]
def get_oss_fuzz_details(issue_id, crash_type, crash_state):
"""Generate details from OSS-Fuzz crash type and crash state."""
details = ''
if issue_id:
oss_fuzz_link = OSS_FUZZ_ISSUE_URL + issue_id
details = f'OSS-Fuzz report: {oss_fuzz_link}\n\n'
crash_type = crash_type.replace('\n', ' ')
return details + (f'Crash type: {crash_type}\n'
f'Crash state:\n{crash_state}')
def get_ecosystem(oss_fuzz_dir, project_name):
"""Get ecosystem."""
project_yaml_path = os.path.join(oss_fuzz_dir, 'projects', project_name,
'project.yaml')
with open(project_yaml_path) as f:
project_yaml = yaml.safe_load(f)
language = project_yaml.get('language', '')
ecosystems = {
'python': 'pypi',
'rust': 'cargo',
'go': 'golang',
}
# C/C++ projects from OSS-Fuzz don't belong to any package ecosystem.
return ecosystems.get(language, '')
def _set_result_attributes(oss_fuzz_dir, message, entity):
"""Set necessary fields from bisection message."""
project_name = message.attributes['project_name']
issue_id = message.attributes['issue_id'] or None
crash_type = message.attributes['crash_type']
crash_state = message.attributes['crash_state']
severity = message.attributes['severity'].upper()
timestamp = message.attributes['timestamp']
if timestamp:
timestamp = datetime.datetime.fromisoformat(timestamp)
entity.project = project_name
entity.ecosystem = get_ecosystem(oss_fuzz_dir, project_name)
entity.issue_id = issue_id
if issue_id:
entity.reference_urls.append(OSS_FUZZ_ISSUE_URL + issue_id)
entity.summary = get_oss_fuzz_summary(crash_type, crash_state)
entity.details = get_oss_fuzz_details(issue_id, crash_type, crash_state)
if severity:
entity.severity = severity
if timestamp:
entity.timestamp = timestamp
def find_oss_fuzz_fix_via_commit(repo, start_commit, end_commit, source_id,
issue_id):
"""Find fix commit by checking commit messages."""
if not source_id.startswith(OSS_FUZZ_SOURCE_PREFIX):
return None
# Walk through start_commit..end_commit
try:
walker = repo.walk(end_commit, pygit2.GIT_SORT_TOPOLOGICAL)
except KeyError:
logging.error('Failed to walk repo with invalid commit: %s', end_commit)
return None
walker.hide(start_commit)
testcase_id = source_id.split(':')[1]
oss_fuzz_pattern = re.compile(r'oss-?fuzz', re.IGNORECASE)
has_oss_fuzz_in_message = []
has_testcase_id_in_message = []
has_issue_id_in_message = []
# Look for commits with (in order of decreasing priority):
# - "oss-?fuzz" and the issue ID in the message.
# - ClusterFuzz testcase ID in the message.
# - "oss-?fuzz" in the message.
for commit in walker:
commit_message = commit.message.lower()
has_oss_fuzz = False
if oss_fuzz_pattern.search(commit_message):
has_oss_fuzz = True
has_oss_fuzz_in_message.append(commit)
if testcase_id in commit_message:
has_testcase_id_in_message.append(commit)
if issue_id and issue_id in commit_message and has_oss_fuzz:
has_issue_id_in_message.append(commit)
if has_issue_id_in_message:
return str(has_issue_id_in_message[0].id)
if has_testcase_id_in_message:
return str(has_testcase_id_in_message[0].id)
if len(has_oss_fuzz_in_message) == 1:
# Only pick the commit if there is a single one that mentions oss-fuzz.
return str(has_oss_fuzz_in_message[0].id)
return None
def process_bisect_task(oss_fuzz_dir, bisect_type, source_id, message):
"""Process a bisect task."""
bisect_type = message.attributes['type']
project_name = message.attributes['project_name']
engine = 'libfuzzer'
architecture = message.attributes['architecture'] or 'x86_64'
sanitizer = message.attributes['sanitizer']
fuzz_target = message.attributes['fuzz_target']
old_commit = message.attributes['old_commit']
new_commit = message.attributes['new_commit']
testcase = message.data
logging.info(
'Performing %s bisect on source_id=%s, project=%s, engine=%s, '
'architecture=%s, sanitizer=%s, fuzz_target=%s, old_commit=%s, '
'new_commit=%s', bisect_type, source_id, project_name, engine,
architecture, sanitizer, fuzz_target, old_commit, new_commit)
result = None
if project_name in PROJECT_DENYLIST:
logging.info('Skipping bisect for denylisted project %s', project_name)
elif not old_commit:
logging.info('Skipping bisect since there is no old_commit.')
else:
result = do_bisect(bisect_type, source_id, project_name, engine, sanitizer,
architecture, fuzz_target, old_commit, new_commit,
testcase)
  if result and result.repo_url in REPO_DENYLIST:
logging.info('Skipping because of denylisted repo %s.', result.repo_url)
return
if bisect_type == 'fixed':
entity = osv.FixResult(id=source_id)
else:
assert bisect_type == 'regressed'
entity = osv.RegressResult(id=source_id)
_set_result_attributes(oss_fuzz_dir, message, entity)
if result and result.commit:
logging.info('Bisected to %s', result.commit)
entity.commit = result.commit
entity.repo_url = result.repo_url
else:
logging.info(
'Bisect not successfully performed. Setting commit range from request.')
entity.commit = format_commit_range(old_commit, new_commit)
entity.repo_url = result.repo_url if result else None
entity.error = 'Bisect error'
entity.put()
def update_affected_commits(bug_id, result, project, ecosystem, public):
"""Update affected commits."""
to_put = []
to_delete = []
for commit in result.commits:
affected_commit = osv.AffectedCommit(
id=bug_id + '-' + commit,
bug_id=bug_id,
commit=commit,
confidence=result.confidence,
project=project,
ecosystem=ecosystem,
public=public)
to_put.append(affected_commit)
# Delete any affected commits that no longer apply. This can happen in cases
# where a FixResult comes in later and we had previously marked a commit prior
# to the fix commit as being affected by a vulnerability.
for existing in osv.AffectedCommit.query(osv.AffectedCommit.bug_id == bug_id):
if existing.commit not in result.commits:
to_delete.append(existing.key)
ndb.put_multi(to_put)
ndb.delete_multi(to_delete)
def process_impact_task(source_id, message):
"""Process an impact task."""
logging.info('Processing impact task for %s', source_id)
regress_result = ndb.Key(osv.RegressResult, source_id).get()
if not regress_result:
logging.error('Missing RegressResult for %s', source_id)
return
fix_result = ndb.Key(osv.FixResult, source_id).get()
if not fix_result:
logging.warning('Missing FixResult for %s', source_id)
fix_result = osv.FixResult()
# Check if there is an existing Bug for the same source, but with a different
# allocated ID. This shouldn't happen.
allocated_bug_id = message.attributes['allocated_id']
existing_bug = osv.Bug.query(osv.Bug.source_id == source_id).get()
if existing_bug and existing_bug.key.id() != allocated_bug_id:
logging.error('Bug entry already exists for %s with a different ID %s',
source_id, existing_bug.key.id())
return
if existing_bug and existing_bug.status == osv.BugStatus.INVALID:
logging.warning('Bug %s already marked as invalid.', existing_bug.key.id())
return
if existing_bug:
public = existing_bug.public
else:
raise osv.ImpactError('Task requested without Bug allocated.')
# TODO(ochang): Handle changing repo types? e.g. SVN -> Git.
repo_url = regress_result.repo_url or fix_result.repo_url
if not repo_url:
raise osv.ImpactError('No repo_url set')
issue_id = fix_result.issue_id or regress_result.issue_id
fix_commit = fix_result.commit
with tempfile.TemporaryDirectory() as tmp_dir:
repo = osv.clone_with_retries(repo_url, tmp_dir)
# If not a precise fix commit, try to find the exact one by going through
# commit messages (oss-fuzz only).
if source_id.startswith(OSS_FUZZ_SOURCE_PREFIX) and ':' in fix_commit:
start_commit, end_commit = fix_commit.split(':')
commit = find_oss_fuzz_fix_via_commit(repo, start_commit, end_commit,
source_id, issue_id)
if commit:
logging.info('Found exact fix commit %s via commit message (oss-fuzz)',
commit)
fix_commit = commit
# Actually compute the affected commits/tags.
result = osv.get_affected(repo, regress_result.commit, fix_commit)
logging.info('Found affected %s', ', '.join(result.tags))
# If the range resolved to a single commit, simplify it.
if len(result.fix_commits) == 1:
fix_commit = result.fix_commits[0]
elif not result.fix_commits:
# Not fixed.
fix_commit = ''
if len(result.regress_commits) == 1:
regress_commit = result.regress_commits[0]
else:
regress_commit = regress_result.commit
project = fix_result.project or regress_result.project
ecosystem = fix_result.ecosystem or regress_result.ecosystem
summary = fix_result.summary or regress_result.summary
details = fix_result.details or regress_result.details
severity = fix_result.severity or regress_result.severity
reference_urls = fix_result.reference_urls or regress_result.reference_urls
update_affected_commits(allocated_bug_id, result, project, ecosystem, public)
existing_bug.repo_url = repo_url
existing_bug.fixed = fix_commit
existing_bug.regressed = regress_commit
existing_bug.affected = result.tags
existing_bug.affected_fuzzy = osv.normalize_tags(result.tags)
existing_bug.confidence = result.confidence
existing_bug.issue_id = issue_id
existing_bug.project = project
existing_bug.ecosystem = ecosystem
existing_bug.summary = summary
existing_bug.details = details
existing_bug.status = osv.BugStatus.PROCESSED
existing_bug.severity = severity
existing_bug.reference_urls = reference_urls
existing_bug.additional_commit_ranges = []
# Don't display additional ranges for imprecise commits, as they can be
# confusing.
if ':' in existing_bug.fixed or ':' in existing_bug.regressed:
existing_bug.put()
return
def _sort_key(value):
# Allow sorting of None values.
return (value[0] or '', value[1] or '')
for introduced_in, fixed_in in sorted(result.affected_ranges, key=_sort_key):
if (introduced_in == existing_bug.regressed and
(fixed_in or '') == existing_bug.fixed):
# Don't include the main range.
continue
existing_bug.additional_commit_ranges.append(
osv.CommitRange(introduced_in=introduced_in, fixed_in=fixed_in))
existing_bug.put()
def find_bugs_for_tag(project_name, tag, public):
"""Find bugs for a given project and tag."""
query = osv.Bug.query(osv.Bug.project == project_name,
osv.Bug.affected == tag, osv.Bug.public == public)
return [bug.key.id() for bug in query]
def process_package_info_task(message):
"""Process project info."""
package_name = message.attributes['package_name']
ecosystem = message.attributes['ecosystem']
repo_url = message.attributes['repo_url']
tags_info = osv.get_tags(repo_url)
if tags_info.latest_tag:
info = osv.PackageInfo(id=f'{ecosystem}/{package_name}')
info.latest_tag = tags_info.latest_tag
info.put()
infos = []
for tag in tags_info.tags:
tag_info = osv.PackageTagInfo(id=f'{ecosystem}/{package_name}-{tag}')
tag_info.package = package_name
tag_info.ecosystem = ecosystem
tag_info.tag = tag
tag_info.bugs = find_bugs_for_tag(package_name, tag, public=True)
tag_info.bugs_private = find_bugs_for_tag(package_name, tag, public=False)
infos.append(tag_info)
ndb.put_multi(infos)
def mark_bug_invalid(message):
"""Mark a bug as invalid."""
source_id = get_source_id(message)
bug = osv.Bug.query(osv.Bug.source_id == source_id).get()
if not bug:
logging.error('Bug with source id %s does not exist.', source_id)
return
bug.status = osv.BugStatus.INVALID
bug.put()
affected_commits = osv.AffectedCommit.query(
osv.AffectedCommit.bug_id == bug.key.id())
ndb.delete_multi([commit.key for commit in affected_commits])
def get_source_id(message):
  """Get the source ID from the message."""
source_id = message.attributes['source_id']
if source_id:
return source_id
testcase_id = message.attributes['testcase_id']
if testcase_id:
return OSS_FUZZ_SOURCE_PREFIX + testcase_id
return None
def do_process_task(oss_fuzz_dir, subscriber, subscription, ack_id, message,
done_event):
"""Process task with timeout."""
try:
with _ndb_client.context():
source_id = get_source_id(message)
_state.source_id = source_id
task_type = message.attributes['type']
if task_type in ('regressed', 'fixed'):
process_bisect_task(oss_fuzz_dir, task_type, source_id, message)
elif task_type == 'impact':
try:
process_impact_task(source_id, message)
except osv.ImpactError:
logging.error('Failed to process impact: %s', traceback.format_exc())
elif task_type == 'package_info':
process_package_info_task(message)
elif task_type == 'invalid':
mark_bug_invalid(message)
_state.source_id = None
subscriber.acknowledge(subscription=subscription, ack_ids=[ack_id])
except Exception:
logging.error('Unexpected exception while processing task: %s',
traceback.format_exc())
subscriber.modify_ack_deadline(
subscription=subscription, ack_ids=[ack_id], ack_deadline_seconds=0)
finally:
logging.info('Ending task')
done_event.set()
def handle_timeout(subscriber, subscription, ack_id, oss_fuzz_dir, message):
"""Handle a timeout."""
subscriber.acknowledge(subscription=subscription, ack_ids=[ack_id])
bisect_type = message.attributes['type']
source_id = get_source_id(message)
logging.error('Task %s timed out (source_id=%s)', bisect_type, source_id)
if bisect_type not in ('fixed', 'regressed'):
return
old_commit = message.attributes['old_commit']
new_commit = message.attributes['new_commit']
if bisect_type == 'fixed':
entity = osv.FixResult(id=source_id)
else:
assert bisect_type == 'regressed'
entity = osv.RegressResult(id=source_id)
_set_result_attributes(oss_fuzz_dir, message, entity)
entity.commit = format_commit_range(old_commit, new_commit)
entity.error = 'Timeout'
entity.put()
def task_loop(oss_fuzz_dir):
"""Task loop."""
subscriber = pubsub_v1.SubscriberClient()
cloud_project = os.environ['GOOGLE_CLOUD_PROJECT']
subscription = subscriber.subscription_path(cloud_project, TASK_SUBSCRIPTION)
def process_task(ack_id, message):
"""Process a task."""
ensure_updated_checkout(OSS_FUZZ_GIT_URL, oss_fuzz_dir)
clean_artifacts(oss_fuzz_dir)
# Enforce timeout by doing the work in another thread.
done_event = threading.Event()
thread = threading.Thread(
target=do_process_task,
args=(oss_fuzz_dir, subscriber, subscription, ack_id, message,
done_event),
daemon=True)
thread.start()
done = done_event.wait(timeout=MAX_LEASE_DURATION)
logging.info('Returned from task thread')
if not done:
handle_timeout(subscriber, subscription, ack_id, oss_fuzz_dir, message)
logging.error('Timed out processing task')
while True:
response = subscriber.pull(subscription=subscription, max_messages=1)
if not response.received_messages:
continue
message = response.received_messages[0].message
ack_id = response.received_messages[0].ack_id
leaser_done = threading.Event()
leaser = _PubSubLeaserThread(subscriber, subscription, ack_id, leaser_done,
MAX_LEASE_DURATION)
leaser.start()
try:
process_task(ack_id, message)
finally:
leaser_done.set()
leaser.join()
def main():
logging.getLogger().addFilter(LogFilter())
logging.getLogger().addHandler(GkeLogHandler())
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('google.api_core.bidi').setLevel(logging.ERROR)
logging.getLogger('google.cloud.pubsub_v1.subscriber._protocol.'
'streaming_pull_manager').setLevel(logging.ERROR)
parser = argparse.ArgumentParser(description='Worker')
parser.add_argument(
'--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)
args = parser.parse_args()
# Work around kernel bug: https://gvisor.dev/issue/1765
resource.setrlimit(resource.RLIMIT_MEMLOCK,
(resource.RLIM_INFINITY, resource.RLIM_INFINITY))
subprocess.call(('service', 'docker', 'start'))
oss_fuzz_dir = os.path.join(args.work_dir, 'oss-fuzz')
tmp_dir = os.path.join(args.work_dir, 'tmp')
os.makedirs(tmp_dir, exist_ok=True)
os.environ['TMPDIR'] = tmp_dir
# Add oss-fuzz/infra to the import path so we can import from it.
sys.path.append(os.path.join(oss_fuzz_dir, 'infra'))
ensure_updated_checkout(OSS_FUZZ_GIT_URL, oss_fuzz_dir)
task_loop(oss_fuzz_dir)
if __name__ == '__main__':
with _ndb_client.context():
main()
```
#### File: gcp/appengine/handlers.py
```python
import datetime
import json
import logging
from flask import abort
from flask import Blueprint
from flask import request
from google.cloud.datastore_admin_v1.gapic import datastore_admin_client \
as ds_admin
from google.cloud import ndb
from google.cloud import pubsub_v1
from google.cloud import secretmanager
import requests
import osv
import monorail
_PROJECT = 'oss-vdb'
_PROJECT_ID = '651737493649'
_BUG_REDO_DAYS = 14
_CRON_ROUTE = '/cron'
_BACKUP_BUCKET = 'osv-backup'
_TASKS_TOPIC = 'projects/{project}/topics/{topic}'.format(
project=_PROJECT, topic='tasks')
blueprint = Blueprint('handlers', __name__)
def _get_counter(year=None):
"""Get next Bug ID."""
if year is None:
year = datetime.datetime.utcnow().year
key = ndb.Key(osv.IDCounter, year)
counter = key.get()
if counter:
return counter
return osv.IDCounter(id=year, next_id=1)
def make_affected_commits_public(bug):
"""Make related AffectedCommit entities public."""
to_update = []
query = osv.AffectedCommit.query(osv.AffectedCommit.bug_id == bug.key.id())
for affected_commit in query:
affected_commit.public = True
to_update.append(affected_commit)
if to_update:
ndb.put_multi(to_update)
def get_monorail_service_account():
"""Get monorail service account credentials."""
client = secretmanager.SecretManagerServiceClient()
response = client.access_secret_version(
f'projects/{_PROJECT_ID}/secrets/monorail-service-account/versions/latest'
)
return json.loads(response.payload.data.decode())
@blueprint.route(_CRON_ROUTE + '/make-bugs-public')
def make_bugs_public():
"""Mark bugs public."""
if not request.headers.get('X-Appengine-Cron'):
abort(403)
monorail_account = get_monorail_service_account()
monorail_client = monorail.Client('oss-fuzz', monorail_account)
query = osv.Bug.query(
osv.Bug.public == False, # pylint: disable=singleton-comparison
osv.Bug.status == osv.BugStatus.PROCESSED)
to_mark_public = []
for bug in query:
issue_id = bug.issue_id
if not issue_id:
logging.info('Missing issue_id for %s.', bug.key.id())
continue
try:
issue = monorail_client.get_issue(issue_id)
except requests.exceptions.HTTPError:
logging.error('Failed to get issue %s.', issue_id)
continue
labels = [label['label'].lower() for label in issue['labels']]
if 'restrict-view-commit' not in labels:
bug.public = True
logging.info('Marking %s as public.', bug.key.id())
to_mark_public.append(bug)
make_affected_commits_public(bug)
if to_mark_public:
ndb.put_multi(to_mark_public)
return 'done'
@blueprint.route(_CRON_ROUTE + '/process-results')
def process_results():
"""Generate impact requests."""
if not request.headers.get('X-Appengine-Cron'):
abort(403)
publisher = pubsub_v1.PublisherClient()
counters = {}
for regress_result in osv.RegressResult.query():
key_id = regress_result.key.id()
if not regress_result.commit:
logging.info('Missing commit info for %s.', key_id)
continue
fixed_result = ndb.Key(osv.FixResult, key_id).get()
if not fixed_result or not fixed_result.commit:
logging.info('Fixed result does not exist for %s.', key_id)
bug = osv.Bug.query(osv.Bug.source_id == key_id).get()
if bug:
logging.info('Bug already exists for %s.', key_id)
continue
if regress_result.issue_id:
bug = osv.Bug.query(osv.Bug.issue_id == regress_result.issue_id).get()
if bug:
logging.info('Bug already exists for issue %s.',
regress_result.issue_id)
continue
# Get ID counter for the year.
if regress_result.timestamp:
id_year = regress_result.timestamp.year
else:
id_year = None
counter = counters.get(id_year)
if not counter:
counter = _get_counter(id_year)
counters[id_year] = counter
try:
cur_id = '{}-{}'.format(counter.key.id(), counter.next_id)
logging.info('Allocating OSV-%s.', cur_id)
counter.next_id += 1
# Create the Bug now to avoid races when this cron is run again before the
# impact task finishes.
bug = osv.Bug(
id=cur_id,
timestamp=datetime.datetime.utcnow(),
public=False,
source_id=key_id,
status=osv.BugStatus.UNPROCESSED)
bug.put()
logging.info('Requesting impact for %s.', key_id)
publisher.publish(
_TASKS_TOPIC,
data=b'',
type='impact',
source_id=key_id,
allocated_id=cur_id)
finally:
counter.put()
# Re-compute bugs that aren't fixed.
for bug in osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,
osv.Bug.fixed == ''):
publisher.publish(
_TASKS_TOPIC,
data=b'',
type='impact',
source_id=bug.source_id,
allocated_id=bug.key.id())
# Re-compute existing Bugs for a period of time, as upstream changes may
# affect results.
cutoff_time = (
datetime.datetime.utcnow() - datetime.timedelta(days=_BUG_REDO_DAYS))
query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,
osv.Bug.timestamp >= cutoff_time)
for bug in query:
logging.info('Re-requesting impact for %s.', bug.key.id())
if not bug.fixed:
# Previous query already requested impact tasks for unfixed bugs.
continue
publisher.publish(
_TASKS_TOPIC,
data=b'',
type='impact',
source_id=bug.source_id,
allocated_id=bug.key.id())
return 'done'
@blueprint.route(_CRON_ROUTE + '/generate-package-info-tasks')
def generate_package_info_tasks():
"""Generate package_info tasks."""
if not request.headers.get('X-Appengine-Cron'):
abort(403)
publisher = pubsub_v1.PublisherClient()
query = osv.Bug.query(
projection=(osv.Bug.project, osv.Bug.ecosystem, osv.Bug.repo_url),
distinct_on=(osv.Bug.project, osv.Bug.ecosystem))
for result in query:
if not result.project or not result.repo_url:
continue
if result.ecosystem is None:
# Invalid/incomplete bug.
continue
publisher.publish(
_TASKS_TOPIC,
data=b'',
type='package_info',
package_name=result.project,
ecosystem=result.ecosystem,
repo_url=result.repo_url)
return 'done'
@blueprint.route(_CRON_ROUTE + '/backup')
def backup():
"""Create a Datastore backup."""
if not request.headers.get('X-Appengine-Cron'):
abort(403)
client = ds_admin.DatastoreAdminClient()
client.export_entities(
project_id=_PROJECT, output_url_prefix=f'gs://{_BACKUP_BUCKET}')
return 'done'
@blueprint.route('/_ah/warmup')
def warmup():
"""Warmup handler."""
return 'OK'
```
#### File: lib/osv/impact.py
```python
import collections
import datetime
import logging
import tempfile
import time
import pygit2
CLONE_TRIES = 3
COMMIT_RANGE_LIMIT = 4
CONFIDENCE_FULL = 100
# Flat reduction in confidence for any range.
CONFIDENCE_RANGE_REDUCTION = 20
# Reduction in confidence per commit in a range.
CONFIDENCE_RANGE_REDUCTION_STEP = 10
RETRY_SLEEP_SECONDS = 5
TAG_PREFIX = 'refs/tags/'
# Used in cases where an earlier commit in a regression range cannot be
# determined.
UNKNOWN_COMMIT = 'unknown'
AffectedResult = collections.namedtuple(
'AffectedResult',
'tags commits affected_ranges regress_commits fix_commits confidence')
TagsInfo = collections.namedtuple('TagsInfo', 'tags latest_tag')
class ImpactError(Exception):
"""Impact error."""
def clone_with_retries(git_url, checkout_dir, callbacks=None):
"""Clone with retries."""
logging.info('Cloning %s to %s', git_url, checkout_dir)
for _ in range(CLONE_TRIES):
try:
repo = pygit2.clone_repository(git_url, checkout_dir, callbacks=callbacks)
repo.cache = {}
return repo
except pygit2.GitError as e:
logging.error('Clone failed: %s', str(e))
time.sleep(RETRY_SLEEP_SECONDS)
continue
class RangeCollector:
"""Affected range collector."""
def __init__(self):
self.grouped_ranges = {}
def add(self, introduced_in, fixed_in):
"""Add a new commit range."""
if introduced_in in self.grouped_ranges:
if fixed_in is None:
# New range doesn't add anything new.
return
self.grouped_ranges[introduced_in].append((introduced_in, fixed_in))
# Remove any existing ranges with the same introduced in commit but with a
# None fixed commit.
self.grouped_ranges[introduced_in] = [
commit_range for commit_range in self.grouped_ranges[introduced_in]
if commit_range[1] is not None
]
else:
self.grouped_ranges[introduced_in] = [(introduced_in, fixed_in)]
def ranges(self):
"""Return a set representing the collected commit ranges."""
ranges = set()
for value in self.grouped_ranges.values():
ranges.update(value)
return ranges
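# Illustrative RangeCollector behaviour (hypothetical commits): add('abc', None)
# followed by add('abc', 'def') leaves only the range ('abc', 'def'), because an
# open-ended range is superseded once a fixed commit is known for the same
# introduced commit.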
def get_affected(repo, regress_commit_or_range, fix_commit_or_range):
  """Get list of affected tags and commits for a bug given regressed and fixed
  commits."""
confidence = CONFIDENCE_FULL
  # If multiple, assume any commit in the regression range caused the
  # regression.
regress_commits = get_commit_range(repo, regress_commit_or_range)
if len(regress_commits) > COMMIT_RANGE_LIMIT:
raise ImpactError('Too many commits in regression range.')
# If multiple, assume all commits are necessary for fixing the regression.
fix_commits = get_commit_range(repo, fix_commit_or_range)
if len(fix_commits) > COMMIT_RANGE_LIMIT:
logging.warning('Too many commits in fix range.')
# Rather than bail out here and potentially leaving a Bug as "unfixed"
# indefinitely, we do the best we can here, by assuming the last
# COMMIT_RANGE_LIMIT commits fix the bug.
fix_commits = fix_commits[-COMMIT_RANGE_LIMIT:]
confidence -= CONFIDENCE_RANGE_REDUCTION
# For every extra commit in the range, reduce the confidence.
if len(regress_commits) > 1:
confidence -= CONFIDENCE_RANGE_REDUCTION
confidence -= (len(regress_commits) - 1) * CONFIDENCE_RANGE_REDUCTION_STEP
# Special case: unknown status for earlier revisions.
unknown_earlier_revisions = UNKNOWN_COMMIT in regress_commit_or_range
if unknown_earlier_revisions:
confidence -= CONFIDENCE_RANGE_REDUCTION
if len(fix_commits) > 1:
confidence -= CONFIDENCE_RANGE_REDUCTION
confidence -= (len(fix_commits) - 1) * CONFIDENCE_RANGE_REDUCTION_STEP
if confidence < 0:
confidence = 0
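  # Worked example (illustrative): a regression range of 3 commits and a single
  # precise fix commit gives confidence = 100 - 20 - (3 - 1) * 10 = 60.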
tags_with_bug = set()
for commit in regress_commits:
tags_with_bug.update(get_tags_with_commits(repo, [commit]))
tags_with_fix = get_tags_with_commits(repo, fix_commits)
affected_tags = list(tags_with_bug - tags_with_fix)
affected_tags.sort()
affected_commits, affected_ranges = get_affected_range(
repo, regress_commits, fix_commits)
if unknown_earlier_revisions:
# Include the unknown marker in resulting entities.
regress_commits.insert(0, UNKNOWN_COMMIT)
return AffectedResult(affected_tags, affected_commits, affected_ranges,
regress_commits, fix_commits, confidence)
def get_affected_range(repo, regress_commits, fix_commits):
"""Get affected range."""
range_collector = RangeCollector()
commits = set()
seen_commits = set()
# Check all branches for cherry picked regress/fix commits.
for branch in repo.branches.remote:
ref = 'refs/remotes/' + branch
# Get the earliest equivalent commit in the regression range.
equivalent_regress_commit = None
for regress_commit in regress_commits:
logging.info('Finding equivalent regress commit to %s in %s',
regress_commit, ref)
equivalent_regress_commit = get_equivalent_commit(repo, ref,
regress_commit)
if equivalent_regress_commit:
break
if not equivalent_regress_commit:
continue
# Get the latest equivalent commit in the fix range.
equivalent_fix_commit = None
for fix_commit in fix_commits:
logging.info('Finding equivalent fix commit to %s in %s', fix_commit, ref)
equivalent_commit = get_equivalent_commit(repo, ref, fix_commit)
if equivalent_commit:
equivalent_fix_commit = equivalent_commit
range_collector.add(equivalent_regress_commit, equivalent_fix_commit)
last_affected_commits = []
if equivalent_fix_commit:
# Last affected commit is the one before the fix.
last_affected_commits.extend(
parent.id
for parent in repo.revparse_single(equivalent_fix_commit).parents)
else:
      # Not fixed in this branch. Everything is still vulnerable.
last_affected_commits.append(repo.revparse_single(ref).id)
commits.add(equivalent_regress_commit)
for last_affected_commit in last_affected_commits:
if (equivalent_regress_commit, last_affected_commit) in seen_commits:
continue
seen_commits.add((equivalent_regress_commit, last_affected_commit))
commits.update(
get_commit_list(repo, equivalent_regress_commit,
last_affected_commit))
return commits, range_collector.ranges()
def get_commit_range(repo, commit_or_range):
"""Get a commit range."""
if not commit_or_range:
return []
if ':' not in commit_or_range:
return [commit_or_range]
start_commit, end_commit = commit_or_range.split(':')
if start_commit == UNKNOWN_COMMIT:
# Special case: No information about earlier builds. Assume the end_commit
# is the regressing commit as that's the best we can do.
return [end_commit]
return get_commit_list(repo, start_commit, end_commit)
def get_tags_with_commits(repo, commits):
"""Get tags with a given commit."""
if not commits:
return set()
affected = set()
logging.info('Getting tags which contain %s', ','.join(commits))
tags = [
ref for ref in repo.listall_references() if ref.startswith(TAG_PREFIX)
]
for tag in tags:
if all(get_equivalent_commit(repo, tag, commit) for commit in commits):
affected.add(tag[len(TAG_PREFIX):])
return affected
def get_commit_list(repo, start_commit, end_commit):
"""Get commit list."""
logging.info('Getting commits %s..%s', start_commit, end_commit)
try:
walker = repo.walk(end_commit,
pygit2.GIT_SORT_TOPOLOGICAL | pygit2.GIT_SORT_REVERSE)
except KeyError as e:
raise ImpactError('Invalid commit.') from e
walker.hide(start_commit)
return [str(commit.id) for commit in walker]
def find_latest_tag(repo, tags):
"""Find the latest tag (by commit time)."""
latest_commit_time = None
latest_tag = None
for tag in tags:
commit = repo.lookup_reference(tag).peel()
commit_time = (
datetime.datetime.fromtimestamp(commit.commit_time) -
datetime.timedelta(minutes=commit.commit_time_offset))
if not latest_commit_time or commit_time > latest_commit_time:
latest_commit_time = commit_time
latest_tag = tag[len(TAG_PREFIX):]
return latest_tag
def get_equivalent_commit(repo, to_search, target_commit):
"""Find an equivalent commit at to_search, or None. The equivalent commit can
be equal to target_commit."""
if not target_commit:
return None
target = repo.revparse_single(target_commit)
target_patch_id = repo.diff(target.parents[0], target).patchid
search = repo.revparse_single(to_search)
try:
commits = repo.walk(search.id)
except ValueError:
# Invalid commit
return None
for commit in commits:
# Ignore commits without parents and merge commits with multiple parents.
if not commit.parents or len(commit.parents) > 1:
continue
patch_id = repo.cache.get(commit.id)
if not patch_id:
diff = repo.diff(commit.parents[0], commit)
patch_id = diff.patchid
repo.cache[commit.id] = patch_id
if patch_id == target_patch_id:
return str(commit.id)
# TODO(ochang): Possibly look at commit message, author etc.
return None
def get_tags(repo_url):
"""Get tags information."""
with tempfile.TemporaryDirectory() as tmp_dir:
repo = clone_with_retries(repo_url, tmp_dir)
tags = [
ref for ref in repo.listall_references() if ref.startswith(TAG_PREFIX)
]
latest_tag = find_latest_tag(repo, tags)
return TagsInfo([tag[len(TAG_PREFIX):] for tag in tags], latest_tag)
``` |
{
"source": "6un9-h0-Dan/PoshC2",
"score": 2
} |
#### File: poshc2/server/PowerStatus.py
```python
from poshc2.server.database.DB import get_powerstatusbyrandomuri, insert_blankpowerstatus, update_screenlocked, update_monitoron
from poshc2.server.database.DB import update_powerstatus, update_acstatus, update_apmstatus
def create_if_no_status_for_uri(RandomURI):
result = get_powerstatusbyrandomuri(RandomURI)
if result is None:
insert_blankpowerstatus(RandomURI)
def translate_power_status(status, RandomURI):
if "Power Status Monitoring:" in status:
print(status)
elif ":" in status:
create_if_no_status_for_uri(RandomURI)
splt = status.split(":")
if splt[0] == "WM_QUERYENDSESSION":
print("[!] SHUTDOWN may be imminent. Query End Session has been called:")
elif splt[0] == "WM_WTSSESSION_CHANGE":
if splt[1] == "CONSOLE_CONNECT":
print("[+] Console session has been connected to")
elif splt[1] == "CONSOLE_DISCONNECT":
                print("[-] Console session has been disconnected from")
elif splt[1] == "REMOTE_CONNECT":
print("[+] Remote connection has been made to the machine (RDP)")
elif splt[1] == "REMOTE_DISCONNECT":
print("[-] Remote connection has been dropped (RDP)")
elif splt[1] == "SESSION_LOGON":
print("[+] A user has logged on")
elif splt[1] == "SESSION_LOGOFF":
print("[!] A user has logged off")
elif splt[1] == "SESSION_LOCK":
print("[!] Session has been locked")
update_screenlocked(RandomURI, 1)
elif splt[1] == "SESSION_UNLOCK":
print("[+] Session has been unlocked")
update_screenlocked(RandomURI, 0)
elif splt[1] == "SESSION_REMOTE_CONTROL":
print("[-] Session remote control status has changed")
elif splt[0] == "WM_POWERBROADCAST":
if splt[1] == "GUID_MONITOR_POWER_ON":
if splt[2] == "On":
update_monitoron(RandomURI, 1)
                    print("[+] Monitor (screen) has been switched ON")
else:
update_monitoron(RandomURI, 0)
                    print("[!] Monitor (screen) has been switched OFF")
elif splt[1] == "GUID_BATTERY_PERCENTAGE_REMAINING":
result = get_powerstatusbyrandomuri(RandomURI)
if (splt[2].isdigit()):
battperc = int(splt[2])
if (battperc <= 100 and battperc >= 0):
if (battperc > 50):
print("[+] Battery has %s%% charge" % battperc)
elif battperc > 15:
print("[!] WARNING: Battery has only %s%% charge" % battperc)
                        elif battperc <= 15:
print("[!] CRITICAL BATTERY: %s%% charge left" % battperc)
update_powerstatus(RandomURI, result[3], result[4], result[5], ("%s%%" % battperc))
else:
print("[-] Battery status: UNKNOWN")
update_powerstatus(RandomURI, result[3], result[4], result[5], "UNKNOWN")
elif splt[1] == "GUID_ACDC_POWER_SOURCE":
if splt[2] == "Unplugged":
update_acstatus(RandomURI, 0)
print("[!] DISCHARGING the battery now. AC has been unplugged.")
else:
if splt[2] == "UPS":
print("[!] UPS powered now. Machine may turn off at any time")
update_acstatus(RandomURI, 0)
elif splt[1] == "PBT_APMBATTERYLOW":
print("[!] Low battery reported")
result = get_powerstatusbyrandomuri(RandomURI)
update_powerstatus(RandomURI, result[3], result[4], "LOW", result[6])
elif splt[1] == "PBT_APMQUERYSUSPEND":
print("[!] SUSPEND may be imminent. QuerySuspend has been called:")
update_apmstatus(RandomURI, "QUERYSUSPEND")
elif splt[1] == "PBT_APMSUSPEND":
print("[!] SUSPEND/SLEEP, machine has been hibernated")
update_apmstatus(RandomURI, "SUSPEND")
elif splt[1] == "PBT_APMRESUMESUSPEND":
print("[+] Resume from suspend.")
update_apmstatus(RandomURI, "RESUME")
elif splt[1] == "PBT_APMPOWERSTATUSCHANGE":
lns = status.splitlines(False)
result = get_powerstatusbyrandomuri(RandomURI)
acpower = result[3]
chrging = result[4]
stus = result[5]
percent = result[6]
for i in lns:
if i.startswith("GUID_ACDC_POWER_SOURCE:"):
if(i[23:] == "Plugged"):
print("[+] AC is plugged in")
acpower = 1
elif (i[23:] == "Unplugged"):
print("[!] AC has been unplugged")
acpower = 0
elif (i[23:] == "UPS"):
print("[!] Computer is on a UPS")
acpower = 0
elif i.startswith("CHRG:"):
chrging = (i[5:] == "CHARGING")
print("[+] Battery is charging: %s" % chrging)
elif i.startswith("PERCENT:"):
prcnt = i[8:]
if prcnt != "UNKNOWN" and prcnt.isdigit():
percent = ("%s%%" % prcnt)
print("[+] Battery Percent: %s" % percent)
else:
percent = "UNKNOWN"
print("[-] Battery Percent: UNKNOWN")
elif i.startswith("BATTERY:"):
stus = i[8:]
                    if stus is None or stus == "":
stus = "UNKNOWN"
print("[-] Battery Status: UNKNOWN")
else:
print("[+] Battery Status: %s" % stus)
update_powerstatus(RandomURI, acpower, chrging, stus, percent)
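# Illustrative usage (hypothetical values): translate_power_status("WM_WTSSESSION_CHANGE:SESSION_LOCK", "uri123")
# prints "[!] Session has been locked" and records the screen as locked for that implant.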
def getpowerstatus(randomuri):
pwrStatus = get_powerstatusbyrandomuri(randomuri)
if (pwrStatus is not None):
if (pwrStatus[9] is not None and pwrStatus[9] != ""):
print("[+] Power status @ %s" % pwrStatus[9])
else:
print("[+] Power status")
if (pwrStatus[2] is not None and pwrStatus[2] != ""):
print("apmstatus: %s" % pwrStatus[2])
if (pwrStatus[3]):
if (not pwrStatus[4]):
print("BATTERY: Not Charging")
else:
print("BATTERY: Charging")
else:
            # Index 6 of the power status row holds the stored battery percentage string.
            print("BATTERY: Discharging %s" % pwrStatus[6])
if (pwrStatus[5] is not None and pwrStatus[5] != ""):
print("BATTERY FLAG: %s" % pwrStatus[5])
if (pwrStatus[7] > 0):
print("SCREEN: LOCKED")
else:
print("SCREEN: UNLOCKED")
if (pwrStatus[8]):
print("MONITOR: ON")
else:
print("MONITOR: OFF")
else:
print("[X] No power status has been recorded for this implant")
```
#### File: PoshC2/poshc2/Utils.py
```python
import os, base64, string, random, re, argparse, shlex, datetime
validate_sleep_regex = re.compile("^[0-9]+[smh]$")
def gen_key():
key = os.urandom(256 // 8)
return base64.b64encode(key)
def formStrMacro(varstr, instr):
holder = []
str1 = ''
str2 = ''
str1 = varstr + ' = "' + instr[:54] + '"'
for i in range(54, len(instr), 48):
holder.append(varstr + ' = ' + varstr + ' + "' + instr[i:i + 48])
str2 = '"\r\n'.join(holder)
str2 = str2 + "\""
str1 = str1 + "\r\n" + str2
return str1
def formStr(varstr, instr):
holder = []
str1 = ''
str2 = ''
str1 = varstr + ' = "' + instr[:56] + '"'
for i in range(56, len(instr), 48):
holder.append('"' + instr[i:i + 48])
str2 = '"\r\n'.join(holder)
str2 = str2 + "\""
str1 = str1 + "\r\n" + str2
return "%s;" % str1
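# Illustrative example (hypothetical input): formStr('$cmd', 'A' * 120) yields a first
# line assigning the initial 56 characters to $cmd, followed by quoted continuation
# chunks of up to 48 characters each, with the returned statement ending in ';'.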
# Can pass a list of words to use and it will randomly concatenate those until
# the length is above the size value. If whole_words is set to True it will
# return the full sentence, if False it will strip the sentence to length 'size'
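# Illustrative usage (hypothetical values): randomuri(8) might return something like
# "aX3kP9qZ", while randomuri(size=10, words=["alpha", "beta"], whole_words=True)
# could return "alphabetaalpha" (words are appended until the length reaches or exceeds size).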
def randomuri(size=15, chars=string.ascii_letters + string.digits, words=None, whole_words=False):
if words is not None:
result = ""
while len(result) < size:
result = result + random.choice(words)
if whole_words:
return result
return result[:size]
else:
return random.choice(string.ascii_letters) + "".join(random.choice(chars) for _ in range(size - 1))
def validate_sleep_time(sleeptime):
if sleeptime is None:
return None
sleeptime = sleeptime.strip()
return validate_sleep_regex.match(sleeptime)
def validate_killdate(killdate):
return validate_timestamp_string(killdate, '%Y-%m-%d')
def argp(cmd):
args = ""
try:
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-Help', '-help', '-h', action='store', dest='help', required=False)
parser.add_argument('-Source', '-source', action='store', dest='source', required=True)
parser.add_argument('-Destination', '-destination', action='store', dest='destination', required=True)
parser.add_argument('-NotHidden', '-nothidden', action='store', dest='nothidden', required=False)
args, unknown = parser.parse_known_args(shlex.split(cmd))
except Exception:
pass
return args
def load_file(location):
    fr = None
    try:
        with open(location, "rb") as file:
            fr = file.read()
    except Exception as e:
        print("Error loading file %s" % e)
    if fr:
        return fr
    else:
        return None
def parse_creds(allcreds):
creds = ""
hashes = ""
if allcreds is None:
return (creds, hashes)
for cred in allcreds:
if cred is not None:
if cred[3] is not None and cred[3] != "":
creds += str(cred[0]) + ": " + str(cred[1]) + "\\" + str(cred[2]) + " : " + str(cred[3]) + "\n"
if cred[4] is not None and cred[4] != "":
hashes += str(cred[0]) + ": " + str(cred[1]) + "\\" + str(cred[2]) + " : " + str(cred[4]) + "\n"
return (creds, hashes)
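# Illustrative example (hypothetical row): a credential tuple like
# (1, 'DOMAIN', 'user', 'Passw0rd!', None) contributes "1: DOMAIN\user : Passw0rd!\n"
# to the creds string, and a value in column 4 is appended to the hashes string in
# the same format.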
def string_to_array(stringarg):
y = ""
x = []
p = stringarg.replace(" ", "")
x = p.split(",")
c = 0
for i in x:
if c > 0:
y += f",\"{i}\""
else:
y += f"\"{i}\""
c += 1
return(y, c)
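# Illustrative example (hypothetical input): string_to_array("a, b, c") returns
# ('"a","b","c"', 3), i.e. the quoted comma-separated form plus the element count.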
def get_first_dfheader(DomainFrontHeader):
DomainFrontHeader = DomainFrontHeader.replace('"', '')
if DomainFrontHeader:
if "," in DomainFrontHeader:
return DomainFrontHeader.split(',')[0]
return DomainFrontHeader
return None
def get_first_url(PayloadCommsHost, DomainFrontHeader):
PayloadCommsHost = PayloadCommsHost.replace('"', '')
DomainFrontHeader = DomainFrontHeader.replace('"', '')
if DomainFrontHeader:
if "," in DomainFrontHeader:
domain = DomainFrontHeader.split(',')[0]
else:
domain = DomainFrontHeader
if PayloadCommsHost.startswith("http://"):
return f"http://{domain}"
return f"https://{domain}"
else:
if "," in PayloadCommsHost:
return PayloadCommsHost.split(',')[0]
return PayloadCommsHost
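# Illustrative example (hypothetical hosts): with PayloadCommsHost "https://c2.example.com"
# and DomainFrontHeader "cdn1.example.net,cdn2.example.net" this returns
# "https://cdn1.example.net"; with an empty DomainFrontHeader the first
# PayloadCommsHost entry is returned unchanged.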
def offsetFinder(filepath):
with open(filepath, "rb") as input_file:
file = input_file.read()
file = base64.b64decode(file)
try:
offset = hex(file.index(b'\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41'))
return(int(offset, 0))
except ValueError:
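        # Fall back to the UTF-16LE ("wide") form of the 'A' marker.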
offset = hex(file.index(b'\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00'))
return(int(offset, 0))
def yes_no_prompt(message):
ri = input(f"{message} (Y/n) ")
if ri.lower() == "n":
return False
if ri == "" or ri.lower() == "y":
return True
def no_yes_prompt(message):
ri = input(f"{message} (N/y) ")
if ri == "" or ri.lower() == "n":
return False
if ri.lower() == "y":
return True
def validate_timestamp_string(timestamp_string, format_string):
if not timestamp_string:
return False
timestamp_string = timestamp_string.strip()
try:
datetime.datetime.strptime(timestamp_string, format_string)
return True
except ValueError:
return False
``` |
{
"source": "6un9-h0-Dan/ProcDOT-Plugins",
"score": 3
} |
#### File: Files List/Windows/File_list.py
```python
import os
def main():
data = os.getenv('PROCDOTPLUGIN_GraphFileDot')
out = os.getenv('PROCDOTPLUGIN_ResultCSV')
outfile = open(out, 'w')
outfile.write ('"File Path","Exists"\n')
outfile.write ('"*","*"\n')
with open(data) as f:
for line in f:
if line.startswith(' "FILE:'):
if 'fontcolor = magenta' in line:
line = line.strip().split('"')
f1 = (','.join(line[1:2]))
f2 = (','.join(line[3:4]))
outfile.write('{{color:purple}}' + '"' + f1[5:] + ' -> ' + f2[5:] + '","rename"\n')
elif 'fontcolor = red' in line:
line = line.strip().split('"')
line = (','.join(line[1:2]))
outfile.write('{{color:red}}' + '"' + line[5:] + '","No"\n')
else:
line = line.strip().split('"')
line = (','.join(line[1:2]))
outfile.write('"' + line[5:] + '","Yes"\n')
if __name__ == '__main__':
main()
```
#### File: GeoIP/Linux/GeoIP.py
```python
import os
import pygeoip
import sys
import urllib
import gzip
reload(sys)
sys.setdefaultencoding('utf8')
out = os.getenv('PROCDOTPLUGIN_ResultTXT')
def database(url,dbgz,db):
geo = urllib.URLopener()
try:
geo.retrieve(url, dbgz)
with gzip.open(dbgz, 'rb') as infile:
with open(db, 'wb') as outfile:
for line in infile:
outfile.write(line)
os.remove(dbgz)
except Exception as e:
open(out, 'wb').write(str(e))
sys.exit(0)
if os.path.isfile('GeoLiteCity.dat'):
pass
else:
database('http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz', 'GeoLiteCity.dat.gz', 'GeoLiteCity.dat')
if os.path.isfile('GeoIPASNum.dat'):
pass
else:
database('http://download.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz', 'GeoIPASNum.dat.gz', 'GeoIPASNum.dat')
tfolder = os.listdir(os.getenv('PROCDOTPLUGIN_TempFolder'))
details = open(os.getenv('PROCDOTPLUGIN_GraphFileDetails'),'rb').readlines()
n = open(os.getenv('PROCDOTPLUGIN_GraphFileDetails'),'w')
for num, line in enumerate(details,1):
if 'IP-Address' in line:
query = pygeoip.GeoIP('GeoLiteCity.dat')
asn = pygeoip.GeoIP('GeoIPASNum.dat')
x = [x.strip() for x in line.split(' ')][2]
try:
results = query.record_by_addr(x)
asn_info = asn.asn_by_addr(x)
for key, val in results.items():
details.insert(num,str(key) + ' = ' + str(val) + '\n')
try:
details.insert(num,'asn = '+asn_info+'\n')
except:
pass
except:
pass
details = "".join(details)
n.write(details)
n.close()
sys.exit(0)
``` |
{
"source": "6un9-h0-Dan/python-idb",
"score": 2
} |
#### File: python-idb/scripts/extract_version.py
```python
import sys
import logging
import hexdump
import argparse
import idb
import idb.netnode
logger = logging.getLogger(__name__)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Extract the IDA Pro version used to create the IDB.")
parser.add_argument("idbpath", type=str,
help="Path to input idb file")
parser.add_argument("-v", "--verbose", action="store_true",
help="Enable debug logging")
parser.add_argument("-q", "--quiet", action="store_true",
help="Disable all output but errors")
args = parser.parse_args(args=argv)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
with idb.from_file(args.idbpath) as db:
root = idb.analysis.Root(db)
print(root.version)
return 0
if __name__ == "__main__":
sys.exit(main())
```
#### File: python-idb/scripts/run_ida_script.py
```python
import sys
import os.path
import logging
import argparse
import idb
import idb.shim
logger = logging.getLogger(__name__)
def main(argv=None):
# TODO: do version check for 3.x
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Dump an IDB B-tree to a textual representation.")
parser.add_argument("script_path", type=str,
help="Path to script file")
parser.add_argument("idbpath", type=str,
help="Path to input idb file")
parser.add_argument("-v", "--verbose", action="store_true",
help="Enable debug logging")
parser.add_argument("-q", "--quiet", action="store_true",
help="Disable all output but errors")
parser.add_argument("--ScreenEA", type=str,
help="Prepare value of ScreenEA()")
args = parser.parse_args(args=argv)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger('idb.netnode').setLevel(logging.ERROR)
logging.getLogger('idb.fileformat').setLevel(logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('idb.netnode').setLevel(logging.ERROR)
logging.getLogger('idb.fileformat').setLevel(logging.ERROR)
with idb.from_file(args.idbpath) as db:
if args.ScreenEA:
if args.ScreenEA.startswith('0x'):
screenea = int(args.ScreenEA, 0x10)
else:
screenea = int(args.ScreenEA)
else:
screenea = list(sorted(idb.analysis.Segments(db).segments.keys()))[0]
hooks = idb.shim.install(db, ScreenEA=screenea)
# update sys.path to point to directory containing script.
# so scripts can import .py files in the same directory.
script_dir = os.path.dirname(args.script_path)
sys.path.insert(0, script_dir)
with open(args.script_path, 'rb') as f:
g = {
'__name__': '__main__',
}
g.update(hooks)
exec(f.read(), g)
return 0
if __name__ == "__main__":
sys.exit(main())
```
#### File: python-idb/tests/test_analysis.py
```python
import pytest
import idb.analysis
from fixtures import *
def pluck(prop, s):
'''
generate the values from the given attribute with name `prop` from the given sequence of items `s`.
Args:
prop (str): the name of an attribute.
s (sequnce): a bunch of objects.
Yields:
any: the values of the requested field across the sequence
'''
for x in s:
yield getattr(x, prop)
def lpluck(prop, s):
'''
like `pluck`, but returns the result in a single list.
'''
return list(pluck(prop, s))
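# Illustrative usage (hypothetical objects): if every item in `refs` has a `frm`
# attribute, lpluck('frm', refs) collects those attribute values into a single list.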
@kern32_test()
def test_root(kernel32_idb, version, bitness, expected):
root = idb.analysis.Root(kernel32_idb)
assert root.version in (695, 700)
assert root.get_field_tag('version') == 'A'
assert root.get_field_index('version') == -1
assert root.version_string in ('6.95', '7.00')
assert root.open_count == 1
assert root.md5 == '00bf1bf1b779ce1af41371426821e0c2'
@kern32_test([
(695, 32, '2017-06-20T22:31:34'),
(695, 64, '2017-07-10T01:36:23'),
(700, 32, '2017-07-10T18:28:22'),
(700, 64, '2017-07-10T21:37:15'),
])
def test_root_timestamp(kernel32_idb, version, bitness, expected):
root = idb.analysis.Root(kernel32_idb)
assert root.created.isoformat() == expected
@kern32_test([
(695, 32, 1),
(695, 64, 1),
(700, 32, 1),
(700, 64, 1),
])
def test_root_open_count(kernel32_idb, version, bitness, expected):
root = idb.analysis.Root(kernel32_idb)
assert root.open_count == expected
@kern32_test([
(695, 32, 'pe.ldw'),
(695, 64, 'pe64.l64'),
(700, 32, 'pe.dll'),
(700, 64, 'pe64.dll'),
])
def test_loader(kernel32_idb, version, bitness, expected):
loader = idb.analysis.Loader(kernel32_idb)
assert loader.format.startswith('Portable executable') is True
assert loader.plugin == expected
@kern32_test([
(695, 32, 0x75),
(695, 64, 0x75),
(700, 32, 0x7A), # not supported.
(700, 64, 0x7A), # not supported.
])
def test_fileregions(kernel32_idb, version, bitness, expected):
fileregions = idb.analysis.FileRegions(kernel32_idb)
regions = fileregions.regions
assert len(regions) == 3
assert list(regions.keys()) == [0x68901000, 0x689db000, 0x689dd000]
assert regions[0x68901000].start == 0x68901000
assert regions[0x68901000].end == 0x689db000
assert regions[0x68901000].rva == 0x1000
@kern32_test([
(695, 32, 0x12a8),
(695, 64, 0x12a8),
(700, 32, 0x1290),
(700, 64, 0x1290),
])
def test_functions(kernel32_idb, version, bitness, expected):
functions = idb.analysis.Functions(kernel32_idb)
funcs = functions.functions
for addr, func in funcs.items():
assert addr == func.startEA
assert len(funcs) == expected
@kern32_test([
(695, 32, 0x75),
(695, 64, 0x75),
(700, 32, 0x7A),
(700, 64, 0x7A),
])
def test_function_frame(kernel32_idb, version, bitness, expected):
DllEntryPoint = idb.analysis.Functions(kernel32_idb).functions[0x68901695]
assert DllEntryPoint.startEA == 0x68901695
assert DllEntryPoint.endEA == 0x689016B0
assert DllEntryPoint.frame == expected
@kern32_test([
(695, 32, None),
(695, 64, None),
(700, 32, None),
(700, 64, None),
])
def test_struct(kernel32_idb, version, bitness, expected):
# ; BOOL __stdcall DllEntryPoint(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved)
# .text:68901695 public DllEntryPoint
# .text:68901695 DllEntryPoint proc near
# .text:68901695
# .text:68901695 hinstDLL = dword ptr 8
# .text:68901695 fdwReason = dword ptr 0Ch
# .text:68901695 lpReserved = dword ptr 10h
DllEntryPoint = idb.analysis.Functions(kernel32_idb).functions[0x68901695]
struc = idb.analysis.Struct(kernel32_idb, DllEntryPoint.frame)
members = list(struc.get_members())
assert list(map(lambda m: m.get_name(), members)) == [' s',
' r',
'hinstDLL',
'fdwReason',
'lpReserved', ]
assert members[2].get_type() == 'HINSTANCE'
@kern32_test()
def test_function(kernel32_idb, version, bitness, expected):
# .text:689016B5 sub_689016B5 proc near
# .text:689016B5
# .text:689016B5 var_214 = dword ptr -214h
# .text:689016B5 var_210 = dword ptr -210h
# .text:689016B5 var_20C = dword ptr -20Ch
# .text:689016B5 var_205 = byte ptr -205h
# .text:689016B5 var_204 = word ptr -204h
# .text:689016B5 var_4 = dword ptr -4
# .text:689016B5 arg_0 = dword ptr 8
# .text:689016B5
# .text:689016B5 ; FUNCTION CHUNK AT .text:689033D9 SIZE 00000017 BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:68904247 SIZE 000000A3 BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:689061B9 SIZE 0000025E BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:689138B4 SIZE 0000001F BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:6892BC20 SIZE 00000021 BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:6892F138 SIZE 00000015 BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:6892F267 SIZE 00000029 BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:68934D65 SIZE 0000003D BYTES
# .text:689016B5 ; FUNCTION CHUNK AT .text:68937707 SIZE 00000084 BYTES
# .text:689016B5
# .text:689016B5 8B FF mov edi, edi
# .text:689016B7 55 push ebp
# .text:689016B8 8B EC mov ebp, esp
# .text:689016BA 81 EC 14 02 00 00 sub esp, 214h
sub_689016B5 = idb.analysis.Function(kernel32_idb, 0x689016B5)
assert sub_689016B5.get_name() == 'sub_689016B5'
chunks = list(sub_689016B5.get_chunks())
assert chunks == [(0x689033D9, 0x17),
(0x68904247, 0xA3),
(0x689061B9, 0x25E),
(0x689138B4, 0x1F),
(0x6892BC20, 0x21),
(0x6892F138, 0x15),
(0x6892F267, 0x29),
(0x68934D65, 0x3D),
(0x68937707, 0x84)]
# sub_689016B5.get_unk()
# ; BOOL __stdcall DllEntryPoint(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpReserved)
# .text:68901695 public DllEntryPoint
# .text:68901695 DllEntryPoint proc near
# .text:68901695
# .text:68901695 hinstDLL = dword ptr 8
# .text:68901695 fdwReason = dword ptr 0Ch
# .text:68901695 lpReserved = dword ptr 10h
DllEntryPoint = idb.analysis.Function(kernel32_idb, 0x68901695)
sig = DllEntryPoint.get_signature()
assert sig.calling_convention == '__stdcall'
assert sig.rtype == 'BOOL'
assert len(sig.parameters) == 3
assert list(map(lambda p: p.type, sig.parameters)) == [
'HINSTANCE', 'DWORD', 'LPVOID']
assert list(map(lambda p: p.name, sig.parameters)) == [
'hinstDLL', 'fdwReason', 'lpReserved']
@kern32_test()
def test_stack_change_points(kernel32_idb, version, bitness, expected):
# .text:68901AEA CreateThread proc near
# .text:68901AEA
# .text:68901AEA lpThreadAttributes= dword ptr 8
# .text:68901AEA dwStackSize = dword ptr 0Ch
# .text:68901AEA lpStartAddress = dword ptr 10h
# .text:68901AEA lpParameter = dword ptr 14h
# .text:68901AEA dwCreationFlags = dword ptr 18h
# .text:68901AEA lpThreadId = dword ptr 1Ch
# .text:68901AEA
# .text:68901AEA 8B FF mov edi, edi
# .text:68901AEC 55 push ebp
# .text:68901AED 8B EC mov ebp, esp
# .text:68901AEF FF 75 1C push [ebp+lpThreadId]
# .text:68901AF2 8B 45 18 mov eax, [ebp+dwCreationFlags]
# .text:68901AF5 6A 00 push 0
# .text:68901AF7 25 04 00 01 00 and eax, 10004h
# .text:68901AFC 50 push eax
# .text:68901AFD FF 75 14 push [ebp+lpParameter]
# .text:68901B00 FF 75 10 push [ebp+lpStartAddress]
# .text:68901B03 FF 75 0C push [ebp+dwStackSize]
# .text:68901B06 FF 75 08 push [ebp+lpThreadAttributes]
# .text:68901B09 6A FF push 0FFFFFFFFh
# .text:68901B0B FF 15 00 D8 9D 68 call ds:CreateRemoteThreadEx_0
# .text:68901B11 5D pop ebp
# .text:68901B12 C2 18 00 retn 18h
# .text:68901B12 CreateThread endp
CreateThread = idb.analysis.Function(kernel32_idb, 0x68901aea)
change_points = list(CreateThread.get_stack_change_points())
assert change_points == [(0x68901aed, -4),
(0x68901af2, -4),
(0x68901af7, -4),
(0x68901afd, -4),
(0x68901b00, -4),
(0x68901b03, -4),
(0x68901b06, -4),
(0x68901b09, -4),
(0x68901b0b, -4),
(0x68901b11, 32),
(0x68901b12, 4)]
# .text:68901493 ; HANDLE __stdcall GetCurrentProcess()
# .text:68901493 public GetCurrentProcess
# .text:68901493 GetCurrentProcess proc near
# .text:68901493 83 C8 FF or eax, 0FFFFFFFFh
# .text:68901496 C3 retn
# .text:68901496 GetCurrentProcess endp
GetCurrentProcess = idb.analysis.Function(kernel32_idb, 0x68901493)
# there are no stack change points in this function
assert list(GetCurrentProcess.get_stack_change_points()) == []
@kern32_test()
def test_xrefs(kernel32_idb, version, bitness, expected):
assert lpluck('to', idb.analysis.get_crefs_from(kernel32_idb, 0x68901695)) == []
assert lpluck('to', idb.analysis.get_crefs_from(kernel32_idb, 0x6890169E)) == [0x68906156]
assert lpluck('frm', idb.analysis.get_crefs_to(kernel32_idb, 0x6890169E)) == []
assert lpluck('frm', idb.analysis.get_crefs_to(kernel32_idb, 0x68906156)) == [0x6890169E]
# .text:689016BA 004 81 EC 14 02 00 00 sub esp, 214h
# .text:689016C0 218 A1 70 B3 9D 68 mov eax, ___security_cookie
# .text:689016C5 218 33 C5 xor eax, ebp
security_cookie = 0x689DB370
assert lpluck('to', idb.analysis.get_drefs_from(kernel32_idb, 0x689016C0)) == [security_cookie]
assert lpluck('frm', idb.analysis.get_drefs_to(kernel32_idb, 0x689016C0)) == []
assert 0x689016C0 in pluck('frm', idb.analysis.get_drefs_to(kernel32_idb, security_cookie))
assert lpluck('to', idb.analysis.get_drefs_from(kernel32_idb, security_cookie)) == []
@kern32_test([
(695, 32, None),
(695, 64, None),
(700, 32, None),
(700, 64, None),
])
def test_fixups(kernel32_idb, version, bitness, expected):
fixups = idb.analysis.Fixups(kernel32_idb).fixups
assert len(fixups) == 31608
# .text:68901022 020 57 push edi
# .text:68901023 024 8B 3D 98 B1 9D 68 mov edi, dword_689DB198
# .text:68901029 024 85 FF test edi, edi
assert fixups[0x68901023 + 2].offset == 0x689DB198
assert fixups[0x68901023 + 2].get_fixup_length() == 0x4
@kern32_test()
def test_segments(kernel32_idb, version, bitness, expected):
segs = idb.analysis.Segments(kernel32_idb).segments
assert list(sorted(map(lambda s: s.startEA, segs.values()))) == [
0x68901000, 0x689db000, 0x689dd000]
assert list(sorted(map(lambda s: s.endEA, segs.values()))) == [
0x689db000, 0x689dd000, 0x689de230]
@kern32_test()
def test_segstrings(kernel32_idb, version, bitness, expected):
strs = idb.analysis.SegStrings(kernel32_idb).strings
# the first string is some binary data.
assert strs[1:] == ['.text', 'CODE', '.data', 'DATA', '.idata']
def test_segments2(elf_idb):
EXPECTED = {
'.init': {
'startEA': 0x80496ac,
'sclass': 0x2,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x5,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x1,
'type': 0x2,
'color': 0xffffffff,
},
'.plt': {
'startEA': 0x80496d0,
'sclass': 0x2,
'orgbase': 0x0,
'align': 0x3,
'comb': 0x2,
'perm': 0x5,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x2,
'type': 0x2,
'color': 0xffffffff,
},
'.plt.got': {
'startEA': 0x8049de0,
'sclass': 0x2,
'orgbase': 0x0,
'align': 0xa,
'comb': 0x2,
'perm': 0x5,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x3,
'type': 0x2,
'color': 0xffffffff,
},
'.text': {
'startEA': 0x8049df0,
'sclass': 0x2,
'orgbase': 0x0,
'align': 0x3,
'comb': 0x2,
'perm': 0x5,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x4,
'type': 0x2,
'color': 0xffffffff,
},
'.fini': {
'startEA': 0x805b634,
'sclass': 0x2,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x5,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x5,
'type': 0x2,
'color': 0xffffffff,
},
'.rodata': {
'startEA': 0x805b660,
'sclass': 0x8,
'orgbase': 0x0,
'align': 0x8,
'comb': 0x2,
'perm': 0x4,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x6,
'type': 0x3,
'color': 0xffffffff,
},
'.eh_frame_hdr': {
'startEA': 0x8060c14,
'sclass': 0x8,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x4,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x7,
'type': 0x3,
'color': 0xffffffff,
},
'.eh_frame': {
'startEA': 0x8061430,
'sclass': 0x8,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x4,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x8,
'type': 0x3,
'color': 0xffffffff,
},
'.init_array': {
'startEA': 0x8067f00,
'sclass': 0xc,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x9,
'type': 0x3,
'color': 0xffffffff,
},
'.fini_array': {
'startEA': 0x8067f04,
'sclass': 0xc,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0xa,
'type': 0x3,
'color': 0xffffffff,
},
'.jcr': {
'startEA': 0x8067f08,
'sclass': 0xc,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0xb,
'type': 0x3,
'color': 0xffffffff,
},
'.got': {
'startEA': 0x8067ffc,
'sclass': 0xc,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0xc,
'type': 0x3,
'color': 0xffffffff,
},
'.got.plt': {
'startEA': 0x8068000,
'sclass': 0xc,
'orgbase': 0x0,
'align': 0x5,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0xd,
'type': 0x3,
'color': 0xffffffff,
},
'.data': {
'startEA': 0x80681e0,
'sclass': 0xc,
'orgbase': 0x0,
'align': 0x8,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0xe,
'type': 0x3,
'color': 0xffffffff,
},
'.bss': {
'startEA': 0x8068380,
'sclass': 0x13,
'orgbase': 0x0,
'align': 0x9,
'comb': 0x2,
'perm': 0x6,
'bitness': 0x1,
'flags': 0x10,
'sel': 0xf,
'type': 0x9,
'color': 0xffffffff,
},
'extern': {
'startEA': 0x8068fb8,
'sclass': 0x0,
'orgbase': 0x0,
'align': 0x3,
'comb': 0x2,
'perm': 0x0,
'bitness': 0x1,
'flags': 0x10,
'sel': 0x10,
'type': 0x1,
'color': 0xffffffff,
},
}
segs = idb.analysis.Segments(elf_idb).segments
strs = idb.analysis.SegStrings(elf_idb).strings
for seg in segs.values():
segname = strs[seg.name_index]
expected_seg = EXPECTED[segname]
for k, v in expected_seg.items():
assert v == getattr(seg, k)
@kern32_test()
def test_imports(kernel32_idb, version, bitness, expected):
imports = list(idb.analysis.enumerate_imports(kernel32_idb))
assert len(imports) == 1116
assert ('api-ms-win-core-rtlsupport-l1-2-0',
'RtlCaptureContext',
0x689dd000) in imports
libs = set([])
for imp in imports:
libs.add(imp.library)
assert 'KERNELBASE' in libs
assert 'ntdll' in libs
@kern32_test()
def test_entrypoints2(kernel32_idb, version, bitness, expected):
entrypoints = list(idb.analysis.enumerate_entrypoints(kernel32_idb))
assert len(entrypoints) == 1572
assert entrypoints[0] == ('BaseThreadInitThunk', 0x6890172d, 1, None)
assert entrypoints[-100] == ('WaitForThreadpoolWorkCallbacks', 0x689dab51, 1473, 'NTDLL.TpWaitForWork')
assert entrypoints[-1] == ('DllEntryPoint', 0x68901696, None, None)
@kern32_test()
def test_idainfo(kernel32_idb, version, bitness, expected):
idainfo = idb.analysis.Root(kernel32_idb).idainfo
if version == 695:
assert idainfo.tag == 'IDA'
elif version == 700:
assert idainfo.tag == 'ida'
assert idainfo.version == version
assert idainfo.procname == 'metapc'
# this was a 6.95 file upgraded to 7.0b
cd = os.path.dirname(__file__)
idbpath = os.path.join(cd, 'data', 'multibitness', 'multibitness.idb')
with idb.from_file(idbpath) as db:
idainfo = idb.analysis.Root(db).idainfo
assert idainfo.tag == 'IDA' # like from 6.95
assert idainfo.version == 700 # like from 7.00
assert idainfo.procname == 'metapc' # actually stored as `| 0x06 m e t a p c |`
``` |
{
"source": "6un9-h0-Dan/speakeasy",
"score": 2
} |
#### File: defs/windows/com.py
```python
from speakeasy.struct import EmuStruct, Ptr
class ComInterface(object):
def __init__(self, iface, name, ptr_size):
self.iface = iface(ptr_size)
self.address = 0
self.name = name
class IUnknown(EmuStruct):
def __init__(self, ptr_size):
super().__init__(ptr_size)
self.QueryInterface = Ptr
self.AddRef = Ptr
self.Release = Ptr
class IMalloc(EmuStruct):
def __init__(self, ptr_size):
super().__init__(ptr_size)
self.IUnknown = IUnknown
self.Alloc = Ptr
self.Realloc = Ptr
self.Free = Ptr
self.GetSize = Ptr
self.DidAlloc = Ptr
self.HeapMinimize = Ptr
IFACE_TYPES = {'IUnknown': IUnknown,
'IMalloc': IMalloc}
``` |
{
"source": "6un9-h0-Dan/stix-shifter",
"score": 2
} |
#### File: async_dummy/stix_transmission/async_dummy_delete_connector.py
```python
from stix_shifter_utils.modules.base.stix_transmission.base_delete_connector import BaseDeleteConnector
class AsyncDummyDeleteConnector(BaseDeleteConnector):
def __init__(self, api_client):
self.api_client = api_client
def delete_query_connection(self, search_id):
try:
response = self.api_client.delete_search(search_id)
return response
except Exception as err:
print('error when deleting search {}:'.format(err))
raise
```
#### File: async_dummy/stix_transmission/async_dummy_ping_connector.py
```python
from stix_shifter_utils.modules.base.stix_transmission.base_ping_connector import BasePingConnector
class AsyncDummyPingConnector(BasePingConnector):
def __init__(self, api_client):
self.api_client = api_client
def ping_connection(self):
try:
response = self.api_client.ping_data_source()
return response
except Exception as err:
print('error when pinging datasource {}:'.format(err))
raise
```
#### File: async_dummy/stix_transmission/async_dummy_query_connector.py
```python
from stix_shifter_utils.modules.base.stix_transmission.base_query_connector import BaseQueryConnector
class AsyncDummyQueryConnector(BaseQueryConnector):
def __init__(self, api_client):
self.api_client = api_client
def create_query_connection(self, query):
try:
response = self.api_client.create_search(query)
return response
except Exception as err:
print('error when creating search: {}'.format(err))
raise
```
#### File: stix_shifter_modules/elastic/entry_point.py
```python
from stix_shifter_utils.utils.entry_point_base import EntryPointBase
from stix_shifter_utils.modules.cim.stix_translation.cim_data_mapper import CimDataMapper
from stix_shifter_utils.modules.car.stix_translation.car_data_mapper import CarDataMapper
from .stix_translation.stix_to_elastic import StixToElastic
class EntryPoint(EntryPointBase):
def __init__(self, connection={}, configuration={}, options={}):
super().__init__(options)
self.add_dialect('default', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default=True)
self.add_dialect('cim', query_translator=StixToElastic(), data_mapper=CimDataMapper(options), default_include=False)
self.add_dialect('car', query_translator=StixToElastic(), data_mapper=CarDataMapper(options), default_include=False)
```
#### File: synchronous_dummy/stix_transmission/api_client.py
```python
from stix_shifter_utils.stix_transmission.utils.RestApiClient import RestApiClient
class APIClient():
def __init__(self, connection, configuration):
# Uncomment when implementing data source API client.
# auth = configuration.get('auth')
# headers = dict()
# headers['X-Auth-Token'] = auth.get('token')
# self.client = RestApiClient(connection.get('host'),
# connection.get('port'),
# connection.get('cert', None),
# headers,
# cert_verify=connection.get('cert_verify', 'True'))
#
# Placeholder client to allow dummy transmission calls.
# Remove when implementing data source API client.
self.client = "data source API client"
def ping_box(self):
# Pings the data source
return {"code": 200, "results": "Was able to hit the data source"}
def run_search(self, query_expression, offset=None, length=None):
# headers = dict()
# return self.client.call_api(endpoint, 'GET', headers, urldata=data)
# Return the search results. Results must be in JSON format before being translated into STIX
return {"code": 200, "search_id": query_expression, "results": "Results from search"}
``` |
{
"source": "6un9-h0-Dan/sysdig",
"score": 2
} |
#### File: sysdig/probe-builder/oracle-kernel-crawler.py
```python
import sys
try:
from urllib2 import urlopen, unquote
except ImportError:
from urllib.request import urlopen
from urllib.parse import unquote
from lxml import html
#
# Copied from kernel-crawler.py and hacked up for oracle linux
# because they don't use a normal directory structure.
#
repos = {
# Oracle only puts full isos with unhelpful names on mirrors.kernel.org, so skip it
"OL6-UEK": [
{
# yum.oracle.com has a bad cert, so use http instead of https
"root": "http://yum.oracle.com/",
"discovery_pattern": "/html/body//h3/a[regex:test(@href, 'oracle-linux-6\.html')]/@href",
"sub_discovery_pattern": "/html/body//h3[regex:test(., '^UEK Release [3-9]:')]/a[regex:test(@href, 'x86_64/index.html')]/@href",
"page_pattern": "/html/body//a[regex:test(@href, '^getPackage/kernel-uek-(devel-)?[0-9].*\.rpm$')]/@href",
}
],
"OL7-UEK": [
{
"root": "http://yum.oracle.com/",
"discovery_pattern": "/html/body//h3/a[regex:test(@href, 'oracle-linux-7\.html')]/@href",
"sub_discovery_pattern": "/html/body//h3[regex:test(., '^UEK Release [3-9]:')]/a[regex:test(@href, 'x86_64/index.html')]/@href",
"page_pattern": "/html/body//a[regex:test(@href, '^getPackage/kernel-uek-(devel-)?[0-9].*\.rpm$')]/@href",
}
],
"Oracle-RHCK": [
{
"root": "http://yum.oracle.com/",
"discovery_pattern": "/html/body//h3/a[regex:test(@href, 'oracle-linux-[6-7]+\.html')]/@href",
"sub_discovery_pattern": "/html/body//h3[regex:test(., '^Latest:')]/a[regex:test(@href, 'x86_64/index.html')]/@href",
"page_pattern": "/html/body//a[regex:test(@href, '^getPackage/kernel-(devel-)?[0-9].*\.rpm$')]/@href",
}
]
}
def progress(distro, current, total, package):
sys.stderr.write('\r{} {}/{} {} '.format(distro, current, total, package))
#
# By design you should not need to modify the code below: to support a new
# distribution, just add an entry to the `repos` dictionary above and the new
# links will be discovered automatically, without writing any additional code.
#
urls = set()
URL_TIMEOUT=30
if len(sys.argv) < 2 or not sys.argv[1] in repos:
sys.stderr.write("Usage: " + sys.argv[0] + " <distro>\n")
sys.exit(1)
#
# Navigate the `repos` tree and look for packages we need that match the
# patterns given. Save the result in `packages`.
#
for repo in repos[sys.argv[1]]:
try:
root = urlopen(repo["root"],timeout=URL_TIMEOUT).read()
except:
continue
versions = html.fromstring(root).xpath(repo["discovery_pattern"], namespaces = {"regex": "http://exslt.org/regular-expressions"})
vid = 0
for version in versions:
vid += 1
ver_url = repo["root"] + version
progress(repo["root"], vid, len(versions), version)
try:
subroot = urlopen(ver_url,timeout=URL_TIMEOUT).read()
except:
continue
sub_vers = html.fromstring(subroot).xpath(repo["sub_discovery_pattern"], namespaces = {"regex": "http://exslt.org/regular-expressions"})
for sub_ver in sub_vers:
sub_ver = sub_ver.lstrip('/')
# The try - except block is used because 404 errors and similar
# might happen (and actually happen because not all repos have
# packages we need)
try:
source = repo["root"] + sub_ver
page = urlopen(source,timeout=URL_TIMEOUT).read()
rpms = html.fromstring(page).xpath(repo["page_pattern"], namespaces = {"regex": "http://exslt.org/regular-expressions"})
source = source.replace("index.html", "")
for rpm in rpms:
urls.add(source + str(unquote(rpm)))
except:
continue
#
# Print URLs to stdout
#
for url in urls:
print(url)
``` |
{
"source": "6un9-h0-Dan/vivisect",
"score": 2
} |
#### File: archs/arm/__init__.py
```python
import envi
from envi.archs.arm.regs import *
from envi.archs.arm.disasm import *
class ArmModule(envi.ArchitectureModule):
def __init__(self, name='armv6'):
import envi.archs.thumb16.disasm as eatd
envi.ArchitectureModule.__init__(self, name, maxinst=4)
self._arch_reg = self.archGetRegCtx()
self._arch_dis = ArmDisasm()
self._arch_thumb_dis = eatd.Thumb2Disasm()
def archGetRegCtx(self):
return ArmRegisterContext()
def archGetBreakInstr(self):
raise Exception ("weird... what are you trying to do here? ARM has a complex breakpoint instruction")
return
def archGetNopInstr(self):
return '\x00'
def getPointerSize(self):
return 4
def pointerString(self, va):
return "0x%.8x" % va
def archParseOpcode(self, bytes, offset=0, va=0):
"""
Parse a sequence of bytes out into an envi.Opcode instance.
"""
if va & 3:
return self._arch_thumb_dis.disasm(bytes, offset, va)
return self._arch_dis.disasm(bytes, offset, va)
def getEmulator(self):
return ArmEmulator()
from envi.archs.arm.emu import *
``` |
{
"source": "6un9-h0-Dan/vulnerability-rating-taxonomy",
"score": 2
} |
#### File: lib/tests/test_artifact_format.py
```python
from utils import utils
import os
import unittest
class TestArtifactFormat(unittest.TestCase):
def setUp(self):
self.scw_artifact_path = os.path.join(
utils.THIRD_PARTY_MAPPING_DIR,
utils.SCW_DIR,
utils.SCW_FILENAME
)
def test_artifact_loads_valid_json(self):
self.assertTrue(
utils.get_json(self.scw_artifact_path),
self.scw_artifact_path + ' is not valid JSON.'
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "6un9-h0-Dan/yaraprocessor",
"score": 2
} |
#### File: 6un9-h0-Dan/yaraprocessor/yaraprocessor.py
```python
import os
import sys
import errno
import logging
import argparse
import binascii
# Library Imports
import yara
class ProcessorException(Exception):
pass
class Processor(object):
"""
A wrapper around Yara.
"""
def __init__(self, rule_files, processing_mode='raw',
compiled=False, **kwargs):
"""
Default initializer.
Keyword arguments:
rule_files -- (List) Filepaths to yara rule files.
(Ex. ['/path/to/file1', '/path/to/file2'])
processing_mode -- (String) Mode used in processing data. Allowed
options include: fixed_buffer, sliding_window,
and raw. Default is raw mode.
compiled -- (Boolean) If True, treat the provided rule file as compiled.
Optional arguments:
"fixed_buffer" processing mode:
Data will be processed by yara in fixed sized buffers.
buffer_size -- (Integer) Amount of data to buffer
before processing in bytes. Default is
1024 bytes.
"sliding_window" processing mode:
Data will be processed by yara in fixed sized buffers, but it
is possible for buffers to "overlap" by controlling the buffer
increment.
buffer_size -- (Integer) Amount of data to process in bytes.
Default is 1024 bytes.
window_step -- (Integer) Amount to increment window per chunk.
Default is 1 byte.
"""
# Get handle to logger
self.logger = logging.getLogger('yaraprocessor')
# Validate all file names to ensure they exist and can be read
for f in rule_files:
if os.path.isfile(f):
try:
with open(f):
pass
except IOError:
raise IOError((errno.EACCES, 'Cannot open/read file.', f))
else:
raise IOError((errno.ENOENT, 'Cannot find file.', f))
if not compiled:
self._rule_files = self._prepare_rules(rule_files)
# Try to load the rules into yara
try:
self._rules = yara.compile(filepaths=self._rule_files)
except yara.SyntaxError as e:
err = ('Rule syntax error. If using compiled rules, you must '
'pass the "compiled" argument. Original error: %s' % e)
raise ProcessorException(err)
except yara.Error:
raise
else: # rules are compiled
try:
# yara.load only accepts a single file
assert(len(rule_files) == 1)
except AssertionError:
err = ('Compiled rules must be compiled to one file. Loading '
'from compiled rules does not support multiple rule files.')
raise ProcessorException(err)
self._rule_files = rule_files[0]
try:
self._rules = yara.load(self._rule_files)
except yara.Error as e:
err = ('Generic error loading compiled rules. '
'Original error: %s' % e)
raise ProcessorException(err)
# Validate that the processing mode is supported
self._allowed_modes = ['raw', 'fixed_buffer', 'sliding_window']
if not processing_mode.lower() in self._allowed_modes:
raise ProcessorException("%s is not a supported processing mode." \
% processing_mode)
self._processing_mode = processing_mode
# Optional arguments with defaults
self._buffer_size = kwargs.get('buffer_size', 1024)
self._window_step = kwargs.get('window_step', 1)
# Set window_step to buffer size when processing in fixed buffer mode
# This makes the analysis code simpler
if self._processing_mode == 'fixed_buffer':
self._window_step = self._buffer_size
# Attribute used to hold data and results to be processed
self._raw_results = []
self._formatted_results = []
self.data = ''
# Private variables for buffering and window processing
self._current = ''
self._next = None
self._window_index = 0
self._offset = 0
def __str__(self):
"""
Pretty way to print a processor.
"""
s = 'Processor ' + __name__
if self._rule_files:
s += ' running with rules ' + ' '.join(self._rule_files.values() if isinstance(self._rule_files, dict) else [self._rule_files])
return s
def _prepare_rules(self, rules):
"""
Convert a list of rule files to a dict of rule files.
Keyword arguments:
rules -- list of rule files as fully qualified paths
Yara expects a dictionary of {Namespaces:filepaths}. Returns a
dictionary of rule files.
"""
results = {}
for i, fn in enumerate(rules):
results['RuleFile%s' % i] = fn
return results
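# For example (paths are hypothetical):
#   self._prepare_rules(['/rules/a.yar', '/rules/b.yar'])
#   -> {'RuleFile0': '/rules/a.yar', 'RuleFile1': '/rules/b.yar'}
# yara.compile(filepaths=...) expects exactly this namespace-to-path mapping.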
def _window(self, sequence, size=2, step=1):
"""
Returns a sliding window (of width n) over data from the iterable.
The window increments by 'step'.
s -> (s0,s1,...s[n-1]), (s0+step,s1+step,...,sn), ...
"""
i = 0
while True:
result = sequence[i: i + size]
if not result:
break
else:
i = i + step
yield result
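# Illustrative trace: list(self._window('123456789', size=5, step=2)) yields
# ['12345', '34567', '56789', '789', '9'] -- note that once fewer than `size`
# characters remain, shorter trailing windows are still emitted.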
def analyze(self, data=None):
"""
Analyze data with yara.
Calls yara's "match" function on self.data and
returns the results returned by match.
"""
if not data:
data = self.data
for r in self._rules.match(data=data):
result = {'result': r.rule,
'strings': [],
'subtype': 'scan_result'}
for s in r.strings:
result['strings'].append({'offset': self._offset + s[0],
'rule_id': s[1],
'string': binascii.hexlify(s[2])})
self._raw_results.append(r)
self._formatted_results.append(result)
if self._processing_mode == 'raw':
self._offset += len(data)
return self.results
@property
def results(self):
"""
Get the analysis results.
"""
return self._formatted_results
def clear_results(self):
"""
Clear the current set of results.
"""
self._raw_results = []
self._formatted_results = []
@property
def data(self):
"""
Get the data to be analyzed by yara.
"""
return self._current
@data.setter
def data(self, value):
"""
Set the data to be analyzed by yara.
This behaves differently based on the processing mode
being used.
If set to "raw", data is a simple buffer.
If set to "fixed_buffer", data will be buffered until that size
is reached. When reached, the data will automatically be analyzed,
and the buffer will be cleared. If data is larger than the fixed_buffer
any extra will be buffered into the next chunk.
If set to "sliding_window", data will be buffered similar to
"fixed_buffer" mode. However, the analysis window will increment
based on the buffer size. For example, with a buffer size set to 5,
a data stream of '123456789' would be analyzed in the following chunks:
12345
23456
34567
45678
56789
The option "window_step" controls the increment between windows. For
example, a window step of 2 changes the above example to:
12345
34567
56789
"""
self._current = value
if self._processing_mode != 'raw':
if self._current and \
len(self._current[self._window_index:]) >= self._buffer_size:
for chunk in self._window(self._current[self._window_index:],
size=self._buffer_size,
step=self._window_step):
# Analyze each chunk and concatenate the results
self.analyze(''.join(chunk))
if self._processing_mode == 'fixed_buffer':
self._offset += len(chunk)
elif self._processing_mode == 'sliding_window':
self._offset += self._window_step
# Update the index
self._window_index = len(self._current)
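# Hedged usage sketch (the rule path and data chunk below are hypothetical):
#   p = Processor(['/rules/example.yar'], processing_mode='sliding_window',
#                 buffer_size=5, window_step=2)
#   p.data += chunk            # buffered; analyze() runs once a full window is available
#   print(p.results)           # accumulated formatted match dicts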
if __name__ == '__main__':
"""
Helper code used to test yaraprocessor.
"""
# Setup logging
logger = logging.getLogger('yaraprocessor')
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler(stream=sys.stdout)
consoleHandler.setFormatter(logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s%(message)s'))
logger.addHandler(consoleHandler)
# Parse command line arguments
parser = argparse.ArgumentParser(description="Analyze data with Yara")
parser.add_argument(
'--mode',
choices=['raw', 'fixed_buffer', 'sliding_window'],
default='raw',
help='Set the operating mode for yara. Default is "raw".')
parser.add_argument(
'--input',
nargs='?',
type=argparse.FileType('r'),
required=True,
help='File to read data from for analysis.')
parser.add_argument(
'--rules',
nargs='*',
required=True,
help='Rule files for use in Yara.')
parser.add_argument(
'--compiled',
action='store_true',
help='Treat provided rule file as compiled. Note, all rules must \
be compiled to a single file.'
)
parser.add_argument(
'--size',
type=int,
default=5,
help='If using fixed_buffer or sliding_window mode, \
set the size of the buffer/window. Default is 5.')
parser.add_argument(
'--step',
type=int,
default=1,
help='Window step. Default is 1.')
args = parser.parse_args()
data = args.input.read()
logger.debug('Building Processor with rules:')
for i, each in enumerate(args.rules):
logger.debug(' %i) %s' % (i, each))
if args.compiled:
logger.debug('Treating rule file as compiled.')
logger.debug('Operating in %s mode.' % args.mode)
if args.mode != 'raw':
logger.debug('Buffer/Window size is %s' % args.size)
logger.debug('Window step is %s' % args.step)
p = Processor(args.rules, processing_mode=args.mode,
compiled=args.compiled, buffer_size=args.size,
window_step=args.step)
p.data += data
else:
p = Processor(args.rules, compiled=args.compiled)
p.data += data
results = p.analyze()
if p.results:
for match in p.results:
logger.debug('Match found; %s', match)
``` |
{
"source": "6vasia/ii-base",
"score": 2
} |
#### File: 6vasia/ii-base/run.py
```python
import api,points
import api.x
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
CONFIG='server.cfg'
api.CONFIG = CONFIG
@route('/list.txt')
def list_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return 'deprecated'
@route('/blacklist.txt')
def blacklist_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return 'deprecated'
@route('/u/m/<h:path>')
def jt_outmsg(h):
response.set_header ('content-type','text/plain; charset=utf-8')
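# keep only path segments that look like 20-character message ids, then bundle each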
lst = [x for x in h.split('/') if len(x) == 20]
return '\n'.join( [api.mk_jt(x,api.raw_msg(x)) for x in lst] )
@route('/u/e/<names:path>')
def index_list(names):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.echoareas(names.split('/'))
def _point_msg(pauth,tmsg):
msgfrom, addr = points.check_hash(pauth)
if not addr: return 'auth error!'
cfg = config('_server.cfg')
mo = api.toss(msgfrom,'%s,%s' % (cfg['server']['address'],addr),tmsg.strip())
if mo.msg.startswith('@repto:'):
tmpmsg = mo.msg.splitlines()
mo.repto = tmpmsg[0][7:]
mo.msg = '\n'.join(tmpmsg[1:])
# better yet, this should be moved into api.toss
if len(mo.msg.encode('utf-8')) < 64100:
h = api.point_newmsg(mo)
if h:
return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea)
else:
return 'error:unknown'
else:
return 'msg big!'
@route('/u/point/<pauth>/<tmsg:path>')
def point_msg_get(pauth,tmsg):
return _point_msg(pauth,tmsg)
@post('/u/point')
def point_msg_post():
return _point_msg(request.POST['pauth'],request.POST['tmsg'])
@route('/m/<msg>')
def get_msg(msg):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.raw_msg(msg)
@route('/e/<echoarea>')
def get_echolist(echoarea):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.get_echoarea(echoarea,True)
# === extended API ===
@route('/x/t/<echoareas:path>')
def get_mtime(echoareas):
response.set_header ('content-type','text/plain; charset=utf-8')
arealist = echoareas.split('+')
arealist_legacy = echoareas.split('/')
if len(arealist) > len(arealist_legacy):
return api.x.get_mtime(arealist)
else:
return api.x.get_mtime(arealist_legacy)
import iitpl
iitpl.II_PATH=II_PATH
iitpl.CONFIG = CONFIG
run(host='127.0.0.1',port=62220,debug=False)
```
#### File: tools/gate/webfetch.py
```python
import urllib2, base64
cfg = open('config.cfg').read().splitlines()
def getf(l):
print 'fetch %s' % l
from StringIO import StringIO
import gzip
request = urllib2.Request(l)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
f = gzip.GzipFile(fileobj=StringIO( response.read()))
else:
f = response
return f.read()
def get_echoarea(name):
try: return open('echo/%s' % name).read().splitlines()
except: return []
def sep(l,step=20):
for x in range(0,len(l),step):
yield l[x:x+step]
def unp(s):
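# convert the URL-safe base64 alphabet (-, _) back to the standard one before decoding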
return base64.b64decode(s.replace('-','+').replace('_','/'))
def debundle(ea,s):
for n in s.splitlines():
mid,kod = n.split(':',1)
open('msg/%s' % mid,'w').write(unp(kod))
open('echo/%s' % ea, 'a').write(mid + '\n')
def walk_el(out):
ea = ''; el = {}
for n in out.splitlines():
if '.' in n:
ea = n
el[ea] = []
elif ea:
el[ea].append(n)
return el
def parse():
out = getf('%se/%s' % (cfg[1], '/'.join(cfg[2:])))
el = walk_el(out)
for ea in cfg[2:]:
myel = set(get_echoarea(ea))
dllist = [x for x in el[ea] if x not in myel]
for dl in sep(dllist,40):
s = getf('%sm/%s' % (cfg[1], '/'.join(dl)))
debundle(ea,s)
parse()
``` |
{
"source": "6wilink/ARN-iOMC3",
"score": 2
} |
#### File: ARN-iOMC3/iomc3/QZRegressionTest.py
```python
import time
import random
from optparse import OptionParser
import json
import requests
'''
Simulate ARN Device Agent
1. post "sync"
2. post "report" but abb is idle
3. post "report" with abb connected
'''
class ARNDeviceSimulator(object):
wmac = '86:02:11:89:04:29'
server = 'localhost'
def SimReportSync(self):
data = self.ARNDeviceCommon('sync')
self.SimHttpReport(data)
def SimReportIdle(self):
data = self.ARNDeviceCommon('report')
data['abb_safe'] = self.ARNDeviceABB(0)
data['nw_thrpt'] = self.ARNDeviceNwThrpt()
data['radio_safe'] = self.ARNDeviceRadio()
self.SimHttpReport(data)
def SimReportPeers(self):
data = self.ARNDeviceCommon('report')
data['abb_safe'] = self.ARNDeviceABB(2)
data['nw_thrpt'] = self.ARNDeviceNwThrpt()
data['radio_safe'] = self.ARNDeviceRadio()
self.SimHttpReport(data)
def SimHttpReport(self, data):
payload = {}
payload['data'] = json.dumps(data, ensure_ascii=True)
#print(payload)
headers = { 'user-agent': 'OMC3Agent' }
url = ('http://%s/iomc3/ws.php?do=report'
% (self.server or 'localhost'))
# POST with header, data
response = requests.post(url, headers = headers, data = payload)
cmds = response.text
print(cmds)
def ARNDeviceCommon(self, ops):
data = {}
data['ops'] = ops or 'sync'
data['wmac'] = self.wmac
data['ts'] = int(time.time())
return data
def ARNDeviceABB(self, peer_qty):
data = {}
data['noise'] = -105
data['ssid'] = 'QZRTest'
data['bssid'] = self.wmac
data['chanbw'] = 8
data['wmac'] = self.wmac
data['mode'] = 'Ad-Hoc'
data['signal'] = -88
if (peer_qty > 0):
data['peers'] = []
for i in range(0, peer_qty):
peer = self.ARNDeviceABBPeer(i)
data['peers'].append(peer)
else:
data['peer_qty'] = 0
data['peers'] = None
return data
def ARNDeviceABBPeer(self, idx):
data = {}
data['bssid'] = self.wmac
data['noise'] = -101
data['inactive'] = 2999
data['wmac'] = self.wmac
data['signal'] = -66
data['rx_short_gi'] = 1
data['rx_mcs'] = 99
data['rx_br'] = 54
data['tx_br'] = 150
data['tx_mcs'] = 99
data['tx_short_gi'] = 0
return data
def ARNDeviceRadio(self):
data = {}
data['timeout'] = 60
data['region'] = 1
data['elapsed'] = random.randint(0, 60)
data['freq'] = 666
data['chanbw'] = random.randint(5, 24)
data['channo'] = 45
data['txpwr'] = random.randint(9, 33)
data['hw_ver'] = 'QZRTest'
data['rxgain'] = random.randint(0, 20) - 10
return data
def ARNDeviceNwThrpt(self):
data = {}
data['tx'] = random.randint(0, 260) / 10
data['rx'] = random.randint(0, 260) / 10
return data
'''
Simulate WebApp:
1. signin
2. device(s) list, +search by id/keyword/status
3. device detail
4. config
5. save config
6. kpi
7. msg(s)
8. maps view
'''
class ARNWebApp(object):
token = None
server = None
def AjaxLogin(self, user, password):
payloads = { 'user': user, 'passwd': password }
url = ('http://%s/iomc3/ws.php?do=signin'
% (self.server or 'localhost'))
response = requests.post(url, data = payloads)
result = response.text
return result
def CBFindToken(self, result):
try:
r = json.loads(result)
return r['data']['auth']['token']
except:
pass
return None
def GetDevices(self):
try:
if (self.token):
url = ('http://%s/iomc3/ws.php?do=devices&token=%s'
% (self.server or 'localhost', self.token))
response = requests.get(url)
result = response.text
return result
except:
pass
return None
def CBFindDevices(self, result):
try:
r = json.loads(result)
return r['data']['devices']
except:
pass
return None
def GetDeviceDetail(self, deviceid):
try:
if (self.token):
url = ('http://%s/iomc3/ws.php?do=detail&did=%s&token=%s'
% (self.server or 'localhost', deviceid, self.token))
response = requests.get(url)
result = response.text
return result
except:
pass
return None
def CBPrintDeviceDetail(self, result):
try:
r = json.loads(result)
ip = r['data']['device']['ipaddr']
wmac = r['data']['device']['wmac']
print('-> device %s + %s' %(ip, wmac))
except:
pass
return
'''
1. ARNDeviceSimulator;
- report (sync)
- report (abb idle)
- report (abb connected)
2. ARNWebAppSimulator.
- signin
- fetch devices
- fetch device detail
'''
def main():
usage= 'Usage: %prog -s <server> [-u <user>] [-p <password>]'
parser = OptionParser(usage = usage)
parser.add_option('-s', '--server', type = 'string', dest = 'server', help = 'which server, default "localhost"')
parser.add_option('-u', '--user', type = 'string', dest = 'user', help = 'as who, default "admin"')
parser.add_option('-p', '--password', type = 'string', dest = 'password', help = 'key, default "<PASSWORD>"')
(options, args) = parser.parse_args()
print('- [init] checking settings ...')
if (not options.server):
options.server = 'localhost'
if (not options.user):
options.user = 'admin'
if (not options.password):
options.password = '<PASSWORD>'
print('- [Device] simulating arn device ...')
device = ARNDeviceSimulator()
device.server = options.server
print('- [Device] [SYNC] posted ...')
device.SimReportSync()
print('- [Device] [ABB idle] posted ...')
device.SimReportIdle()
print('- [Device] [ABB good] posted ...')
device.SimReportPeers()
print('- [WebApp] trying [SIGNIN] %s ...' % (options.server))
App = ARNWebApp()
App.server = options.server
auth = App.AjaxLogin(options.user, options.password)
print(auth or '-')
App.token = App.CBFindToken(auth)
if (App.token):
print('- [WebApp] got token [%s]' % (App.token))
else:
print('- [WebApp] *** INVALID token [%s] ***' % (App.token or ''))
print('- [WebApp] getting [DEVICES] [%s:%s] ...' % (options.server, App.token or ''))
devices = App.GetDevices()
print(devices or '*** INVALID response ***')
devicelist = App.CBFindDevices(devices)
print('- [WebApp] %dx [DEVICES] found' % (len(devicelist)))
for device in devicelist:
id = device['id']
detail = App.GetDeviceDetail(id)
App.CBPrintDeviceDetail(detail)
if __name__ == '__main__':
main()
``` |
{
"source": "6WIND/networking-6wind",
"score": 2
} |
#### File: networking_6wind/common/utils.py
```python
import os
from fp_vdev_remote import vdev_utils
from neutron.agent.common import utils as neutron_utils
from networking_6wind.common import constants
from oslo_config import cfg
cfg.CONF.import_group('ml2_fp', 'networking_6wind.common.config')
FP_VDEV_CMD = None
def get_socket_settings():
global FP_VDEV_CMD
if FP_VDEV_CMD is None:
FP_VDEV_CMD = vdev_utils.get_vdev_cmd()
path = neutron_utils.execute(cmd=[FP_VDEV_CMD, 'get', 'sockfolder'],
run_as_root=True)
mode = neutron_utils.execute(cmd=[FP_VDEV_CMD, 'get', 'sockmode'],
run_as_root=True)
return (path.strip(), mode.strip())
def get_socket_path(socket_dir, port_id):
if cfg.CONF.ml2_fp.vhostuser_socket_use_devname:
vhostuser_socket_name = (constants.VHOSTUSER_SOCKET_DEVNAME_PREFIX
+ port_id)[:14]
else:
vhostuser_socket_name = constants.VHOSTUSER_SOCKET_PREFIX + port_id
return os.path.join(socket_dir, vhostuser_socket_name)
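# Sketch of the result: socket_dir joined with a prefixed port id; in the
# devname branch the prefixed name is additionally truncated to 14 characters.
# The concrete prefix strings live in `constants` and are not reproduced here.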
```
#### File: unit/ml2_drivers/_test_mech_agent.py
```python
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
from neutron_lib.api.definitions import portbindings
NETWORK_ID = "fake_network"
PORT_ID = "fake_port"
class FPPortContext(base.FakePortContext):
def __init__(self, agent_type, agents, segments,
vnic_type=portbindings.VNIC_NORMAL,
original=None, profile=None):
super(FPPortContext, self).__init__(agent_type, agents, segments,
vnic_type, original, profile)
def host_agents(self, agent_type):
return [agent for agent in self._agents if agent_type in
agent.get('agent_type') and agent.get('alive')]
class FPMechanismGenericTestCase(base.AgentMechanismGenericTestCase):
def test_unknown_type(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS,
self.UNKNOWN_TYPE_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class FPMechanismLocalTestCase(base.AgentMechanismLocalTestCase):
def test_type_local(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS,
self.LOCAL_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_bound(context, self.LOCAL_SEGMENTS[1])
def test_type_local_dead(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS_DEAD,
self.LOCAL_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class FPMechanismFlatTestCase(base.AgentMechanismFlatTestCase):
def test_type_flat(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS,
self.FLAT_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_bound(context, self.FLAT_SEGMENTS[1])
def test_type_flat_bad(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.FLAT_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class FPMechanismVlanTestCase(base.AgentMechanismVlanTestCase):
def test_type_vlan(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_bound(context, self.VLAN_SEGMENTS[1])
def test_type_vlan_bad(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.VLAN_SEGMENTS,
vnic_type=self.VNIC_TYPE)
self.driver.bind_port(context)
self._check_unbound(context)
class FPMechanismGreTestCase(base.AgentMechanismGreTestCase):
def test_type_gre(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.GRE_SEGMENTS[1])
def test_type_gre_bad(self):
context = FPPortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
``` |
{
"source": "6WIND/pyFDT",
"score": 2
} |
#### File: pyFDT/fdt/tool.py
```python
import os
import sys
import fdt
import click
# Application error code
ERROR_CODE = 1
# The version of u-boot tools
VERSION = fdt.__version__
# Short description of DTC tool
DESCRIP = (
"Device Tree Converter tool for converting FDT blob (*.dtb) to readable text file (*.dts) and reverse"
)
# Base options
@click.group(context_settings=dict(help_option_names=['-?', '--help']), help=DESCRIP)
@click.version_option(VERSION, '-v', '--version')
def cli():
click.echo()
@cli.command(short_help="Convert *.dtb to *.dts")
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.option('-t', '--tabsize', type=click.INT, default=4, show_default=True, help="Tabulator Size")
@click.option('-o', '--outfile', type=click.Path(), default=None, help="Output path/file name (*.dts)")
def todts(outfile, infile, tabsize):
""" Convert device tree binary blob (*.dtb) into readable text file (*.dts) """
fdt_obj = None
if outfile is None:
outfile = os.path.splitext(os.path.basename(infile))[0] + ".dts"
try:
with open(infile, 'rb') as f:
try:
fdt_obj = fdt.parse_dtb(f.read())
except:
raise Exception('Not supported file format: {}'.format(infile))
with open(outfile, 'w') as f:
f.write(fdt_obj.to_dts(tabsize))
except Exception as e:
click.echo(" Error: {}".format(str(e) if str(e) else "Unknown!"))
sys.exit(ERROR_CODE)
click.secho(" DTS saved as: %s" % outfile)
@cli.command(short_help="Convert *.dts to *.dtb")
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.option('-v', '--version', type=click.INT, default=None, help="DTB Version")
@click.option('-l', '--lcversion', type=click.INT, default=None, help="DTB Last Compatible Version")
@click.option('-c', '--cpuid', type=click.INT, default=None, help="Boot CPU ID")
@click.option('-a', '--align', type=click.INT, default=None, help="Make the blob align to the <bytes>")
@click.option('-p', '--padding', type=click.INT, default=None, help="Add padding to the blob of <bytes> long")
@click.option('-s', '--size', type=click.INT, default=None, help="Make the blob at least <bytes> long")
@click.option('-o', '--outfile', type=click.Path(), default=None, help="Output path/file name (*.dtb)")
def todtb(outfile, infile, version, lcversion, cpuid, align, padding, size):
""" Convert device tree as readable text file (*.dts) into binary blob (*.dtb) """
fdt_obj = None
if outfile is None:
outfile = os.path.splitext(os.path.basename(infile))[0] + ".dtb"
try:
if version is not None and version > fdt.Header.MAX_VERSION:
raise Exception("DTB Version must be lover or equal {} !".format(fdt.Header.MAX_VERSION))
with open(infile, 'r') as f:
try:
fdt_obj = fdt.parse_dts(f.read(), os.path.dirname(infile))
except:
raise Exception('Not supported file format: {}'.format(infile))
raw_data = fdt_obj.to_dtb(version, lcversion, cpuid)
if align is not None:
if size is not None:
raise Exception("The \"-a/--align\" option can't be used together with \"-s/--size\"")
if align % 2:
raise Exception("The \"-a/--align\" option must be divisible by two!")
if len(raw_data) % align:
raw_data += bytes([0] * (align - (len(raw_data) % align)))
if padding is not None:
if align is not None:
raise Exception("The \"-p/--padding\" option can't be used together with \"-a/--align\"")
raw_data += bytes([0] * padding)
if size is not None:
if size < len(raw_data):
raise Exception("The \"-s/--size\" option must be > {}".format(len(raw_data)))
raw_data += bytes([0] * (size - len(raw_data)))
with open(outfile, 'wb') as f:
f.write(raw_data)
except Exception as e:
click.echo(" Error: {}".format(str(e) if str(e) else "Unknown!"))
sys.exit(ERROR_CODE)
click.secho(" DTB saved as: %s" % outfile)
@cli.command(short_help="Merge two and more *.dtb or *.dts files")
@click.argument('outfile', nargs=1, type=click.Path())
@click.argument('infiles', nargs=-1, type=click.Path(exists=True))
@click.option('-t', '--tabsize', type=click.INT, default=4, show_default=True, help="Tabulator Size")
@click.option('-i', '--intype', type=click.Choice(['auto', 'dts', 'dtb']),
default='auto', show_default=True, help="Input file type")
def merge(outfile, infiles, tabsize, intype):
""" Merge two and more *.dtb or *.dts files into one *.dts file """
def open_fdt(file_path, file_type):
if file_type == 'auto':
if file_path.endswith(".dtb"):
file_type = 'dtb'
elif file_path.endswith(".dts"):
file_type = 'dts'
else:
raise Exception('Not supported file extension: {}'.format(file_path))
try:
if file_type == 'dtb':
with open(file_path, 'rb') as f:
obj = fdt.parse_dtb(f.read())
else:
with open(file_path, 'r') as f:
obj = fdt.parse_dts(f.read(), os.path.dirname(file_path))
except Exception as e:
raise Exception('Not supported file format: {} {}'.format(file_path, str(e)))
return obj
fdt_obj = None
if not infiles:
click.echo("Usage: pydtc todtb [OPTIONS] [INFILES]...")
click.echo("\nError: Missing argument \"infiles\"")
sys.exit(ERROR_CODE)
if len(infiles) < 2:
click.echo("Usage: pydtc todtb [OPTIONS] [INFILES]...")
click.echo("\nError: Minimum is two \"infiles\"")
sys.exit(ERROR_CODE)
try:
for file in infiles:
if fdt_obj is None:
fdt_obj = open_fdt(file, intype)
else:
fdt_obj.merge(open_fdt(file, intype))
with open(outfile, 'w') as f:
f.write(fdt_obj.to_dts(tabsize))
except Exception as e:
click.echo(" Error: {}".format(str(e) if str(e) else "Unknown!"))
sys.exit(ERROR_CODE)
click.secho(" Merge output saved as: %s" % outfile)
@cli.command(short_help="Compare two *.dtb or *.dts files")
@click.argument('file1', nargs=1, type=click.Path(exists=True))
@click.argument('file2', nargs=1, type=click.Path(exists=True))
@click.option('-t', '--intype', type=click.Choice(['auto', 'dts', 'dtb']),
default='auto', show_default=True, help="Input file type")
@click.option('-o', '--outdir', type=click.Path(), default=None, help="Output directory/path [default: diff_out]")
def diff(file1, file2, intype, outdir):
""" Compare two *.dtb or *.dts files """
def open_fdt(file_path, file_type):
if file_type == 'auto':
if file_path.endswith(".dtb"):
file_type = 'dtb'
elif file_path.endswith(".dts"):
file_type = 'dts'
else:
raise Exception('Not supported file extension: {}'.format(file_path))
try:
if file_type == 'dtb':
with open(file_path, 'rb') as f:
obj = fdt.parse_dtb(f.read())
else:
with open(file_path, 'r') as f:
obj = fdt.parse_dts(f.read(), os.path.dirname(file_path))
except:
raise Exception('Not supported file format: {}'.format(file_path))
return obj
try:
# load input files
fdt1 = open_fdt(file1, intype)
fdt2 = open_fdt(file2, intype)
# compare it
diff = fdt.diff(fdt1, fdt2)
if diff[0].empty:
click.echo(" Input files are completely different !")
sys.exit()
# create output directory
if outdir is None:
outdir = "diff_out"
os.makedirs(outdir, exist_ok=True)
# save the diff
file_name = (
"same.dts",
os.path.splitext(os.path.basename(file1))[0] + ".dts",
os.path.splitext(os.path.basename(file2))[0] + ".dts")
for index, obj in enumerate(diff):
if not obj.empty:
with open(os.path.join(outdir, file_name[index]), 'w') as f:
f.write(obj.to_dts())
except Exception as e:
click.echo(" Error: {}".format(str(e) if str(e) else "Unknown!"))
sys.exit(ERROR_CODE)
click.secho(" Diff output saved into: %s" % outdir)
def main():
cli(obj={})
if __name__ == '__main__':
main()
```
#### File: pyFDT/tests/test_cli_tool.py
```python
import pytest
DIRECTORY='tests/data/'
@pytest.mark.script_launch_mode('subprocess')
def test_pydtc_todts(script_runner):
ret = script_runner.run('pydtc', 'todts', DIRECTORY + 'imx7d-sdb.dtb')
assert ret.success
@pytest.mark.script_launch_mode('subprocess')
def test_pydtc_todtb(script_runner):
ret = script_runner.run('pydtc', 'todtb', DIRECTORY + 'imx7d-sdb.dts')
assert ret.success
@pytest.mark.script_launch_mode('subprocess')
def test_pydtc_merge(script_runner):
ret = script_runner.run('pydtc', 'merge', 'merged.dts', DIRECTORY + 'fdtdump.dts', DIRECTORY + 'addresses.dts')
assert ret.success
@pytest.mark.script_launch_mode('subprocess')
def test_pydtc_diff(script_runner):
ret = script_runner.run('pydtc', 'diff', DIRECTORY + 'fdtdump.dts', DIRECTORY + 'addresses.dts')
assert ret.success
``` |
{
"source": "6wunderkinder/night-shift",
"score": 3
} |
#### File: night-shift/tests/attempt_log_sizes.py
```python
from __future__ import print_function
import os
import sys
import glob
import argparse
import datetime
# datetime.date
def valid_date(s):
try:
return datetime.datetime.strptime(s, "%Y-%m-%d").date()
except ValueError:
msg = "Not a valid date: `{}`.".format(s)
raise argparse.ArgumentTypeError(msg)
# float
def median(lst):
if not lst:
return None
elif len(lst) % 2 == 1:
return sorted(lst)[((len(lst)+1)//2)-1]
else:
return float(sum(sorted(lst)[(len(lst)//2)-1:(len(lst)//2)+1]))/2.0
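# Illustrative: median([3, 1, 2]) -> 2 and median([1, 2, 3, 4]) -> 2.5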
# list<int>
def get_log_size_for_date(date):
return sum(map(os.path.getsize, map(os.path.abspath, glob.glob("logs/{}/attempt-*.log".format(str(date))))))
# list<int>
def get_log_size_for_last_week(date):
return filter(lambda v: v, [ get_log_size_for_date(date - datetime.timedelta(days=i)) for i in range(7) ])
# tuple<int,int,int>
def get_median_thresholds(lst):
median_value = median(lst)
return int(median_value * 0.9), int(median_value), int(median_value * 1.15)
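# Illustrative: get_median_thresholds([10, 10, 10]) -> (9, 10, 11),
# i.e. roughly -10% / median / +15% bounds, truncated to ints.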
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test_log_sizes')
parser.add_argument('-d', '--date', help="current date", type=valid_date, default=str(datetime.date.today()))
args = parser.parse_args()
log_sizes_lst = list(get_log_size_for_last_week(args.date))
if not log_sizes_lst:
print('[!] No log files were found!')
sys.exit(0)
if len(log_sizes_lst) < 5:
print('[!] Not enough log files are available!')
sys.exit(0)
min_thr, _, max_thr = get_median_thresholds(log_sizes_lst)
today_log_size = get_log_size_for_date(args.date)
if today_log_size > max_thr or today_log_size < min_thr:
print('[!] Log size is below or above threshold:')
print('Expected min: {min_thr} < actual: {actual} < max: {max_thr}' \
.format(min_thr=min_thr, max_thr=max_thr, actual=today_log_size))
sys.exit(1)
sys.exit(0)
```
#### File: night-shift/tests/check_unused_files.py
```python
from __future__ import print_function
import io
import os
import sys
import glob
import re
import argparse
class Parser(object):
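# Starting from a set of entry-point scripts, follow file references found in
# shell/python/ruby/make/sql sources and report any files under `directory`
# that are never reached (i.e. likely unused).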
EXTENSIONS = ['sh', 'py', 'erb', 'rb', 'mk', 'sql']
# void
def __init__(self, directory, start_files_path):
self.directory = os.path.abspath(directory)
self.need_to_check_files_path = set(start_files_path)
self.used_files_path = set()
self.all_files_path = set(self.collect_files())
self.not_found_mentions = set()
@property
def unused_files_path(self):
return self.all_files_path - self.used_files_path
def read(self):
while len(self.need_to_check_files_path) != 0:
f = self.need_to_check_files_path.pop()
self.used_files_path.add(f)
collected_files_path = set()
with io.open(f, 'r', encoding='utf-8') as fd:
_, ext = os.path.splitext(f)
content = fd.read() \
.replace('$*', '*')
if ext == '.sh':
content = content.replace('$(pwd)/$(dirname $0)', os.path.dirname(f)) \
.replace('$(dirname $0)', os.path.dirname(f)) \
.replace('$(pwd)', os.path.dirname(f))
content = re.sub(r'\$\((.*?)\)', '*', content)
content = re.sub(r'\$\{(.*?)\}', '*', content)
content = re.sub(r'\$([^ \/\-]*?)', '*', content)
if ext == '.rb':
for mention in re.findall(r'require_relative "(.*?)"', content, re.I):
file_name = '{}.rb'.format(mention) if not mention.endswith('.rb') else mention
file_path = os.path.abspath(os.path.join(os.path.dirname(f), file_name))
if os.path.exists(file_path):
self.need_to_check_files_path.add(file_path)
if ext == '.py':
for imports in re.findall(r'import (.*?)\n', content, re.I):
for mention in imports.split(','):
file_path = os.path.abspath(os.path.join(os.path.dirname(f), '{}.py'.format(mention.strip())))
if os.path.exists(file_path):
self.need_to_check_files_path.add(file_path)
for level, mention in re.findall(r'from ([\.]*)(.*?) import', content, re.I):
rel_file_path = '../'*(len(level)-1) + '{}.py'.format(mention.strip())
file_path = os.path.abspath(os.path.join(os.path.dirname(f), rel_file_path))
if os.path.exists(file_path):
self.need_to_check_files_path.add(file_path)
for mention, ext in set(re.findall(r'([a-z0-9\_\*\-\.\\\/]+\.({}))'.format('|'.join(self.EXTENSIONS)), content, re.I)):
possible_files_path = glob.glob(os.path.abspath(mention))
if possible_files_path:
collected_files_path |= set(possible_files_path)
continue
possible_files_path = glob.glob(os.path.abspath(os.path.join(os.path.dirname(f), mention)))
if possible_files_path:
collected_files_path |= set(possible_files_path)
continue
possible_files_path = glob.glob(os.path.abspath(os.path.join(self.directory, mention)))
if possible_files_path:
collected_files_path |= set(possible_files_path)
continue
self.not_found_mentions.add((f, mention))
for file_path in collected_files_path:
if file_path.startswith(self.directory) and file_path not in self.used_files_path:
self.need_to_check_files_path.add(file_path)
return self
def collect_files(self):
for root, dirs, files in os.walk(self.directory):
for f in files:
if f.endswith(tuple(self.EXTENSIONS)):
yield os.path.join(root, f)
def show_recognizable_files(self):
if not self.not_found_mentions:
print('Every file was recognizable!')
return
print()
print('Not recognizable files ({}):'.format(len(self.not_found_mentions)))
print('\n'.join(sorted(' {} <- {}'.format(m,f) for f, m in self.not_found_mentions)))
def show_unused_files(self):
if not self.unused_files_path:
print('No unused files found!')
return
print()
print('Unused files list ({}):'.format(len(self.unused_files_path)))
print('\n'.join(sorted(' {}'.format(f) for f in self.unused_files_path)))
def show_used_files(self):
if not self.used_files_path:
print('No used files found!')
return
print()
print('Used files list ({}):'.format(len(self.used_files_path)))
print('\n'.join(sorted(' {}'.format(f) for f in self.used_files_path)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('files_pattern', nargs='+', help='entry point of files')
parser.add_argument('-d', '--dir', help='directory to check', required = True)
args = parser.parse_args()
files_path = map(os.path.abspath, sum(map(glob.glob, sum(map(str.split, args.files_pattern), [])), []))
p = Parser(args.dir, files_path).read()
p.show_unused_files()
sys.exit(min(len(p.unused_files_path),1))
```
#### File: night-shift/web/webapp.py
```python
import os
import re
import io
import json
import datetime
import argparse
from math import log
from flask import *
from functools import wraps
from dateutil.parser import parse as parse_datetime
app = Flask(__name__)
class Logs(object):
# extendable menu list
MENU = []
# void
def __init__(self, dir_project, date = None):
self.dir_project = os.path.abspath(dir_project)
self.dir_logs = os.path.join(self.dir_project, 'logs')
self.rexp_dir_date = re.compile(r'^\d{4}-\d{2}-\d{2}$')
self.date_str = date or self.get_last_log_date() or str(datetime.date.today())
self.date = parse_datetime(self.date_str).date()
self.dir_date = os.path.join(self.dir_logs, self.date_str)
# str
def get_log_id(self, target):
return target.replace("/", "_").replace('.','_')
# str
def get_content(self, file_path):
if not os.path.exists(file_path):
return None
return io.open(file_path, 'r', encoding='utf-8').read().strip()
# list<str>
def find_available_log_dates(self):
if not os.path.exists(self.dir_logs):
return []
return sorted(filter(lambda dir: self.rexp_dir_date.match(dir), os.listdir(self.dir_logs)))
# str
def get_last_log_date(self):
log_dates = self.find_available_log_dates()
return log_dates[-1] \
if log_dates \
else None
class TrackingShellLog(Logs):
# str
def get_timing_env_log_content(self):
path = os.path.join(self.dir_date, 'timing_env.log')
if os.path.exists(path):
return io.open(path, 'r', encoding='utf-8').read()
return u''
# tupe<list<dict>,list<str>>
def find_timing_env_commands(self):
content = self.get_timing_env_log_content()
if not content: return [], set(['Timing env log is not found!'])
commands, ordered_commands, errors = {}, [], set()
for line in content.split('\n'):
try:
data = json.loads(line)
except:
continue
cmd_hash_key = (data['command'],data['unique_nr'])
if cmd_hash_key not in commands.keys() and data['tag'] == 'BEGIN':
commands[cmd_hash_key] = data
commands[cmd_hash_key]['tags'] = [data['tag']]
ordered_commands.append(commands[cmd_hash_key])
elif cmd_hash_key in commands.keys() and data['tag'] in commands[cmd_hash_key]['tags']:
errors.add('Found duplicated command: {}'.format(data['command']))
elif cmd_hash_key in commands.keys() and data['tag'] == 'END':
commands[cmd_hash_key].update(data)
commands[cmd_hash_key]['tags'].append(data['tag'])
else:
errors.add('Unknown error: {}'.format(data['command']))
for command in ordered_commands:
command['started_at'] = parse_datetime(command['started_at'])
if 'finished_at' in command: command['finished_at'] = parse_datetime(command['finished_at'])
return ordered_commands, errors
# str
def get_timing_env_command_status(self, cmd_dict):
if cmd_dict.get('started_at') and cmd_dict.get('finished_at'):
return 'success' if cmd_dict['exit_code'] == 0 else 'failure'
elif cmd_dict['date'] == datetime.date.today():
return 'running'
return 'timeout'
# tuple<list<dict>,list<str>>
def get_timing_env_commands_dict(self):
ordered_commands, errors = self.find_timing_env_commands()
if not ordered_commands: return ordered_commands, errors
first_command_started_at, attempt_dict = ordered_commands[0]['started_at'], {}
for cmd_dict in ordered_commands:
started_at, finished_at = cmd_dict.get('started_at'), cmd_dict.get('finished_at', datetime.datetime.now())
cmd_dict['date'] = started_at.date()
cmd_dict['status'] = self.get_timing_env_command_status(cmd_dict)
cmd_dict['waited'] = (started_at-first_command_started_at).total_seconds() / 60
cmd_dict['length'] = (finished_at-started_at).total_seconds() / 60
cmd_dict['log_id'] = self.get_log_id(cmd_dict['target'])
attempt_dict.setdefault((cmd_dict['target'],cmd_dict['command']), 0)
attempt_dict[(cmd_dict['target'],cmd_dict['command'])] += 1
cmd_dict['attempt_nr'] = attempt_dict[(cmd_dict['target'],cmd_dict['command'])]
return ordered_commands, errors
class TargetLogs(Logs):
TARGET_LOG_IGNORE = ['timing_env','attempt','trackingshell']
# str
def get_path_by_log_id(self, log_id):
for file_path in self.find_target_log_files():
rel_file_path = os.path.relpath(file_path, self.dir_date)
name, _ = os.path.splitext(rel_file_path)
if self.get_log_id(name) == log_id:
return file_path
# list<str>
def find_target_log_files(self):
if not os.path.exists(self.dir_date):
return []
return [ os.path.join(self.dir_date, f) \
for f in os.listdir(self.dir_date) \
if f.endswith('.log') and not f.startswith(tuple(self.TARGET_LOG_IGNORE)) ]
# dict
def get_tracking_shell_log_content(self):
ts_log_path = os.path.join(self.dir_date, 'trackingshell.log')
if not os.path.exists(ts_log_path): return None
return self.get_target_log_dict(ts_log_path, return_content = True)
# bool
def is_target_log_succeed(self, content):
tracking_shell_lines = re.findall(r'\[tracking_shell [^\]]+\] Working on target (\S+) attempt', content, re.I)
if not tracking_shell_lines:
return False
target_file_name = os.path.join(
self.dir_project,
tracking_shell_lines[-1]
)
if not target_file_name:
return False
return os.path.exists(target_file_name)
# dict
def get_target_log_dict(self, file_path, return_content = False):
_, file_name = os.path.split(file_path)
name, _ = os.path.splitext(file_name)
content = self.get_content(file_path)
return {
'id': self.get_log_id(name),
'name': name,
'size': os.path.getsize(file_path),
'lines': len(content.split('\n')),
'success': self.is_target_log_succeed(content),
'content': content if return_content else None
}
# list<dict>
def get_target_logs_dict(self):
for file_path in self.find_target_log_files():
yield self.get_target_log_dict(file_path)
# list<dict>
def get_sorted_target_logs_dict(self):
logs_sorted_by_size = sorted(self.get_target_logs_dict(), \
key = lambda x: x['size'], reverse = True)
return sorted(logs_sorted_by_size, \
key = lambda x: x['success'])
# str
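# Compact human-readable size formatter, e.g. filesize(2048) -> '2.0 KiB'
# and filesize(532) -> '532 B' (illustrative values).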
def filesize(n,pow=0,b=1024,u='B',pre=['']+[p+'i'for p in'KMGTPEZY']):
pow,n=min(int(log(max(n*b**pow,1),b)),len(pre)-1),n*b**pow
return "%%.%if %%s%%s"%abs(pow%(-pow-1))%(n/b**float(pow),pre[pow],u)
class resolve(object):
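# Decorator: instantiates the given Logs subclass for the requested date,
# passes it to the view function, and renders '<view name>.html' with the
# common template context when the view returns a dict.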
# void
def __init__(self, logger_cls):
self.logger_cls = logger_cls
self.dir_project = os.environ.get('NIGHT_SHIFT_PROJECT_DIR')
# func
def __call__(self, f):
outer_cls = self
@wraps(f)
def decorated_function(date = None, *args, **kwargs):
logger = outer_cls.logger_cls(outer_cls.dir_project, date)
data = f(logger, *args, **kwargs)
if isinstance(data, dict):
data.update({
'page': f.__name__,
'current_date': logger.date,
'dates': logger.find_available_log_dates()[-7:],
'filesize': filesize,
'menus': Logs.MENU,
})
return render_template('{}.html'.format(f.__name__), **data)
else:
return data
return decorated_function
@app.route('/download/<date>/<log_id>')
@resolve(TargetLogs)
def download(ns, log_id):
return ns.get_content(ns.get_path_by_log_id(log_id))
@app.route("/")
@app.route("/flow")
@app.route("/flow/<date>")
@app.route('/flow/<date>/<log_id>')
@resolve(TargetLogs)
def flow(ns, log_id = None):
return {
'target_logs': ns.get_sorted_target_logs_dict(),
'log_id': log_id,
'ts': ns.get_tracking_shell_log_content()
}
@app.route("/gantt")
@app.route("/gantt/<date>")
@resolve(TrackingShellLog)
def gantt(ns):
commands, _ = ns.get_timing_env_commands_dict()
return {'commands': commands}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args()
app.run(host='0.0.0.0', port=8000, threaded=True, debug=args.debug)
``` |
{
"source": "6wunderkinder/trackingshell",
"score": 2
} |
#### File: trackingshell/trackingshell/__init__.py
```python
import argparse
import sys
import os
import subprocess
import logging
from .decorators import *
logger = logging.getLogger('trackingshell')
logger.addHandler(logging.NullHandler())
class MakeTarget(object):
WITHOUT_TARGET = 'no-target'
# MakeTarget
def __init__(self, target, command, **kwargs):
self.target = target or self.WITHOUT_TARGET
self.command = command
self.__dict__.update(kwargs)
self.logger = logger
self.set_logger()
# void
def set_logger(self):
pass
# bool
def has_target(self):
return self.target != self.WITHOUT_TARGET
# bool
def has_makelevel(self):
return "MAKELEVEL" in os.environ
# int
def execute_command(self):
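# Run the command through bash with -e and -o pipefail so that any failing
# stage of a pipeline fails the whole target.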
return subprocess.call(["/bin/bash", "-e", "-o", "pipefail", "-c",
self.command])
# void
def logger_fn_for_exit_code(self, exit_code, success = None, failure = None):
return getattr(self.logger, (success or 'info') \
if exit_code == 0 \
else (failure or 'error'))
# dict
def as_dict(self):
return dict(self.__dict__)
# str
def __repr__(self):
if not self.has_makelevel():
return '<{module}.{cls} with command `{command}` and without target>'.format(
module=self.__module__, cls=self.__class__.__name__,
command=self.command)
return '<{module}.{cls} with command `{command}` and `{target}` target>'.format(
module=self.__module__, cls=self.__class__.__name__,
target=self.target, command=self.command)
# int
@plugin
def execute_command(mt, next_plugin_fn):
assert next_plugin_fn is None
return mt.execute_command()
class PluginRegistry(object):
# PluginRegistry
def __init__(self, plugins = None):
self.plugins = plugins or []
# function
def _wraps(self, plugins):
next_plugin_fn, rest = plugins[0], plugins[1:]
return lambda mt: next_plugin_fn(mt, self._wraps(rest) if rest else None)
# void
def register(self, plugin_fn):
self.plugins.insert(len(self.plugins)-1, plugin_fn)
# void
def unregister(self, plugin_fn):
self.plugins.remove(plugin_fn)
# function
def wraps(self):
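# Illustrative: with self.plugins == [a, b, execute_command], this builds
# lambda mt: a(mt, lambda mt: b(mt, lambda mt: execute_command(mt, None)))
# so plugins registered earlier wrap the ones registered later, with the
# actual command execution innermost.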
return self._wraps(self.plugins)
class Shell(object):
# Shell
def __init__(self, argv = None):
self._set_parser()
self._set_plugins()
self.argv = argv
self.cls = MakeTarget
# void
def _set_parser(self):
self.parser = argparse.ArgumentParser(
prog='%(prog)s',
description="Tracks makefiles targets"
)
self.parser.add_argument('-t', '--target', help="name of the make target", nargs="?")
self.parser.add_argument('-c', '--command', help="command to execute", required=True)
# void
def _set_plugins(self):
self.plugins = PluginRegistry([execute_command])
# void
def delegate(self, return_exit_code = False):
# Construct a MakeTarget object
mt = self.cls(**vars(self.parser.parse_args(self.argv)))
# Execute the command
exit_code = self.plugins.wraps()(mt)
# Set a message.
exit_message = 'Command execution finished with exit code {}'.format(exit_code)
logger_fn = mt.logger_fn_for_exit_code(exit_code, success='info')
logger_fn(exit_message, extra = mt.as_dict())
# Quit
if return_exit_code:
self.mt = mt
return exit_code
sys.exit(exit_code)
# void
def main(argv=sys.argv[1:]):
Shell(argv).delegate()
```
#### File: trackingshell/trackingshell/tests.py
```python
import unittest
import trackingshell as ts
class TestExecute(unittest.TestCase):
def test_echo_success(self):
s = ts.Shell(['-c', 'echo "trackingshell" &>/dev/null'])
exit_code = s.delegate(return_exit_code = True)
self.assertEqual(exit_code, 0)
def test_echo_typo(self):
s = ts.Shell(['-c', 'ecsho "trackingshell" &>/dev/null'])
exit_code = s.delegate(return_exit_code = True)
self.assertEqual(exit_code, 127)
def test_multipe_echo_success(self):
s = ts.Shell(['-c', 'echo tracking &>/dev/null && echo shell &>/dev/null'])
exit_code = s.delegate(return_exit_code = True)
self.assertEqual(exit_code, 0)
def test_multiple_echo_failed(self):
s = ts.Shell(['-c', 'echo tracking &>/dev/null && ecsho shell &>/dev/null'])
exit_code = s.delegate(return_exit_code = True)
self.assertEqual(exit_code, 127)
def test_subcommand_success(self):
s = ts.Shell(['-c', 'echo $(date +%Y-%m-%d) &>/dev/null'])
exit_code = s.delegate(return_exit_code = True)
self.assertEqual(exit_code, 0)
class TestMakeTarget(unittest.TestCase):
def test_all_arguments(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
s = ts.Shell(['-t', target, '-c', command])
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.command, command)
self.assertEqual(s.mt.target, target)
def test_missing_target_argument(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
s = ts.Shell(['-t', '-c', command])
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.command, command)
self.assertEqual(s.mt.target, ts.MakeTarget.WITHOUT_TARGET)
def test_not_defined_target_argument(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
s = ts.Shell(['-c', command])
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.command, command)
self.assertEqual(s.mt.target, ts.MakeTarget.WITHOUT_TARGET)
def test_missing_command(self):
s = ts.Shell(['-c'])
with self.assertRaises(SystemExit):
s.delegate()
class TestExtraArguments(unittest.TestCase):
def test_define_extra_argument(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
date = '2015-08-25'
s = ts.Shell(['-d', date, '-t', target, '-c', command])
s.parser.add_argument('-d', '--date', help="current date")
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.command, command)
self.assertEqual(s.mt.target, target)
self.assertEqual(s.mt.date, date)
def test_missing_extra_argument(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
s = ts.Shell(['-t', target, '-c', command])
s.parser.add_argument('-d', '--date', help="current date")
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.command, command)
self.assertEqual(s.mt.target, target)
self.assertIsNone(s.mt.date)
@ts.plugin
def test_plugin_before_1(mt, next_plugin_fn):
if not hasattr(mt, 'known_plugins'):
mt.known_plugins = []
mt.known_plugins.append(test_plugin_before_1)
return next_plugin_fn(mt)
@ts.plugin
def test_plugin_before_2(mt, next_plugin_fn):
if not hasattr(mt, 'known_plugins'):
mt.known_plugins = []
mt.known_plugins.append(test_plugin_before_2)
return next_plugin_fn(mt)
@ts.plugin
def test_plugin_after_1(mt, next_plugin_fn):
if not hasattr(mt, 'known_plugins'):
mt.known_plugins = []
exit_code = next_plugin_fn(mt)
mt.known_plugins.append(test_plugin_after_1)
return exit_code
@ts.plugin
def test_plugin_after_2(mt, next_plugin_fn):
if not hasattr(mt, 'known_plugins'):
mt.known_plugins = []
exit_code = next_plugin_fn(mt)
mt.known_plugins.append(test_plugin_after_2)
return exit_code
class TestPlugins(unittest.TestCase):
def test_single_plugin(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
s = ts.Shell(['-t', target, '-c', command])
s.plugins.register(test_plugin_before_1)
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.known_plugins, [test_plugin_before_1])
def test_multiple_before_plugin(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
s = ts.Shell(['-t', target, '-c', command])
s.plugins.register(test_plugin_before_1)
s.plugins.register(test_plugin_before_2)
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.known_plugins, [test_plugin_before_1, test_plugin_before_2])
def test_multiple_after_plugin(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
s = ts.Shell(['-t', target, '-c', command])
s.plugins.register(test_plugin_after_1)
s.plugins.register(test_plugin_after_2)
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.known_plugins, [test_plugin_after_2, test_plugin_after_1])
@ts.only_run_with_make_target
@ts.plugin
def test_plugin_with_make_target(mt, next_plugin_fn):
if not hasattr(mt, 'known_plugins'):
mt.known_plugins = []
mt.known_plugins.append(test_plugin_with_make_target)
return next_plugin_fn(mt)
class TestDecorators(unittest.TestCase):
def test_single_plugin_with_make_target(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
target = 'test-target'
s = ts.Shell(['-t', target, '-c', command])
s.plugins.register(test_plugin_with_make_target)
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.known_plugins, [test_plugin_with_make_target])
def test_single_plugin_without_make_target(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
s = ts.Shell(['-t', '-c', command])
s.plugins.register(test_plugin_with_make_target)
s.delegate(return_exit_code = True)
self.assertFalse(hasattr(s.mt, 'known_plugins'))
def test_multiple_plugin_without_make_target(self):
command = 'echo $(date +%Y-%m-%d) &>/dev/null'
s = ts.Shell(['-t', '-c', command])
s.plugins.register(test_plugin_before_1)
s.plugins.register(test_plugin_with_make_target)
s.plugins.register(test_plugin_before_2)
s.delegate(return_exit_code = True)
self.assertEqual(s.mt.known_plugins, [test_plugin_before_1, test_plugin_before_2])
``` |
{
"source": "6x68mx/apollo-cli",
"score": 2
} |
#### File: 6x68mx/apollo-cli/util.py
```python
import transcode
import formats
from pathlib import Path
import subprocess
import locale
import re
import os
import errno
import mutagen.flac
from mutagen import MutagenError
def get_artist_name(torrent):
g = torrent["group"]
if len(g["musicInfo"]["artists"]) == 1:
return g["musicInfo"]["artists"][0]["name"]
else:
return "Various Artists"
def generate_transcode_name(torrent, output_format):
"""Generate the name for the output directory."""
t = torrent["torrent"]
g = torrent["group"]
if t["remastered"]:
title = (t["remasterTitle"] if t["remasterTitle"] else "remaster")
additional_info = "{} - {}".format(title, t["remasterYear"])
if t["remasterRecordLabel"]:
additional_info += " - {}".format(t["remasterRecordLabel"])
else:
additional_info = g["year"]
artist = get_artist_name(torrent)
name = "{} - {} ({}) - {} [{}]".format(artist,
g["name"],
additional_info,
t["media"],
output_format.NAME)
# replace characters which aren't allowed in (windows) paths with "_"
return re.sub(r'[\\/:"*?<>|]+', "_", name)
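# Hypothetical illustration (names are made up, not from the original module): for a
# 2010 CD remaster and an MP3 output format, generate_transcode_name would produce
# something like
#   "Some Artist - Some Album (Deluxe - 2010 - Some Label) - CD [MP3]"
# with any of \ / : " * ? < > | collapsed into "_" by the final re.sub call.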
def create_torrent_file(torrent_path, data_path, tracker, passkey=None,
source=None, piece_length=18, overwrite=False):
"""
Creates a torrentfile using ``mktorrent``
:param torrent_path: Full path of the torrent file that will be created.
:param data_path: Path to the file/directory from which to create the
torrent.
:param tracker: URL of the tracker, if `passkey` is specified this should
contain "{}" which will be replaced with the passkey.
:param passkey: A passkey to insert into the tracker URL.
(Needed for private trackers)
:param piece_length: The piece length in 2^n bytes.
:param overwrite: If this is `True` and `torrent_path` exists it will be
replaced.
:raises OSError:
"""
if torrent_path.exists():
if overwrite:
os.remove(torrent_path)
else:
raise FileExistsError("{} already exists.".format(torrent_path))
if not data_path.exists():
raise FileNotFoundError("{} not found.".format(data_path))
if passkey is not None:
url = tracker.format(passkey)
else:
url = tracker
command = ["mktorrent", "-p", "-l", str(piece_length), "-a", url, "-o", torrent_path]
if source:
command.extend(["-s", source])
command.append(data_path)
subprocess.check_output(command, stderr=subprocess.STDOUT)
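# Rough usage sketch (placeholder values, not from the original code): the command
# list built above corresponds to an invocation of roughly the form
#   mktorrent -p -l 18 -a <tracker-url-with-passkey> -o <torrent_path> [-s <source>] <data_path>
# where "-s <source>" is only appended when a `source` string is passed in.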
def parse_file_list(data):
"""
Parse the file list contained in the torrent dict from the Gazelle API.
:param: data The string to parse
:return: A dict. The keys are the relative paths of the files and the
values are the size of the file in bytes.
"""
files = {}
for x in data.split("|||"):
name, size = x.split("{{{")
size = int(size[:-3])
files[name] = size
return files
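# Illustrative example (file names are made up): the Gazelle file list is a single
# string of "name{{{size}}}" entries joined by "|||", so
#   parse_file_list("01 - Intro.flac{{{1234567}}}|||folder.jpg{{{54321}}}")
# would return {"01 - Intro.flac": 1234567, "folder.jpg": 54321}.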
def check_source_release(path, torrent):
"""
Check if there are any problems with a flac release.
Internally calls `check_dir` and `transcode.check_flacs`.
:param path: Path to the directory containing the release.
:param torrent: A torrent `dict`.
:returns: A tuple ``(False, description)`` if a problem was detected with the
directory or the flac files, otherwise the result of `transcode.check_flacs`.
"""
fl = parse_file_list(torrent["torrent"]["fileList"])
if not check_dir(path, fl):
return (False, "Directory doesn't match the torrents file list.")
files = list(path.glob("**/*" + formats.FormatFlac.SUFFIX))
try:
flacs = [mutagen.flac.FLAC(f) for f in files]
except MutagenError as e:
return (False, str(e))
return transcode.check_flacs(flacs)
def check_dir(path, files, names_only=False):
"""
Check if a local directory matches the file list of a torrent.
:param: path Local directory to compare. Must be a Path like object.
:param: files A dict as returned by `parse_file_list`.
:param: names_only Check only if the filenames match. (Ignore size)
:return: `True` if the contents of `path` match exactly the files listed
in `files` and `False` otherwise or if `path` is not a directory.
"""
if not path.is_dir():
return False
files = dict(files)
dirs = [path]
while dirs:
for x in dirs.pop().iterdir():
if x.is_dir():
dirs.append(x)
elif x.is_file():
name = str(x.relative_to(path))
if (name in files
and (names_only
or x.stat().st_size == files[name])):
files.pop(name)
else:
return False
if files:
return False
return True
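# Minimal usage sketch (paths are hypothetical): compare a downloaded release
# directory against the torrent's file list, mirroring check_source_release above.
#   files = parse_file_list(torrent["torrent"]["fileList"])
#   if not check_dir(Path("/music/Some Album [FLAC]"), files):
#       print("local copy does not match the torrent")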
def find_dir(name, search_dirs):
"""
Search for a directory in multiple parent directories.
:param: name The directory you want to find.
:param: search_dirs List of `Path` objects in which to search for `name`.
:return: A `Path` object to the directory if it was found, otherwise `None`.
"""
for d in search_dirs:
path = d / name
try:
if path.is_dir():
return path
except OSError as e:
# Under certain conditions the generated filename could be
# too long for the filesystem. In this case we know that
# this path couldn't exist anyway and we can skip it.
if e.errno == errno.ENAMETOOLONG:
continue
else:
raise
def get_flac_version():
if not hasattr(get_flac_version, "version"):
cp = subprocess.run(["flac", "--version"],
stdout=subprocess.PIPE,
encoding=locale.getpreferredencoding(False))
get_flac_version.version = cp.stdout.strip()
return get_flac_version.version
def get_sox_version():
if not hasattr(get_sox_version, "version"):
cp = subprocess.run(["sox", "--version"],
stdout=subprocess.PIPE,
encoding=locale.getpreferredencoding(False))
get_sox_version.version = cp.stdout.split(":")[1].strip()
return get_sox_version.version
def get_lame_version():
if not hasattr(get_lame_version, "version"):
cp = subprocess.run(["lame", "--version"],
stdout=subprocess.PIPE,
encoding=locale.getpreferredencoding(False))
get_lame_version.version = cp.stdout.splitlines()[0].strip()
return get_lame_version.version
def generate_description(tid, src_path, target_format):
"""
Generate a release description for apollo.rip.
:param tid: ID of the source torrent.
:param src_path: `Path` to a flac file of the source.
:param target_format: The format of the transcode. (see `formats`)
:returns: The description as string.
"""
flac = mutagen.flac.FLAC(src_path)
cmds = transcode.generate_transcode_cmds(
src_path.name,
src_path.with_suffix(target_format.SUFFIX).name,
target_format,
transcode.compute_resample(flac))
process = " | ".join(" ".join(cmd) for cmd in cmds)
return ("Transcode of [url=https://apollo.rip/torrents.php?torrentid={tid}]https://apollo.rip/torrents.php?torrentid={tid}[/url].\n"
"\n"
"Process used:\n"
"[code]{process}[/code]\n"
"\n"
"Tool versions:\n"
"[code]{flac}\n"
"{sox}\n"
"{lame}[/code]\n"
"\n"
"Created with apollo-cli.\n"
"This transcode was performed by an autonomous system. Please contact me (the uploader) if it made a mistake."
).format(
tid=tid,
process=process,
flac=get_flac_version(),
sox=get_sox_version(),
lame=get_lame_version()
)
``` |
{
"source": "6Xkljk8Vj7Tk1k4/gpstracker",
"score": 3
} |
#### File: 6Xkljk8Vj7Tk1k4/gpstracker/server.py
```python
import socket
import sys
from thread import *
def parse_tracker_data(msg):
# Validate it's a correct head and tail
if not isinstance(msg, str):
return "ERROR 01"
if(len(msg) < 19):
return "ERROR 02"
if msg[0] != '(' or msg[len(msg) - 1] != ')':
return "ERROR 03"
identifier = msg[1:1+12]
command = msg[13:13+4]
if command == "BP00": # Heartbeat
retval = '(' + identifier + 'AP01HSO)'+'\r\n'
return retval
elif command == "BR00": # Position
# Date
offset = 17
offset_end = offset + 6
date = msg[offset:offset_end]
# Availability
offset = offset_end
offset_end = offset + 1
availability = msg[offset:offset_end]
# Latitude
offset = offset_end
offset_end = offset + 9
latitude = msg[offset:offset_end]
# Latitude indicator
offset = offset_end
offset_end = offset + 1
latitude_ind = msg[offset:offset_end]
# Longitude
offset = offset_end
offset_end = offset + 10
longitude = msg[offset:offset_end]
# Longitude Indicator
offset = offset_end
offset_end = offset + 1
longitude_ind = msg[offset:offset_end]
# Speed
offset = offset_end
offset_end = offset + 5
speed = msg[offset:offset_end]
# Time
offset = offset_end
offset_end = offset + 6
times = msg[offset:offset_end]
# Orientation
offset = offset_end
offset_end = offset + 6
orientation = msg[offset:offset_end]
# IOState
offset = offset_end
offset_end = offset + 8
iostate = msg[offset:offset_end]
# Milepost (L)
offset = offset_end
offset_end = offset + 1
milepost = msg[offset:offset_end]
# Mileage
offset = offset_end
offset_end = offset + 8
mileage = msg[offset:offset_end]
if availability == 'A':
latitude_dd = round(float(latitude[0:2]) + float(latitude[2:2+7]) /60, 6)
if latitude_ind != "N":
latitude_dd = - latitude_dd
longitude_dd = round(float(longitude[0:3]) + float(longitude[3:3+7])/60, 6)
if longitude_ind != "E":
longitude_dd = - longitude_dd
maps_url = "http://maps.google.com/maps/?q=loc:" + str(latitude_dd) + "," + str(longitude_dd) + "&z=15"
ret = ";OK" + ";IMEI:" + identifier + \
";latitude:" + str(latitude_dd) + \
";longitude:" + str(longitude_dd) + \
";speed:" + speed + \
";date:" + date + \
";time:" + times + \
'\r\n'
with open("foo", "a") as f:
f.write(ret)
retval = '(' + identifier + 'AP05HSO)'+'\r\n'
return retval
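# Field layout assumed by the BR00 branch above (offsets and sizes taken from the
# slicing code, not from a protocol spec): '(' + 12-char device id + 4-char command +
# 6-char date + 1-char availability flag ('A' = valid fix) + 9-char latitude
# (ddmm.mmmm) + N/S + 10-char longitude (dddmm.mmmm) + E/W + 5-char speed +
# 6-char time + 6-char orientation + 8-char IO state + 1-char milepost +
# 8-char mileage + ')'. The reply is an acknowledgement of the form
# '(<device id>AP05HSO)'.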
HOST = '' # Symbolic name meaning all available interfaces
PORT = 8821 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket created'
#Bind socket to local host and port
try:
s.bind((HOST, PORT))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
#Start listening on socket
s.listen(10)
print 'Socket now listening'
#Function for handling connections. This will be used to create threads
def clientthread(conn):
#Sending message to connected client
conn.send('Welcome to the server. Type something and hit enter\n') #send only takes string
#infinite loop so that function do not terminate and thread do not end.
while True:
#Receiving from client
data = conn.recv(1024)
print data
reply = parse_tracker_data(data)
print reply
if not data:
break
conn.sendall(reply)
#came out of loop
conn.close()
#now keep talking with the client
while 1:
#wait to accept a connection - blocking call
conn, addr = s.accept()
print 'Connected with ' + addr[0] + ':' + str(addr[1])
#start new thread takes 1st argument as a function name to be run, second is the tuple of arguments to the function.
start_new_thread(clientthread ,(conn,))
s.close()
``` |
{
"source": "6xray/ISARTBot",
"score": 2
} |
#### File: ISARTBot/isartbot/bot.py
```python
import sys
import discord
import asyncio
import logging
import traceback
import configparser
import logging.config
from isartbot.lang import Lang
from isartbot.checks import log_command, trigger_typing, block_dms
from isartbot.database import Server, Database
from isartbot.exceptions import UnauthorizedCommand, VerificationRequired
from isartbot.help_command import HelpCommand
from os.path import abspath
from discord.ext import commands
class Bot(commands.Bot):
""" Main bot class """
__slots__ = ("settings", "extensions", "config_file", "database", "logger", "langs", "dev_mode")
def __init__(self, *args, **kwargs):
""" Inits and runs the bot """
self.config_file = abspath('./settings.ini')
# Setting up logging
logging.config.fileConfig(self.config_file)
self.logger = logging.getLogger('isartbot')
# Loading settings
self.logger.info('Settings file located at {}'.format(self.config_file))
self.settings = configparser.ConfigParser(converters={'list': lambda x: [i.strip() for i in x.split(',')]})
self.settings.read(self.config_file, encoding='utf-8')
super().__init__(command_prefix = discord.ext.commands.when_mentioned_or(self.settings.get('common', 'prefix')), *args, **kwargs)
self.dev_mode = self.settings.getboolean('debug', 'developement_mode')
self.extensions = self.settings['extensions']
# Loading database
database_name = f"sqlite:///{abspath(self.settings.get('common', 'database'))}"
self.logger.info(f"Connecting to database {database_name}")
self.database = Database(self.loop, database_name)
# Creating the help command
self.help_command = HelpCommand()
# Loading languages
self.langs = {}
self.loop.create_task(self.load_languages())
self.loop.create_task(self.load_extensions())
# Adding checks
self.add_check(block_dms , call_once=True)
self.add_check(log_command , call_once=True)
self.add_check(trigger_typing , call_once=True)
self.before_invoke(self.fetch_guild_language)
token = configparser.ConfigParser()
token.read(abspath('./token.ini'), encoding='utf-8')
self.run(token.get('DEFAULT', 'token'))
async def load_extensions(self):
""" Loads all the cogs of the bot defined into the settings.ini file """
try:
await self.wait_for("ready", timeout=30)
self.logger.info("Loading extensions...")
except asyncio.futures.TimeoutError:
self.logger.warning("Wait for on_ready event timed out, loading the extensions anyway...")
for (extension, _) in self.settings.items("extensions"):
if self.extensions.getboolean(extension):
try:
self.load_extension(f"isartbot.ext.{extension}")
self.logger.info (f"Loaded extension named isartbot.ext.{extension}")
except Exception as e:
self.logger.error(f"Failed to load extension named isartbot.ext.{extension}")
await self.on_error(e)
else:
self.logger.info(f"Ignored extension named isartbot.ext.{extension}")
return
async def load_languages(self):
""" (re)Loads all the available languages files of the bot"""
self.langs.clear()
for (lang, file_name) in self.settings.items("languages"):
try:
self.langs[lang] = Lang(file_name)
self.logger.info(f"Loaded language named {lang} from {file_name}")
except Exception as e:
self.logger.error(f"Failed to load a language")
await self.on_error(e)
return
async def get_translations(self, ctx, keys: list, force_fetch: bool = False):
""" Returns a set of translations """
if (force_fetch):
await self.fetch_guild_language(ctx)
return dict([(key, self.langs[ctx.guild.description].get_key(key)) for key in keys])
async def get_translation(self, ctx, key: str, force_fetch: bool = False):
""" Returns a translation """
if (force_fetch):
await self.fetch_guild_language(ctx)
return self.langs[ctx.guild.description].get_key(key)
def register_guild(self, guild: discord.Guild):
""" Registers the guild into the database, this method is automatically called the first time a command is trigerred in a new guild """
new_server_preferences = Server(discord_id=guild.id)
self.database.session.add(new_server_preferences)
self.database.session.commit()
self.logger.warning(f"Registered new discord server to database : '{guild.name}' id = {guild.id}")
return new_server_preferences
async def fetch_guild_language(self, ctx):
""" An event that is called when a command is found and is about to be invoked. """
# Fetching the guild language and injects it into the context
lang = self.database.session.query(Server.lang).\
filter(Server.discord_id == ctx.guild.id).first()
# Checking if the guild is already registered in the database
if (lang == None):
lang = (self.register_guild(ctx.guild)).lang
else:
lang = lang[0]
# We are gonna use the guild description to store the language of the guild
# since this is not used by discord anyways
ctx.guild.description = lang
# --- Events ---
async def on_ready(self):
"""
Executed when the bot is connected
to discord and ready to operate
"""
self.logger.info(f"Logged in as {self.user.name}#{self.user.discriminator} - {self.user.id}")
async def on_connect(self):
"""Executed when the bot connects to discord"""
self.logger.info("Discord connection established")
async def on_disconnect(self):
"""Executed when the bot connects to discord"""
self.logger.info("Discord connection terminated")
async def on_guild_join(self, guild: discord.Guild):
"""Called when a Guild is either created by the Client or when the Client joins a guild"""
self.logger.warning(f"Joined guild : {guild.name}")
self.register_guild(guild)
async def on_guild_remove(self, guild: discord.Guild):
"""Called when a Guild is removed from the Client"""
self.logger.warning(f"Left guild : {guild.name}")
# Server should always be valid
server = self.database.session.query(Server).filter(Server.discord_id == guild.id).first()
if (server != None):
self.database.session.delete(server)
self.database.session.commit()
else:
self.logger.warning(f"No database entry found for the guild named {guild.name} (id = {guild.id})")
async def on_command_error(self, ctx, error):
""" Handles command errors """
if isinstance(error, UnauthorizedCommand):
await self.unauthorized_command_error(ctx, error)
return
if isinstance(error, VerificationRequired):
await self.verification_required_error(ctx, error)
return
# Anything in ignored will return and prevent anything happening.
if isinstance(error, (commands.CommandNotFound, commands.CheckFailure)):
return
if isinstance(error, commands.UserInputError):
await ctx.send_help(ctx.command)
return
if isinstance(error, commands.MissingPermissions):
await self.missing_permissions_error(ctx, error)
return
if isinstance(error, commands.BotMissingPermissions):
await self.bot_missing_permissions_error(ctx, error)
return
# All other Errors not returned come here... And we can just print the default TraceBack.
self.logger.error(f"Ignoring exception in command \"{ctx.command}\":")
for err in traceback.format_exception(type(error), error, error.__traceback__):
index = 0
for i, char in enumerate(err):
if (char == '\n'):
self.logger.error(err[index:i])
index = i + 1
return
async def on_error(self, *args, **kwargs):
""" Sends errors reports """
self.logger.critical("Unhandled exception occurred:")
for err in traceback.format_exc().split('\n'):
self.logger.critical(err)
async def unauthorized_command_error(self, ctx, error):
""" Sends a missing permission error """
self.logger.info(f"Access unauthorized, command has been denied.")
if not ctx.channel.permissions_for(ctx.guild.me).send_messages:
return
translations = await self.get_translations(ctx, ["failure_title", "unauthorized_command"], force_fetch=True)
embed = discord.Embed(
title = translations["failure_title"],
description = translations["unauthorized_command"].format(error.missing_status),
color = discord.Color.red()
)
await ctx.send(embed = embed)
async def missing_permissions_error(self, ctx, error):
""" Sends a missing permission error """
if not ctx.channel.permissions_for(ctx.guild.me).send_messages:
return
translations = await self.get_translations(ctx, ["failure_title", "missing_perms_error"], force_fetch=True)
embed = discord.Embed(
title = translations["failure_title"],
description = translations["missing_perms_error"].format(error.missing_perms),
color = discord.Color.red()
)
await ctx.send(embed=embed)
async def bot_missing_permissions_error(self, ctx, error):
""" Sends a missing permission error """
if not ctx.channel.permissions_for(ctx.guild.me).send_messages:
return
translations = await self.get_translations(ctx, ["failure_title", "bot_missing_perms_error"], force_fetch=True)
embed = discord.Embed(
title = translations["failure_title"],
description = translations["bot_missing_perms_error"].format(error.missing_perms),
color = discord.Color.red()
)
await ctx.send(embed=embed)
async def verification_required_error(self, ctx, error):
""" Sends a verification required error """
if not ctx.channel.permissions_for(ctx.guild.me).send_messages:
return
translations = await self.get_translations(ctx, ["failure_title", "verified_role_required"], force_fetch=True)
embed = discord.Embed(
title = translations["failure_title"],
description = translations["verified_role_required"].format(error.missing_role),
color = discord.Color.red()
)
await ctx.send(embed=embed)
```
#### File: isartbot/ext/game.py
```python
# Copyright (c) 2018-2020 Renondedju
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import discord
import asyncio
from math import ceil
from discord.ext import commands
from isartbot.helper import Helper
from isartbot.checks import is_moderator, is_verified
from isartbot.database import Game, Server, SelfAssignableRole
from isartbot.converters import GameConverter
from isartbot.converters import MemberConverter
class GameExt (commands.Cog):
def __init__(self, bot):
# Starting the game assignation task
self.bot = bot
self.task = bot.loop.create_task(self.run_game_task())
def cog_unload(self):
"""Called when the game module is unloaded for any reason"""
self.task.cancel()
async def run_game_task(self):
"""Error handler for the game scan task"""
try:
await self.game_task()
except asyncio.CancelledError: # This error is thrown when the extension is unloaded
pass
except Exception as e:
await self.bot.on_error(e)
async def game_task(self):
"""Scan for players and auto assigns game roles if possible"""
scan_delay = int(self.bot.settings.get("game", "scan_delay"))
# Main scan loop
while (scan_delay != -1):
# Fetching all required data from the database
database_games = list(self.bot.database.session.query(Game).all())
server_ids = set([item.server_id for item in database_games])
# Looping over every server that requires a scan
for server_id in server_ids:
guild = discord.utils.get(self.bot.guilds, id=server_id)
# We just got removed from a server while scanning, skipping it.
# The next scan will be fine since all data related with this server
# has already been removed from the database
if (guild == None):
continue
# Fetching server verified role (if any)
server = self.bot.database.session.query(Server).filter(Server.discord_id == guild.id).first()
verified_role = discord.utils.get(guild.roles, id = (server.verified_role_id if server != None else 0))
server_games = [game for game in database_games if game.server_id == server_id]
# Looping over each members
for member in guild.members:
# Checking for a verified role, this way unauthorized people don't get assigned roles
if (verified_role != None):
if (verified_role not in member.roles):
continue
game_role = self.get_game_role_from_activity(member.activity, server_games, guild)
if (game_role == None or game_role in member.roles):
continue
try:
await member.add_roles(game_role, reason="Automatic game scan")
self.bot.logger.info(f"Added the game {game_role.name} to {member} in guild named {guild.name}")
except discord.Forbidden: # If discord doesn't let us modify roles, then breaking to the next server
break
except:
pass
# Waiting for the next scan
await asyncio.sleep(scan_delay)
def get_game_role_from_activity(self, activity: discord.Activity, server_games, guild: discord.Guild):
"""Returns a game role from an activity"""
if not isinstance(activity, (discord.Game, discord.Activity)):
return None
game_name = activity.name.lower()
# Looping over every available games to see if something is matching
for game in server_games:
if game_name == game.discord_name:
return discord.utils.get(guild.roles, id=game.discord_role_id)
return None
@commands.group(invoke_without_command=True, pass_context=True,
help="game_help", description="game_description")
async def game(self, ctx):
await ctx.send_help(ctx.command)
@game.command(help="game_add_help", description="game_add_description")
@commands.bot_has_permissions(manage_roles = True)
@commands.check(is_verified)
async def add(self, ctx, game: GameConverter):
""" Adds a game to the user """
if (game is None):
await Helper.send_error(ctx, ctx.channel, 'game_invalid_argument')
return
game_role = discord.utils.get(ctx.guild.roles, id=game.discord_role_id)
try:
await ctx.message.author.add_roles(game_role, reason="game add command")
await Helper.send_success(ctx, ctx.channel, 'game_add_success', format_content=(game_role.mention,))
except:
await Helper.send_error (ctx, ctx.channel, 'game_add_failure', format_content=(game_role.mention,))
@game.command(help="game_remove_help", description="game_remove_description")
@commands.bot_has_permissions(manage_roles = True)
@commands.check(is_verified)
async def remove(self, ctx, game: GameConverter):
""" Adds a game to the user """
if (game is None):
await Helper.send_error(ctx, ctx.channel, 'game_invalid_argument')
return
game_role = discord.utils.get(ctx.guild.roles, id=game.discord_role_id)
try:
await ctx.message.author.remove_roles(game_role, reason="game remove command")
await Helper.send_success(ctx, ctx.channel, 'game_remove_success', format_content=(game_role.mention,))
except:
await Helper.send_error (ctx, ctx.channel, 'game_remove_failure', format_content=(game_role.mention,))
@game.command(help="game_create_help", description="game_create_description")
@commands.bot_has_permissions(manage_roles = True)
@commands.check(is_moderator)
async def create(self, ctx, name, *, discord_name = ""):
"""Create a game"""
if (discord_name == ""):
discord_name = name
game_check = await GameConverter().convert(ctx, name)
if (game_check is not None):
await Helper.send_error(ctx, ctx.channel, 'game_create_error_existing', format_content=(game_check.display_name,))
return
role_color = ctx.bot.settings.get("game", "role_color")
game = await ctx.guild.create_role(
name = name,
color = await commands.ColourConverter().convert(ctx, role_color),
mentionable = True)
server = self.bot.database.session.query(Server).filter(Server.discord_id == ctx.guild.id).first()
new_game = Game(
discord_role_id = game.id,
display_name = name,
discord_name = discord_name.lower(),
server = server
)
sar = SelfAssignableRole(discord_id = game.id, server = server)
self.bot.database.session.add(new_game)
self.bot.database.session.add(sar)
self.bot.database.session.commit()
await Helper.send_success(ctx, ctx.channel, 'game_create_success', format_content=(game.mention,))
@game.command(help="game_delete_help", description="game_delete_description")
@commands.bot_has_permissions(manage_roles = True)
@commands.check(is_moderator)
async def delete(self, ctx, game: GameConverter):
"""Deletes a game"""
if (game is None):
await Helper.send_error(ctx, ctx.channel, 'game_invalid_argument')
return
game_role = discord.utils.get(ctx.guild.roles, id=game.discord_role_id)
confirmation = await Helper.ask_confirmation(ctx, ctx.channel, 'game_delete_confirmation_title',
initial_content = "game_delete_confirmation_description" , initial_format = (game_role.mention,),
success_content = "game_delete_success" , success_format = (game.display_name.title(),),
failure_content = "game_delete_aborted")
if (not confirmation):
return
self.bot.database.session.delete(game)
self.bot.database.session.commit()
await game_role.delete()
@game.command(help="game_list_help", description="game_list_description")
async def list(self, ctx, page: int = 1):
"""Lists the available games of the server"""
# Fetching and computing all initial required data
database_games = list(self.bot.database.session.query(Game).all())
server_games = [game for game in database_games if game.server.discord_id == ctx.guild.id]
max_lines = int(self.bot.settings.get("game", "list_max_lines"))
total_pages = ceil(len(server_games) / max_lines)
# Clamping the current page
page = min(max(1, page), total_pages)
# Filling the embed content
lines = []
for index in range(max_lines * (page - 1), max_lines * page):
try:
lines.append(f"• {server_games[index].display_name}")
except IndexError:
break
embed = discord.Embed()
embed.description = '\n'.join(lines)
embed.title = await ctx.bot.get_translation(ctx, 'game_list_title')
embed.color = discord.Color.green()
embed.set_footer(text = (await ctx.bot.get_translation(ctx, 'game_list_footer')).format(page, total_pages))
await ctx.send(embed=embed)
# Events
@commands.Cog.listener()
async def on_guild_role_delete(self, role):
""" Database role maintainance """
server = self.bot.database.session.query(Server).filter(Server.discord_id == role.guild.id).first()
game = self.bot.database.session.query(Game).filter(Game.discord_role_id == role.id, Game.server == server).first()
if (game == None):
return
self.bot.database.session.delete(game)
self.bot.database.session.commit()
def setup(bot):
bot.add_cog(GameExt(bot))
```
#### File: isartbot/ext/iam.py
```python
import asyncio
import discord
from math import ceil
from discord.ext import commands
from isartbot.helper import Helper
from isartbot.checks import is_admin, is_verified
from isartbot.database import SelfAssignableRole, Server
class IamExt(commands.Cog):
__slots__ = ("bot")
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_guild_role_delete(self, role):
""" Database role maintainance """
server = self.bot.database.session.query(Server).filter(Server.discord_id == role.guild.id).first()
database_role = self.bot.database.session.query(SelfAssignableRole).\
filter(SelfAssignableRole.discord_id == role.id, SelfAssignableRole.server == server).first()
if (database_role == None):
return
self.bot.database.session.delete(database_role)
self.bot.database.session.commit()
@commands.command(pass_context=True, help="iam_help", description="iam_description")
@commands.bot_has_permissions(send_messages=True, manage_roles=True)
@commands.check(is_verified)
async def iam(self, ctx, role: discord.Role):
""" Adds a role to a user if possible """
server = self.bot.database.session.query(Server).filter(Server.discord_id == ctx.guild.id).first()
database_role = self.bot.database.session.query(SelfAssignableRole).\
filter(SelfAssignableRole.discord_id == role.id, SelfAssignableRole.server == server).first()
if (database_role == None):
await Helper.send_error(ctx, ctx.channel, 'sar_non_existant_role', format_content=(role.mention,))
return
try:
await ctx.message.author.add_roles(role, reason="iam command")
await Helper.send_success(ctx, ctx.channel, 'iam_success', format_content=(role.mention,))
except:
await Helper.send_error(ctx, ctx.channel, 'iam_failure', format_content=(role.mention,))
@commands.command(pass_context=True, help="iamn_help", description="iamn_description")
@commands.bot_has_permissions(send_messages=True, manage_roles=True)
@commands.check(is_verified)
async def iamn(self, ctx, role: discord.Role):
""" Removes a role from the user """
server = self.bot.database.session.query(Server).filter(Server.discord_id == ctx.guild.id).first()
database_role = self.bot.database.session.query(SelfAssignableRole).\
filter(SelfAssignableRole.discord_id == role.id, SelfAssignableRole.server == server).first()
if (database_role == None):
await Helper.send_error(ctx, ctx.channel, 'sar_non_existant_role', format_content=(role.mention,))
return
try:
await ctx.message.author.remove_roles(role, reason="iamn command")
await Helper.send_success(ctx, ctx.channel, 'iamn_success', format_content=(role.mention,))
except:
await Helper.send_error(ctx, ctx.channel, 'iamn_failure', format_content=(role.mention,))
@commands.group(pass_context=True, invoke_without_command=True)
async def sar(self, ctx):
""" Sar command group (sar stands for Self Assignable Role) """
await ctx.send_help(ctx.command)
@sar.command(aliases=["add"], help="sar_create_help", description="sar_create_description")
@commands.check(is_admin)
async def create(self, ctx, role: discord.Role):
""" Creates a new self assignable role """
# Looking for invalid roles
if (role == ctx.guild.default_role):
await Helper.send_error(ctx, ctx.channel, 'sar_invalid_role_error', format_content=(role.mention,))
return
# Looking for duplicates
server = self.bot.database.session.query(Server).filter(Server.discord_id == ctx.guild.id).first()
database_role = self.bot.database.session.query(SelfAssignableRole).\
filter(SelfAssignableRole.discord_id == role.id, SelfAssignableRole.server == server).first()
if (database_role != None):
await Helper.send_error(ctx, ctx.channel, 'sar_role_already_exists_error', format_content=(role.mention,))
return
# Creating the new role
new_role = SelfAssignableRole(discord_id = role.id, server = server)
self.bot.database.session.add(new_role)
self.bot.database.session.commit()
await Helper.send_success(ctx, ctx.channel, 'sar_role_created', format_content=(role.mention,))
@sar.command(aliases=["remove"], help="sar_delete_help", description="sar_delete_description")
@commands.check(is_admin)
async def delete(self, ctx, role: discord.Role):
""" Deletes a self assignable role """
server = self.bot.database.session.query(Server).filter(Server.discord_id == ctx.guild.id).first()
database_role = self.bot.database.session.query(SelfAssignableRole).\
filter(SelfAssignableRole.discord_id == role.id, SelfAssignableRole.server == server).first()
if (database_role == None):
await Helper.send_error(ctx, ctx.channel, 'sar_non_existant_role', format_content=(role.mention,))
return
self.bot.database.session.delete(database_role)
self.bot.database.session.commit()
await Helper.send_success(ctx, ctx.channel, 'sar_role_deleted', format_content=(role.mention,))
@sar.command(help="sar_list_help", description="sar_list_description")
async def list(self, ctx, page: int = 1):
""" Lists all the self assignable roles for this guild """
server = self.bot.database.session.query(Server).filter(Server.discord_id == ctx.guild.id).first()
roles = self.bot.database.session.query(SelfAssignableRole).\
filter(SelfAssignableRole.server == server).all()
max_lines = int(self.bot.settings.get("iam", "list_max_lines"))
total_pages = ceil(len(roles) / max_lines)
# Clamping the current page
page = min(max(1, page), total_pages)
# Filling the embed content
lines = []
for index in range(max_lines * (page - 1), max_lines * page):
try:
role = ctx.guild.get_role(roles[index].discord_id)
if (role != None):
lines.append(f"• {role.mention}")
except IndexError:
break
embed = discord.Embed()
embed.description = '\n'.join(lines)
embed.title = await ctx.bot.get_translation(ctx, 'sar_list_title')
embed.color = discord.Color.green()
embed.set_footer(text = (await ctx.bot.get_translation(ctx, 'sar_list_footer')).format(page, total_pages))
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(IamExt(bot))
``` |
{
"source": "6Youcai/CfdnaPattern",
"score": 3
} |
#### File: 6Youcai/CfdnaPattern/predict.py
```python
import sys, os
from optparse import OptionParser
import time
from util import *
from draw import *
from feature import *
import numpy as np
from sklearn import svm, neighbors
import random
import json
import pickle
def parseCommand():
usage = "extract the features, and train the model, from the training set of fastq files. \n\npython training.py <fastq_files> [-f feature_file] [-m model_file] "
version = "0.0.1"
parser = OptionParser(usage = usage, version = version)
parser.add_option("-m", "--model", dest = "model_file", default = "cfdna.model",
help = "specify which file stored the built model.")
parser.add_option("-q", "--quite", dest = "quite", action='store_true', default = False,
help = "only print those prediction conflicts with filename")
return parser.parse_args()
def preprocess(options):
data = []
samples = []
fq_files = get_arg_files()
number = 0
for fq in fq_files:
number += 1
#print(str(number) + ": " + fq)
extractor = FeatureExtractor(fq)
extractor.extract()
feature = extractor.feature()
if feature == None:
#print("======== Warning: bad feature from:")
#print(fq)
continue
data.append(feature)
samples.append(fq)
return data, samples
def get_type_name(label):
if label == 1:
return "cfdna"
else:
return "not-cfdna"
def load_model(options):
filename = options.model_file
if not os.path.exists(filename):
filename = os.path.join(os.path.dirname(sys.argv[0]), options.model_file)
if not os.path.exists(filename):
print("Error: the model file not found: " + options.model_file)
sys.exit(1)
f = open(filename, "rb")
model = pickle.load(f)
f.close()
return model
def main():
if sys.version_info.major >2:
print('python3 is not supported yet, please use python2')
sys.exit(1)
(options, args) = parseCommand()
data, samples = preprocess(options)
model = load_model(options)
labels = model.predict(data)
for i in xrange(len(samples)):
if options.quite == False or (labels[i] == 0 and "cfdna" in samples[i].lower()) or (labels[i] == 1 and "cfdna" not in samples[i].lower()):
print(get_type_name(labels[i]) + ": " + samples[i])
plot_data_list(samples, data, "predict_fig")
if __name__ == "__main__":
main()
``` |
{
"source": "6zhc/WSI-online-analysis-framework",
"score": 2
} |
#### File: WSI-online-analysis-framework/Server/re_annotation_server.py
```python
from flask import render_template, redirect, request
from flask import jsonify
import uuid
import os
import numpy
from PIL import Image
import cv2
from flask_login import login_required, current_user
from Controller import re_annotation_controller
from Controller import thread_controller
from Controller import manifest_controller
from Model import freehand_annotation_sqlite
from Model import manifest
# annotation_result_root = "/home1/zhc/resnet/annotation_record/whole/" # "/home1/zhc/Dr-Wang-Grading/"
# boundary_result_root = "/home1/zhc/resnet/boundary_record/"
# region_image_root = "/home1/zhc/resnet/anno_data/"
# annotation_result_root = "/home1/gzy/NucleiSegmentation/Smear/Categories/"
# boundary_result_root = "/home1/gzy/NucleiSegmentation/Smear/Masks/"
# region_image_root = "/home1/gzy/NucleiSegmentation/Smear/Images/"
original_result_root = 'static/data/re_annotation_data/results/'
annotation_result_root = "/home5/sort/annotation/"
boundary_result_root = "/home5/sort/masks/"
region_image_root = "/home5/sort/images_small/"
result_root = "Data/re_annotation_data/" + "results/"
points_root = "Data/re_annotation_data/" + "points/"
grades_root = "Data/re_annotation_data/" + "grades/"
# first_image_name = "1"
first_image_name = "152031_1"
image_type = ".jpg"
def add_re_annotation_sever(app):
@app.route('/re_annotation')
@login_required
def re_annotation():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default="")
if anno_name == "":
try:
anno_name = current_user.slideID[annotator_id + "_" + "re-annotation"]
except:
anno_name = first_image_name
current_user.slideID[annotator_id + "_" + "re-annotation"] = anno_name
image_root = request.args.get('image_root', type=str,
default='static/data/re_annotation_data/results/a' + annotator_id + '/')
rand = '?a=' + str(uuid.uuid4())
if not os.path.exists(result_root + 'a' + annotator_id + '/' + 'mask_' + anno_name + '_U-net.png'):
re_annotation_controller.boundary_2_point(anno_name, annotator_id)
re_annotation_controller.point_2_boundary(anno_name, 'nuClick', annotator_id)
re_annotation_controller.boundary_2_mask(anno_name, 'nuClick', annotator_id)
re_annotation_controller.boundary_2_mask_separate_nuclei(anno_name, 'nuClick', annotator_id)
re_annotation_controller.boundary_2_mask_u_net(anno_name, annotator_id)
return render_template('multi-slide.html', anno_name=anno_name, rand=rand,
image_root=image_root, image_type=image_type)
@app.route('/available_re_annotation_region')
@login_required
def available_re_annotation_region():
annotator_id = current_user.get_id()
result = []
index = 0
file_list = os.listdir(annotation_result_root)
file_list.sort()
for file in file_list:
if file[-4:] == ".txt":
# print(result_root + 'a' + annotator_id + '/' + file.split('.')[0] + "_annotation_file_nuClick.txt")
if os.path.exists(
result_root + 'a' + annotator_id + '/' + file.split('.')[0] + "_annotation_file_nuClick.txt"):
temp = {"id": file.split('.')[0], "text": '【' + str(index) + '】 ' + file.split('.')[0] + ' *'}
else:
temp = {"id": file.split('.')[0], "text": '【' + str(index) + '】 ' + file.split('.')[0]}
result.append(temp)
index += 1
return jsonify(result)
@app.route('/make_mask')
@login_required
def make_mask():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
mask_name = request.args.get('mask_name', type=str, default="nuClick")
re_annotation_make_mask(anno_name, annotator_id)
result = {
"mask1": 'mask_' + anno_name + '_' + mask_name + '.png' + '?a=' + str(uuid.uuid4()),
"mask2": 'mask_' + anno_name + '_' + mask_name + '_separate_nuclei.png' + '?a=' + str(uuid.uuid4()),
}
return jsonify(result)
@app.route('/update_grades', methods=['POST'])
@login_required
def update_grades():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
mask_name = request.args.get('mask_name', type=str, default="nuClick")
data = {}
for key, value in request.form.items():
if key.endswith('[]'):
data[key[:-2]] = request.form.getlist(key)
else:
data[key] = value
print(data)
boundary_file_name = result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + mask_name + ".txt"
points_file_name = points_root + 'a' + annotator_id + '/' + anno_name + '.txt'
grades_file_name = grades_root + 'a' + annotator_id + '/' + anno_name + '.txt'
points_file = open(points_file_name, 'w')
grades_file = open(grades_file_name, 'w')
boundary_file = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
for i in range(len(data['grade'])):
nuclei_id = int(boundary_file[int(data['points_y'][i]), int(data['points_x'][i])])
if nuclei_id == -1:
try:
nuclei_id = int(boundary_file[int(data['points_y'][i]), int(data['points_x'][i]) - 1])
except:
pass
if nuclei_id == 0:
try:
nuclei_id = int(boundary_file[int(data['points_y'][i]), int(data['points_x'][i]) + 1])
except:
pass
if nuclei_id != i + 1 and nuclei_id != 0:
if nuclei_id != -1:
try:
data['grade'][nuclei_id - 1] = data['grade'][i]
if int(data['grade'][i]) == 0:
boundary_file[boundary_file == nuclei_id] = 0
except:
print("------------- error: " + nuclei_id + "++++++++++++")
data['grade'][i] = 0
current_nuclei_id = 0
for i in range(len(data['grade'])):
try:
if int(data['grade'][i]) != 0:
points_file.write(str(data['points_x'][i]) + ' ' + str(data['points_y'][i]) + '\n')
grades_file.write(str(data['grade'][i]) + '\n')
old_nuclei_id = boundary_file[int(data['points_y'][i]), int(data['points_x'][i])]
current_nuclei_id += 1
if old_nuclei_id > 0:
boundary_file[boundary_file == old_nuclei_id] = current_nuclei_id
except:
pass
numpy.savetxt(boundary_file_name, boundary_file, fmt='%d', delimiter=",")
grades_file.close()
points_file.close()
return jsonify({"msg": "True"})
def re_annotation_make_mask(anno_name, annotator_id):
re_annotation_controller.point_2_boundary(anno_name, 'nuClick', annotator_id)
re_annotation_controller.boundary_2_mask(anno_name, 'nuClick', annotator_id)
re_annotation_controller.boundary_2_mask_separate_nuclei(anno_name, 'nuClick', annotator_id)
@app.route('/points_grades')
@login_required
def points_grades():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
mask_name = request.args.get('mask_name', type=str, default="nuClick")
points_file_name = points_root + 'a' + annotator_id + '/' + anno_name + '.txt'
grades_file_name = grades_root + 'a' + annotator_id + '/' + anno_name + '.txt'
points_file = open(points_file_name).readlines()
grades_file = open(grades_file_name).readlines()
points = []
grades = []
for item in points_file:
points.append([int(item.split(' ')[0]), int(item.split(' ')[1])])
for item in grades_file:
grades.append(int(item))
return jsonify({"grades": grades, "points": points})
@app.route('/re_annotation/_wipe', methods=['GET', 'POST'])
@login_required
def re_annotation_wipe():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
mask_name = request.args.get('mask_name', type=str, default="nuClick")
boundary_file_name = result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + mask_name + ".txt"
annotation_file_name = result_root + 'a' + annotator_id + '/' + anno_name + "_annotation_file_" + mask_name + ".txt"
draw = request.form
num_of_points = int(len(draw) / 3)
boundary_file = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
data_x = []
data_y = []
mask = numpy.zeros(boundary_file.shape, dtype=numpy.uint8)
for i in range(num_of_points):
data_x.append(int(draw[str(i) + '[x]']))
data_y.append(int(draw[str(i) + '[y]']))
pts = numpy.vstack((data_x, data_y)).astype(numpy.int32).T
cv2.fillPoly(mask, [pts], (255))
p_x = numpy.where(mask == 255)[1]
p_y = numpy.where(mask == 255)[0]
for i in range(len(p_x)):
boundary_file[p_y[i]][p_x[i]] = 0
annotation_file = numpy.loadtxt(annotation_file_name, dtype=numpy.int16, delimiter=',')
numpy.savetxt(result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + "Middle" + ".txt",
boundary_file, fmt='%d', delimiter=",")
numpy.savetxt(result_root + 'a' + annotator_id + '/' + anno_name + "_annotation_file_" + "Middle" + ".txt",
annotation_file, fmt='%d', delimiter=",")
re_annotation_controller.boundary_2_mask(anno_name, 'Middle', annotator_id)
re_annotation_controller.boundary_2_mask_separate_nuclei(anno_name, 'Middle', annotator_id)
file_name = original_result_root + 'a' + annotator_id + '/' + "mask_" + anno_name + "_Middle" + ".png" + "?a=" + str(
uuid.uuid4())
return (file_name)
@app.route('/re_annotation/_fill', methods=['GET', 'POST'])
@login_required
def re_annotation_fill():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
mask_name = request.args.get('mask_name', type=str, default="nuClick")
boundary_file_name = result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + mask_name + ".txt"
annotation_file_name = result_root + 'a' + annotator_id + '/' + anno_name + "_annotation_file_" + mask_name + ".txt"
draw = request.form
num_of_points = int(len(draw) / 3)
grade = ''
boundary_file = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
data_x = []
data_y = []
temp = []
mask = numpy.zeros(boundary_file.shape, dtype=numpy.uint8)
for i in range(num_of_points):
data_x.append(int(draw[str(i) + '[x]']))
data_y.append(int(draw[str(i) + '[y]']))
pts = numpy.vstack((data_x, data_y)).astype(numpy.int32).T
cv2.fillPoly(mask, [pts], (255))
p_x = numpy.where(mask == 255)[1]
p_y = numpy.where(mask == 255)[0]
for i in range(len(p_x)):
if boundary_file[p_y[i]][p_x[i]] == 0 or boundary_file[p_y[i]][p_x[i]] == -1:
continue
temp.append(boundary_file[p_y[i]][p_x[i]])
temp = numpy.array(temp)
bincount = numpy.bincount(temp)
bincount_list = bincount.tolist()
max_index = bincount_list.index(max(bincount_list))
grade = max_index
for i in range(len(p_x)):
boundary_file[p_y[i]][p_x[i]] = grade
annotation_file = numpy.loadtxt(annotation_file_name, dtype=numpy.int16, delimiter=',')
numpy.savetxt(result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + "Middle" + ".txt",
boundary_file, fmt='%d', delimiter=",")
numpy.savetxt(result_root + 'a' + annotator_id + '/' + anno_name + "_annotation_file_" + "Middle" + ".txt",
annotation_file, fmt='%d', delimiter=",")
re_annotation_controller.boundary_2_mask(anno_name, 'Middle', annotator_id)
re_annotation_controller.boundary_2_mask_separate_nuclei(anno_name, 'Middle', annotator_id)
file_name = original_result_root + 'a' + annotator_id + '/' + "mask_" + anno_name + "_Middle" + ".png" + "?a=" + str(
uuid.uuid4())
return (file_name)
@app.route('/update_image', methods=['GET', 'POST'])
@login_required
def update_image():
annotator_id = current_user.get_id()
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
mask_name = request.args.get('mask_name', type=str, default="nuClick")
boundary_file_name = result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + "Middle" + ".txt"
result = numpy.loadtxt(boundary_file_name, dtype=numpy.int16, delimiter=',')
contours, hierarchy = cv2.findContours(cv2.convertScaleAbs(result), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
result = cv2.drawContours(result, contours, -1, 255, 1)
result = result.astype(numpy.int16)
result[result == 255] = -1
numpy.savetxt(result_root + 'a' + annotator_id + '/' + anno_name + "_boundary_" + mask_name + ".txt",
result, fmt='%d', delimiter=",")
re_annotation_controller.boundary_2_mask(anno_name, 'nuClick', annotator_id)
re_annotation_controller.boundary_2_mask_separate_nuclei(anno_name, 'nuClick', annotator_id)
file_name = original_result_root + 'a' + annotator_id + '/' + "mask_" + anno_name + "_nuClick" + ".png" + "?a=" + str(
uuid.uuid4())
return (file_name)
@app.route('/get_info')
def get_info():
anno_name = request.args.get('anno_name', type=str, default=first_image_name)
file_path = region_image_root + anno_name + '.jpg'
img = Image.open(file_path)
dimensions = img.size
MPP = 0
properties = img.format
w = dimensions[0]
h = dimensions[1]
return jsonify(
img_width=w,
img_height=h,
um_per_px=0.25,
max_image_zoom=0, # max_image_zoom,
toggle_status=0, # toggle_status
properties=properties
)
``` |
{
"source": "700gtk/ML_MNIST",
"score": 3
} |
#### File: 700gtk/ML_MNIST/neuralNet.py
```python
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
import sklearn
import support as sp
class neuralNet(sklearn.base.BaseEstimator):
def __init__(self, layers=3, neuron_count=1000, epochs=40, learning_rate=.1):
self.model = None
self.scaler = StandardScaler()
self.layers = layers
self.neuron_count = neuron_count
self.epochs = epochs
self.learning_rate = learning_rate
def data_into_sets(self, x, y):
x_train, x_test, y_train, y_test = train_test_split(x, y)
return self.scaler.fit_transform(x_train), self.scaler.transform(x_test), y_train, y_test
def fit(self, x, y):
### build net
self.model = keras.models.Sequential()
for iter in range(self.layers):
self.model.add(keras.layers.Dense(self.neuron_count, activation='relu'))
self.model.add(keras.layers.Dense(10, activation='softmax'))
# get the data
x_train, x_test, y_train, y_test = self.data_into_sets(x, y)
# compile model
self.model.compile(loss='sparse_categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=self.learning_rate), metrics=["accuracy"])
#fit the model
print('layers:', self.layers, 'neurons', self.neuron_count, 'learning_rate', self.learning_rate)
self.model.fit(x_train, y_train, epochs=self.epochs, validation_data=(x_test, y_test), verbose=2)
def predict(self, x):
# this is just for gridSearchCV
if self.model is None:
return [0]*len(x)
list_of_weights_of_predictions = self.model.predict(self.scaler.transform(x))
best_answers = []
# iter = 0
for prediction_set in list_of_weights_of_predictions:
# prediction = prediction_set.tolist().index(max(prediction_set))
best_answers.append(prediction_set.tolist().index(max(prediction_set)))
# sp.Plot_digit(x[iter])
# iter += 1
return best_answers
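# Rough usage sketch (variable names are illustrative): the class follows the
# scikit-learn estimator convention, so it can be used directly or inside GridSearchCV.
#   clf = neuralNet(layers=3, neuron_count=1000, epochs=40, learning_rate=0.1)
#   clf.fit(x_train_flat, y_train)          # flattened MNIST pixel rows
#   predictions = clf.predict(x_test_flat)  # list of digit labels 0-9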
``` |
{
"source": "700software/concord",
"score": 3
} |
#### File: examples/ansible_project/inventory.py
```python
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
def __init__(self):
print json.dumps(self.example_inventory());
def example_inventory(self):
return {
"local": {
"hosts": ["127.0.0.1"],
"vars": {
"ansible_connection": "local"
}
}
}
# Get the inventory.
ExampleInventory()
```
#### File: ansible/callback/concord_strategy_patch.py
```python
from __future__ import (absolute_import)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible.plugins.strategy import StrategyBase
def _queue_task(self, host, task, task_vars, play_context):
self._tqm.send_callback('concord_on_task_start', host, task)
ansibleStrategyModuleQueueTask(self, host, task, task_vars, play_context)
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'concord_strategy_patch'
CALLBACK_NEEDS_WHITELIST = False
def __init__(self):
global ansibleStrategyModuleQueueTask
ansibleStrategyModuleQueueTask = StrategyBase._queue_task
StrategyBase._queue_task = _queue_task
super(CallbackModule, self).__init__()
``` |
{
"source": "705062791/Progressively-Generating-Better-Initial-Guesses-Towards-Next-Stages-forHigh-Quality-Human-Motion-Pre",
"score": 2
} |
#### File: 705062791/Progressively-Generating-Better-Initial-Guesses-Towards-Next-Stages-forHigh-Quality-Human-Motion-Pre/main_h36m_3d.py
```python
import os
import sys
#sys.path.append('/mnt/hdd4T/mtz_home/code/SmoothPredictionRelease/')
sys.path.append(os.path.abspath('./'))
from utils import h36motion3d as datasets
from model import stage_4
from utils.opt import Options
from utils import util
from utils import log
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import numpy as np
import time
import torch.optim as optim
def main(opt):
lr_now = opt.lr_now
start_epoch = 1
# opt.is_eval = True
print('>>> create models')
in_features = opt.in_features # 66
d_model = opt.d_model
kernel_size = opt.kernel_size
memory_size = opt.memory_size
net_pred = stage_4.MultiStageModel(opt=opt)
net_pred.to(opt.cuda_idx)
optimizer = optim.Adam(filter(lambda x: x.requires_grad, net_pred.parameters()), lr=opt.lr_now)
print(">>> total params: {:.2f}M".format(sum(p.numel() for p in net_pred.parameters()) / 1000000.0))
if opt.is_load or opt.is_eval:
if opt.is_eval:
model_path_len = './{}/ckpt_best.pth.tar'.format(opt.ckpt)
else:
model_path_len = './{}/ckpt_last.pth.tar'.format(opt.ckpt)
print(">>> loading ckpt len from '{}'".format(model_path_len))
ckpt = torch.load(model_path_len)
start_epoch = ckpt['epoch'] + 1
err_best = ckpt['err']
lr_now = ckpt['lr']
net_pred.load_state_dict(ckpt['state_dict'])
# net.load_state_dict(ckpt)
# optimizer.load_state_dict(ckpt['optimizer'])
# lr_now = util.lr_decay_mine(optimizer, lr_now, 0.2)
print(">>> ckpt len loaded (epoch: {} | err: {})".format(ckpt['epoch'], ckpt['err']))
print('>>> loading datasets')
if not opt.is_eval:
# dataset = datasets.DatasetsSmooth(opt, split=0)
# actions = ["walking", "eating", "smoking", "discussion", "directions",
# "greeting", "phoning", "posing", "purchases", "sitting",
# "sittingdown", "takingphoto", "waiting", "walkingdog",
# "walkingtogether"]
dataset = datasets.Datasets(opt, split=0)
print('>>> Training dataset length: {:d}'.format(dataset.__len__()))
data_loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=0, pin_memory=True)
valid_dataset = datasets.Datasets(opt, split=2)
print('>>> Validation dataset length: {:d}'.format(valid_dataset.__len__()))
valid_loader = DataLoader(valid_dataset, batch_size=opt.test_batch_size, shuffle=True, num_workers=0,
pin_memory=True)
test_dataset = datasets.Datasets(opt, split=2)
print('>>> Testing dataset length: {:d}'.format(test_dataset.__len__()))
test_loader = DataLoader(test_dataset, batch_size=opt.test_batch_size, shuffle=False, num_workers=0,
pin_memory=True)
# evaluation
if opt.is_eval:
ret_test = run_model(net_pred, is_train=3, data_loader=test_loader, opt=opt)
ret_log = np.array([])
head = np.array([])
for k in ret_test.keys():
ret_log = np.append(ret_log, [ret_test[k]])
head = np.append(head, [k])
log.save_csv_log(opt, head, ret_log, is_create=True, file_name='test_walking')
# print('testing error: {:.3f}'.format(ret_test['m_p3d_h36']))
# training
if not opt.is_eval:
err_best = 1000
for epo in range(start_epoch, opt.epoch + 1):
is_best = False
# if epo % opt.lr_decay == 0:
lr_now = util.lr_decay_mine(optimizer, lr_now, 0.1 ** (1 / opt.epoch))
print('>>> training epoch: {:d}'.format(epo))
ret_train = run_model(net_pred, optimizer, is_train=0, data_loader=data_loader, epo=epo, opt=opt)
print('train error: {:.3f}'.format(ret_train['m_p3d_h36']))
ret_valid = run_model(net_pred, is_train=1, data_loader=valid_loader, opt=opt, epo=epo)
print('validation error: {:.3f}'.format(ret_valid['m_p3d_h36']))
ret_test = run_model(net_pred, is_train=3, data_loader=test_loader, opt=opt, epo=epo)
print('testing error: {:.3f}'.format(ret_test['#40ms']))
ret_log = np.array([epo, lr_now])
head = np.array(['epoch', 'lr'])
for k in ret_train.keys():
ret_log = np.append(ret_log, [ret_train[k]])
head = np.append(head, [k])
for k in ret_valid.keys():
ret_log = np.append(ret_log, [ret_valid[k]])
head = np.append(head, ['valid_' + k])
for k in ret_test.keys():
ret_log = np.append(ret_log, [ret_test[k]])
head = np.append(head, ['test_' + k])
log.save_csv_log(opt, head, ret_log, is_create=(epo == 1))
if ret_valid['m_p3d_h36'] < err_best:
err_best = ret_valid['m_p3d_h36']
is_best = True
log.save_ckpt({'epoch': epo,
'lr': lr_now,
'err': ret_valid['m_p3d_h36'],
'state_dict': net_pred.state_dict(),
'optimizer': optimizer.state_dict()},
is_best=is_best, opt=opt)
def eval(opt):
lr_now = opt.lr_now
start_epoch = 1
print('>>> create models')
net_pred = stage_4.MultiStageModel(opt=opt)
net_pred.to(opt.cuda_idx)
net_pred.eval()
#load model
model_path_len = './{}/ckpt_best.pth.tar'.format(opt.ckpt)
print(">>> loading ckpt len from '{}'".format(model_path_len))
ckpt = torch.load(model_path_len)
net_pred.load_state_dict(ckpt['state_dict'])
print(">>> ckpt len loaded (epoch: {} | err: {})".format(ckpt['epoch'], ckpt['err']))
acts = ["walking", "eating", "smoking", "discussion", "directions",
"greeting", "phoning", "posing", "purchases", "sitting",
"sittingdown", "takingphoto", "waiting", "walkingdog",
"walkingtogether"]
data_loader = {}
for act in acts:
dataset = datasets.Datasets(opt=opt, split=2, actions=act)
data_loader[act] = DataLoader(dataset, batch_size=opt.test_batch_size, shuffle=False, num_workers=0,
pin_memory=True)
#do test
is_create = True
avg_ret_log = []
for act in acts:
ret_test = run_model(net_pred, is_train=3, data_loader=data_loader[act], opt=opt)
ret_log = np.array([act])
head = np.array(['action'])
for k in ret_test.keys():
ret_log = np.append(ret_log, [ret_test[k]])
head = np.append(head, ['test_' + k])
avg_ret_log.append(ret_log[1:])
log.save_csv_eval_log(opt, head, ret_log, is_create=is_create)
is_create = False
avg_ret_log = np.array(avg_ret_log, dtype=np.float64)
avg_ret_log = np.mean(avg_ret_log, axis=0)
write_ret_log = ret_log.copy()
write_ret_log[0] = 'avg'
write_ret_log[1:] = avg_ret_log
log.save_csv_eval_log(opt, head, write_ret_log, is_create=False)
def smooth(src, sample_len, kernel_size):
"""
data:[bs, 60, 96]
"""
src_data = src[:, -sample_len:, :].clone()
smooth_data = src_data.clone()
for i in range(kernel_size, sample_len):
smooth_data[:, i] = torch.mean(src_data[:, kernel_size:i+1], dim=1)
return smooth_data
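def _smooth_sanity_check():
    # Hedged sketch, not called by the original pipeline: smooth() above leaves the
    # first kernel_size frames untouched and replaces frame i (i >= kernel_size) with
    # the running mean of frames kernel_size..i, which is what produces the coarser
    # supervision targets (smooth1/2/3) built in run_model().
    dummy = torch.arange(20, dtype=torch.float32).reshape(1, 20, 1)
    smoothed = smooth(dummy, sample_len=20, kernel_size=10)
    assert torch.allclose(smoothed[:, :10], dummy[:, :10])
    assert torch.allclose(smoothed[:, 19], dummy[:, 10:20].mean(dim=1))
    return smoothed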
def run_model(net_pred, optimizer=None, is_train=0, data_loader=None, epo=1, opt=None):
if is_train == 0:
net_pred.train()
else:
net_pred.eval()
l_p3d = 0
if is_train <= 1:
m_p3d_h36 = 0
else:
titles = (np.array(range(opt.output_n)) + 1)*40
m_p3d_h36 = np.zeros([opt.output_n])
n = 0
in_n = opt.input_n
out_n = opt.output_n
dim_used = np.array([6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 51, 52, 53, 54, 55, 56, 57, 58, 59, 63, 64, 65, 66, 67, 68,
75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 88, 89, 90, 91, 92])
seq_in = opt.kernel_size
# joints at same loc
joint_to_ignore = np.array([16, 20, 23, 24, 28, 31])
index_to_ignore = np.concatenate((joint_to_ignore * 3, joint_to_ignore * 3 + 1, joint_to_ignore * 3 + 2))
joint_equal = np.array([13, 19, 22, 13, 27, 30])
index_to_equal = np.concatenate((joint_equal * 3, joint_equal * 3 + 1, joint_equal * 3 + 2))
itera = 1
# idx = np.expand_dims(np.arange(seq_in + out_n), axis=1) + (
# out_n - seq_in + np.expand_dims(np.arange(itera), axis=0))
st = time.time()
for i, (p3d_h36) in enumerate(data_loader):
# print(i)
batch_size, seq_n, _ = p3d_h36.shape
# when only one sample in this batch
if batch_size == 1 and is_train == 0:
continue
n += batch_size
bt = time.time()
p3d_h36 = p3d_h36.float().to(opt.cuda_idx)
smooth1 = smooth(p3d_h36[:, :, dim_used],
sample_len=opt.kernel_size + opt.output_n,
kernel_size=opt.kernel_size).clone()
smooth2 = smooth(smooth1,
sample_len=opt.kernel_size + opt.output_n,
kernel_size=opt.kernel_size).clone()
smooth3 = smooth(smooth2,
sample_len=opt.kernel_size + opt.output_n,
kernel_size=opt.kernel_size).clone()
input = p3d_h36[:, :, dim_used].clone()
p3d_sup_4 = p3d_h36.clone()[:, :, dim_used][:, -out_n - seq_in:].reshape(
[-1, seq_in + out_n, len(dim_used) // 3, 3])
p3d_sup_3 = smooth1.clone()[:, -out_n - seq_in:].reshape(
[-1, seq_in + out_n, len(dim_used) // 3, 3])
p3d_sup_2 = smooth2.clone()[:, -out_n - seq_in:].reshape(
[-1, seq_in + out_n, len(dim_used) // 3, 3])
p3d_sup_1 = smooth3.clone()[:, -out_n - seq_in:].reshape(
[-1, seq_in + out_n, len(dim_used) // 3, 3])
p3d_out_all_4, p3d_out_all_3, p3d_out_all_2, p3d_out_all_1 = net_pred(input, input_n=in_n, output_n=out_n, itera=itera)
p3d_out_4 = p3d_h36.clone()[:, in_n:in_n + out_n]
p3d_out_4[:, :, dim_used] = p3d_out_all_4[:, seq_in:]
p3d_out_4[:, :, index_to_ignore] = p3d_out_4[:, :, index_to_equal]
p3d_out_4 = p3d_out_4.reshape([-1, out_n, 32, 3])
p3d_h36 = p3d_h36.reshape([-1, in_n + out_n, 32, 3])
p3d_out_all_4 = p3d_out_all_4.reshape([batch_size, seq_in + out_n, len(dim_used) // 3, 3])
p3d_out_all_3 = p3d_out_all_3.reshape([batch_size, seq_in + out_n, len(dim_used) // 3, 3])
p3d_out_all_2 = p3d_out_all_2.reshape([batch_size, seq_in + out_n, len(dim_used) // 3, 3])
p3d_out_all_1 = p3d_out_all_1.reshape([batch_size, seq_in + out_n, len(dim_used) // 3, 3])
# 2d joint loss:
grad_norm = 0
if is_train == 0:
loss_p3d_4 = torch.mean(torch.norm(p3d_out_all_4 - p3d_sup_4, dim=3))
loss_p3d_3 = torch.mean(torch.norm(p3d_out_all_3 - p3d_sup_3, dim=3))
loss_p3d_2 = torch.mean(torch.norm(p3d_out_all_2 - p3d_sup_2, dim=3))
loss_p3d_1 = torch.mean(torch.norm(p3d_out_all_1 - p3d_sup_1, dim=3))
loss_all = (loss_p3d_4 + loss_p3d_3 + loss_p3d_2 + loss_p3d_1)/4
optimizer.zero_grad()
loss_all.backward()
nn.utils.clip_grad_norm_(list(net_pred.parameters()), max_norm=opt.max_norm)
optimizer.step()
# update log values
l_p3d += loss_p3d_4.cpu().data.numpy() * batch_size
if is_train <= 1: # if is validation or train simply output the overall mean error
mpjpe_p3d_h36 = torch.mean(torch.norm(p3d_h36[:, in_n:in_n + out_n] - p3d_out_4, dim=3))
m_p3d_h36 += mpjpe_p3d_h36.cpu().data.numpy() * batch_size
else:
mpjpe_p3d_h36 = torch.sum(torch.mean(torch.norm(p3d_h36[:, in_n:] - p3d_out_4, dim=3), dim=2), dim=0)
m_p3d_h36 += mpjpe_p3d_h36.cpu().data.numpy()
if i % 1000 == 0:
print('{}/{}|bt {:.3f}s|tt{:.0f}s|gn{}'.format(i + 1, len(data_loader), time.time() - bt,
time.time() - st, grad_norm))
ret = {}
if is_train == 0:
ret["l_p3d"] = l_p3d / n
if is_train <= 1:
ret["m_p3d_h36"] = m_p3d_h36 / n
else:
m_p3d_h36 = m_p3d_h36 / n
for j in range(out_n):
ret["#{:d}ms".format(titles[j])] = m_p3d_h36[j]
return ret
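def _horizon_keys_example(output_n=10):
    # Hedged illustration of the test-mode keys assembled above: H3.6M frames are
    # 40 ms apart, so output_n=10 yields '#40ms', '#80ms', ..., '#400ms', which is
    # why main() prints ret_test['#40ms'].
    return ["#{:d}ms".format(t) for t in (np.arange(output_n) + 1) * 40]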
if __name__ == '__main__':
option = Options().parse()
if option.is_eval == False:
main(opt=option)
else:
eval(option)
```
#### File: Progressively-Generating-Better-Initial-Guesses-Towards-Next-Stages-forHigh-Quality-Human-Motion-Pre/model/stage_4.py
```python
from torch.nn import Module
from torch import nn
import torch
# import model.transformer_base
import math
from model import BaseModel as BaseBlock
import utils.util as util
import numpy as np
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from utils.opt import Options
"""
Built on top of model1: add st_gcn and modify the bn layers.
"""
class MultiStageModel(Module):
def __init__(self, opt):
super(MultiStageModel, self).__init__()
self.opt = opt
self.kernel_size = opt.kernel_size
self.d_model = opt.d_model
# self.seq_in = seq_in
self.dct_n = opt.dct_n
# ks = int((kernel_size + 1) / 2)
assert opt.kernel_size == 10
self.in_features = opt.in_features
self.num_stage = opt.num_stage
self.node_n = self.in_features//3
self.encoder_layer_num = 1
self.decoder_layer_num = 2
self.input_n = opt.input_n
self.output_n = opt.output_n
self.gcn_encoder1 = BaseBlock.GCN_encoder(in_channal=3, out_channal=self.d_model,
node_n=self.node_n,
seq_len=self.dct_n,
p_dropout=opt.drop_out,
num_stage=self.encoder_layer_num)
self.gcn_decoder1 = BaseBlock.GCN_decoder(in_channal=self.d_model, out_channal=3,
node_n=self.node_n,
seq_len=self.dct_n*2,
p_dropout=opt.drop_out,
num_stage=self.decoder_layer_num)
self.gcn_encoder2 = BaseBlock.GCN_encoder(in_channal=3, out_channal=self.d_model,
node_n=self.node_n,
seq_len=self.dct_n,
p_dropout=opt.drop_out,
num_stage=self.encoder_layer_num)
self.gcn_decoder2 = BaseBlock.GCN_decoder(in_channal=self.d_model, out_channal=3,
node_n=self.node_n,
seq_len=self.dct_n * 2,
p_dropout=opt.drop_out,
num_stage=self.decoder_layer_num)
self.gcn_encoder3 = BaseBlock.GCN_encoder(in_channal=3, out_channal=self.d_model,
node_n=self.node_n,
seq_len=self.dct_n,
p_dropout=opt.drop_out,
num_stage=self.encoder_layer_num)
self.gcn_decoder3 = BaseBlock.GCN_decoder(in_channal=self.d_model, out_channal=3,
node_n=self.node_n,
seq_len=self.dct_n * 2,
p_dropout=opt.drop_out,
num_stage=self.decoder_layer_num)
self.gcn_encoder4 = BaseBlock.GCN_encoder(in_channal=3, out_channal=self.d_model,
node_n=self.node_n,
seq_len=self.dct_n,
p_dropout=opt.drop_out,
num_stage=self.encoder_layer_num)
self.gcn_decoder4 = BaseBlock.GCN_decoder(in_channal=self.d_model, out_channal=3,
node_n=self.node_n,
seq_len=self.dct_n * 2,
p_dropout=opt.drop_out,
num_stage=self.decoder_layer_num)
def forward(self, src, input_n=10, output_n=10, itera=1):
output_n = self.output_n
input_n = self.input_n
bs = src.shape[0]
# [2000,512,22,20]
dct_n = self.dct_n
idx = list(range(self.kernel_size)) + [self.kernel_size -1] * output_n
# [b,20,66]
input_gcn = src[:, idx].clone()
dct_m, idct_m = util.get_dct_matrix(input_n + output_n)
dct_m = torch.from_numpy(dct_m).float().to(self.opt.cuda_idx)
idct_m = torch.from_numpy(idct_m).float().to(self.opt.cuda_idx)
# [b,20,66] -> [b,66,20]
input_gcn_dct = torch.matmul(dct_m[:dct_n], input_gcn).permute(0, 2, 1)
# [b,66,20]->[b,22,3,20]->[b,3,22,20]->[b,512,22,20]
input_gcn_dct = input_gcn_dct.reshape(bs, self.node_n, -1, self.dct_n).permute(0, 2, 1, 3)
#stage1
latent_gcn_dct = self.gcn_encoder1(input_gcn_dct)
#[b,512,22,20] -> [b, 512, 22, 40]
latent_gcn_dct = torch.cat((latent_gcn_dct, latent_gcn_dct), dim=3)
output_dct_1 = self.gcn_decoder1(latent_gcn_dct)[:, :, :, :dct_n]
#stage2
latent_gcn_dct = self.gcn_encoder2(output_dct_1)
# [b,512,22,20] -> [b, 512, 22, 40]
latent_gcn_dct = torch.cat((latent_gcn_dct, latent_gcn_dct), dim=3)
output_dct_2 = self.gcn_decoder2(latent_gcn_dct)[:, :, :, :dct_n]
#stage3
latent_gcn_dct = self.gcn_encoder3(output_dct_2)
# [b,512,22,20] -> [b, 512, 22, 40]
latent_gcn_dct = torch.cat((latent_gcn_dct, latent_gcn_dct), dim=3)
output_dct_3 = self.gcn_decoder3(latent_gcn_dct)[:, :, :, :dct_n]
#stage4
latent_gcn_dct = self.gcn_encoder4(output_dct_3)
# [b,512,22,20] -> [b, 512, 22, 40]
latent_gcn_dct = torch.cat((latent_gcn_dct, latent_gcn_dct), dim=3)
output_dct_4 = self.gcn_decoder4(latent_gcn_dct)[:, :, :, :dct_n]
output_dct_1 = output_dct_1.permute(0, 2, 1, 3).reshape(bs, -1, dct_n)
output_dct_2 = output_dct_2.permute(0, 2, 1, 3).reshape(bs, -1, dct_n)
output_dct_3 = output_dct_3.permute(0, 2, 1, 3).reshape(bs, -1, dct_n)
output_dct_4 = output_dct_4.permute(0, 2, 1, 3).reshape(bs, -1, dct_n)
# [b,20 66]->[b,20 66]
output_1 = torch.matmul(idct_m[:, :dct_n], output_dct_1.permute(0, 2, 1))
output_2 = torch.matmul(idct_m[:, :dct_n], output_dct_2.permute(0, 2, 1))
output_3 = torch.matmul(idct_m[:, :dct_n], output_dct_3.permute(0, 2, 1))
output_4 = torch.matmul(idct_m[:, :dct_n], output_dct_4.permute(0, 2, 1))
return output_4, output_3, output_2, output_1
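def _dct_roundtrip_sketch(opt):
    # Hedged sketch (not called by training or eval): forward() above keeps only the
    # first dct_n DCT coefficients of the padded sequence and maps predictions back
    # with the truncated inverse matrix; this mirrors that round trip on random data
    # so the tensor shapes are easy to follow.
    dct_m, idct_m = util.get_dct_matrix(opt.input_n + opt.output_n)
    dct_m = torch.from_numpy(dct_m).float()
    idct_m = torch.from_numpy(idct_m).float()
    seq = torch.randn(2, opt.input_n + opt.output_n, opt.in_features)  # [b, T, 66]
    coeffs = torch.matmul(dct_m[:opt.dct_n], seq)                      # [b, dct_n, 66]
    recon = torch.matmul(idct_m[:, :opt.dct_n], coeffs)                # [b, T, 66]
    return recon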
if __name__ == '__main__':
option = Options().parse()
option.d_model = 64
model = MultiStageModel(opt=option).cuda()
print(">>> total params: {:.2f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
src = torch.FloatTensor(torch.randn((32, 35, 66))).cuda()
output, att_map,zero = model(src)
```
#### File: Progressively-Generating-Better-Initial-Guesses-Towards-Next-Stages-forHigh-Quality-Human-Motion-Pre/utils/dpw3_3d.py
```python
from torch.utils.data import Dataset
import pickle as pkl
import numpy as np
from os import walk
from h5py import File
import scipy.io as sio
from utils import data_utils
from matplotlib import pyplot as plt
import torch
class Datasets(Dataset):
def __init__(self, opt, actions=None, split=0):
path_to_data = opt.data_dir
input_n = opt.input_n
output_n = opt.output_n
if split == 1:
their_input_n = 50
else:
their_input_n = input_n
seq_len = their_input_n + output_n
if split == 0:
self.data_path = path_to_data + '/train/'
elif split == 1:
self.data_path = path_to_data + '/validation/'
elif split == 2:
self.data_path = path_to_data + '/test/'
all_seqs = []
files = []
# load data
for (dirpath, dirnames, filenames) in walk(self.data_path):
files.extend(filenames)
for f in files:
with open(self.data_path + f, 'rb') as f:
data = pkl.load(f, encoding='latin1')
joint_pos = data['jointPositions']
for i in range(len(joint_pos)):
seqs = joint_pos[i]
seqs = seqs - seqs[:, 0:3].repeat(24, axis=0).reshape(-1, 72)
n_frames = seqs.shape[0]
fs = np.arange(0, n_frames - seq_len + 1)
fs_sel = fs
for j in np.arange(seq_len - 1):
fs_sel = np.vstack((fs_sel, fs + j + 1))
fs_sel = fs_sel.transpose()
seq_sel = seqs[fs_sel, :]
if len(all_seqs) == 0:
all_seqs = seq_sel
else:
all_seqs = np.concatenate((all_seqs, seq_sel), axis=0)
# self.all_seqs = all_seqs[:, (their_input_n - input_n):, :]
self.dim_used = np.array(range(3, all_seqs.shape[2]))
#all_seqs = all_seqs[:, (their_input_n - input_n):, 3:]
all_seqs = all_seqs[:, (their_input_n - input_n):, :]
self.all_seqs = all_seqs * 1000
def __len__(self):
return np.shape(self.all_seqs)[0]
def __getitem__(self, item):
return self.all_seqs[item]
``` |
{
"source": "706Space/GoogleCloud-speech-to-text-Python",
"score": 3
} |
#### File: 706Space/GoogleCloud-speech-to-text-Python/transcribe.py
```python
import os
import argparse
def cut_audio(path_audio, start=None, end=None, output_format='mp3', path_output=None):
import pydub
import re
info = pydub.utils.mediainfo(path_audio)
    print('Loading audio file {}'.format(path_audio))
sound = pydub.AudioSegment.from_file(path_audio)
duration = float(info['duration'])
cut_sound = sound[int(start/duration * len(sound)): int(end/duration * len(sound))] if start is not None and end is not None else sound
    path_cut = re.sub(r'\..*$', '-cut-{}_{}.{}'.format(start, end, output_format), path_audio)
if path_output:
        cut_sound.export(path_output, format=output_format)  # export in the requested format to the given path
    else:
        cut_sound.export(path_cut, format=output_format)  # export in the requested format next to the source file
print(
'File {} cut to {}'.format(
path_audio, path_output if path_output else path_cut
)
)
file_name = path_output.split('/')[-1] if path_output else path_cut.split('/')[-1]
return path_cut, file_name
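def _cut_audio_example():
    # Hedged usage sketch; the path below is hypothetical and not part of this script.
    # Cuts minute 1..2 of a recording and returns the generated mp3 path and file name.
    return cut_audio('/tmp/example.m4a', start=60, end=120, output_format='mp3')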
def upload_blob(bucket_name, source_file_name, destination_blob_name):
from google.cloud import storage
'''Uploads a file to the bucket.'''
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
storage_uri = os.path.join('gs://', bucket_name, destination_blob_name)
print(
'File {} uploaded to {}'.format(
source_file_name, storage_uri
)
)
return storage_uri
def sample_long_running_recognize(storage_uri, language_code='zh-CN', path_vocab='', sample_rate_hertz=16000):
from google.cloud import speech_v1
from google.cloud.speech_v1p1beta1 import enums
from google.cloud import speech
client = speech_v1.SpeechClient()
encoding = enums.RecognitionConfig.AudioEncoding.MP3 if storage_uri.endswith('.mp3') else enums.RecognitionConfig.AudioEncoding.LINEAR16
recording_device_type = enums.RecognitionMetadata.RecordingDeviceType.PC
interaction_type = enums.RecognitionMetadata.InteractionType.PRESENTATION
speech_context = None
if path_vocab:
with open(path_vocab, 'r') as f:
phrases = f.read().split('\n')
speech_context = speech.types.SpeechContext(phrases=phrases)
metadata = {
'interaction_type': interaction_type,
'recording_device_type': recording_device_type,
}
config = {
'metadata': metadata,
'language_code': language_code,
'sample_rate_hertz': sample_rate_hertz,
'encoding': encoding,
'enable_automatic_punctuation': True,
'audio_channel_count': 2,
'enable_word_time_offsets':True,
}
if speech_context:
config.update({
'speech_contexts': [speech_context]
})
audio = {'uri': storage_uri}
operation = client.long_running_recognize(config, audio)
print('Waiting for operation to complete on file {}...'.format(storage_uri))
response = operation.result()
for result in response.results:
# First alternative is the most probable result
alternative = result.alternatives[0]
return response
def write_transcript(response, path_transcript, start_seconds=0):
transcript = ''
for result in response.results:
alternative = result.alternatives[0]
total_seconds = None
words = alternative.words
if any(words):
start_time = getattr(words[0], 'start_time')
if start_time:
total_seconds = start_time.seconds + start_seconds
h = 0
if total_seconds:
m, s = divmod(int(total_seconds), 60)
h, m = divmod(m, 60)
t = alternative.transcript.encode('utf8')
transcript = transcript + "{:0>2d}:{:0>2d}:{:0>2d} {}\n".format(h, m, s, t)
with open(path_transcript, 'w') as f:
f.write(transcript)
return transcript
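def _timestamp_example(total_seconds=3725):
    # Hedged worked example of the divmod formatting used in write_transcript above:
    # 3725 s -> 1 h, 2 min, 5 s, rendered as "01:02:05".
    m, s = divmod(int(total_seconds), 60)
    h, m = divmod(m, 60)
    return "{:0>2d}:{:0>2d}:{:0>2d}".format(h, m, s)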
def create_parser():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--path_audio',
default='/Users/straynwang/Downloads/tantian.m4a',
type=str,
help='The input audio file path.',
)
parser.add_argument(
'--path_vocab',
default='/tmp/speech-to-text/vocab.txt',
type=str,
help='The input audio file path.',
)
parser.add_argument(
'--language_code',
default='zh-CN',
type=str,
help='Default language for GCP speech2text',
)
parser.add_argument(
'--start_min',
default=None,
type=float,
help='Starting minute of the audio',
)
parser.add_argument(
'--end_min',
default=None,
type=float,
help='Ending minute of the audio',
)
return parser
if __name__ == '__main__':
args = create_parser().parse_args()
    start = args.start_min * 60 if args.start_min is not None else None
    end = args.end_min * 60 if args.end_min is not None else None
BUCKET_NAME = '706-bucket'
PATH_WORK = '/tmp/speech-to-text/'
path_audio_cut, audio_file_name = cut_audio(args.path_audio, start, end)
storage_uri = upload_blob(bucket_name=BUCKET_NAME, source_file_name=path_audio_cut, destination_blob_name=os.path.join('audio', audio_file_name))
# transcibing remote audio
response = sample_long_running_recognize(storage_uri, args.language_code, args.path_vocab)
# write transcript to local
os.system('mkdir -p {}'.format(PATH_WORK))
path_transcript = os.path.join(PATH_WORK, 'transcript-{}-{}_{}.txt'.format(audio_file_name.replace('.mp3', ''), start, end))
    transcript = write_transcript(response, path_transcript, start_seconds=start if start else 0)
# upload transcript to remote
storage_uri = upload_blob(bucket_name=BUCKET_NAME, source_file_name=path_transcript, destination_blob_name=os.path.join('text', path_transcript.split('/')[-1]))
print(transcript)
``` |
{
"source": "708yamaguchi/MaixPy_scripts",
"score": 3
} |
#### File: application/gimbal/Gimbal.py
```python
import time, sys
from machine import Timer,PWM
from math import pi
from machine import UART
class Servo:
def __init__(self, pwm, dir=50, duty_min=2.5, duty_max=12.5):
self.value = dir
self.pwm = pwm
self.duty_min = duty_min
self.duty_max = duty_max
self.duty_range = duty_max -duty_min
self.enable(True)
self.pwm.duty(self.value/100*self.duty_range+self.duty_min)
def enable(self, en):
if en:
self.pwm.enable()
else:
self.pwm.disable()
def dir(self, percentage):
if percentage > 100:
percentage = 100
elif percentage < 0:
percentage = 0
self.pwm.duty(percentage/100*self.duty_range+self.duty_min)
def drive(self, inc):
self.value += inc
if self.value > 100:
self.value = 100
elif self.value < 0:
self.value = 0
self.pwm.duty(self.value/100*self.duty_range+self.duty_min)
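def _servo_duty_example(percentage=50, duty_min=2.5, duty_max=12.5):
    # Hedged worked example of the Servo mapping above: a 0..100 position is scaled
    # linearly into the 2.5..12.5 % duty window (0.5..2.5 ms at 50 Hz), so 50 gives a
    # 7.5 % duty cycle, i.e. the 1.5 ms centre pulse of a typical hobby servo.
    return percentage / 100 * (duty_max - duty_min) + duty_min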
class PID:
_kp = _ki = _kd = _integrator = _imax = 0
_last_error = _last_t = 0
_RC = 1/(2 * pi * 20)
def __init__(self, p=0, i=0, d=0, imax=0):
self._kp = float(p)
self._ki = float(i)
self._kd = float(d)
self._imax = abs(imax)
self._last_derivative = None
def get_pid(self, error, scaler):
tnow = time.ticks_ms()
dt = tnow - self._last_t
output = 0
if self._last_t == 0 or dt > 1000:
dt = 0
self.reset_I()
self._last_t = tnow
delta_time = float(dt) / float(1000)
output += error * self._kp
if abs(self._kd) > 0 and dt > 0:
if self._last_derivative == None:
derivative = 0
self._last_derivative = 0
else:
derivative = (error - self._last_error) / delta_time
derivative = self._last_derivative + \
((delta_time / (self._RC + delta_time)) * \
(derivative - self._last_derivative))
self._last_error = error
self._last_derivative = derivative
output += self._kd * derivative
output *= scaler
if abs(self._ki) > 0 and dt > 0:
self._integrator += (error * self._ki) * scaler * delta_time
if self._integrator < -self._imax: self._integrator = -self._imax
elif self._integrator > self._imax: self._integrator = self._imax
output += self._integrator
return output
def reset_I(self):
self._integrator = 0
self._last_derivative = None
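def _pid_convergence_sketch():
    # Hedged sketch, not used by the gimbal loop below: drive a scalar error toward
    # zero with the PID class above, the same way Gimbal.run() feeds the pixel error
    # from the camera into get_pid() with scaler=1.
    pid = PID(p=0.23, i=0, d=0.015, imax=0)
    error = 10.0
    for _ in range(50):
        out = pid.get_pid(error, 1)  # one correction step
        error -= out                 # pretend the servo removed that much error
        time.sleep_ms(10)            # MicroPython sleep; keeps dt realistic for the D term
    return error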
class Gimbal:
def __init__(self, pitch, pid_pitch, roll=None, pid_roll=None, yaw=None, pid_yaw=None):
self._pitch = pitch
self._roll = roll
self._yaw = yaw
self._pid_pitch = pid_pitch
self._pid_roll = pid_roll
self._pid_yaw = pid_yaw
def set_out(self, pitch, roll, yaw=None):
pass
def run(self, pitch_err, roll_err=50, yaw_err=50, pitch_reverse=False, roll_reverse=False, yaw_reverse=False):
out = self._pid_pitch.get_pid(pitch_err, 1)
# print("err: {}, out: {}".format(pitch_err, out))
if pitch_reverse:
out = - out
self._pitch.drive(out)
if self._roll:
out = self._pid_roll.get_pid(roll_err, 1)
if roll_reverse:
out = - out
self._roll.drive(out)
if self._yaw:
out = self._pid_yaw.get_pid(yaw_err, 1)
if yaw_reverse:
out = - out
self._yaw.drive(out)
if __name__ == "__main__":
'''
servo:
freq: 50 (Hz)
T: 1/50 = 0.02s = 20ms
duty: [0.5ms, 2.5ms] -> [0.025, 0.125] -> [2.5%, 12.5%]
pin:
IO24 <--> pitch
IO25 <--> roll
'''
init_pitch = 80 # init position, value: [0, 100], means minimum angle to maxmum angle of servo
init_roll = 50 # 50 means middle
sensor_hmirror = False
sensor_vflip = False
lcd_rotation = 2
lcd_mirror = True
pitch_pid = [0.23, 0, 0.015, 0] # P I D I_max
roll_pid = [0.23, 0, 0.015, 0] # P I D I_max
target_err_range = 10 # target error output range, default [0, 10]
target_ignore_limit = 0.02 # when target error < target_err_range*target_ignore_limit , set target error to 0
pitch_reverse = False # reverse out value direction
roll_reverse = True # ..
import sensor,image,lcd
import KPU as kpu
class Target():
def __init__(self, out_range=10, ignore_limit=0.02, hmirror=False, vflip=False, lcd_rotation=2, lcd_mirror=True):
self.pitch = 0
self.roll = 0
self.out_range = out_range
self.ignore = ignore_limit
self.task_fd = kpu.load(0x300000) # face model addr in flash
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
kpu.init_yolo2(self.task_fd, 0.5, 0.3, 5, anchor)
lcd.init()
lcd.rotation(lcd_rotation)
lcd.mirror(lcd_mirror)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
if hmirror:
sensor.set_hmirror(1)
if vflip:
sensor.set_vflip(1)
def get_target_err(self):
img = sensor.snapshot()
code = kpu.run_yolo2(self.task_fd, img)
if code:
max_area = 0
max_i = 0
for i, j in enumerate(code):
a = j.w()*j.h()
if a > max_area:
max_i = i
max_area = a
img = img.draw_rectangle(code[max_i].rect())
self.pitch = (code[max_i].y() + code[max_i].h() / 2)/240*self.out_range*2 - self.out_range
self.roll = (code[max_i].x() + code[max_i].w() / 2)/320*self.out_range*2 - self.out_range
# limit
if abs(self.pitch) < self.out_range*self.ignore:
self.pitch = 0
if abs(self.roll) < self.out_range*self.ignore:
self.roll = 0
img = img.draw_cross(160, 120)
lcd.display(img)
return (self.pitch, self.roll)
else:
img = img.draw_cross(160, 120)
lcd.display(img)
return (0, 0)
target = Target(target_err_range, target_ignore_limit, sensor_hmirror, sensor_vflip, lcd_rotation, lcd_mirror)
tim0 = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PWM)
tim1 = Timer(Timer.TIMER0, Timer.CHANNEL1, mode=Timer.MODE_PWM)
pitch_pwm = PWM(tim0, freq=50, duty=0, pin=24)
roll_pwm = PWM(tim1, freq=50, duty=0, pin=25)
pitch = Servo(pitch_pwm, dir=init_pitch)
roll = Servo(roll_pwm, dir=init_roll)
pid_pitch = PID(p=pitch_pid[0], i=pitch_pid[1], d=pitch_pid[2], imax=pitch_pid[3])
pid_roll = PID(p=roll_pid[0], i=roll_pid[1], d=roll_pid[2], imax=roll_pid[3])
gimbal = Gimbal(pitch, pid_pitch, roll, pid_roll)
target_pitch = init_pitch
target_roll = init_roll
t = time.ticks_ms()
_dir = 0
t0 = time.ticks_ms()
stdin = UART.repl_uart()
while 1:
# get target error
err_pitch, err_roll = target.get_target_err()
# interval limit to > 10ms
if time.ticks_ms() - t0 < 10:
continue
t0 = time.ticks_ms()
# run
gimbal.run(err_pitch, err_roll, pitch_reverse = pitch_reverse, roll_reverse=roll_reverse)
```
#### File: others/bmm150/bmm150.py
```python
from machine import I2C
import time
import ustruct
I2C_MODE = 1
SPI_MODE = 2
ENABLE_POWER = 1
DISABLE_POWER = 0
POKARITY_HIGH = 1
POKARITY_LOW = 0
ERROR = -1
SELF_TEST_XYZ_FALL = 0
SELF_TEST_YZ_FAIL = 1
SELF_TEST_XZ_FAIL = 2
SELF_TEST_Z_FAIL = 3
SELF_TEST_XY_FAIL = 4
SELF_TEST_Y_FAIL = 5
SELF_TEST_X_FAIL = 6
SELF_TEST_XYZ_OK = 7
ENABLE_DRDY = 1
DISABLE_DRDY = 0
INTERRUPUT_LATCH_ENABLE = 1
INTERRUPUT_LATCH_DISABLE = 0
MEASUREMENT_X_ENABLE = 0
MEASUREMENT_Y_ENABLE = 0
MEASUREMENT_Z_ENABLE = 0
MEASUREMENT_X_DISABLE = 1
MEASUREMENT_Y_DISABLE = 1
MEASUREMENT_Z_DISABLE = 1
DATA_OVERRUN_ENABLE = 1
DATA_OVERRUN_DISABLE = 0
OVERFLOW_INT_ENABLE = 1
OVERFLOW_INT_DISABLE = 0
LOW_INTERRUPT_X_ENABLE = 0
LOW_INTERRUPT_Y_ENABLE = 0
LOW_INTERRUPT_Z_ENABLE = 0
LOW_INTERRUPT_X_DISABLE = 1
LOW_INTERRUPT_Y_DISABLE = 1
LOW_INTERRUPT_Z_DISABLE = 1
HIGH_INTERRUPT_X_ENABLE = 0
HIGH_INTERRUPT_Y_ENABLE = 0
HIGH_INTERRUPT_Z_ENABLE = 0
HIGH_INTERRUPT_X_DISABLE = 1
HIGH_INTERRUPT_Y_DISABLE = 1
HIGH_INTERRUPT_Z_DISABLE = 1
CHANNEL_X = 1
CHANNEL_Y = 2
CHANNEL_Z = 3
ENABLE_INTERRUPT_PIN = 1
DISABLE_INTERRUPT_PIN = 0
POWERMODE_NORMAL = 0x00
POWERMODE_FORCED = 0x01
POWERMODE_SLEEP = 0x03
POWERMODE_SUSPEND = 0x04
PRESETMODE_LOWPOWER = 0x01
PRESETMODE_REGULAR = 0x02
PRESETMODE_HIGHACCURACY = 0x03
PRESETMODE_ENHANCED = 0x04
REPXY_LOWPOWER = 0x01
REPXY_REGULAR = 0x04
REPXY_ENHANCED = 0x07
REPXY_HIGHACCURACY = 0x17
REPZ_LOWPOWER = 0x01
REPZ_REGULAR = 0x07
REPZ_ENHANCED = 0x0D
REPZ_HIGHACCURACY = 0x29
CHIP_ID_VALUE = 0x32
CHIP_ID_REGISTER = 0x40
REG_DATA_X_LSB = 0x42
REG_DATA_READY_STATUS = 0x48
REG_INTERRUPT_STATUS = 0x4a
CTRL_POWER_REGISTER = 0x4b
MODE_RATE_REGISTER = 0x4c
REG_INT_CONFIG = 0x4D
REG_AXES_ENABLE = 0x4E
REG_LOW_THRESHOLD = 0x4F
REG_HIGH_THRESHOLD = 0x50
REG_REP_XY = 0x51
REG_REP_Z = 0x52
RATE_10HZ = 0x00 # (default rate)
RATE_02HZ = 0x01
RATE_06HZ = 0x02
RATE_08HZ = 0x03
RATE_15HZ = 0x04
RATE_20HZ = 0x05
RATE_25HZ = 0x06
RATE_30HZ = 0x07
DIG_X1 = 0x5D
DIG_Y1 = 0x5E
DIG_Z4_LSB = 0x62
DIG_Z4_MSB = 0x63
DIG_X2 = 0x64
DIG_Y2 = 0x65
DIG_Z2_LSB = 0x68
DIG_Z2_MSB = 0x69
DIG_Z1_LSB = 0x6A
DIG_Z1_MSB = 0x6B
DIG_XYZ1_LSB = 0x6C
DIG_XYZ1_MSB = 0x6D
DIG_Z3_LSB = 0x6E
DIG_Z3_MSB = 0x6F
DIG_XY2 = 0x70
DIG_XY1 = 0x71
BMM150_ADDR = 19
class trim_register:
def __init__(self):
        self.dig_x1 = 0
        self.dig_y1 = 0
        self.dig_x2 = 0
        self.dig_y2 = 0
        self.dig_z1 = 0
        self.dig_z2 = 0
        self.dig_z3 = 0
        self.dig_z4 = 0
        self.dig_xy1 = 0
        self.dig_xy2 = 0
        self.dig_xyz1 = 0
_trim_data = trim_register()
class geomagnetic_data:
def __init__(self):
        self.x = 0
        self.y = 0
        self.z = 0
        self.r = 0
_geomagnetic = geomagnetic_data()
class BMM150:
__txbuf = [0]
def __init__(self, i2c_dev, i2c_addr=BMM150_ADDR):
self._offset = (0, 0, 0)
self._scale = (1, 1, 1)
self.i2cDev = i2c_dev
self.bmm150Addr = i2c_addr
scan_list = self.i2cDev.scan()
print(scan_list)
if self.bmm150Addr not in scan_list:
raise Exception("Error: Unable connect pmu_bmm150!")
'''
@brief get chip id
@return chip id
'''
def get_chip_id(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, CHIP_ID_REGISTER, 1)
return rslt[0]
'''
init sensor
return 0 is init success
-1 is init faild
'''
def sensor_init(self):
self.set_power_bit(ENABLE_POWER)
chip_id = self.get_chip_id()
print("chip_id:", hex(chip_id))
if chip_id == CHIP_ID_VALUE:
self.get_trim_data()
return 0
else:
return -1
# soft reset
def soft_reset(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, CTRL_POWER_REGISTER, 1)
self.__txbuf[0] = rslt[0] | 0x82
self.i2cDev.writeto_mem(self.bmm150Addr, CTRL_POWER_REGISTER, self.__txbuf[0])
'''
@brief set bmm150 self test
@retval
SELF_TEST_XYZ_FALL = 0
SELF_TEST_YZ_FAIL = 1
SELF_TEST_XZ_FAIL = 2
SELF_TEST_Z_FAIL = 3
SELF_TEST_XY_FAIL = 4
SELF_TEST_Y_FAIL = 5
SELF_TEST_X_FAIL = 6
SELF_TEST_XYZ_OK = 7
'''
def self_test(self):
self.set_operation_mode(POWERMODE_SLEEP)
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
rslt1 = []
rslt1 = self.change_date(rslt, 1)
        self.__txbuf[0] = rslt1[0] | 0x01  # assignment, not comparison: set the self-test bit
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, self.__txbuf[0])
time.sleep(1)
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
rslt1 = []
rslt1 = self.change_date(rslt, 1)
if (rslt1[0] & 0x01) == 0:
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, REG_DATA_X_LSB, 5)
rslt1 = []
rslt1 = self.change_date(rslt, 5)
number = (rslt1[0] & 0x01) | (rslt1[2] & 0x01) << 1 | (rslt1[4] & 0x01) << 2
return number
else:
return -1
'''
@brief set power bit
@param ctrl is enable/disable power
DISABLE_POWER is disable power
ENABLE_POWER is enable power
'''
def set_power_bit(self, ctrl):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, CTRL_POWER_REGISTER, 1)
rslt1 = self.change_date(rslt, 1)
if ctrl == DISABLE_POWER:
self.__txbuf[0] = rslt1[0] & 0xFE
self.i2cDev.writeto_mem(self.bmm150Addr, CTRL_POWER_REGISTER, self.__txbuf[0])
else:
self.__txbuf[0] = rslt1[0] | 0x01
print("power enable", hex(self.__txbuf[0]))
self.i2cDev.writeto_mem(self.bmm150Addr, CTRL_POWER_REGISTER, self.__txbuf[0])
'''
@brief get power bit
@return power bit
DISABLE_POWER is disable power
ENABLE_POWER is enable power
'''
def get_power_bit(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,CTRL_POWER_REGISTER, 1)
return rslt[0] & 0x01
'''
@brief set opration mode
@param modes is operation mode
POWERMODE_NORMAL
POWERMODE_FORCED
POWERMODE_SLEEP
POWERMODE_SUSPEND
'''
def set_operation_mode(self, modes):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
rslt1 = []
rslt1 = self.change_date(rslt, 1)
if modes == POWERMODE_NORMAL:
self.set_power_bit(ENABLE_POWER)
rslt1[0] = rslt1[0] & 0xf9
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif modes == POWERMODE_FORCED:
rslt1[0] = (rslt1[0] & 0xf9) | 0x02
self.set_power_bit(ENABLE_POWER)
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif modes == POWERMODE_SLEEP:
self.set_power_bit(ENABLE_POWER)
rslt1[0] = (rslt1[0] & 0xf9) | 0x04
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
else:
self.set_power_bit(DISABLE_POWER)
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
'''
@brief get opration mode
@return modes is operation mode
POWERMODE_NORMAL = 0x00
POWERMODE_FORCED = 0x01
POWERMODE_SLEEP = 0x03
POWERMODE_SUSPEND = 0x04
'''
def get_operation_mode(self):
if self.get_power_bit() == 0:
return POWERMODE_SUSPEND
else:
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
rslt1 = self.change_date(rslt, 1)
return hex((rslt1[0] & 0x03))
'''
@brief set rate
@param rate
RATE_10HZ #(default rate)
RATE_02HZ
RATE_06HZ
RATE_08HZ
RATE_15HZ
RATE_20HZ
RATE_25HZ
RATE_30HZ
'''
def set_rate(self, rates):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
rslt1 = []
rslt1 = self.change_date(rslt, 1)
if rates == RATE_10HZ:
rslt1[0] = rslt1[0] & 0xc7
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_02HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x08
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_06HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x10
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_08HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x18
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_15HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x20
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_20HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x28
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_25HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x30
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
elif rates == RATE_30HZ:
rslt1[0] = (rslt1[0] & 0xc7) | 0x38
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
else:
rslt1[0] = rslt1[0] & 0xc7
self.i2cDev.writeto_mem(self.bmm150Addr, MODE_RATE_REGISTER, rslt1[0])
'''
@brief get rates
@return rates
RATE_10HZ #(default rate)
RATE_02HZ
RATE_06HZ
RATE_08HZ
RATE_15HZ
RATE_20HZ
RATE_25HZ
RATE_30HZ
'''
def get_rate(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, MODE_RATE_REGISTER, 1)
return (rslt[0] & 0x38) >> 3
'''
@brief set preset mode
@param modes
PRESETMODE_LOWPOWER
PRESETMODE_REGULAR
PRESETMODE_HIGHACCURACY
PRESETMODE_ENHANCED
'''
def set_preset_mode(self, modes):
if modes == PRESETMODE_LOWPOWER:
self.set_rate(RATE_10HZ)
self.set_xy_rep(REPXY_LOWPOWER)
self.set_z_rep(REPZ_LOWPOWER)
elif modes == PRESETMODE_REGULAR:
self.set_rate(RATE_10HZ)
self.set_xy_rep(REPXY_REGULAR)
self.set_z_rep(REPZ_REGULAR)
elif modes == PRESETMODE_HIGHACCURACY:
self.set_rate(RATE_20HZ)
self.set_xy_rep(REPXY_HIGHACCURACY)
self.set_z_rep(REPZ_HIGHACCURACY)
elif modes == PRESETMODE_ENHANCED:
self.set_rate(RATE_10HZ)
self.set_xy_rep(REPXY_ENHANCED)
self.set_z_rep(REPZ_ENHANCED)
else:
self.set_rate(RATE_10HZ)
self.set_xy_rep(REPXY_LOWPOWER)
self.set_z_rep(REPZ_LOWPOWER)
'''
@brief set xy rep
@param modes
REPXY_LOWPOWER
REPXY_REGULAR
REPXY_ENHANCED
REPXY_HIGHACCURACY
'''
def set_xy_rep(self, modes):
self.__txbuf[0] = modes
if modes == REPXY_LOWPOWER:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_XY, self.__txbuf[0])
elif modes == REPXY_REGULAR:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_XY, self.__txbuf[0])
elif modes == REPXY_ENHANCED:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_XY, self.__txbuf[0])
elif modes == REPXY_HIGHACCURACY:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_XY, self.__txbuf[0])
else:
            self.__txbuf[0] = REPXY_LOWPOWER  # default to low-power repetitions
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_XY, self.__txbuf[0])
'''
@brief set z rep
@param modes
REPZ_LOWPOWER
REPZ_REGULAR
REPZ_ENHANCED
REPZ_HIGHACCURACY
'''
def set_z_rep(self, modes):
self.__txbuf[0] = modes
if modes == REPZ_LOWPOWER:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_Z, self.__txbuf[0])
elif modes == REPZ_REGULAR:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_Z, self.__txbuf[0])
elif modes == REPZ_ENHANCED:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_Z, self.__txbuf[0])
elif modes == REPZ_HIGHACCURACY:
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_Z, self.__txbuf[0])
else:
            self.__txbuf[0] = REPZ_LOWPOWER  # default to low-power repetitions
self.i2cDev.writeto_mem(self.bmm150Addr,REG_REP_Z, self.__txbuf[0])
'''
@brief get trim data
'''
def get_trim_data(self):
trim_x1_y1 = self.i2cDev.readfrom_mem( self.bmm150Addr,DIG_X1, 2)
trim_xyz_data = self.i2cDev.readfrom_mem( self.bmm150Addr,DIG_Z4_LSB, 4)
trim_xy1_xy2 = self.i2cDev.readfrom_mem( self.bmm150Addr,DIG_Z2_LSB, 10)
_trim_data.dig_x1 = self.uint8_to_int8(trim_x1_y1[0])
_trim_data.dig_y1 = self.uint8_to_int8(trim_x1_y1[1])
_trim_data.dig_x2 = self.uint8_to_int8(trim_xyz_data[2])
_trim_data.dig_y2 = self.uint8_to_int8(trim_xyz_data[3])
temp_msb = int(trim_xy1_xy2[3]) << 8
_trim_data.dig_z1 = int(temp_msb | trim_xy1_xy2[2])
temp_msb = int(trim_xy1_xy2[1] << 8)
_trim_data.dig_z2 = int(temp_msb | trim_xy1_xy2[0])
temp_msb = int(trim_xy1_xy2[7] << 8)
_trim_data.dig_z3 = temp_msb | trim_xy1_xy2[6]
temp_msb = int(trim_xyz_data[1] << 8)
_trim_data.dig_z4 = int(temp_msb | trim_xyz_data[0])
_trim_data.dig_xy1 = trim_xy1_xy2[9]
_trim_data.dig_xy2 = self.uint8_to_int8(trim_xy1_xy2[8])
temp_msb = int((trim_xy1_xy2[5] & 0x7F) << 8)
_trim_data.dig_xyz1 = int(temp_msb | trim_xy1_xy2[4])
'''
@brief get geomagnetic
'''
def get_geomagnetic(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_DATA_X_LSB, 8)
# rslt = self.change_date(rslt, 8)
rslt1 = ustruct.unpack("bbbbbbbb", rslt)
_geomagnetic.x = ((rslt1[0] & 0xF8) >> 3) | (rslt1[1] << 5)
_geomagnetic.y = ((rslt1[2] & 0xF8) >> 3) | (rslt1[3] << 5)
_geomagnetic.z = ((rslt1[4] & 0xFE) >> 1) | (rslt1[5] << 7)
_geomagnetic.r = ((rslt1[6] & 0xFC) >> 2) | (rslt1[7] << 6)
rslt2 = [rslt1[0],rslt1[1],rslt1[2]]
rslt2[0] = self.compenstate_x(_geomagnetic.x, _geomagnetic.r)
rslt2[1] = self.compenstate_y(_geomagnetic.y, _geomagnetic.r)
rslt2[2] = self.compenstate_z(_geomagnetic.z, _geomagnetic.r)
return rslt2
'''
@brief uint8_t to int8_t
'''
def uint8_to_int8(self, number):
if number <= 127:
return number
else:
return (256 - number) * -1
'''
    @brief compensate x
'''
def compenstate_x(self, data_x, data_r):
if data_x != -4096:
if data_r != 0:
process_comp_x0 = data_r
elif _trim_data.dig_xyz1 != 0:
process_comp_x0 = _trim_data.dig_xyz1
else:
process_comp_x0 = 0
if process_comp_x0 != 0:
process_comp_x1 = int(_trim_data.dig_xyz1 * 16384)
process_comp_x2 = int(process_comp_x1 / process_comp_x0 - 0x4000)
retval = process_comp_x2
process_comp_x3 = retval * retval
process_comp_x4 = _trim_data.dig_xy2 * (process_comp_x3 / 128)
process_comp_x5 = _trim_data.dig_xy1 * 128
process_comp_x6 = retval * process_comp_x5
process_comp_x7 = (process_comp_x4 + process_comp_x6) / 512 + 0x100000
process_comp_x8 = _trim_data.dig_x2 + 0xA0
process_comp_x9 = (process_comp_x8 * process_comp_x7) / 4096
process_comp_x10 = data_x * process_comp_x9
retval = process_comp_x10 / 8192
retval = (retval + _trim_data.dig_x1 * 8) / 16
else:
retval = -32368
else:
retval = -32768
return retval
'''
    @brief compensate y
'''
def compenstate_y(self, data_y, data_r):
if data_y != -4096:
if data_r != 0:
process_comp_y0 = data_r
elif _trim_data.dig_xyz1 != 0:
process_comp_y0 = _trim_data.dig_xyz1
else:
process_comp_y0 = 0
if process_comp_y0 != 0:
process_comp_y1 = int(_trim_data.dig_xyz1 * 16384 / process_comp_y0)
process_comp_y2 = int(process_comp_y1 - 0x4000)
retval = process_comp_y2
process_comp_y3 = retval * retval
process_comp_y4 = _trim_data.dig_xy2 * (process_comp_y3 / 128)
process_comp_y5 = _trim_data.dig_xy1 * 128
process_comp_y6 = (process_comp_y4 + process_comp_y5 * retval) / 512
process_comp_y7 = _trim_data.dig_y2 + 0xA0
process_comp_y8 = ((process_comp_y6 + 0x100000) * process_comp_y7) / 4096
process_comp_y9 = data_y * process_comp_y8
retval = process_comp_y9 / 8192
retval = (retval + _trim_data.dig_y1 * 8) / 16
else:
retval = -32368
else:
retval = -32768
return retval
'''
    @brief compensate z
'''
def compenstate_z(self, data_z, data_r):
if data_z != -16348:
if _trim_data.dig_z2 != 0 and _trim_data.dig_z1 != 0 and _trim_data.dig_xyz1 != 0 and data_r != 0:
process_comp_z0 = data_r - _trim_data.dig_xyz1
process_comp_z1 = (_trim_data.dig_z3 * process_comp_z0) / 4
process_comp_z2 = (data_z - _trim_data.dig_z4) * 32768
process_comp_z3 = _trim_data.dig_z1 * data_r * 2
process_comp_z4 = (process_comp_z3 + 32768) / 65536
retval = (process_comp_z2 - process_comp_z1) / (_trim_data.dig_z2 + process_comp_z4)
if retval > 32767:
retval = 32367
elif retval < -32367:
retval = -32367
retval = retval / 16
else:
retval = -32768
else:
retval = -32768
return retval
'''
@brief Enable or disable the data readly mode pin, configure the polarity of the data ready mode pin
@param modes Enable or disable the pin :
enable : ENABLE_DRDY
disable : DISABLE_DRDY (default mode)
@param polarity Active level
high : POKARITY_HIGH (default active high level )
low : POKARITY_LOW
'''
def set_data_readly_interrupt_pin(self, modes, polarity):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_INT_CONFIG, 1)
if modes == DISABLE_DRDY:
self.__txbuf[0] = rslt[0] & 0x7F
else:
self.__txbuf[0] = rslt[0] | 0x80
if polarity == POKARITY_LOW:
self.__txbuf[0] = self.__txbuf[0] & 0xFB
else:
self.__txbuf[0] = self.__txbuf[0] | 0x04
self.i2cDev.writeto_mem(self.bmm150Addr,REG_INT_CONFIG, self.__txbuf[0])
'''
@brief Get data ready status
@return status data readly status
1 is data is ready
0 is data is not ready
'''
def get_data_readly_state(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, REG_DATA_READY_STATUS, 1)
return (rslt[0] & 0x01)
'''
@brief set measurement xyz
@param channel_x channel x selection:
MEASUREMENT_X_ENABLE (Default x-axis channel enabled)
MEASUREMENT_X_DISABLE
@param channel_y channel y selection:
MEASUREMENT_Y_ENABLE (Default y-axis channel enabled)
MEASUREMENT_Y_DISABLE
@param channel_z channel z selection:
MEASUREMENT_Z_ENABLE (Default z-axis channel enabled)
MEASUREMENT_Z_DISABLE
'''
def set_measurement_xyz(self, channel_x, channel_y, channel_z):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, REG_AXES_ENABLE, 1)
if channel_x == MEASUREMENT_X_DISABLE:
self.__txbuf[0] = rslt[0] | 0x08
else:
self.__txbuf[0] = rslt[0] & 0xF7
if channel_y == MEASUREMENT_Y_DISABLE:
self.__txbuf[0] = self.__txbuf[0] | 0x10
else:
self.__txbuf[0] = self.__txbuf[0] & 0xEF
if channel_z == MEASUREMENT_Z_DISABLE:
self.__txbuf[0] = self.__txbuf[0] | 0x20
else:
self.__txbuf[0] = self.__txbuf[0] & 0xDF
self.i2cDev.writeto_mem(self.bmm150Addr, REG_AXES_ENABLE, self.__txbuf[0])
'''
@brief get measurement xyz
@param channel channel ? selection:
CHANNEL_X
CHANNEL_Y
CHANNEL_Z
@return
1 is enable measurement
0 is disable measurement
'''
def get_measurement_state_xyz(self, channel):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_AXES_ENABLE, 1)
if channel == CHANNEL_X:
if (rslt[0] & 0x08) == 0:
return 1
elif channel == CHANNEL_Y:
if (rslt[0] & 0x10) == 0:
return 1
elif channel == CHANNEL_Z:
if (rslt[0] & 0x20) == 0:
return 1
else:
return 0
return 0
'''
@brief Enable or disable the interrupt pin, configure the polarity of the interrupt pin
@param modes Enable or disable the pin :
enable : ENABLE_INTERRUPT_PIN
disable : DISABLE_INTERRUPT_PIN (default mode)
@param polarity Active level
high : POKARITY_HIGH (default active high level )
low : POKARITY_LOW
'''
def set_interrupt_pin(self, modes, polarity):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_AXES_ENABLE, 1)
if modes == DISABLE_INTERRUPT_PIN:
self.__txbuf[0] = rslt[0] & 0xBF
else:
self.__txbuf[0] = rslt[0] | 0x40
if polarity == POKARITY_LOW:
self.__txbuf[0] = self.__txbuf[0] & 0xFE
else:
self.__txbuf[0] = self.__txbuf[0] | 0x01
self.i2cDev.writeto_mem(self.bmm150Addr,REG_AXES_ENABLE, self.__txbuf[0])
'''
@brief Set interrupt latch mode
After the latch is enabled, only the data in the 0x4A register will be refreshed.
No latch, data is refreshed in real time.
@param modes Latched or not latched
latch : INTERRUPUT_LATCH_ENABLE (dafault interrupt latch)
no latch : INTERRUPUT_LATCH_DISABLE
'''
def set_interruput_latch(self, modes):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_AXES_ENABLE, 1)
if modes == INTERRUPUT_LATCH_DISABLE:
self.__txbuf[0] = rslt[0] & 0xFD
else:
self.__txbuf[0] = rslt[0] | 0x02
self.i2cDev.writeto_mem(self.bmm150Addr,REG_AXES_ENABLE, self.__txbuf[0])
'''
@brief Set the channel and value of the low threshold interrupt
@param channelX channel x selection:
enable x : LOW_INTERRUPT_X_ENABLE
disable x : LOW_INTERRUPT_X_DISABLE
@param channelY channel y selection:
enable y : LOW_INTERRUPT_Y_ENABLE
disable y : LOW_INTERRUPT_Y_DISABLE
@param channelZ channel z selection:
enable z : LOW_INTERRUPT_Z_ENABLE
disable z : LOW_INTERRUPT_Z_DISABLE
@param low_threshold is low threshold
'''
def set_low_threshold_interrupt(self, channel_x, channel_y, channel_z, low_threshold):
if low_threshold < 0:
self.__txbuf[0] = (low_threshold * -1) | 0x80
else:
self.__txbuf[0] = low_threshold
self.i2cDev.writeto_mem(self.bmm150Addr,REG_LOW_THRESHOLD, self.__txbuf[0])
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_INT_CONFIG, 1)
if channel_x == LOW_INTERRUPT_X_DISABLE:
self.__txbuf[0] = rslt[0] | 0x01
#print(self.__txbuf[0])
else:
self.__txbuf[0] = rslt[0] & 0xFE
# print(self.__txbuf[0])
if channel_y == LOW_INTERRUPT_Y_DISABLE:
self.__txbuf[0] = self.__txbuf[0] | 0x02
#print(self.__txbuf[0])
else:
self.__txbuf[0] = self.__txbuf[0] & 0xFC
# print(self.__txbuf[0])
        if channel_z == LOW_INTERRUPT_Z_DISABLE:  # was channel_x; bit 0x04 controls the z channel
self.__txbuf[0] = self.__txbuf[0] | 0x04
#print(self.__txbuf[0])
else:
self.__txbuf[0] = self.__txbuf[0] & 0xFB
# print(self.__txbuf[0])
self.i2cDev.writeto_mem(self.bmm150Addr,REG_INT_CONFIG, self.__txbuf[0])
# print(self.__txbuf[0])
self.set_interrupt_pin(ENABLE_INTERRUPT_PIN, POKARITY_HIGH)
self.get_geomagnetic()
'''
@brief Get the channel low threshold Interrupt status
@return status interrupt status
1-7 is interrupt
0 is no interrupt
'''
def get_low_threshold_interrupt_state(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_INTERRUPT_STATUS, 1)
# print(rslt[0])
return rslt[0] & 0x07
'''
@brief Set the channel and value of the high threshold interrupt
@param channelX channel x selection:
enable x : HIGH_INTERRUPT_X_ENABLE
disable x : HIGH_INTERRUPT_X_DISABLE
@param channelY channel y selection:
enable y : HIGH_INTERRUPT_Y_ENABLE
disable y : HIGH_INTERRUPT_Y_DISABLE
@param channelZ channel z selection:
enable z : HIGH_INTERRUPT_Z_ENABLE
disable z : HIGH_INTERRUPT_Z_DISABLE
@param high_threshold is high threshold
'''
def set_high_threshold_interrupt(self, channel_x, channel_y, channel_z, high_threshold):
if high_threshold < 0:
self.__txbuf[0] = (high_threshold * -1) | 0x80
else:
self.__txbuf[0] = high_threshold
self.i2cDev.writeto_mem(self.bmm150Addr,REG_HIGH_THRESHOLD, self.__txbuf[0])
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr, REG_INT_CONFIG, 1)
if channel_x == HIGH_INTERRUPT_X_DISABLE:
self.__txbuf[0] = rslt[0] | 0x08
else:
self.__txbuf[0] = rslt[0] & 0xF7
if channel_y == HIGH_INTERRUPT_Y_DISABLE:
self.__txbuf[0] = self.__txbuf[0] | 0x10
else:
self.__txbuf[0] = self.__txbuf[0] & 0xEF
        if channel_z == HIGH_INTERRUPT_Z_DISABLE:  # was channel_x; bit 0x20 controls the z channel
            self.__txbuf[0] = self.__txbuf[0] | 0x20
        else:
            self.__txbuf[0] = self.__txbuf[0] & 0xDF
        self.i2cDev.writeto_mem(self.bmm150Addr, REG_INT_CONFIG, self.__txbuf[0])
self.set_interrupt_pin(ENABLE_INTERRUPT_PIN, POKARITY_HIGH)
self.get_geomagnetic()
'''
    @brief Get the channel high threshold Interrupt status
@return status interrupt status
1-7 is interrupt
0 is no interrupt
'''
def get_high_threshold_interrupt_state(self):
rslt = self.i2cDev.readfrom_mem(self.bmm150Addr,REG_INTERRUPT_STATUS, 1)
return (rslt[0] & 0x38) >> 3
def change_date(self, rslt, num):
rslt_change = []
for i in range(num):
rslt_change.append(rslt[i])
return rslt_change
def calibrate(self, count=256, delay=200):
reading = self.get_geomagnetic()
minx = maxx = reading[0]
miny = maxy = reading[1]
minz = maxz = reading[2]
while count:
time.sleep_ms(delay)
reading = self.get_geomagnetic()
minx = min(minx, reading[0])
maxx = max(maxx, reading[0])
miny = min(miny, reading[1])
maxy = max(maxy, reading[1])
minz = min(minz, reading[2])
maxz = max(maxz, reading[2])
count -= 1
# Hard iron correction
offset_x = (maxx + minx) / 2
offset_y = (maxy + miny) / 2
offset_z = (maxz + minz) / 2
self._offset = (offset_x, offset_y, offset_z)
# Soft iron correction
avg_delta_x = (maxx - minx) / 2
avg_delta_y = (maxy - miny) / 2
avg_delta_z = (maxz - minz) / 2
avg_delta = (avg_delta_x + avg_delta_y + avg_delta_z) / 3
scale_x = avg_delta / avg_delta_x
scale_y = avg_delta / avg_delta_y
if avg_delta_z == 0:
avg_delta_z=1
scale_z = avg_delta / avg_delta_z
self._scale = (scale_x, scale_y, scale_z)
return self._offset, self._scale
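def apply_calibration(raw_xyz, offset, scale):
    # Hedged helper, not part of the original driver: calibrate() above only stores the
    # hard-iron offset and soft-iron scale; this shows how a corrected reading could be
    # derived from a raw get_geomagnetic() tuple.
    return tuple((raw_xyz[i] - offset[i]) * scale[i] for i in range(3))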
def setup():
global bmm150
while ERROR == bmm150.sensor_init():
print("sensor init error ,please check connect")
'''
POWERMODE_NORMAL
POWERMODE_FORCED
POWERMODE_SLEEP
POWERMODE_SUSPEND
'''
bmm150.set_operation_mode(POWERMODE_NORMAL)
'''
PRESETMODE_LOWPOWER
PRESETMODE_REGULAR
PRESETMODE_HIGHACCURACY
PRESETMODE_ENHANCED
'''
bmm150.set_preset_mode(PRESETMODE_LOWPOWER)
'''
Enable or disable the pin :
ENABLE_DRDY
DISABLE_DRDY (default mode)
polarity Active level
POKARITY_HIGH (default active high level )
POKARITY_LOW
'''
bmm150.set_data_readly_interrupt_pin(ENABLE_DRDY ,POKARITY_HIGH)
# bmm150.calibrate(200, 200)
def loop():
global bmm150, img
if bmm150.get_data_readly_state() == 1:
rslt = bmm150.get_geomagnetic()
print("mag x = %d ut"%rslt[0])
print("mag y = %d ut"%rslt[1])
print("mag z = %d ut"%rslt[2])
print("")
color = (0, 255, 0)
if rslt[0] > 100 or rslt[1] > 100 or rslt[2] > 100:
color = (255, 0, 0)
img.clear()
img.draw_rectangle(0, 0 , img.width(), img.height(), color, fill=True)
img.draw_string(10, 10, "mag x = %d ut"%rslt[0], scale=2)
img.draw_string(10, 40, "mag y = %d ut"%rslt[1], scale=2)
img.draw_string(10, 70, "mag z = %d ut"%rslt[2], scale=2)
lcd.display(img)
else:
time.sleep(0.2)
time.sleep(0.2)
if __name__ == "__main__":
import lcd, image
lcd.init()
img = image.Image(size=(320, 240))
tmp = I2C(I2C.I2C0, freq = 100*1000, scl = 28, sda = 22)
print(tmp.scan())
bmm150 = BMM150(tmp, 0x13)
setup()
while True:
loop()
```
#### File: others/pca9685/pca9685.py
```python
import utime
from machine import I2C
i2c = I2C(I2C.I2C0,freq=100000, scl=28, sda=29)
# change scl and sda to match the pins you actually wired
def pca_setfreq(freqs):
freqs *= 0.92
prescaleval = 25000000
prescaleval /= 4096
prescaleval /= freqs
prescaleval -= 1
prescale =int(prescaleval + 0.5)
    oldmode = i2c.readfrom_mem(0x40, 0x00, 1)  # current MODE1 register
    newmode = (oldmode[0] & 0x7F) | 0x10  # sleep
    i2c.writeto_mem(0x40, 0x00, newmode)  # go to sleep
    i2c.writeto_mem(0x40, 0xFE, prescale)  # set the prescaler
    i2c.writeto_mem(0x40, 0x00, oldmode[0])  # restore the previous mode
    utime.sleep_ms(2)
    i2c.writeto_mem(0x40, 0x00, oldmode[0] | 0xa1)  # restart, enable auto-increment
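def pca_prescale_for(freq_hz):
    # Hedged helper mirroring the arithmetic in pca_setfreq above (PCA9685 datasheet
    # formula, 25 MHz oscillator, 0.92 empirical correction): pca_prescale_for(50)
    # returns 132 (0x84).
    freq_hz *= 0.92
    return int(25000000 / 4096 / freq_hz - 1 + 0.5)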
def pca_setpwm(num,on,off):
i2c.writeto_mem(0x40,0x06+4*num,on)
i2c.writeto_mem(0x40,0x07+4*num,on>>8)
i2c.writeto_mem(0x40,0x08+4*num,off)
i2c.writeto_mem(0x40,0x09+4*num,off>>8)
def pca_init(hz,angle): # initialization: set the PWM frequency and move channel 0 to the given angle
off = 0
i2c.writeto_mem(0x40,0x00,0x0)
pca_setfreq(hz)
off = int(145+angle*2.4)
pca_setpwm(0,0,off)
#pca_setpwm(1,0,off)
#pca_setpwm(2,0,off)
#pca_setpwm(3,0,off)
#pca_setpwm(4,0,off)
#pca_setpwm(5,0,off)
#pca_setpwm(6,0,off)
#pca_setpwm(7,0,off)
#pca_setpwm(8,0,off)
#pca_setpwm(9,0,off)
#pca_setpwm(10,0,off)
#pca_setpwm(11,0,off)
#pca_setpwm(12,0,off)
#pca_setpwm(13,0,off)
#pca_setpwm(14,0,off)
#pca_setpwm(15,0,off)
utime.sleep_ms(500)
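def pca_angle_to_off(angle):
    # Hedged helper mirroring the mapping in pca_init above: at 50 Hz a 4096-count
    # frame is 20 ms, so one count is ~4.88 us and off = 145 + angle*2.4 places the
    # falling edge near 0.7 ms + angle*11.7 us; pca_angle_to_off(60) returns 289.
    return int(145 + angle * 2.4)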
def pca_mg90(num,start_angle,end_angle,mode,speed):
off = 0
if mode==0:
off=int(158+end_angle*2.2)
pca_setpwm(num,0,off)
elif mode==1:
off=int(158+end_angle*2.2)
pca_setpwm(num,0,off)
    # to be continued (only modes 0 and 1 are implemented)
pca_init(50,60)
```
#### File: spmod/sp_eink/demo_sp_eink.py
```python
from micropython import const
from time import sleep_ms
import ustruct
import image
# Display resolution
SPEINK_WIDTH = const(200)
SPEINK_HEIGHT = const(200)
SPEINK_ROTATION = const(180) # 0, 90, 180, 270
BUSY = const(1) # 1=busy, 0=idle
class SPEINK:
def __init__(self, spi, cs, dc, rst, busy, width, height, rotation):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
self.busy = busy
self.cs.value(0)
self.dc.value(0)
self.rst.value(1)
self.width = width
self.height = height
self.rotation = rotation
lut_vcom0 = bytearray(
b'\x0E\x14\x01\x0A\x06\x04\x0A\x0A\x0F\x03\x03\x0C\x06\x0A\x00')
lut_w = bytearray(
b'\x0E\x14\x01\x0A\x46\x04\x8A\x4A\x0F\x83\x43\x0C\x86\x0A\x04')
lut_b = bytearray(
b'\x0E\x14\x01\x8A\x06\x04\x8A\x4A\x0F\x83\x43\x0C\x06\x4A\x04')
lut_g1 = bytearray(
b'\x8E\x94\x01\x8A\x06\x04\x8A\x4A\x0F\x83\x43\x0C\x06\x0A\x04')
lut_g2 = bytearray(
b'\x8E\x94\x01\x8A\x06\x04\x8A\x4A\x0F\x83\x43\x0C\x06\x0A\x04')
lut_vcom1 = bytearray(
b'\x03\x1D\x01\x01\x08\x23\x37\x37\x01\x00\x00\x00\x00\x00\x00')
lut_red0 = bytearray(
b'\x83\x5D\x01\x81\x48\x23\x77\x77\x01\x00\x00\x00\x00\x00\x00')
lut_red1 = bytearray(
b'\x03\x1D\x01\x01\x08\x23\x37\x37\x01\x00\x00\x00\x00\x00\x00')
def _command(self, command, data=None):
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
self.cs(1)
if data is not None:
self._data(data)
self.dc(1)
def _data(self, data):
self.dc(1)
self.cs(0)
self.spi.write(data)
self.cs(1)
def reset(self):
self.dc(0)
sleep_ms(200)
self.dc(1)
self.rst(0)
sleep_ms(100)
self.rst(1)
sleep_ms(200)
def init(self):
self.reset()
self._command(0x01)
        self._data(0x07)  # set high/low voltage
        self._data(0x00)
        self._data(0x0f)  # red drive voltage; a larger value gives a deeper red
self._data(0x00)
self._command(0x06)
self._data(0x07)
self._data(0x07)
self._data(0x07)
self._command(0x04) # 上电
if self.wait_until_idle() == False:
pass
self._command(0X00)
        self._data(0xcf)  # select maximum resolution
self._command(0X50)
self._data(0x37)
self._command(0x30)
        self._data(0x39)  # PLL setting
        self._command(0x61)  # resolution setting
        self._data(0xC8)  # 200 pixels
        self._data(0x00)  # 200 pixels
        self._data(0xC8)
        self._command(0x82)  # VCOM setting
self._data(0x18)
self.lut_bw()
self.lut_red()
# brief: display image on eink
# img_r: red image
# img_bw: b/w image
def display(self, img_r, img_bw = None):
img1 = image.Image() # handle image
img1 = img1.resize(self.width, self.height)
if(img_bw == None):
self._command(0x10) # write "B/W" data to SRAM. 0x00:black
for i in range(10000):
self._data(0xff)
else:
img1.draw_image(img_bw, 0, 0)
            # Parameter 'fov' is to solve data loss issues
img1.rotation_corr(x_rotation=self.rotation, fov=2)
img_bytes = img1.to_bytes() # That's "self.width*self.height*2" bytes
self._command(0x10) # write "B/W" data to SRAM 0x00:black,0xff:white
for i in range(0, self.width*self.height*2, 16):
b = 0
for j in range(0, 8, 2):
if img_bytes[i+j] or img_bytes[i+j+1]:
b = b | (0xc0 >> j)
self._data(~b)
b = 0
for j in range(8, 16, 2):
if img_bytes[i+j] or img_bytes[i+j+1]:
b = b | (0xc0 >> j-8)
self._data(~b)
img1.draw_image(img_r, 0, 0)
        # Parameter 'fov' is to solve data loss issues
img1.rotation_corr(x_rotation=180, fov=2)
img_bytes = img1.to_bytes() # That's "self.width*self.height*2" bytes
self._command(0x13) # write "RED" data to SRAM 0x00:red,0xff:white
for i in range(0, self.width*self.height*2, 16):
b = 0
for j in range(0, 16, 2):
if img_bytes[i+j] or img_bytes[i+j+1]:
b = b | (0x80 >> j//2)
self._data(~b)
self._command(0x12) # display refresh
self.wait_until_idle()
def wait_until_idle(self):
for i in range(10):
sleep_ms(100)
if self.busy.value() != BUSY:
return True
print('self.busy', self.busy.value())
return False
def lut_bw(self):
self._command(0x20, SPEINK.lut_vcom0)
self._command(0x21, SPEINK.lut_w)
self._command(0x22, SPEINK.lut_b)
self._command(0x23, SPEINK.lut_g1)
self._command(0x24, SPEINK.lut_g2)
def lut_red(self):
self._command(0x25, SPEINK.lut_vcom1)
self._command(0x26, SPEINK.lut_red0)
self._command(0x27, SPEINK.lut_red1)
# enter deep sleep A0=1, A0=0 power on
def sleep(self):
self._command(0x50)
self._data(0xf7)
self._command(0x02)
self.wait_until_idle()
self._data(0x07)
self._command(0xa5)
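def _pack_row_example(img_bytes, offset=0):
    # Hedged sketch of the packing loop in SPEINK.display() above: img.to_bytes()
    # yields two RGB565 bytes per pixel, and 8 consecutive pixels (16 bytes) collapse
    # into one output byte -- any non-zero pixel sets its bit -- which is then inverted
    # so 0 means "ink" and 1 means "white".
    b = 0
    for j in range(0, 16, 2):
        if img_bytes[offset + j] or img_bytes[offset + j + 1]:
            b |= 0x80 >> (j // 2)
    return (~b) & 0xFF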
if __name__ == "__main__":
from Maix import GPIO
from fpioa_manager import fm
from machine import SPI
# MaixCube | SPMOD
# [7 |VCC] [RST|3V3]
# [15 | 21] [D/C|SCK]
# [20 | 8] [CS |SI ]
# [GND| 6] [GND|BL ]
################### config ###################
SPI_EINK_NUM = SPI.SPI1
SPI_EINK_DC_PIN_NUM = const(15)
SPI_EINK_BUSY_PIN_NUM = const(6)
SPI_EINK_RST_PIN_NUM = const(7)
SPI_EINK_CS_PIN_NUM = const(20)
SPI_EINK_SCK_PIN_NUM = const(21)
SPI_EINK_MOSI_PIN_NUM = const(8)
SPI_EINK_FREQ_KHZ = const(600)
##############################################
spi1 = SPI(SPI_EINK_NUM, mode=SPI.MODE_MASTER, baudrate=SPI_EINK_FREQ_KHZ * 1000,
polarity=0, phase=0, bits=8, firstbit=SPI.MSB, sck=SPI_EINK_SCK_PIN_NUM, mosi=SPI_EINK_MOSI_PIN_NUM)
fm.register(SPI_EINK_CS_PIN_NUM, fm.fpioa.GPIOHS20, force=True)
fm.register(SPI_EINK_DC_PIN_NUM, fm.fpioa.GPIOHS15, force=True)
fm.register(SPI_EINK_BUSY_PIN_NUM, fm.fpioa.GPIOHS6, force=True)
fm.register(SPI_EINK_RST_PIN_NUM, fm.fpioa.GPIOHS7, force=True)
cs = GPIO(GPIO.GPIOHS20, GPIO.OUT)
dc = GPIO(GPIO.GPIOHS15, GPIO.OUT)
busy = GPIO(GPIO.GPIOHS6, GPIO.IN, GPIO.PULL_DOWN)
rst = GPIO(GPIO.GPIOHS7, GPIO.OUT)
epd = SPEINK(spi1, cs, dc, rst, busy, SPEINK_WIDTH, SPEINK_HEIGHT, SPEINK_ROTATION)
epd.init()
# red image
img_r = image.Image()
img_r = img_r.resize(SPEINK_WIDTH, SPEINK_HEIGHT)
img_r.draw_line(0, 0, 100, 100)
img_r.draw_circle(50, 50, 20)
img_r.draw_rectangle(80, 80, 30, 30)
# bw image
img_bw = image.Image()
img_bw = img_bw.resize(SPEINK_WIDTH, SPEINK_HEIGHT)
img_bw.draw_line(100, 50, 200, 100)
img_bw.draw_circle(80, 80, 30)
img_bw.draw_rectangle(10, 10, 60, 60)
epd.display(img_r, img_bw)
epd.sleep()
```
#### File: image/demo_draw_font/test_draw_font.py
```python
def encode_get_utf8_size(utf):
if utf < 0x80:
return 1
if utf >= 0x80 and utf < 0xC0:
return -1
if utf >= 0xC0 and utf < 0xE0:
return 2
if utf >= 0xE0 and utf < 0xF0:
return 3
if utf >= 0xF0 and utf < 0xF8:
return 4
if utf >= 0xF8 and utf < 0xFC:
return 5
if utf >= 0xFC:
return 6
def encode_utf8_to_unicode(utf8):
unic = -1 # avoids UnboundLocalError when the lead byte is invalid (utfbytes == -1)
utfbytes = encode_get_utf8_size(utf8[0])
if utfbytes == 1:
unic = utf8[0]
if utfbytes == 2:
b1 = utf8[0]
b2 = utf8[1]
if ((b2 & 0xE0) != 0x80):
return -1
unic = ((((b1 << 6) + (b2 & 0x3F)) & 0xFF) << 8) | (((b1 >> 2) & 0x07) & 0xFF)
if utfbytes == 3:
b1 = utf8[0]
b2 = utf8[1]
b3 = utf8[2]
if (((b2 & 0xC0) != 0x80) or ((b3 & 0xC0) != 0x80)):
return -1
unic = ((((b1 << 4) + ((b2 >> 2) & 0x0F)) & 0xFF) << 8) | (((b2 << 6) + (b3 & 0x3F)) & 0xFF)
if utfbytes == 4:
b1 = utf8[0]
b2 = utf8[1]
b3 = utf8[2]
b4 = utf8[3]
if (((b2 & 0xC0) != 0x80) or ((b3 & 0xC0) != 0x80) or ((b4 & 0xC0) != 0x80)):
return -1
unic = ((((b3 << 6) + (b4 & 0x3F)) & 0xFF) << 16) | ((((b2 << 4) + ((b3 >> 2)
& 0x0F)) & 0xFF) << 8) | ((((b1 << 2) & 0x1C) + ((b2 >> 4) & 0x03)) & 0xFF)
if utfbytes == 5:
b1 = utf8[0]
b2 = utf8[1]
b3 = utf8[2]
b4 = utf8[3]
b5 = utf8[4]
if (((b2 & 0xC0) != 0x80) or ((b3 & 0xC0) != 0x80) or ((b4 & 0xC0) != 0x80) or ((b5 & 0xC0) != 0x80)):
return -1
unic = ((((b4 << 6) + (b5 & 0x3F)) & 0xFF) << 24) | (((b3 << 4) + ((b4 >> 2) & 0x0F) & 0xFF) << 16) | ((((b2 << 2) + ((b3 >> 4) & 0x03)) & 0xFF) << 8) | (((b1 << 6)) & 0xFF)
if utfbytes == 6:
b1 = utf8[0]
b2 = utf8[1]
b3 = utf8[2]
b4 = utf8[3]
b5 = utf8[4]
b6 = utf8[5]
if (((b2 & 0xC0) != 0x80) or ((b3 & 0xC0) != 0x80) or ((b4 & 0xC0) != 0x80) or ((b5 & 0xC0) != 0x80) or ((b6 & 0xC0) != 0x80)):
return -1
unic = ((((b5 << 6) + (b6 & 0x3F)) << 24) & 0xFF) | (((b5 << 4) + ((b6 >> 2) & 0x0F) << 16) & 0xFF) | ((((b3 << 2) + ((b4 >> 4) & 0x03)) << 8) & 0xFF) | ((((b1 << 6) & 0x40) + (b2 & 0x3F)) & 0xFF)
return unic
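# Example (sketch): size/decode of one multi-byte character. '你' encodes to the
# three bytes b'\xe4\xbd\xa0', so encode_get_utf8_size(0xe4) returns 3, and
# encode_utf8_to_unicode(b'\xe4\xbd\xa0') yields the index that draw_string()
# below multiplies by the glyph size to seek into the .Dzk font file.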
import lcd, time
import image
lcd.init(freq=15000000)
##lcd.direction(lcd.YX_RLDU)
##img = image.Image("ts.bmp")
img = image.Image(size=(240, 240))
img.draw_rectangle((0,0,240,240), fill=True, color=(150,150,150))
#img.draw_string(60, 100, "hello \nmaixpy", scale=4)
#img.draw_rectangle((120,120,30,30))
#img.draw_circle((150,140, 80))
#img.draw_cross((200,40))
#img.draw_arrow((200,200,20,200), color=(236,36,36))
#img.draw_image(image.Image("test.bmp"), 20, 30)
def draw_string(img, x, y, c, s, string, width, high, fonts, space=1):
i = 0
pos = 0
while i < len(string):
utfbytes = encode_get_utf8_size(string[i])
print(i, string[i], utfbytes, string[i:i+utfbytes])
tmp = encode_utf8_to_unicode(string[i:i+utfbytes])
i += utfbytes
pos += 1
fonts.seek(tmp * int(high*(width/8)))
img.draw_font(x + (pos * s * (width + space)), y, width, high, fonts.read(int(high*(width/8))), scale=s, color=c)
import os
unicode_dict = open('/sd/unicode_8_8_u_d_l_r.Dzk', 'rb')
draw_string(img, 0, 20, (0,0,0), 3, b'你好,世界', 8, 8, unicode_dict)
draw_string(img, 0, 60, (0,0,0), 2, b'hello world!', 8, 8, unicode_dict)
unicode_dict.close()
unicode_dict = open('/sd/unicode_16_16_u_d_l_r.Dzk', 'rb')
draw_string(img, 0, 100, (0,255,0), 2, b'你好,世界', 16, 16, unicode_dict)
draw_string(img, 0, 140, (0,255,0), 1, b'hello world!', 16, 16, unicode_dict)
unicode_dict.close()
# 8 * 8
tmp = b'\x20\xFC\xFC\x2C\xAC\x4C\x4D\xA3'
img.draw_font(10, 20, 8, 8, tmp, scale=1, color=(0,0,0))
img.draw_font(60, 15, 8, 8, tmp, scale=2, color=(255,0,0))
img.draw_font(110, 10, 8, 8, tmp, scale=3, color=(0,255,0))
img.draw_font(150, 10, 16, 8, b'\x00\x00\x00\x00\x7F\x01\x01\xFF\x00\x00\x00\x78\x80\x00\x04\xFE', scale=2, color=(0,0,255))
img.draw_font(200, 10, 8, 16, b'\x00\x00\x00\x00\x7F\x01\x01\xFF\x01\x01\x01\x01\x01\x01\x01\x01', scale=2, color=(0,0,255))
img.draw_font(200, 10, 8, 10, b'\x01\x02\x04\x08\x10\x20\x40\x80\x01\x02', scale=4, color=(0,0,255))
# 16 * 16
qian = b'\x00\x00\x00\x00\x7F\x01\x01\xFF\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x78\x80\x00\x04\xFE\x00\x00\x00\x00\x00\x00\x00\x00'
img.draw_font(10, 50, 16, 16, qian, scale=1, color=(0,0,0))
img.draw_font(10, 100, 16, 16, qian, scale=2, color=(0,0,0))
img.draw_font(10, 150, 16, 16, qian, scale=3, color=(0,0,0))
li = b'\x00\x00\x00\x1F\x11\x11\x1F\x11\x1F\x11\x01\x01\x3F\x01\x01\xFF\x00\x00\x08\xF8\x08\x08\xF8\x08\xF8\x08\x00\x08\xFC\x00\x00\xFE'
img.draw_font(60, 50, 16, 16, li, scale=1, color=(255,0,0))
img.draw_font(60, 100, 16, 16, li, scale=2, color=(255,0,0))
img.draw_font(60, 150, 16, 16, li, scale=3, color=(255,0,0))
zhi = b'\x00\x00\x02\x01\x01\x7F\x00\x00\x00\x00\x01\x06\x08\x30\x4C\x03\x00\x00\x00\x00\x08\xFC\x10\x20\x40\x80\x00\x00\x00\x00\x00\xFE'
img.draw_font(120, 50, 16, 16, zhi, scale=1, color=(0,255,0))
img.draw_font(120, 100, 16, 16, zhi, scale=2, color=(0,255,0))
img.draw_font(120, 150, 16, 16, zhi, scale=3, color=(0,255,0))
wai = b'\x00\x00\x04\x08\x08\x0F\x11\x11\x29\x26\x42\x04\x04\x08\x10\x20\x00\x00\x20\x20\x20\xA0\x20\x38\x24\x22\x20\x20\x20\x20\x20\x20'
img.draw_font(180, 50, 16, 16, wai, scale=1, color=(0,0,255))
img.draw_font(180, 100, 16, 16, wai, scale=2, color=(0,0,255))
img.draw_font(180, 150, 16, 16, wai, scale=3, color=(0,0,255))
lcd.display(img)
```
#### File: gui/lvgl/lvgl_img.py
```python
import lvgl as lv
import lvgl_helper as lv_h
import lcd
import time
from machine import Timer
from machine import I2C
import touchscreen as ts
i2c = I2C(I2C.I2C0, freq=400000, scl=30, sda=31)
lcd.init()
ts.init(i2c)
lv.init()
disp_buf1 = lv.disp_buf_t()
buf1_1 = bytearray(320*10)
lv.disp_buf_init(disp_buf1,buf1_1, None, len(buf1_1)//4)
disp_drv = lv.disp_drv_t()
lv.disp_drv_init(disp_drv)
disp_drv.buffer = disp_buf1
disp_drv.flush_cb = lv_h.flush
disp_drv.hor_res = 320
disp_drv.ver_res = 240
lv.disp_drv_register(disp_drv)
indev_drv = lv.indev_drv_t()
lv.indev_drv_init(indev_drv)
indev_drv.type = lv.INDEV_TYPE.POINTER
indev_drv.read_cb = lv_h.read
lv.indev_drv_register(indev_drv)
# lv.log_register_print_cb(lv_h.log)
lv.log_register_print_cb(lambda level,path,line,msg: print('%s(%d): %s' % (path, line, msg)))
# Image data
with open('/flash/blue_flower_32.bin','rb') as f:
img_data = f.read()
# Pixel format: Fix 0xFF: 8 bit, Red: 8 bit, Green: 8 bit, Blue: 8 bit
# Create a screen with a draggable image
scr = lv.obj()
img = lv.img(scr)
img.align(scr, lv.ALIGN.CENTER, 0, 0)
img_dsc = lv.img_dsc_t({
'header':{
'always_zero': 0,
'w':100,
'h':75,
'cf':lv.img.CF.TRUE_COLOR
},
'data_size': len(img_data),
'data': img_data
})
img.set_src(img_dsc)
img.set_drag(False)
# Load the screen and display image
lv.scr_load(scr)
def on_timer(timer):
lv.tick_inc(5)
timer = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PERIODIC, period=5, unit=Timer.UNIT_MS, callback=on_timer, arg=None)
while True:
tim = time.ticks_ms()
lv.task_handler()
while time.ticks_ms()-tim < 5:
pass
```
#### File: gui/lvgl/lvgl_multiple_screens.py
```python
import lvgl as lv
import lvgl_helper as lv_h
import lcd
import time
from machine import Timer
from machine import I2C
from touch import Touch, TouchLow
import KPU as kpu
import gc
config_touchscreen_support = True
board_m1n = False
lcd.init()
TOUCH = None
def read_cb(drv, ptr):
data = lv.indev_data_t.cast(ptr)
TOUCH.event()
data.point = lv.point_t({'x': TOUCH.points[1][0], 'y': TOUCH.points[1][1]})
data.state = lv.INDEV_STATE.PR if TOUCH.state == 1 else lv.INDEV_STATE.REL
return False
if config_touchscreen_support:
i2c = I2C(I2C.I2C0, freq=1000*1000, scl=24, sda=27) # 24 27)
devices = i2c.scan()
print("devs", devices) # devs 0 [16, 38, 52, 56]
TouchLow.config(i2c)
TOUCH = Touch(480, 320, 200)
lv.init()
disp_buf1 = lv.disp_buf_t()
buf1_1 = bytearray(320*10)
lv.disp_buf_init(disp_buf1,buf1_1, None, len(buf1_1)//4)
disp_drv = lv.disp_drv_t()
lv.disp_drv_init(disp_drv)
disp_drv.buffer = disp_buf1
disp_drv.flush_cb = lv_h.flush
if board_m1n:
disp_drv.hor_res = 240
disp_drv.ver_res = 240
else:
disp_drv.hor_res = 480
disp_drv.ver_res = 320
lv.disp_drv_register(disp_drv)
if config_touchscreen_support:
indev_drv = lv.indev_drv_t()
lv.indev_drv_init(indev_drv)
indev_drv.type = lv.INDEV_TYPE.POINTER
indev_drv.read_cb = read_cb
lv.indev_drv_register(indev_drv)
lv.log_register_print_cb(lambda level,path,line,msg: print('%s(%d): %s' % (path, line, msg)))
class UI:
def __init__(self):
self.scr1 = self.create_scr1()
self.scr2 = self.create_scr2()
def create_scr1(self):
scr1 = lv.obj()
btn1 = lv.btn(scr1)
btn1.align(scr1, lv.ALIGN.CENTER, 0, 0)
label1 = lv.label(btn1)
label1.set_text("Button 1")
label1.set_size(20,20)
return scr1
def create_scr2(self):
scr2 = lv.obj()
btn2 = lv.btn(scr2)
btn2.align(scr2, lv.ALIGN.CENTER, 0, 0)
label2 = lv.label(btn2)
label2.set_text("Button 2")
label2.set_size(20,20)
return scr2
ui = UI()
kpu.memtest()
def on_timer(timer):
lv.tick_inc(5)
lv.task_handler()
gc.collect()
timer = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PERIODIC, period=5, unit=Timer.UNIT_MS, callback=on_timer, arg=None)
while True:
tim = time.ticks_ms()
while time.ticks_ms()-tim < 500:
pass
lv.scr_load(ui.scr1)
kpu.memtest()
tim = time.ticks_ms()
while time.ticks_ms()-tim < 500:
pass
lv.scr_load(ui.scr2)
kpu.memtest()
```
#### File: MaixPy_scripts/network/network_espat.py
```python
import time, network
from Maix import GPIO
from machine import UART
from fpioa_manager import fm
from board import board_info
class wifi():
__is_m1w__ = True
uart = None
eb = None
nic = None
def init():
if __class__.__is_m1w__:
fm.register(0, fm.fpioa.GPIOHS1, force=True)
M1wPower=GPIO(GPIO.GPIOHS1, GPIO.OUT)
M1wPower.value(0) # b'\r\n ets Jan 8 2013,rst cause:1, boot mode:(7,6)\r\n\r\nwaiting for host\r\n'
fm.register(board_info.WIFI_EN, fm.fpioa.GPIOHS0) # board_info.WIFI_EN == IO 8
__class__.en = GPIO(GPIO.GPIOHS0,GPIO.OUT)
fm.register(board_info.WIFI_RX,fm.fpioa.UART2_TX) # board_info.WIFI_RX == IO 7
fm.register(board_info.WIFI_TX,fm.fpioa.UART2_RX) # board_info.WIFI_TX == IO 6
__class__.uart = UART(UART.UART2, 115200, timeout=1000, read_buf_len=8192)
def enable(en):
__class__.en.value(en)
def _at_cmd(cmd="AT\r\n", resp="OK\r\n", timeout=20):
__class__.uart.write(cmd) # "AT+GMR\r\n"
time.sleep_ms(timeout)
tmp = __class__.uart.read()
# print(tmp)
if tmp and tmp.endswith(resp):
return True
return False
def at_cmd(cmd="AT\r\n", timeout=20):
__class__.uart.write(cmd) # "AT+GMR\r\n"
time.sleep_ms(timeout)
tmp = __class__.uart.read()
return tmp
def reset(force=False, reply=5):
if force == False and __class__.isconnected():
return True
__class__.init()
for i in range(reply):
print('reset...')
__class__.enable(False)
time.sleep_ms(50)
__class__.enable(True)
time.sleep_ms(500) # at start > 500ms
if __class__._at_cmd(timeout=500):
break
__class__._at_cmd()
__class__._at_cmd('AT+UART_CUR=921600,8,1,0,0\r\n', "OK\r\n")
__class__.uart = UART(UART.UART2, 921600, timeout=1000, read_buf_len=10240)
# important! baudrate too low or read_buf_len too small will lose data
#print(__class__._at_cmd())
try:
__class__.nic = network.ESP8285(__class__.uart)
time.sleep_ms(500) # wait at ready to connect
except Exception as e:
print(e)
return False
return True
def connect(ssid="wifi_name", pasw="<PASSWORD>"):
if __class__.nic != None:
return __class__.nic.connect(ssid, pasw)
def ifconfig(): # should check ip != 0.0.0.0
if __class__.nic != None:
return __class__.nic.ifconfig()
def isconnected():
if __class__.nic != None:
return __class__.nic.isconnected()
return False
if __name__ == "__main__":
# It is recommended to call this as a class library (upload network_espat.py)
# from network_espat import wifi
SSID = "Sipeed_2.4G"
PASW = "<PASSWORD>"
def check_wifi_net(reply=5):
if wifi.isconnected() != True:
for i in range(reply):
try:
wifi.reset()
print('try AT connect wifi...', wifi._at_cmd())
wifi.connect(SSID, PASW)
if wifi.isconnected():
break
except Exception as e:
print(e)
return wifi.isconnected()
if wifi.isconnected() == False:
check_wifi_net()
print('network state:', wifi.isconnected(), wifi.ifconfig())
# The network is no longer configured repeatedly
import socket
sock = socket.socket()
# your send or recv
# see other demo_socket_tcp.py / udp / http / mqtt
sock.close()
'''output
>>>
raw REPL; CTRL-B to exit
>OK
[Warning] function is used by fm.fpioa.GPIOHS1(pin:17)
[Warning] function is used by fm.fpioa.GPIOHS0(pin:16)
reset...
try AT connect wifi... True
could not connect to ssid=Sipeed_2.4G
reset...
try AT connect wifi... True
network state: True ('192.168.0.165', '255.255.255.0', '192.168.0.1', '0', '0', 'b0:b9:8a:5b:be:7f', 'Sipeed_2.4G')
>
MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210
Type "help()" for more information.
>>>
>>>
>>>
raw REPL; CTRL-B to exit
>OK
network state: True ('192.168.0.165', '255.255.255.0', '192.168.0.1', '0', '0', 'b0:b9:8a:5b:be:7f', 'Sipeed_2.4G')
>
'''
``` |
{
"source": "708yamaguchi/scikit-robot",
"score": 3
} |
#### File: skrobot/model/link.py
```python
import collections
import numpy as np
import trimesh
from skrobot.coordinates import CascadedCoords
class Link(CascadedCoords):
def __init__(self, centroid=None,
inertia_tensor=None,
collision_mesh=None,
visual_mesh=None,
*args, **kwargs):
super(Link, self).__init__(*args, **kwargs)
self.centroid = centroid
self.joint = None
self._child_links = []
self._parent_link = None
if inertia_tensor is None:
inertia_tensor = np.eye(3)
self._collision_mesh = collision_mesh
self._visual_mesh = visual_mesh
@property
def parent_link(self):
return self._parent_link
@property
def child_links(self):
return self._child_links
def add_joint(self, j):
self.joint = j
def delete_joint(self):
self.joint = None
def add_child_link(self, child_link):
"""Add child link."""
if child_link is not None and child_link not in self._child_links:
self._child_links.append(child_link)
def del_child_link(self, link):
self._child_links.remove(link)
def add_parent_link(self, parent_link):
self._parent_link = parent_link
def del_parent_link(self):
self._parent_link = None
@property
def collision_mesh(self):
"""Return collision mesh
Returns
-------
self._collision_mesh : trimesh.base.Trimesh
A single collision mesh for the link.
specified in the link frame,
or None if there is not one.
"""
return self._collision_mesh
@collision_mesh.setter
def collision_mesh(self, mesh):
"""Setter of collision mesh
Parameters
----------
mesh : trimesh.base.Trimesh
A single collision mesh for the link.
specified in the link frame,
or None if there is not one.
"""
if mesh is not None and \
not isinstance(mesh, trimesh.base.Trimesh):
raise TypeError('input mesh should be trimesh.base.Trimesh, '
'got type {}'.format(type(mesh)))
self._collision_mesh = mesh
@property
def visual_mesh(self):
"""Return visual mesh
Returns
-------
self._visual_mesh : None, trimesh.base.Trimesh, or
sequence of trimesh.Trimesh
A set of visual meshes for the link in the link frame.
"""
return self._visual_mesh
@visual_mesh.setter
def visual_mesh(self, mesh):
"""Setter of visual mesh
Parameters
----------
mesh : None, trimesh.Trimesh, sequence of trimesh.Trimesh,
trimesh.points.PointCloud or str
A set of visual meshes for the link in the link frame.
"""
if not (mesh is None
or isinstance(mesh, trimesh.Trimesh)
or (isinstance(mesh, collections.Sequence)
and all(isinstance(m, trimesh.Trimesh) for m in mesh))
or isinstance(mesh, trimesh.points.PointCloud)
or isinstance(mesh, str)):
raise TypeError(
'mesh must be None, trimesh.Trimesh, sequence of '
'trimesh.Trimesh, trimesh.points.PointCloud '
'or path of mesh file, but got: {}'.format(type(mesh)))
if isinstance(mesh, str):
mesh = trimesh.load(mesh)
self._visual_mesh = mesh
```
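A minimal usage sketch for the `Link` class above (the box primitive and link names are illustrative; it only assumes `trimesh` and `skrobot` are importable):
```python
import trimesh

from skrobot.model.link import Link

# A link whose collision and visual geometry is a simple 10 cm box.
box = trimesh.creation.box(extents=(0.1, 0.1, 0.1))
base = Link(name='base_link', collision_mesh=box, visual_mesh=box)

# Attach a child link; both directions of the parent/child relation are explicit.
arm = Link(name='arm_link')
base.add_child_link(arm)
arm.add_parent_link(base)

print(base.child_links)     # [arm_link]
print(base.collision_mesh)  # the trimesh box
```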
#### File: skrobot/planner/swept_sphere.py
```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance
def compute_swept_sphere(collision_mesh,
n_sphere=None,
tol=0.1):
"""Compute swept spheres approximating a mesh
Parameters
----------
collision_mesh : trimesh.Trimesh
mesh which swept spheres are computed for
n_sphere : int or None
number of sphere to approximate the mesh. If it's set to `None`,
the number of sphere is automatically determined.
tol : float
tolerance determines how much mesh jutting-out from the swept-spheres
is accepted. Let `max_jut` be the maximum jut-distance. Then the
setting `tol` enforces `max_jut / radius < tol`. If some integer
is set to `n_sphere`, `tol` does not affect the result.
Returns
-------
centers_original_space : numpy.ndarray[float](n_sphere, 3)
centers of the approximating spheres in the space where the
mesh vertices are defined.
radius : float
radius of the approximating spheres.
"""
verts = collision_mesh.vertices
# first we compute the principal directions of the vertices.
mean = np.mean(verts, axis=0)
verts_slided = verts - mean[None, :]
cov = EmpiricalCovariance().fit(verts_slided)
eig_vals, basis_tf_mat = np.linalg.eig(cov.covariance_)
principle_axis = np.argmax(eig_vals)
# and map to the space spanned by the principal vectors.
# Also, compute the inverse map, to re-map them to the original
# space at the end of this function.
verts_mapped = verts_slided.dot(basis_tf_mat)
def inverse_map(verts):
return verts.dot(basis_tf_mat.T) + mean[None, :]
# get the indexes of the plane axes
if principle_axis == 0:
plane_axes = [1, 2]
elif principle_axis == 1:
plane_axes = [2, 0]
else:
plane_axes = [0, 1]
# then compute the bounding-circle for vertices projected
# to the plane.
def determine_radius(verts_2d_projected):
X, Y = verts_2d_projected.T
radius_vec = np.sqrt(X**2 + Y**2)
radius = np.max(radius_vec)
return radius
margin_factor = 1.01
radius = determine_radius(verts_mapped[:, plane_axes]) * margin_factor
# compute the maximum and minimum heights (h_center_max, h_center_min)
# of the sphere centers. Here, height is defined in the principal direction.
squared_radius_arr = np.sum(verts_mapped[:, plane_axes] ** 2, axis=1)
h_center_arr = verts_mapped[:, principle_axis]
h_vert_max = np.max(verts_mapped[:, principle_axis])
h_vert_min = np.min(verts_mapped[:, principle_axis])
def get_h_center_max():
def cond_all_inside_positive(h_center_max):
sphere_heights = h_center_max +\
np.sqrt(radius**2 - squared_radius_arr)
return np.all(sphere_heights > h_center_arr)
# get first index that satisfies the condition
h_cand_list = np.linspace(0, h_vert_max, 30)
idx = np.where([cond_all_inside_positive(h)
for h in h_cand_list])[0][0]
h_center_max = h_cand_list[idx]
return h_center_max
def get_h_center_min():
def cond_all_inside_negative(h_center_min):
sphere_heights = h_center_min - \
np.sqrt(radius**2 - squared_radius_arr)
return np.all(h_center_arr > sphere_heights)
# get first index that satisfies the condition
h_cand_list = np.linspace(0, h_vert_min, 30)
idx = np.where([cond_all_inside_negative(h)
for h in h_cand_list])[0][0]
h_center_min = h_cand_list[idx]
return h_center_min
h_center_max = get_h_center_max()
h_center_min = get_h_center_min()
# using h_center_min and h_center_max, generate center points in
# the mapped space.
def compute_center_pts_mapped_space(n_sphere):
h_centers = np.linspace(h_center_min, h_center_max, n_sphere)
centers = np.zeros((n_sphere, 3))
centers[:, principle_axis] = h_centers
return centers
auto_determine_n_sphere = (n_sphere is None)
if auto_determine_n_sphere:
n_sphere = 1
while True: # iterate until the approximation satisfies tolerance
centers_pts_mapped_space =\
compute_center_pts_mapped_space(n_sphere)
dists_foreach_sphere = np.array([
np.sqrt(np.sum((verts_mapped - c[None, :])**2, axis=1))
for c in centers_pts_mapped_space])
# verts distance to the approximating spheres
# if this distance is positive value, the vertex is jutting-out
# from the swept-sphere.
jut_dists = np.min(dists_foreach_sphere, axis=0) - radius
max_jut = np.max(jut_dists)
err_ratio = max_jut / radius
if err_ratio < tol:
break
n_sphere += 1
else:
centers_pts_mapped_space = compute_center_pts_mapped_space(n_sphere)
# map all centers to the original space
centers_original_space = inverse_map(centers_pts_mapped_space)
return centers_original_space, radius
```
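A brief usage sketch for `compute_swept_sphere` (the cylinder primitive is just an example; the import path follows the file location above and assumes `trimesh` and `scikit-learn` are installed):
```python
import trimesh

from skrobot.planner.swept_sphere import compute_swept_sphere

# Approximate an elongated cylinder by spheres swept along its long axis.
mesh = trimesh.creation.cylinder(radius=0.05, height=0.5)
centers, radius = compute_swept_sphere(mesh, tol=0.1)

print(centers.shape)  # (n_sphere, 3) sphere centers in the mesh frame
print(radius)         # shared radius, slightly above the cylinder's 0.05 m
```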
#### File: skrobot_tests/planner_tests/test_utils.py
```python
import copy
import unittest
import numpy as np
from numpy import testing
import skrobot
from skrobot.planner.utils import forward_kinematics_multi
from skrobot.planner.utils import get_robot_config
from skrobot.planner.utils import set_robot_config
def jacobian_test_util(func, x0, decimal=5):
# test jacobian by comparing the resulting and numerical jacobian
f0, jac = func(x0)
n_dim = len(x0)
eps = 1e-7
jac_numerical = np.zeros(jac.shape)
for idx in range(n_dim):
x1 = copy.copy(x0)
x1[idx] += eps
f1, _ = func(x1)
jac_numerical[:, idx] = (f1 - f0) / eps
testing.assert_almost_equal(jac, jac_numerical, decimal=decimal)
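# Example (sketch): the same finite-difference check applied to a toy function
# whose analytic Jacobian is known, f(x) = x**2 with J = diag(2 * x):
# jacobian_test_util(lambda x: (x ** 2, np.diag(2 * x)), np.array([1.0, 2.0, 3.0]))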
class TestPlannerUtils(unittest.TestCase):
@classmethod
def setup_class(cls):
robot_model = skrobot.models.PR2()
link_idx_table = {}
for link_idx in range(len(robot_model.link_list)):
name = robot_model.link_list[link_idx].name
link_idx_table[name] = link_idx
link_names = ["r_shoulder_pan_link", "r_shoulder_lift_link",
"r_upper_arm_roll_link", "r_elbow_flex_link",
"r_forearm_roll_link", "r_wrist_flex_link",
"r_wrist_roll_link"]
link_list = [robot_model.link_list[link_idx_table[lname]]
for lname in link_names]
joint_list = [l.joint for l in link_list]
cls.robot_model = robot_model
cls.link_list = link_list
cls.joint_list = joint_list
av = np.array([0.4, 0.6] + [-0.7] * 5)
cls.av = av
cls.av_with_base = np.hstack((av, [0.1, 0.0, 0.3]))
def test_set_and_get_robot_config(self):
robot_model = self.robot_model
joint_list = self.joint_list
av = self.av
av_with_base = self.av_with_base
with self.assertRaises(AssertionError):
set_robot_config(robot_model, joint_list, av, with_base=True)
set_robot_config(robot_model, joint_list, av, with_base=False)
with self.assertRaises(AssertionError):
set_robot_config(robot_model, joint_list,
av_with_base, with_base=False)
set_robot_config(robot_model, joint_list, av_with_base, with_base=True)
testing.assert_equal(
av,
get_robot_config(robot_model, joint_list, with_base=False)
)
testing.assert_equal(
av_with_base,
get_robot_config(robot_model, joint_list, with_base=True)
)
def test_forward_kinematics_multi(self):
robot_model = self.robot_model
link_list = self.link_list
joint_list = self.joint_list
av_init = self.av
av_with_base_init = self.av_with_base
move_target_list = link_list
n_feature = len(move_target_list)
def fk_fun_simple(av, with_base, with_rot, with_jacobian):
pose_arr, jac_arr = forward_kinematics_multi(
robot_model, joint_list, av, move_target_list,
with_rot, with_base, with_jacobian)
return pose_arr, jac_arr
# checking returning types and shapes:
for with_base, av in [(False, av_init), (True, av_with_base_init)]:
for with_rot, n_pose in [(False, 3), (True, 3 + 4)]:
for with_jacobian in [False, True]:
p, jac = fk_fun_simple(
av, with_base, with_rot, with_jacobian)
self.assertEqual(p.shape, (n_feature, n_pose))
n_dof = len(av)
if with_jacobian:
self.assertEqual(jac.shape, (n_feature, n_pose, n_dof))
else:
self.assertEqual(jac, None)
def fk_jac_test(av, with_base, with_rot):
n_dof = len(av)
pose_arr, jac_arr = fk_fun_simple(av, with_base, with_rot, True)
pose_flatten = pose_arr.flatten() # (pose_dim * n_feature)
pose_dim = 7 if with_rot else 3
jac_flatten = jac_arr.reshape(n_feature * pose_dim, n_dof)
return pose_flatten, jac_flatten
# checking jacobian
jacobian_test_util(
lambda av: fk_jac_test(av, False, False), av_init)
jacobian_test_util(
lambda av: fk_jac_test(av, False, True), av_init)
jacobian_test_util(
lambda av: fk_jac_test(av, True, False), av_with_base_init)
jacobian_test_util(
lambda av: fk_jac_test(av, True, True), av_with_base_init)
``` |
{
"source": "70AdmiralString/isthisatroll_web",
"score": 3
} |
#### File: isthisatroll_web/user/views.py
```python
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Redditor
from .forms import SearchForm
class DetailView(generic.DetailView):
"""
The detail view for Redditors.
This class provides the view for the results of the analysis
on a redditor. If the redditor is not present in the database,
it gives the option of performing the analysis.
"""
model = Redditor
def get(self, request, *args, **kwargs):
"""Handle 404 exception by redirecting to the prefilled search form."""
try:
response = super().get(request, *args, **kwargs)
except Http404:
form = SearchForm(initial={'username': kwargs['pk']})
response = render(request, 'user/notfound.html', {'form': form})
return response
class SearchView(generic.edit.FormView):
"""
The search page view, with form processing.
This class provides the view for the search page (which is
also the homepage). It also provides the search form processing.
"""
form_class = SearchForm
template_name = 'user/search.html'
def form_valid(self, form):
"""Handle the form, creating a new entry if the user is not in the database and redirecting otherwise."""
username = form.cleaned_data['username']
# if the user is not already in the database, create a new entry
if not Redditor.objects.filter(pk=username).exists():
new_redditor = Redditor(username=username,
analysis_date=timezone.now(),
result=0.1)
new_redditor.save()
return HttpResponseRedirect(reverse('user:detail', args=(username,)))
``` |
{
"source": "70nybl4nc0/Agent-Platform",
"score": 2
} |
#### File: 70nybl4nc0/Agent-Platform/ams.py
```python
from agent import BaseAgent
from utils.aid import AID
from utils.agent_descriptions import AMSAgentDescription
from threading import Thread
from chord.chord import Chord
import Pyro4, json
import json
from random import random
from utils.broadcast import broadcast_server, broadcast_client
def build_uri(id_, ip, port):
"Builds an uri given the value of the identifier, ip and a port"
return 'PYRO:{}@{}:{}'.format(id_, ip, port)
def get_ams_fixed_uri(ip, port):
return build_uri('ams', ip, port)
def get_ams(id_):
ams_uri = get_ams_uri(id_)
return Pyro4.Proxy(ams_uri)
def get_ams_uri(id_):
"Gets the first functional ams given by an id"
try:
address = broadcast_client(7371, id_)
ip, port = address.split(':')
port = int(port)
return build_uri(f'ams', ip, port)
except:
raise Exception("No se pudo encontrar una plataforma disponible")
def get_ams_id(id_):
"Gets the ams id to join a new ams"
try:
address = broadcast_client(7371, id_)
ip, port = address.split(':')
port = int(port)
ams_uri = get_ams_uri(id_)
with Pyro4.Proxy(ams_uri) as ams:
return ams.get_id()
except:
raise Exception('No se pudo encontrar una plataforma disponible')
@Pyro4.expose
class AMS(BaseAgent):
def __init__(self, host, port, chord_id):
self.aid = AID(f'ams@{host}:{port}')
self.host = host
self.port = port
self.address = f'{self.host}:{self.port}'
self.agents = [] # structure that will hold the agents registered on the platform
self.id = chord_id
self.start_serving()
self.chord = Chord(hash(self.aid), self.host, self.port+1, chord_id)
def __del__(self):
del self.chord
def join(self, uri=None):
self.chord.join(uri)
def get_id(self):
return self.chord.get_id()
def start_serving(self):
print('---------------------------------')
localname = self.aid.localname
print(f'Sirviendo el agente {localname}')
try:
daemon = Pyro4.Daemon(self.aid.host, self.aid.port)
self.uri = daemon.register(self, localname)
print(f'La uri de {self.aid.name} es {self.uri}')
Thread(target=daemon.requestLoop, daemon=True).start()
Thread(target=broadcast_server, args=(7371, self.id, self.address), daemon=True).start()
return True
except Exception as e:
print(f'Error al arrancar el agente {localname}')
print(f'Text error: {e}')
return False
def get_chord_id(self):
return self.chord.get_id()
def load_json(self, obj):
return json.loads(obj)
def search_service(self, name):
return self.chord.get_all(lambda x: name in self.load_json(x)['services'])
def search_agent_by_service(self, name):
return self.chord.get_first(lambda x: name in self.load_json(x)['services'])
def register(self, agent_name, uri, services, state=0):
"Registers an agent into the ams"
aid = AID(agent_name, resolvers=[self.aid])
ams_desc = AMSAgentDescription(aid, state, services, uri.asString())
self.chord.storage(hash(aid), ams_desc.dumps())
def ping(self):
"Checks if the ams is alive"
return True
def deregister(self, aid):
"Deregisters an agent in the ams"
self.chord.remove(hash(aid))
def get_agents(self):
"Returns all the agents in the platform"
return self.chord.get_all()
def get_local_agents(self):
"Returns all the agents of the ams"
return self.chord.get_locals()
def search(self, aid):
"Searchs for the description of an agent in the ams"
try:
desc = self.chord.get(hash(aid))
return AMSAgentDescription.loads(desc)
except:
raise Exception(f'No se puede encontrar al agente {aid.name}')
def stop_agent(self, aid):
"Stops an agent"
agent = self.get_agent_proxy(aid)
if agent is None:
return
try:
agent.stop()
except:
raise Exception(f'No se puede contactar con el agent {aid.name}')
def restart_agent(self, aid):
"Resumes the execution of an agent"
agent = self.get_agent_proxy(aid)
if agent is None:
return
try:
agent.restart()
except:
raise Exception(f'No se puede contactar con el agent {aid.name}')
def end_agent(self, aid):
"Ends the execution of an agent"
agent = self.get_agent_proxy(aid)
if agent is None:
return
try:
agent.end()
except:
raise Exception(f'No se puede contactar con el agente {aid.name}')
def get_agent_status(self, aid):
"Gets the state of an agent"
agent = self.get_agent_proxy(aid)
if agent is None:
return
try:
return agent.get_status()
except:
raise Exception(f'No se puede contactar con el agente {aid.name}')
def get_agent_proxy(self, aid):
print(f'Buscando el agente: {aid.name}')
agent_desc = self.search(aid)
print(f'Agente encontrado en el AMS, contactando con el agente...')
try:
agent = Pyro4.Proxy(agent_desc.uri)
agent.ping()
except:
Exception(f'No se puede contactar con el agente {aid.name}')
return agent
def execute_agent(self, aid, methods):
"Executes the agent with the required aid"
print('---------------------------------------')
print(f'Solicitando la ejecución del cliente: {aid.name}')
agent = self.get_agent_proxy(aid)
if agent is None:
print(f'No se puede encontrar al agente {aid.name} en la plataforma')
return
print('Contactado exitosamente')
for meth in methods:
self._execute_meth(agent, meth)
def _execute_meth(self, agent_proxy, method, *args):
if agent_proxy is None:
print(f'No se pudo encontrar al agente en la plataforma')
return
print(f'Ejecutando el método: {method}')
try:
return agent_proxy.run_behaviour(method, *args)
except:
raise Exception(f'No se pudo contactar con el agente')
def execute_method(self, aid, method, *args):
"Executes a method on the agent with the required aid"
print('---------------------------------------')
print(f'Solicitando la ejecución del cliente: {aid.name}')
agent = self.get_agent_proxy(aid)
return self._execute_meth(agent, method, *args)
```
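A hypothetical sketch of driving the AMS above (host, ports, agent name and URI are all made up; it assumes the agent object is already served by its own Pyro4 daemon):
```python
import Pyro4

from ams import AMS
from utils.aid import AID

# Serve a local AMS; its Chord node listens on port + 1.
ams = AMS('127.0.0.1', 9000, chord_id=None)
ams.join()  # no URI: start a fresh platform instead of joining an existing one

# Register an agent by name, Pyro4 URI and advertised services.
agent_uri = Pyro4.URI('PYRO:worker@127.0.0.1:9100')  # hypothetical agent object
ams.register('worker@127.0.0.1:9100', agent_uri, services=['echo'])

# Look it up again; the description is stored as JSON in the Chord ring.
desc = ams.search(AID('worker@127.0.0.1:9100'))
print(desc.uri, desc.services)
```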
#### File: Agent-Platform/chord/chord.py
```python
from chord.node import Node
from chord.debug import logger as log
class Chord:
'Interface for handle chord ring'
def __init__(self, id, ip, port, chord_id=None):
self.node = Node(id, ip, port, chord_id)
def __del__(self):
'kill local node and release resources'
del(self.node)
def get_id(self):
return self.node.uri
def get(self, key: int):
'get value of a key located in Chord ring'
return self.node.load(key)
def get_locals(self):
'get local node values'
return self.node.get_data()
def get_all(self,condition = lambda v:True):
'gets all the values stored in the ring'
for data in self.node.iter(lambda node: node.get_data()):
for value in data:
if condition(value):
yield value
def get_first(self,condition = lambda v:True):
'gets a random value from the chord ring'
for data in self.get_all():
if condition(data):
return data
def storage(self, key: int, value):
"add a new key,value into the chord ring"
self.node.save(key, value)
def remove(self, key: int):
'deletes a given key'
self.node.delete(key)
def join(self, uri=None):
'joins this node to a chord ring through the node at `uri`, or starts a new ring if None'
self.node.start_serving(uri=uri)
def get_first_key(self):
'gets a random key from the chord ring'
for key in self.node.iter(lambda node: node.get_key()):
for k in key:
return k
```
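A hypothetical usage sketch for the `Chord` wrapper above (addresses, ports and the key are made up; it assumes the underlying `Node` from `chord.node` is reachable over the network, as in `ams.py`):
```python
from chord.chord import Chord

# First node starts a new ring; the second joins it through the first's URI,
# mirroring how ams.py drives join().
a = Chord(id=1, ip='127.0.0.1', port=9101)
a.join()
b = Chord(id=2, ip='127.0.0.1', port=9102)
b.join(a.get_id())

# Keys are integers; the value lands on whichever node owns the key.
a.storage(42, 'hello ring')
print(b.get(42))            # -> 'hello ring'
print(list(b.get_all()))    # every value stored anywhere in the ring
```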
#### File: Agent-Platform/chord/nodeinfo.py
```python
class NodeInfo:
'Represents the ChordNode information necessary to create proxies'
def __init__(self, id:int, ip:str, port:int):
self.id = id
self.ip = ip
self.port = port
```
#### File: Agent-Platform/utils/agent_descriptions.py
```python
import json
from utils.aid import AID
# TODO: Use a custom decoder
class AMSAgentDescription:
def __init__(self, aid, state, services, uri):
self.aid = aid
self.current_state = state
self.uri = uri
self.services = services
def change_to_state(self, state):
self.current_state = state
def dumps(self):
return json.dumps({
'aid': self.aid.name,
'state': self.current_state,
'uri': self.uri,
'services': self.services})
@staticmethod
def loads(obj):
info = json.loads(obj)
info['aid'] = AID(info['aid'])
return AMSAgentDescription(**info)
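# Example (sketch): descriptions round-trip through JSON, which is how the AMS
# stores them in the Chord ring (the name/URI below are illustrative).
# desc = AMSAgentDescription(AID('worker@host:9100'), 0, ['echo'], 'PYRO:worker@host:9100')
# assert AMSAgentDescription.loads(desc.dumps()).services == ['echo']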
class DFAgentDescription:
def __init__(self, aid, services):
self.aid = aid
self.services = services
``` |
{
"source": "70nybl4nc0/autogoal",
"score": 2
} |
#### File: augly_tony/transformers/_audio.py
```python
from augly.audio.transforms import BaseTransform
import augly.audio.transforms as transforms
from autogoal.experimental.augly_tony.semantic import Audio
from autogoal.utils import nice_repr
from autogoal.grammar import (
CategoricalValue,
DiscreteValue,
ContinuousValue,
BooleanValue,
)
import numpy as np
from _util import AugLyTransformer
@nice_repr
class AugLyAudioTransformer(AugLyTransformer):
"""
Base class for augLy audio transformers
"""
def __init__(self):
super().__init__()
self._transformer: BaseTransform = None
def run(self, X: Audio) -> Audio:
# delegate to the shared AugLyTransformer.run from _util; calling self.run here recursed forever
return super().run(X)
max_db_val = 110
# TODO: Allow to add a background audio
@nice_repr
class AddBackgroundNoiseTransformer(AugLyAudioTransformer):
"""
Mixes in a background sound into the audio
"""
def __init__(
self, snr_level_db: ContinuousValue(0, max_db_val),
):
super().__init__()
self.snr_level_db = snr_level_db
def get_transformer(self) -> BaseTransform:
return transforms.AddBackgroundNoise(snr_level_db=self.snr_level_db,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class ChangeVolumeTransformer(AugLyAudioTransformer):
"""
Changes the volume of the audio
"""
def __init__(
self, volume_db: ContinuousValue(0, max_db_val),
):
super().__init__()
self.volume_db = volume_db
def get_transformer(self) -> BaseTransform:
return transforms.ChangeVolume(volume_db=self.volume_db,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class ClickseTransformer(AugLyAudioTransformer):
"""
Adds clicks to the audio at a given regular interval
"""
def __init__(
self, seconds_between_clicks: ContinuousValue(0.01, 5),
):
super().__init__()
self.seconds_between_clicks = seconds_between_clicks
def get_transformer(self) -> BaseTransform:
return transforms.Clicks(seconds_between_clicks=self.seconds_between_clicks,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class ClipTransformer(AugLyAudioTransformer):
"""
Clips the audio using the specified offset and duration factors
"""
def __init__(
self,
offset_factor: ContinuousValue(0, 1),
duration_factor: ContinuousValue(0, 1),
):
super().__init__()
self.offset_factor = offset_factor
self.duration_factor = duration_factor
def get_transformer(self) -> BaseTransform:
return transforms.Clip(offset_factor=self.offset_factor, duration_factor=self.duration_factor)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class HarmonicTransformer(AugLyAudioTransformer):
"""
Extracts the harmonic part of the audio
"""
def __init__(
self,
kernel_size: ContinuousValue(0, 31),
power: ContinuousValue(0, 2),
margin: ContinuousValue(0, 1),
):
super().__init__()
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def get_transformer(self) -> BaseTransform:
return transforms.Harmonic(
kernel_size=self.kernel_size, power=self.power, margin=self.margin,
)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class PercussiveTransformer(AugLyAudioTransformer):
"""
Extracts the percussive part of the audio
"""
def __init__(
self,
kernel_size: ContinuousValue(0, 31),
power: ContinuousValue(0, 2),
margin: ContinuousValue(0, 1),
):
super().__init__()
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def get_transformer(self) -> BaseTransform:
return transforms.Percussive(
kernel_size=self.kernel_size, power=self.power, margin=self.margin,
)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class HighPassFilterTransformer(AugLyAudioTransformer):
"""
Allows audio signals with a frequency higher than the given cutoff to pass
through and attenuates signals with frequencies lower than the cutoff frequency
"""
def __init__(
self, cutoff_hz: ContinuousValue(0, 3000.0),
):
super().__init__()
self.cutoff_hz = cutoff_hz
def get_transformer(self) -> BaseTransform:
return transforms.HighPassFilter(cutoff_hz=self.cutoff_hz,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class LowPassFilterTransformer(AugLyAudioTransformer):
"""
Allows audio signals with a frequency lower than the given cutoff to pass
through and attenuates signals with frequencies higher than the cutoff frequency
"""
def __init__(
self, cutoff_hz: ContinuousValue(0, 3000.0),
):
super().__init__()
self.cutoff_hz = cutoff_hz
def get_transformer(self) -> BaseTransform:
return transforms.LowPassFilter(cutoff_hz=self.cutoff_hz,)
# TODO: Allow adding a custom background audio
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class InsertInBackgroundTransformer(AugLyAudioTransformer):
"""
Mixes in a background sound into the audio
"""
def __init__(
self, offset_factor: ContinuousValue(0, 1),
):
super().__init__()
self.offset_factor = offset_factor
def get_transformer(self) -> BaseTransform:
return transforms.InsertInBackground(offset_factor=self.offset_factor,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class InvertChannelsTransformer(AugLyAudioTransformer):
"""
Inverts the channels of the audio.
"""
def get_transformer(self) -> BaseTransform:
return transforms.InvertChannels()
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class NormalizeTransformer(AugLyAudioTransformer):
"""
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)
"""
def __init__(
self,
norm: CategoricalValue(np.inf, -np.inf, 0),
axis: DiscreteValue(0, 4),
threshold: ContinuousValue(0, max_db_val),
fill: BooleanValue(),
):
super().__init__()
self.norm = norm
self.axis = axis
self.threshold = threshold
self.fill = fill
def get_transformer(self) -> BaseTransform:
return transforms.Normalize(
norm=self.norm, axis=self.axis, threshold=self.threshold, fill=self.fill,
)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class PitchShiftTransformer(AugLyAudioTransformer):
"""
Shifts the pitch of the audio by `n_steps`
"""
def __init__(
self, n_steps: ContinuousValue(0, 100),
):
super().__init__()
self.n_steps = n_steps
def get_transformer(self) -> BaseTransform:
return transforms.PitchShift(n_steps=self.n_steps,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class SpeedTransformer(AugLyAudioTransformer):
"""
Changes the speed of the audio, affecting pitch as well
"""
def __init__(
self, factor: ContinuousValue(0, 4),
):
super().__init__()
self.factor = factor
def get_transformer(self) -> BaseTransform:
return transforms.Speed(factor=self.factor,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class TempoTransformer(AugLyAudioTransformer):
"""
Adjusts the tempo of the audio by a given factor (without
affecting the pitch)
"""
def __init__(
self, factor: ContinuousValue(0, 4),
):
super().__init__()
self.factor = factor
def get_transformer(self) -> BaseTransform:
return transforms.Tempo(factor=self.factor,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class TimeStretchTransformer(AugLyAudioTransformer):
"""
Time-stretches the audio by a fixed rate
"""
def __init__(
self, rate: ContinuousValue(0, 4),
):
super().__init__()
self.rate = rate
def get_transformer(self) -> BaseTransform:
return transforms.TimeStretch(rate=self.rate,)
def run(self, X: Audio) -> Audio:
return super().run(X)
@nice_repr
class ToMonoTransformer(AugLyAudioTransformer):
"""
Converts the audio from stereo to mono by averaging samples across channels
"""
def get_transformer(self) -> BaseTransform:
return transforms.ToMono()
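# Example (sketch): the wrapper mainly exists so AutoGOAL can sample the
# hyperparameters; the wrapped AugLy transform can also be used directly
# (argument names follow augly.audio, e.g. a waveform array plus sample rate):
# aug = SpeedTransformer(factor=1.5).get_transformer()
# faster_audio, sr = aug(audio_array, sample_rate=44100)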
__all__ = [
AddBackgroundNoiseTransformer,
ChangeVolumeTransformer,
ClickseTransformer,
ClipTransformer,
HarmonicTransformer,
PercussiveTransformer,
HighPassFilterTransformer,
LowPassFilterTransformer,
InsertInBackgroundTransformer,
InvertChannelsTransformer,
NormalizeTransformer,
PitchShiftTransformer,
SpeedTransformer,
TempoTransformer,
TimeStretchTransformer,
ToMonoTransformer,
]
``` |
{
"source": "70Ophiuchi/JSONParser",
"score": 3
} |
#### File: 70Ophiuchi/JSONParser/app.py
```python
import re
import requests
def reader(url):
resp = requests.get(url)
dictionary = resp.json()
for x in range(107): # NOTE: hardcoded number of endpoint entries; iterating the returned list directly would be more robust
name = dictionary[x]
if not "tcpPorts" in name:
nameFinal = f"ID {str(name['id'])} serviceArea {str(name['serviceArea'])} serviceAreaDisplayName {str(name['serviceAreaDisplayName'])} updPorts {str(name['udpPorts'])} expressRoute {str(name['expressRoute'])}"
Final = nameFinal.replace(" ", "_")
else:
nameFinal = f"ID {str(name['id'])} serviceArea {str(name['serviceArea'])} serviceAreaDisplayName {str(name['serviceAreaDisplayName'])} tcpPorts {str(name['tcpPorts'])} expressRoute {str(name['expressRoute'])}"
Final = nameFinal.replace(" ", "_")
try:
ip6 = []
ip4 = []
IPV4 = ''
IPV6 = ''
for x in name["ips"]:
if re.search("::", x):
ip6.append(str(x))
IPV6 = '\n'.join(ip6)
else:
ip4.append(x)
IPV4 = '\n'.join(ip4)
except KeyError:
print("could not find IPs")
try:
URL = "\n".join(name["urls"])
except KeyError:
print("could not find URLs")
with open(f"{Final}_URLs.txt", "w") as f:
try:
f.write(URL)
except KeyError:
f.write("URL does not exist")
with open(f"{Final}_IPV4.txt", "w") as r:
try:
r.write(IPV4)
except KeyError:
r.write('IP does not exist')
with open(f"{Final}_IPV6.txt", "w") as r:
try:
r.write(IPV6)
except KeyError:
r.write('IP does not exist')
reader('https://endpoints.office.com/endpoints/worldwide?clientrequestid=b10c5ed1-bad1-445f-b386-b919946339a7')
``` |
{
"source": "70RMUND/ReclassMap",
"score": 2
} |
#### File: 70RMUND/ReclassMap/ReClassMap.py
```python
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from xml.dom import minidom
import sys
class NodeType: # type , size
nt_none = [-1 , 0 ] # Not Supported yet
nt_base = [0 , 0 ] # Not Supported yet
nt_instance = [1 , 8 ]
nt_struct = [2 , 0 ] # Not Supported yet
nt_hidden = [3 , 0 ] # Not Supported yet
nt_hex32 = [4 , 4 ]
nt_hex64 = [5 , 8 ]
nt_hex16 = [6 , 2 ]
nt_hex8 = [7 , 1 ]
nt_pointer = [8 , 8 ]
nt_int64 = [9 , 8 ]
nt_int32 = [10 , 4 ]
nt_int16 = [11 , 2 ]
nt_int8 = [12 , 1 ]
nt_float = [13 , 4 ]
nt_double = [14 , 8 ]
nt_uint32 = [15 , 4 ]
nt_uint16 = [16 , 2 ]
nt_uint8 = [17 , 1 ]
nt_text = [18 , 8 ]
nt_unicode = [19 , 8 ]
nt_functionptr = [20 , 8 ]
nt_custom = [21 , 8 ]
nt_vec2 = [22 , 8 ]
nt_vec3 = [23 , 12]
nt_quat = [24 , 16]
nt_matrix = [25 , 64]
nt_vtable = [26 , 8 ]
nt_array = [27 , 8 ]
nt_class = [28 , 0 ]
nt_pchar = [29 , 8 ]
nt_pwchar = [30 , 8 ]
nt_bits = [31 , 1 ]
nt_uint64 = [32 , 8 ]
nt_function = [33 , 0 ] # Not Supported yet
nt_ptrarray = [34 , 0 ] # Not Supported yet
class Node(object):
def __init__(self,name,comment):
self._name = name
self._comment = comment
self._bHidden = 0
def _write(self,cls):
attrib = {}
attrib["Name"] = str(self._name)
attrib["Type"] = str(self._type)
attrib["Size"] = str(self._size)
attrib["bHidden"] = str(self._bHidden)
attrib["Comment"] = str(self._comment)
SubElement(cls,"Node",attrib)
return cls
class Array(Node):
def __init__(self,classname,name,total=1,comment=""):
super(Array, self).__init__(name,comment)
self._classname = classname
self._type = NodeType.nt_array[0]
self._size = NodeType.nt_array[1]
self._total = total
def _write(self,cls):
attrib = {}
attrib["Name"] = str(self._name)
attrib["Type"] = str(self._type)
attrib["Size"] = str(self._size)
attrib["bHidden"] = str(self._bHidden)
attrib["Comment"] = str(self._comment)
attrib["Total"] = str(self._total)
nd = SubElement(cls,"Node",attrib)
arr = {}
arr["Name"] = str(self._classname)
arr["Type"] = str(NodeType.nt_class[0])
arr["Size"] = str(28)
arr["Comment"] = str(self._comment)
SubElement(nd,"Array",arr)
return cls
class Vfunc(Node):
def __init__(self,name,comment=""):
super(Vfunc, self).__init__(name,comment)
class Vtable(Node):
def __init__(self,name,comment=""):
super(Vtable, self).__init__(name,comment)
self._type = NodeType.nt_vtable[0]
self._size = NodeType.nt_vtable[1]
self._vfuncs = {}
def __setitem__(self,key,value):
self._vfuncs[key] = value
def _write(self,cls):
attrib = {}
attrib["Name"] = str(self._name)
attrib["Type"] = str(self._type)
attrib["Size"] = str(self._size)
attrib["bHidden"] = str(self._bHidden)
attrib["Comment"] = str(self._comment)
nd = SubElement(cls,"Node",attrib)
last = sorted(self._vfuncs.iterkeys())[-1]
for i in range(last+1):
arr0 = {}
arr0["bHidden"] = str(0)
if i in self._vfuncs:
vfunc = self._vfuncs[i]
arr0["Name"] = vfunc._name
arr0["Comment"] = vfunc._comment
else:
arr0["Name"] = "Function" + str(i)
arr0["Comment"] = ""
f = SubElement(nd,"Function",arr0)
arr1 = {"Assembly":""}
SubElement(f,"Code",arr1)
return cls
class Functionptr(Node):
def __init__(self,name,comment=""):
super(Functionptr, self).__init__(name,comment)
self._type = NodeType.nt_functionptr[0]
self._size = NodeType.nt_functionptr[1]
class Custom(Node):
def __init__(self,name,size=8,comment=""):
super(Custom, self).__init__(name,comment)
self._type = NodeType.nt_custom[0]
self._size = size
class Matrix(Node):
def __init__(self,name,comment=""):
super(Matrix, self).__init__(name,comment)
self._type = NodeType.nt_matrix[0]
self._size = NodeType.nt_matrix[1]
class Vec4(Node):
def __init__(self,name,comment=""):
super(Vec4, self).__init__(name,comment)
self._type = NodeType.nt_quat[0]
self._size = NodeType.nt_quat[1]
class Vec3(Node):
def __init__(self,name,comment=""):
super(Vec3, self).__init__(name,comment)
self._type = NodeType.nt_vec3[0]
self._size = NodeType.nt_vec3[1]
class Vec2(Node):
def __init__(self,name,comment=""):
super(Vec2, self).__init__(name,comment)
self._type = NodeType.nt_vec2[0]
self._size = NodeType.nt_vec2[1]
class Pchar(Node):
def __init__(self,name,comment=""):
super(Pchar, self).__init__(name,comment)
self._type = NodeType.nt_pchar[0]
self._size = NodeType.nt_pchar[1]
class Pwchar(Node):
def __init__(self,name,comment=""):
super(Pwchar, self).__init__(name,comment)
self._type = NodeType.nt_pwchar[0]
self._size = NodeType.nt_pwchar[1]
class Unicode(Node):
def __init__(self,name,comment=""):
super(Unicode, self).__init__(name,comment)
self._type = NodeType.nt_unicode[0]
self._size = NodeType.nt_unicode[1]
class Hex8(Node):
def __init__(self,name,comment=""):
super(Hex8, self).__init__(name,comment)
self._type = NodeType.nt_hex8[0]
self._size = NodeType.nt_hex8[1]
class Hex16(Node):
def __init__(self,name,comment=""):
super(Hex16, self).__init__(name,comment)
self._type = NodeType.nt_hex16[0]
self._size = NodeType.nt_hex16[1]
class Hex32(Node):
def __init__(self,name,comment=""):
super(Hex32, self).__init__(name,comment)
self._type = NodeType.nt_hex32[0]
self._size = NodeType.nt_hex32[1]
class Hex64(Node):
def __init__(self,name,comment=""):
super(Hex64, self).__init__(name,comment)
self._type = NodeType.nt_hex64[0]
self._size = NodeType.nt_hex64[1]
class Ascii(Node):
def __init__(self,name,comment=""):
super(Ascii, self).__init__(name,comment)
self._type = NodeType.nt_text[0]
self._size = NodeType.nt_text[1]
class Int64(Node):
def __init__(self,name,comment=""):
super(Int64, self).__init__(name,comment)
self._type = NodeType.nt_int64[0]
self._size = NodeType.nt_int64[1]
class Int32(Node):
def __init__(self,name,comment=""):
super(Int32, self).__init__(name,comment)
self._type = NodeType.nt_int32[0]
self._size = NodeType.nt_int32[1]
class Int16(Node):
def __init__(self,name,comment=""):
super(Int16, self).__init__(name,comment)
self._type = NodeType.nt_int16[0]
self._size = NodeType.nt_int16[1]
class Int8(Node):
def __init__(self,name,comment=""):
super(Int8, self).__init__(name,comment)
self._type = NodeType.nt_int8[0]
self._size = NodeType.nt_int8[1]
class Qword(Node):
def __init__(self,name,comment=""):
super(Qword, self).__init__(name,comment)
self._type = NodeType.nt_uint64[0]
self._size = NodeType.nt_uint64[1]
class Dword(Node):
def __init__(self,name,comment=""):
super(Dword, self).__init__(name,comment)
self._type = NodeType.nt_uint32[0]
self._size = NodeType.nt_uint32[1]
class Word(Node):
def __init__(self,name,comment=""):
super(Word, self).__init__(name,comment)
self._type = NodeType.nt_uint16[0]
self._size = NodeType.nt_uint16[1]
class Byte(Node):
def __init__(self,name,comment=""):
super(Byte, self).__init__(name,comment)
self._type = NodeType.nt_uint8[0]
self._size = NodeType.nt_uint8[1]
class Bits(Node):
def __init__(self,name,comment=""):
super(Bits, self).__init__(name,comment)
self._type = NodeType.nt_bits[0]
self._size = NodeType.nt_bits[1]
class Instance(Node):
def __init__(self,classname, name,comment=""):
super(Instance, self).__init__(name,comment)
self._classname = classname
self._type = NodeType.nt_instance[0]
self._size = NodeType.nt_instance[1]
def _write(self,cls):
attrib = {}
attrib["Name"] = str(self._name)
attrib["Type"] = str(self._type)
attrib["Size"] = str(self._size)
attrib["bHidden"] = str(self._bHidden)
attrib["Comment"] = str(self._comment)
attrib["Instance"] = str(self._classname)
SubElement(cls,"Node",attrib)
return cls
class Pointer(Node):
def __init__(self,classname, name,comment=""):
super(Pointer, self).__init__(name,comment)
self._classname = classname
self._type = NodeType.nt_pointer[0]
self._size = NodeType.nt_pointer[1]
def _write(self,cls):
attrib = {}
attrib["Name"] = str(self._name)
attrib["Type"] = str(self._type)
attrib["Size"] = str(self._size)
attrib["bHidden"] = str(self._bHidden)
attrib["Comment"] = str(self._comment)
attrib["Pointer"] = str(self._classname)
SubElement(cls,"Node",attrib)
return cls
class Double(Node):
def __init__(self,name,comment=""):
super(Double, self).__init__(name,comment)
self._type = NodeType.nt_double[0]
self._size = NodeType.nt_double[1]
class Float(Node):
def __init__(self,name,comment=""):
super(Float, self).__init__(name,comment)
self._type = NodeType.nt_float[0]
self._size = NodeType.nt_float[1]
class Class(Node):
def __init__(self,name,offset=0,size=0,comment="",empty=False):
super(Class, self).__init__(name,comment)
self._offset = offset
self._elements = {}
self._type = NodeType.nt_class[0]
self._size = size
self._empty = empty
def __setitem__(self,key,value):
for i in range(key+value._size-1,-1,-1): # overlap check
if i in self._elements:
n = self._elements[i]
if i+n._size-1 >= key:
sys.stderr.write("ERROR: Index 0x%x of element %s in class %s overlaps with element %s\n" % (key,value._name,self._name,n._name))
sys.stderr.write("ERROR: Class memory map overlap\n")
exit(1)
else:
break
self._elements[key] = value
last = sorted(self._elements.iterkeys())[-1]
lastsize = last + self._elements[last]._size # recalculate class size after adding element
if lastsize > self._size:
self._size = lastsize
def __getitem__(self,key):
if key not in self._elements:
return None
else:
return self._elements[key]
def _write(self,et,hidepad=0,custompad=0):
if len(self._elements) == 0:
return et
last = self._size
if (custompad):
self._fill_offsets2(last,hidepad)
else:
self._fill_offsets(last,hidepad)
attrib = {}
attrib["Name"] = self._name
attrib["Type"] = str(28)
attrib["Comment"] = self._comment
attrib["Offset"] = str(self._offset)
attrib["strOffset"] = hex(self._offset)[2:].strip('L')
attrib["Code"] = ""
cls = SubElement(et,"Class",attrib)
for elem in sorted(self._elements.iterkeys()):
if self._elements[elem] == None:
continue
cls = self._elements[elem]._write(cls)
return et
def _fill_offsets2(self,last,hidepad):
gap_count = 0
i = 0
while i <= last:
if i not in self._elements:
gap_count += 1
i += 1
else:
if gap_count > 0:
pad = Custom(name="_PAD_CUSTOM",size=gap_count)
pad._bHidden = hidepad
self[i-gap_count] = pad
gap_count = 0
i = i + self._elements[i]._size
def _fill_offsets(self,last,hidepad):
gap_count = 0
i = 0
while i <= last:
if i not in self._elements:
gap_count += 1
if gap_count == 8:
pad = Hex64(name="_PAD8")
pad._bHidden = hidepad
self[i-7] = pad
gap_count = 0
i += 1
else:
if gap_count == 7:
pad = Hex32(name="_PAD4")
pad._bHidden = hidepad
self[i-7] = pad
pad = Hex16(name="_PAD2")
pad._bHidden = hidepad
self[i-3] = pad
pad = Hex8(name="_PAD1")
pad._bHidden = hidepad
self[i-1] = pad
if gap_count == 6:
pad = Hex32(name="_PAD4")
pad._bHidden = hidepad
self[i-6] = pad
pad = Hex16(name="_PAD2")
pad._bHidden = hidepad
self[i-2] = pad
if gap_count == 5:
pad = Hex32(name="_PAD4")
pad._bHidden = hidepad
self[i-5] = pad
pad = Hex8(name="_PAD1")
pad._bHidden = hidepad
self[i-1] = pad
if gap_count == 4:
pad = Hex32(name="_PAD4")
pad._bHidden = hidepad
self[i-4] = pad
if gap_count == 3:
pad = Hex16(name="_PAD2")
pad._bHidden = hidepad
self[i-3] = pad
pad = Hex8(name="_PAD1")
pad._bHidden = hidepad
self[i-1] = pad
if gap_count == 2:
pad = Hex16(name="_PAD2")
pad._bHidden = hidepad
self[i-2] = pad
if gap_count == 1:
pad = Hex8(name="_PAD1")
pad._bHidden = hidepad
self[i-1] = pad
if gap_count == 0:
pass
i = i + self._elements[i]._size
gap_count = 0
class Map():
def __init__(self,file):
self._file = file
self._classes = []
self._classnames = []
self._classes_dict = {}
def add_class(self,cls):
self._classes += [cls]
self._classes_dict[cls._name] = cls
self._classnames += [cls._name]
def Class(self,name,offset=0,size=0,comment=""):
cls = Class(name,offset,size,comment)
self.add_class(cls)
return cls
def _fix_pointers(self):
emptyclasses = []
for cls in self._classes:
for node in cls._elements:
node = cls._elements[node]
if ((node._type == NodeType.nt_pointer[0]) | (node._type == NodeType.nt_instance[0]) | (node._type == NodeType.nt_array[0])):
if (node._classname not in self._classnames):
node._comment = "Empty Class"
print "Warn: Can't find class %s for pointer reference, adding later..." % (node._classname)
c = Class(name=node._classname,comment="I'm an empty class, please define me",empty=True)
c[0x0] = Hex64(name="")
emptyclasses += [c]
self._classnames += [c._name]
self._classes_dict[c._name] = c
else:
if (self._classes_dict[node._classname]._empty):
node._comment = "Empty Class"
if len(emptyclasses) > 0:
for cls in emptyclasses:
self.add_class(cls)
print "Warn: Creating empty class %s" % cls._name
def write(self,hidepad=0,custompad=0):
print "Info: Processing ReClass map..."
print "Info: Fixing dangling class pointers..."
self._fix_pointers()
et = Element("ReClass")
et.append(Comment("Reclass 2016")) # We Support Reclass 2016 datatypes
TD = {}
TD["tdHex"] = "char"
TD["tdInt64"] = "__int64"
TD["tdInt32"] = "__int32"
TD["tdInt16"] = "__int16"
TD["tdInt8"] = "__int8"
TD["tdDWORD64"] = "DWORD64"
TD["tdDWORD"] = "DWORD"
TD["tdWORD"] = "WORD"
TD["tdBYTE"] = "unsigned char"
TD["tdFloat"] = "float"
TD["tdDouble"] = "double"
TD["tdVec2"] = "Vector2"
TD["tdVec3"] = "Vector3"
TD["tdQuat"] = "Vector4"
TD["tdMatrix"] = "matrix3x4_t"
TD["tdPChar"] = "char *"
TD["tdPWChar"] = "wchar_t *"
TypeDef = SubElement(et,"TypeDef",TD)
Header = SubElement(et,"Header",{"Text":""})
Footer = SubElement(et,"Footer",{"Text":""})
Notes = SubElement(et,"Notes",{"Text":""})
for cls in self._classes:
et = cls._write(et,hidepad,custompad)
output = tostring(et,'utf-8')
reparse = minidom.parseString(output)
f = open(self._file,"w")
print "Info: Generating .reclass file at: %s" % self._file
f.write(reparse.toprettyxml(indent=" "))
f.close()
print "Info: Done!"
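# Example (sketch): build a small map and emit the .reclass XML. The class and
# field names are illustrative; "World" is auto-created as an empty class by
# _fix_pointers() because it is referenced but never defined.
# m = Map("example.reclass")
# player = m.Class("Player", offset=0x140000000)
# player[0x00] = Pointer("World", "pWorld")
# player[0x08] = Float("Health")
# player[0x0C] = Dword("TeamId")
# m.write(hidepad=1)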
``` |
{
"source": "70RMUND/Tormund-BFV-Radar",
"score": 2
} |
#### File: 70RMUND/Tormund-BFV-Radar/BFV.py
```python
import MemAccess
import copy
import time
from MemAccess import *
# BFV Related Offsets
NDM_FRAMES = 0 #
NDM_BUSY = 4 #
NDM_LOCALPLAYER = 8 #
NDM_PLAYERLIST = 0x10 #
NDM_TYPEINFOLIST = 0x18 #
NDM_ENTITYKEYLIST = 0x20 #
ClientPlayer_TeamID = 0x1C48 #
ClientPlayer_Soldier = 0x1d50 #
ClientPlayer_Vehicle = 0x1d60 #
GameRenderer_RenderView = 0x60 #
RenderView_ViewMatrix = 0x2F0 #
HC_Health = 0x20
HC_MaxHealth = 0x24
CVE_TeamID = 0x234
CSE_HealthComponent = 0x2e8 # DONE
CCPE_Transform = 0x3a0  # 0x3c0
CSE_Player = 0x3A8
CVE_VehicleEntityData = 0x38
VED_ControllableType = 0x1F8
CCAT_ActiveTrigger = 0xD84
CCAT_TriggerData = 0x28
CCAT_ppAreaBounds = 0x60
VVSD_PointsArray = 0x20
AOD_ObjectiveArray = 0x18
OD_Transform = 0x30
OD_ShortName = 0x20
OD_LongName = 0x80
OD_TeamState = 0x88
OD_ControlledState = 0x8C
global offsets
offsets = {}
def isValid(addr):
return ((addr >= 0x10000) and (addr < 0x0000001000000000))
def isValidInGame(addr):
return ((addr >= 0x140000000) and (addr < 0x14FFFFFFF))
def numOfZeros(value):
tmp = value
    ret = 0
for i in range(8):
if (((tmp>>(i*8))&0xFF) == 0x00):
ret += 1
return ret
class PointerManager():
def __init__(self,pHandle):
self.mem = MemAccess(pHandle)
self.pHandle = pHandle
self.gpumemptr = 0
self.OBFUS_MGR = 0
if (offsets["OBFUS_MGR"] == 0):
offsets["OBFUS_MGR"] = self.GetObfuscationMgr()
else:
self.OBFUS_MGR = offsets["OBFUS_MGR"]
@staticmethod
def decrypt_ptr(encptr, key):
# Grab byte at location
def GRAB_BYTE(x,n):
return (x >> (n*8))&0xFF
ret = 0
subkey = (key^((5*key)%(2**64)))%(2**64)
for i in range(7):
y = GRAB_BYTE(subkey,i)
subkey += 8
t1 = (y*0x3B)%(2**8)
t2 = (y + GRAB_BYTE(encptr,i)) % (2**8)
ret |= (t2^t1)<<(i*8)
ret |= GRAB_BYTE(encptr,7)<< 56
ret &= 0x7FFFFFFFFFFFFFFF
return ret
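    # --- Illustrative note, not part of the original tool -------------------
    # decrypt_ptr() undoes a simple per-byte transform on the low 7 bytes:
    #     plain_i = ((key_i + enc_i) & 0xFF) ^ ((key_i * 0x3B) & 0xFF)
    # where key_i is byte i of (key ^ 5*key) and that running value is bumped
    # by 8 after every byte; byte 7 passes through and bit 63 (the "pointer is
    # encrypted" marker) is masked off at the end.
    # Worked single-byte example: key byte 0x10, encrypted byte 0x20 ->
    #     ((0x10 + 0x20) & 0xFF) ^ ((0x10 * 0x3B) & 0xFF) = 0x30 ^ 0xB0 = 0x80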
def GetObfuscationMgr(self):
api._cache_en = False
print ("[+] Searching for ObfuscationMgr...")
addr = -1
OM = 0
ss = StackAccess(self.pHandle,self.mem[offsets["PROTECTED_THREAD"]].read_uint32(0))
while (1):
addr = -1
time.sleep(0.001)
buf = ss.read()
for i in range(0,len(buf),8):
testptr = int.from_bytes(buf[i:i+8],"little")
if (isValid(testptr)):
if self.mem[testptr].read_uint64(0x0) == offsets["OBFUS_MGR_PTR_1"]:
OM = testptr
self.OBFUS_MGR = testptr
break
if (OM>0): break
ss.close()
print ("[+] Found ObfuscationMgr @ 0x%08x "%(OM))
api._cache_en = True
return OM
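    # Note (added for clarity): the loop above repeatedly snapshots the stack
    # of the game's protected thread and treats every aligned qword as a
    # candidate pointer; a hit is any pointer whose first qword equals the
    # known ObfuscationMgr vtable value, offsets["OBFUS_MGR_PTR_1"].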
def GetDx11Secret(self):
def TestDx11Secret(self, testkey):
mem = self.mem
typeinfo = offsets["ClientStaticModelEntity"]
flink = mem[typeinfo].read_uint64(0x88)
ObfManager = self.OBFUS_MGR
HashTableKey = mem[typeinfo](0).me() ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager+0x78
EncryptionKey = self.hashtable_find(hashtable, HashTableKey)
if (EncryptionKey == 0):
return 0
EncryptionKey ^= testkey
ptr = PointerManager.decrypt_ptr(flink, EncryptionKey)
if (isValid(ptr)):
return True
else:
return False
api._cache_en = False
if (TestDx11Secret(self,offsets["Dx11Secret"])):
api._cache_en = True
return offsets["Dx11Secret"]
if (offsets["GPUMemPtr"]):
for offset in range(0,0x400,0x100):
testptr = self.mem[offsets["GPUMemPtr"]].read_uint64(offset)
if (testptr):
if (TestDx11Secret(self,testptr)):
if (testptr != offsets["Dx11Secret"]):
print ("[+] Found Dx11 key scraping GPU mem @ 0x%x"%(offsets["GPUMemPtr"]+offset))
offsets["Dx11Secret"] = testptr
api._cache_en = True
return offsets["Dx11Secret"]
offsets["GPUMemPtr"] = 0
ss = StackAccess(self.pHandle,self.mem[offsets["PROTECTED_THREAD"]].read_uint32(0))
if (self.mem[self.OBFUS_MGR].read_uint64(0x100) != 0):
addr = -1
OM = 0
i = 0
print("[+] Locating initial Dx11 key location, please wait...")
while (1):
addr = -1
buf = ss.read()
addr = buf.find((offsets["OBFUS_MGR_RET_1"]).to_bytes(8, byteorder='little'))
while (addr > -1):
i = 0x38
gpumem = int.from_bytes(buf[addr+i:addr+i+8],"little")
testptr = self.mem[gpumem].read_uint64(0x0)
if (TestDx11Secret(self,testptr)):
if (testptr != offsets["Dx11Secret"]):
offsets["GPUMemPtr"] = gpumem&0xFFFFFFFFFFFFFC00
print ("[+] Found Initial Dx11 key scraping GPU mem @ 0x%x"%(offsets["GPUMemPtr"]))
offsets["Dx11Secret"] = testptr
api._cache_en = True
ss.close()
return offsets["Dx11Secret"]
addr = buf.find((offsets["OBFUS_MGR_RET_1"]).to_bytes(8, byteorder='little'),addr+8)
else:
offsets["Dx11Secret"] = 0
api._cache_en = True
ss.close()
return 0
def CheckCryptMode(self):
api._cache_en = False
DecFunc = self.mem[self.OBFUS_MGR].read_uint64(0xE0) ^ self.mem[self.OBFUS_MGR].read_uint64(0xF8)
Dx11EncBuffer = self.mem[self.OBFUS_MGR].read_uint64(0x100)
if ((Dx11EncBuffer != 0) and (offsets["Dx11EncBuffer"] != Dx11EncBuffer)):
self.GetDx11Secret()
print ("[+] Dynamic key loaded, root key set to 0x%x"%(offsets["Dx11Secret"]))
offsets["Dx11EncBuffer"] = Dx11EncBuffer
offsets["CryptMode"] = 1
elif (offsets["CryptMode"] == 0):
if ((DecFunc == offsets["OBFUS_MGR_DEC_FUNC"]) and (Dx11EncBuffer != 0)):
self.GetDx11Secret()
print ("[+] Dynamic key loaded, retrieving key...")
offsets["Dx11EncBuffer"] = Dx11EncBuffer
offsets["CryptMode"] = 1
elif (offsets["CryptMode"] == 1):
if (DecFunc != offsets["OBFUS_MGR_DEC_FUNC"]):
offsets["Dx11Secret"] = 0x598447EFD7A36912
print ("[+] Static key loaded, root key set to 0x%x"%(offsets["Dx11Secret"]))
offsets["CryptMode"] = 0
self.gpumemptr = 0
api._cache_en = True
def hashtable_find(self, table, key):
mem = self.mem
bucketCount = mem[table].read_uint32(0x10)
if (bucketCount == 0):
return 0
elemCount = mem[table].read_uint32(0x14)
startcount = key % bucketCount
node = mem[table](0x8)(0x8*startcount).me()
if (node == 0):
return 0
while 1:
first = mem[node].read_uint64(0x0)
second = mem[node].read_uint64(0x8)
next = mem[node].read_uint64(0x10)
if first == key:
#print ("Key: 0x%016x Node: 0x%016x"%(key^ mem[self.OBFUS_MGR].read_uint64(0xE0),node))
return second
elif (next == 0):
return 0
node = next
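    # Hash table layout assumed by hashtable_find (inferred from the reads
    # above): table+0x08 -> bucket array, table+0x10 -> bucket count,
    # table+0x14 -> element count; each node is {key @ +0x00, value @ +0x08,
    # next @ +0x10} and buckets are chained lists indexed by key % bucketCount.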
def GetLocalPlayer(self):
self.CheckCryptMode()
mem = self.mem
ClientPlayerManager = mem[offsets["CLIENT_GAME_CONTEXT"]](0).read_uint64(0x60)
ObfManager = self.OBFUS_MGR
LocalPlayerListXorValue = mem[ClientPlayerManager].read_uint64(0xF8)
LocalPlayerListKey = LocalPlayerListXorValue ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager+0x10
EncryptedPlayerManager = self.hashtable_find(hashtable, LocalPlayerListKey)
if (EncryptedPlayerManager == 0):
return 0
MaxPlayerCount = mem[EncryptedPlayerManager].read_uint32(0x18)
if (MaxPlayerCount != 1):
return 0
XorValue1 = mem[EncryptedPlayerManager].read_uint64(0x20) ^ mem[EncryptedPlayerManager].read_uint64(0x8)
XorValue2 = mem[EncryptedPlayerManager].read_uint64(0x10) ^ offsets["Dx11Secret"]
LocalPlayer = mem[XorValue2].read_uint64(0) ^ XorValue1
return LocalPlayer
def GetPlayerById(self,id):
self.CheckCryptMode()
mem = self.mem
ClientPlayerManager = mem[offsets["CLIENT_GAME_CONTEXT"]](0).read_uint64(0x60)
ObfManager = self.OBFUS_MGR
PlayerListXorValue = mem[ClientPlayerManager].read_uint64(0x100)
PlayerListKey = PlayerListXorValue ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager+0x10
EncryptedPlayerManager = self.hashtable_find(hashtable, PlayerListKey)
if (EncryptedPlayerManager == 0):
return 0
MaxPlayerCount = mem[EncryptedPlayerManager].read_uint32(0x18)
if (MaxPlayerCount != 70):
return 0
XorValue1 = mem[EncryptedPlayerManager].read_uint64(0x20) ^ mem[EncryptedPlayerManager].read_uint64(0x8)
XorValue2 = mem[EncryptedPlayerManager].read_uint64(0x10) ^ offsets["Dx11Secret"]
ClientPlayer = mem[XorValue2].read_uint64(0x8*id) ^ XorValue1
return ClientPlayer
def GetSpectatorById(self,id):
self.CheckCryptMode()
mem = self.mem
ClientPlayerManager = mem[offsets["CLIENT_GAME_CONTEXT"]](0).read_uint64(0x60)
ObfManager = self.OBFUS_MGR
PlayerListXorValue = mem[ClientPlayerManager].read_uint64(0xF0)
PlayerListKey = PlayerListXorValue ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager+0x10
EncryptedPlayerManager = self.hashtable_find(hashtable, PlayerListKey)
if (EncryptedPlayerManager == 0):
return 0
MaxPlayerCount = mem[EncryptedPlayerManager].read_uint32(0x18)
if (MaxPlayerCount == 0) or (id >= MaxPlayerCount):
return 0
XorValue1 = mem[EncryptedPlayerManager].read_uint64(0x20) ^ mem[EncryptedPlayerManager].read_uint64(0x8)
XorValue2 = mem[EncryptedPlayerManager].read_uint64(0x10) ^ offsets["Dx11Secret"]
ClientPlayer = mem[XorValue2].read_uint64(0x8*id) ^ XorValue1
return ClientPlayer
def GetEntityKey(self,PointerKey):
self.CheckCryptMode()
mem = self.mem
ObfManager = self.OBFUS_MGR
HashTableKey = PointerKey ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager+0x78
EncryptionKey = self.hashtable_find(hashtable, HashTableKey)
if (EncryptionKey == 0):
return 0
EncryptionKey ^= offsets["Dx11Secret"]
return EncryptionKey
def DecryptPointer(self,EncPtr,PointerKey):
self.CheckCryptMode()
if not (EncPtr&0x8000000000000000):
return 0
mem = self.mem
ObfManager = self.OBFUS_MGR
HashTableKey = PointerKey ^ mem[ObfManager].read_uint64(0xE0)
hashtable = ObfManager+0x78
EncryptionKey = self.hashtable_find(hashtable, HashTableKey)
if (EncryptionKey == 0):
return 0
EncryptionKey ^= offsets["Dx11Secret"]
return PointerManager.decrypt_ptr(EncPtr,EncryptionKey)
def find_typeinfo(name,first,pHandle):
mem = MemAccess(pHandle)
typeinfo = first
while (typeinfo):
if mem[typeinfo](0).read_pstring(0) == name:
return typeinfo
typeinfo = mem[typeinfo].read_uint64(8)
return -1
def build_offsets(pHandle):
global offsets
print ("[+] Gathering offsets, please wait...")
x = sigscan(pHandle)
mem = MemAccess(pHandle)
    offsets["OBFUS_MGR"] = 0
offsets["CryptMode"] = 0
offsets["GPUMemPtr"] = 0
offsets["Dx11Secret"] = 0x598447EFD7A36912
offsets["Dx11EncBuffer"] = 0
offsets["TIMESTAMP"] = get_buildtime(pHandle)
offsets["GAMERENDERER"] = 0x1447f6fb8
offsets["CLIENT_GAME_CONTEXT"] = 0x1447522a8
offsets["OBJECTIVE_MANAGER"] = 0x14468B8B0 # FF 0D ? ? ? ? 48 8B 1D [? ? ? ?] 48 8B 43 10 48 8B 4B 08 48 3B C8 74 0E
offsets["CLIENTSHRINKINGPLAYAREA"] = 0x1446645A0 # ? 8B F2 48 8B D9 ? 8B 35 [? ? ? ?] ? 85 F6
offsets["ClientSoldierEntity"] = 0x144F2EF50
offsets["ClientVehicleEntity"] = 0x144E3A170
offsets["ClientSupplySphereEntity"] = 0x144C54550
offsets["ClientCombatAreaTriggerEntity"] = 0x144E3B870
offsets["ClientExplosionPackEntity"] = 0x144F346A0
offsets["ClientProxyGrenadeEntity"] = 0x144F34370
offsets["ClientGrenadeEntity"] = 0x144F34590
offsets["ClientInteractableGrenadeEntity"] = 0x144C5BCB0
offsets["ClientCapturePointEntity"] = 0x144C8DD30
offsets["ClientLootItemEntity"] = 0x144C473A0
offsets["ClientArmorVestLootItemEntity"] = 0x144C89090
offsets["ClientStaticModelEntity"] = 0x144E32F10
offsets["PROTECTED_THREAD"] = 0x144752654
offsets["OBFUS_MGR_PTR_1"] = 0x1438B46D0
offsets["OBFUS_MGR_RET_1"] = 0x147E207B6
offsets["OBFUS_MGR_DEC_FUNC"] = 0x14161F880
offsets["OBJECTIVE_VTBL"] = 0x1437A7EF8
return offsets
def GetLocalPlayerList(pHandle):
global offsets
pm = PointerManager(pHandle)
ind = 0
plist = []
for i in range(70):
pPlayer = pm.GetPlayerById(i)
if pPlayer != 0:
plist += [pPlayer]
return plist
def GetEncKey(pHandle,typeinfo):
global offsets
cache_en = api._cache_en
api._cache_en = False
global keystore
mem = MemAccess(pHandle)
pm = PointerManager(pHandle)
if (mem[typeinfo].read_uint64(0x88) == 0):
api._cache_en = cache_en
return 0
try:
keystore
except NameError:
keystore = {}
if typeinfo in keystore:
api._cache_en = cache_en
#print ("[+] Typeinfo: 0x%x Encryption Key: 0x%x"% (typeinfo,keystore[typeinfo]))
return keystore[typeinfo]
pm = PointerManager(pHandle)
key = pm.GetEntityKey(mem[typeinfo](0).me())
    if key == 0:
        api._cache_en = cache_en
        return 0
keystore[typeinfo] = key
api._cache_en = cache_en
print ("[+] Typeinfo: 0x%x Encryption Key: 0x%x"% (typeinfo,keystore[typeinfo]))
return keystore[typeinfo]
def GetEntityList(pHandle,typeinfo,flink_offset=0x80):
elist = []
mem = MemAccess(pHandle)
flink = mem[typeinfo].read_uint64(0x88)
key = GetEncKey(pHandle,typeinfo)
while (flink):
ent = PointerManager.decrypt_ptr(flink,key)
if ent >= 0x100000000000:
return []
elist += [ent-flink_offset]
flink = mem[ent].read_uint64(0x0)
return elist
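# Usage sketch (assumes a valid handle and populated offsets; values here are
# only illustrative):
#   pHandle = GetHandle()
#   initialize(pHandle)
#   for soldier in GetEntityList(pHandle, offsets["ClientSoldierEntity"], 0xf0):
#       print("soldier entity @ 0x%016x" % soldier)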
def GetNextEntity(pHandle,Ptr,typeinfo,flink_offset=0x88):
elist = []
mem = MemAccess(pHandle)
key = GetEncKey(pHandle,typeinfo)
if Ptr == 0:
flink = mem[typeinfo].read_uint64(0x88)
else:
flink = mem[Ptr].read_uint64(flink_offset)
ptr = PointerManager.decrypt_ptr(flink,key)-flink_offset
#if (typeinfo == offsets["ClientArmorVestLootItemEntity"]):
#print (hex(ptr))
if (isValid(ptr)):
return ptr
return 0
def GetHandle():
def yes_or_no(question):
while "the answer is invalid":
reply = str(input(question+' (y/n): ')).lower().strip()
if reply[:1] == 'y':
return True
if reply[:1] == 'n':
return False
pid = api.get_processid_by_name("bfv.exe")
if type(pid) == type(None):
return 0
pHandle = HANDLE(api.OpenProcess(DWORD(0x1f0fff),False,DWORD(pid)))
priv = api.is_elevated(pHandle)
if (priv == 2):
ans = yes_or_no("[+] WARNING! BFV.exe is running as admin, do you still want to continue?")
if (ans == False):
exit(0)
return pHandle.value
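# Note: the 0x1f0fff access mask requested above is PROCESS_ALL_ACCESS
# (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFF). The radar only reads
# memory, so a narrower mask would likely also work, but the original code
# simply asks for everything.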
def GetEntityTransform(pHandle,Entity):
mem = MemAccess(pHandle)
flags = mem[Entity](0x40).read_uint64(0x8)
if flags == None:
return 0
_9 = (flags>>8)&0xFF
_10 = (flags>>16)&0xFF
transform = mem[Entity](0x40).read_mat4((0x20*(_10+(2*_9)))+0x10)
return transform
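# Note (added for clarity): bytes 1 and 2 of the qword read at [entity+0x40]+0x8
# act as indices; (idx2 + 2*idx1) selects one 0x20-byte slot in an array of
# transforms that starts at [entity+0x40]+0x10, and that slot is read back as a
# 4x4 matrix.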
def list_current_entities(pHandle):
global offsets
mem = MemAccess(pHandle)
next = offsets["FIRST_TYPEINFO"]
while (next!=0):
if (mem[next].read_uint64(0x68) &0x8000000000000000):
str = mem[next](0).read_pstring(0)
if len(str)>0:
num = len(GetEntityList(pHandle,next))
print("%d: %s" % (num,str))
next = mem[next].read_uint64(0x8)
class GameSoldierData():
name = ""
pointer = 0
transform = None
health = 0
maxhealth = 0
teamid = 0
alive = True
vehicle = 0
class GameVehicleData():
pointer = 0
transform = None
teamid = 0
vehicletype = ""
class GameCapturePointData():
pointer = 0
transform = None
objectivedata = None
initialteamowner = 0
radius = 0
class UIObjectiveData():
pointer = 0
transform = None
shortname = ""
longname = ""
teamstate = 0
controlledstate = 0
capturepoint = None
class GameBoundsData():
pointer = 0
teamid = 0
teamspecific = False
points = []
class GameLootData():
LootName = ""
ItemName = ""
LootType = 0
VestEntity = False
AccessCount = 0
transform = [0,0,0,0]
class GameDebugPointData():
chr = ""
transform = [0,0,0,0]
class GameExplosiveData():
pointer = 0
    teamid = 0
transform = [0,0,0,0]
class GameGrenadeData():
pointer = 0
transform = [0,0,0,0]
class GameSupplyData():
pointer = 0
name = ""
transform = [0,0,0,0]
class FSObjectData():
pointer = 0
typename = ""
transform = [0,0,0,0]
class GameCircleData():
pointer = 0
OuterCircle_Moving = [0,0,0,0]
InnerCircle_Const = [0,0,0,0]
OuterCircleRadius_Moving = 0.0
InnerCircleRadius_Const = 0.0
class GameData():
infirestorm = False
testcrates = []
testsafes = []
myplayer = 0
mysoldier = 0
myteamid = 0
myvehicle = 0
myviewmatrix = 0
mytransform = 0
valid = False
def __init__(self):
self.soldiers = []
self.vehicles = []
self.capturepoints = []
self.debugpoints = []
self.loots = {}
self.explosives = []
self.grenades = []
self.supplies = []
self.fsobjects = []
self.uiobjectives = []
self.boundsdata = [[],[],[]]
self.boundsstate = 0
self.LastLootPtr = 0
self.LastVestLootPtr = 0
self.boundslimits = None# x low, x high, y low, y high
self.circledata = None
self.testpoint = False
def AddSoldier(self,soldier):
self.soldiers += [soldier]
def ClearSoldiers(self):
self.soldiers = []
def AddVehicle(self,vehicle):
self.vehicles += [vehicle]
def ClearVehicles(self):
self.vehicles = []
def AddCapturePoint(self,capturepoint):
self.capturepoints += [capturepoint]
def ClearCapturePoints(self):
self.capturepoints = []
def AddUIObjective(self,uiobjective):
self.uiobjectives += [uiobjective]
def ClearUIObjectives(self):
self.uiobjectives = []
def AddDebugPoint(self,debugpoint):
self.debugpoints += [debugpoint]
def ClearDebugPoints(self):
self.debugpoints = []
def AddSupply(self,supply):
self.supplies += [supply]
def ClearSupplies(self):
self.supplies = []
def AddGrenade(self,grenade):
self.grenades += [grenade]
def ClearGrenades(self):
self.grenades = []
def AddExplosive(self,explosive):
self.explosives += [explosive]
def ClearExplosives(self):
self.explosives = []
def AddBoundsData(self,boundsdata, TeamID):
for b in self.boundsdata[TeamID]:
if b.pointer == boundsdata.pointer:
return 0
self.boundsdata[TeamID] += [boundsdata]
for p in boundsdata.points:
if (self.boundslimits == None):
self.boundslimits = [p[0],p[0],p[1],p[1]]
continue
if p[0] < self.boundslimits[0]:
self.boundslimits[0] = p[0]
if p[0] > self.boundslimits[1]:
self.boundslimits[1] = p[0]
if p[1] < self.boundslimits[2]:
self.boundslimits[2] = p[1]
if p[1] > self.boundslimits[3]:
self.boundslimits[3] = p[1]
return 1
def ClearBoundsData(self):
self.boundsdata[0] = [] # Neutral
self.boundsdata[1] = [] # TeamID 1
self.boundsdata[2] = [] # TeamID 2
self.boundslimits = None
def DebugPrintMatrix(mat):
print("[%.3f %.3f %.3f %.3f ]" %(mat[0][0],mat[0][1],mat[0][2],mat[0][3]))
print("[%.3f %.3f %.3f %.3f ]" %(mat[1][0],mat[1][1],mat[1][2],mat[1][3]))
print("[%.3f %.3f %.3f %.3f ]" %(mat[2][0],mat[2][1],mat[2][2],mat[2][3]))
print("[%.3f %.3f %.3f %.3f ]\n"%(mat[3][0],mat[3][1],mat[3][2],mat[3][3]))
def DebugPrintVec4(Vec4):
print("[%.3f %.3f %.3f %.3f ]\n" %(Vec4[0],Vec4[1],Vec4[2],Vec4[3]))
def MakeBoundsData(pHandle,VVSDAddr,Team,IsTeamSpecific):
mem = MemAccess(pHandle)
PointsList = mem[VVSDAddr](VVSD_PointsArray).me()
PointsListSize = mem[PointsList-0x4].read_uint32()
BoundsData = GameBoundsData()
BoundsData.teamid = Team
BoundsData.teamspecific = (False,True)[IsTeamSpecific]
BoundsData.points = []
BoundsData.pointer = VVSDAddr
for i in range(PointsListSize):
BoundsData.points += [mem[PointsList+(i*16)].read_vec4(0)]
return BoundsData
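# Assumed layout of the shape blob walked above: VVSD+0x20 points to an array
# of 16-byte vec4 entries, and the element count is stored as a uint32
# immediately before the array (hence the read at PointsList-0x4).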
def Process(pHandle,cnt):
global offsets
api._access=0
#api._cache_en = True
del api._cache
api._cache = {}
mem = MemAccess(pHandle)
pm = PointerManager(pHandle)
global g_gamedata
try:
g_gamedata
except NameError:
g_gamedata = GameData()
def GetEntityVec4(pHandle,Entity):
mem = MemAccess(pHandle)
flags = mem[Entity](0x40).read_uint64(0x8)
if flags == None:
return 0
_9 = (flags>>8)&0xFF
_10 = (flags>>16)&0xFF
_off = (0x20*(_10+(2*_9)))+0x10
v4 = [mem[Entity](0x40).read_uint32(_off+0x30),
mem[Entity](0x40).read_uint32(_off+0x34),
mem[Entity](0x40).read_uint32(_off+0x38),
mem[Entity](0x40).read_uint32(_off+0x40)]
return v4
# Get Local Info
MyPlayer = pm.GetLocalPlayer()
MySoldier = mem[MyPlayer].weakptr(ClientPlayer_Soldier).me()
MyTeamId = mem[MyPlayer].read_uint32(ClientPlayer_TeamID)
MyVehicle = mem[MyPlayer].weakptr(ClientPlayer_Vehicle).me()
MyViewmatrix = mem[offsets["GAMERENDERER"]]()(GameRenderer_RenderView).read_mat4(RenderView_ViewMatrix)
MyTransform = GetEntityTransform(pHandle,MySoldier)
MyPos = GetEntityVec4(pHandle,MySoldier)
g_gamedata.myplayer = MyPlayer
g_gamedata.mysoldier = MySoldier
g_gamedata.myteamid = MyTeamId
g_gamedata.myvehicle = MyVehicle
g_gamedata.myviewmatrix = MyViewmatrix
g_gamedata.mytransform = MyTransform
#print ("MyPlayer : 0x%016X" % MyPlayer)
#print ("MySoldier: 0x%016X" % MySoldier)
#print ("MyTeamId : 0x%016X" % MyTeamId)
#print ("MyPos : %s\n" % str(MyPos))
if MySoldier == 0:
g_gamedata.myviewmatrix = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
g_gamedata.mytransform = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
g_gamedata.valid = True
# Render Soldiers
g_gamedata.ClearSoldiers()
for Soldier in GetEntityList(pHandle,offsets["ClientSoldierEntity"],0xf0):
#print ("0x%016x"%Soldier)
# if you are me, skip
if (Soldier == MySoldier):
continue
# if you are not attached to a ClientPlayer, skip
if (mem[Soldier](CSE_Player).me() == 0):
continue
# if you are in my vehicle, skip
Vehicle = mem[Soldier](CSE_Player).weakptr(ClientPlayer_Vehicle).me()
if ((MyVehicle>0) and Vehicle == MyVehicle):
continue
TeamId = mem[Soldier](CSE_Player).read_uint32(ClientPlayer_TeamID)
Transform = GetEntityTransform(pHandle,Soldier)
if Transform == 0:
continue
Health = mem[Soldier](CSE_HealthComponent).read_float(HC_Health)
MaxHealth = mem[Soldier](CSE_HealthComponent).read_float(HC_MaxHealth)
name = mem[Soldier](CSE_Player).read_string(0x40)
Alive = True
if (Health <= 0.0):
Alive = False
SoldierData = GameSoldierData()
SoldierData.teamid = TeamId
SoldierData.transform = Transform
SoldierData.alive = Alive
SoldierData.vehicle = Vehicle
SoldierData.pointer = Soldier
SoldierData.health = Health
SoldierData.maxhealth = MaxHealth
SoldierData.name = name
g_gamedata.AddSoldier(SoldierData)
# Render Vehicles
g_gamedata.ClearVehicles()
for Vehicle in GetEntityList(pHandle,offsets["ClientVehicleEntity"],0xF0):
if (Vehicle == MyVehicle):
continue
#print (hex(Vehicle))
Transform = GetEntityTransform(pHandle,Vehicle)
if Transform == 0:
continue
VehicleData = GameVehicleData()
VehicleData.ownership = 0
VehicleData.transform = Transform
VehicleData.pointer = Vehicle
VehicleData.vehicletype = mem[Vehicle](CVE_VehicleEntityData).read_pstring(VED_ControllableType)
VehicleData.teamid = (mem[Vehicle].read_uint32(CVE_TeamID))
g_gamedata.AddVehicle(VehicleData)
#print ("0x%016x"%Vehicle)
# Get all objectives by accessing ObjectiveManager and iterating all ObjectiveData
g_gamedata.ClearUIObjectives()
i=0
while (1):
UIObj = mem[offsets["OBJECTIVE_MANAGER"]](0)(0x38).read_uint64(i*8)
i+=1
if mem[UIObj].read_uint64(0) != offsets["OBJECTIVE_VTBL"]:
break
Transform = mem[UIObj].read_mat4(OD_Transform)
ShortName = mem[UIObj].read_pstring(OD_ShortName)
LongName = mem[UIObj].read_pstring(OD_LongName)
TeamState = mem[UIObj].read_uint32(OD_TeamState)
ControlledState = mem[UIObj].read_uint32(OD_ControlledState)
UIObjective = UIObjectiveData()
UIObjective.pointer = UIObj
UIObjective.transform = Transform
UIObjective.shortname = ShortName
UIObjective.longname = LongName
UIObjective.teamstate = TeamState
UIObjective.controlledstate = ControlledState
g_gamedata.AddUIObjective(UIObjective)
# Get the shape of the map bounds by iterating ClientCombatAreaTriggerEntity and reading bounds points
ST_UPDATE = 0
ST_UPDATENEXT = 1
ST_SCAN = 2
for ClientCombatAreaTrigger in GetEntityList(pHandle,offsets["ClientCombatAreaTriggerEntity"],0xD40):
ActiveTrigger = mem[ClientCombatAreaTrigger].read_uint32(CCAT_ActiveTrigger)
ClientCombatAreaTriggerData = mem[ClientCombatAreaTrigger](CCAT_TriggerData).me()
Team = mem[ClientCombatAreaTriggerData].read_uint32(0x28)
IsTeamSpecific = mem[ClientCombatAreaTriggerData].read_uint8(0x2D)
updateShape = True
ShapeData = mem[ClientCombatAreaTrigger](CCAT_ppAreaBounds)(0x0).me()
if (g_gamedata.boundsstate == ST_SCAN):
for Shape in g_gamedata.boundsdata[0]:
if Shape.pointer == ShapeData:
updateShape = False
if (updateShape):
g_gamedata.boundsstate = ST_UPDATENEXT
if (g_gamedata.boundsstate == ST_UPDATE):
g_gamedata.AddBoundsData(MakeBoundsData(pHandle,ShapeData,Team,IsTeamSpecific),0)
i = 0xF0
while (1):
ShapeData = mem[ClientCombatAreaTrigger](i).me()
if (ShapeData == 0): break
if (g_gamedata.boundsstate == ST_SCAN):
updateShape = True
for Shape in g_gamedata.boundsdata[Team]:
if Shape.pointer == ShapeData:
updateShape = False
if (updateShape and len(g_gamedata.boundsdata[Team])):
g_gamedata.boundsstate = ST_UPDATENEXT
break
if (g_gamedata.boundsstate == ST_UPDATE):
g_gamedata.AddBoundsData(MakeBoundsData(pHandle,ShapeData,Team,IsTeamSpecific),Team)
else:
break
i+= 0x60
if (g_gamedata.boundsstate == ST_UPDATENEXT):
g_gamedata.boundsstate = ST_UPDATE
g_gamedata.ClearBoundsData()
elif (g_gamedata.boundsstate == ST_UPDATE):
g_gamedata.boundsstate = ST_SCAN
g_gamedata.ClearExplosives()
for Explosive in GetEntityList(pHandle,offsets["ClientExplosionPackEntity"],0xf0):
#print ("Explosive: " + hex(Explosive))
Transform = GetEntityTransform(pHandle,Explosive)
Team = mem[Explosive].read_uint32(0x4c0)
ExplosiveData = GameExplosiveData()
ExplosiveData.transform = Transform
ExplosiveData.teamid = Team
ExplosiveData.pointer = Explosive
g_gamedata.AddExplosive(ExplosiveData)
g_gamedata.ClearGrenades()
for Grenade in (GetEntityList(pHandle,offsets["ClientProxyGrenadeEntity"],0xf0)+GetEntityList(pHandle,offsets["ClientGrenadeEntity"],0xf0)+GetEntityList(pHandle,offsets["ClientInteractableGrenadeEntity"],0xf0)):
Transform = GetEntityTransform(pHandle,Grenade)
GrenadeData = GameGrenadeData()
GrenadeData.transform = Transform
GrenadeData.pointer = Grenade
g_gamedata.AddGrenade(GrenadeData)
g_gamedata.ClearSupplies()
for Supply in GetEntityList(pHandle,offsets["ClientSupplySphereEntity"],0xb8):
#print (hex(Supply))
SupplyName = mem[Supply](0x38).read_pstring(0xB8)
pos = mem[Supply].read_vec4(0x100)
#if pos == 0:
# continue
#print ("0x%x (%s)"% (Supply,SupplyName))
#print("%f %f %f %f"%(pos[0],pos[1],pos[2],pos[3]))
#print("%f %f %f %f"%(MyTransform[1][0],MyTransform[1][1],MyTransform[1][2],MyTransform[1][3]))
#print("%f %f %f %f"%(MyTransform[2][0],MyTransform[2][1],MyTransform[2][2],MyTransform[2][3]))
#print("%f %f %f %f"%(MyTransform[3][0],MyTransform[3][1],MyTransform[3][2],MyTransform[3][3]))
SupplyData = GameSupplyData()
SupplyData.transform = [[0,0,0,0],[0,0,0,0],[0,0,0,0],pos]
SupplyData.name = SupplyName
SupplyData.pointer = Supply
g_gamedata.AddSupply(SupplyData)
# This pointer only exists if we are in FireStorm mode
ShrinkingPlayArea = mem[offsets["CLIENTSHRINKINGPLAYAREA"]](0).me()
g_gamedata.circledata = None
if (not ShrinkingPlayArea):
g_gamedata.infirestorm = False
g_gamedata.fsobjects = []
if (ShrinkingPlayArea):
if (not g_gamedata.infirestorm):
for model in GetEntityList(pHandle,offsets["ClientStaticModelEntity"],0xf0):
name = mem[model](0x38)(0xA8).read_pstring(0x18)
if name == "artassets/props/gadgetcrate_01/gadgetcrate_01_200_paperfilling_Mesh":
fsobject = FSObjectData()
fsobject.pointer = model
fsobject.typename = "crate"
fsobject.transform = GetEntityTransform(pHandle,model)
g_gamedata.fsobjects += [fsobject]
elif name == "dakar/gameplay/prefabs/objectives/dk_safe_02_lid_Mesh":
fsobject = FSObjectData()
fsobject.pointer = model
fsobject.typename = "safe"
fsobject.transform = GetEntityTransform(pHandle,model)
g_gamedata.fsobjects += [fsobject]
g_gamedata.infirestorm = True
CircleData = GameCircleData()
CircleData.OuterCircle_Moving = mem[ShrinkingPlayArea].read_vec4(0x40)
CircleData.InnerCircle_Const = mem[ShrinkingPlayArea].read_vec4(0x50)
CircleData.OuterCircleRadius_Moving = mem[ShrinkingPlayArea].read_float(0x64)
CircleData.InnerCircleRadius_Const = mem[ShrinkingPlayArea].read_float(0x68)
g_gamedata.circledata = CircleData
    # Because Python is slow and there are a lot of loot entities, only walk
    # five of them per render pass so we don't completely kill our FPS.
    # We don't need low latency for these.
for n in range(5):
g_gamedata.LastLootPtr = GetNextEntity(pHandle,g_gamedata.LastLootPtr,offsets["ClientLootItemEntity"],flink_offset=0xf0)
if (g_gamedata.LastLootPtr!=0):
if g_gamedata.LastLootPtr not in g_gamedata.loots:
if (mem[g_gamedata.LastLootPtr].read_int32(0x238) != -1):
Loot = GameLootData()
Loot.LootName = mem[g_gamedata.LastLootPtr](0x720).read_pstring(0x40)
Loot.LootType = mem[g_gamedata.LastLootPtr](0x38).read_uint32(0x118)
Loot.ItemName = mem[g_gamedata.LastLootPtr](0x38)(0x100)(0x0).read_pstring(0x18)
Loot.transform = GetEntityTransform(pHandle,g_gamedata.LastLootPtr)
g_gamedata.loots[g_gamedata.LastLootPtr] = Loot
else:
g_gamedata.loots[g_gamedata.LastLootPtr].AccessCount += 1
if (mem[g_gamedata.LastLootPtr].read_int32(0x238) == -1):
del g_gamedata.loots[g_gamedata.LastLootPtr]
elif (g_gamedata.loots[g_gamedata.LastLootPtr].AccessCount >= 50):
loots = copy.copy(g_gamedata.loots)
for LootPtr in loots:
if g_gamedata.loots[LootPtr].AccessCount < 10:
del g_gamedata.loots[LootPtr]
else:
g_gamedata.loots[LootPtr].AccessCount = 0
    # Because Python is slow and there are a lot of loot entities, only walk
    # five of them per render pass so we don't completely kill our FPS.
    # We don't need low latency for these.
for n in range(5):
g_gamedata.LastVestLootPtr = GetNextEntity(pHandle,g_gamedata.LastVestLootPtr,offsets["ClientArmorVestLootItemEntity"],flink_offset=0xf0)
if (g_gamedata.LastVestLootPtr!=0):
if g_gamedata.LastVestLootPtr not in g_gamedata.loots:
if (mem[g_gamedata.LastVestLootPtr].read_int32(0x238) != -1):
Loot = GameLootData()
Loot.LootName = mem[g_gamedata.LastVestLootPtr](0x720).read_pstring(0x40)
Loot.VestEntity = True
Loot.ItemName = mem[g_gamedata.LastVestLootPtr](0x38)(0x100)(0x0).read_pstring(0x18)
Loot.transform = GetEntityTransform(pHandle,g_gamedata.LastVestLootPtr)
g_gamedata.loots[g_gamedata.LastVestLootPtr] = Loot
else:
g_gamedata.loots[g_gamedata.LastVestLootPtr].AccessCount += 1
if (mem[g_gamedata.LastVestLootPtr].read_int32(0x238) == -1):
del g_gamedata.loots[g_gamedata.LastVestLootPtr]
def initialize(pHandle):
global offsets
PAGE_SIZE = 0x1000
ALL_ACCESS = 0x1f0fff
PAGE_FLR = 0xFFFFFFFFFFFFF000
PAGE_RWX = 0x40
offsets = build_offsets(pHandle)
return
``` |
{
"source": "70ucanbin/IMS",
"score": 2
} |
#### File: ims/service/clientWorkServ.py
```python
import traceback
from flask import abort
from flask_login import current_user
from sqlalchemy import exc
from ims import db
from ims.service.mappers.comUserMapper import selectComUser as __getUser
from ims.service.mappers.clientWorkMapper import selectWorkMonthDetails as __getMonthDetails
from ims.service.mappers.clientWorkMapper import selectTraClientWorkList as __getList
from ims.service.mappers.clientWorkMapper import selectTraClientWorkDetails as __getDetails
from ims.service.mappers.clientWorkMapper import insertUpdateTraClientWork as __insertUpdateOne
from ims.service.mappers.clientWorkMapper import insertDayOffFlg as __insertDayOff
from ims.service.mappers.clientWorkMapper import deleteTraClientWork as __deleteOne
from ims.service.mappers.clientWorkMapper import deleteDay as __deleteDay
def getClientWorkMonthDetails(userId, year, month, startDay, endDay):
    """Calls the mapper that retrieves one month of daily working hours.
    :param userId: user ID the records belong to
    :param year: target year
    :param month: target month
    :param startDay: first day of the month
    :param endDay: last day of the month
"""
result = __getMonthDetails(userId, year, month , startDay, endDay)
return result
def getClientWorkList(userId, year, month, day):
    """Calls the mapper that retrieves the work list for the selected day.
    :param userId: user ID the records belong to
    :param year: target year
    :param month: target month
    :param day: target day
"""
dto = __getList(current_user.group_id, userId, year, month ,day)
return dto
def getClientWorkDetails(clientWorkId):
    """Calls the mapper that retrieves the selected work detail record.
    :param clientWorkId: work detail ID
"""
try:
Id = int(clientWorkId)
dto = __getDetails(Id)
if dto:
user = __getUser(dto.userId)
if user.group_id == current_user.group_id:
return dto
else:
return None
else:
return None
except:
return None
def insertUpdateClientWork(dto, isUpdate):
    """Calls the mapper that inserts or updates a work detail record.
    Exceptions are caught and handled here in the service layer.
    :param dto: work detail data
    :param isUpdate: flag that selects update (True) or insert (False)
"""
try:
__deleteDay(current_user.user_id, dto['year'], dto['month'], dto['day'])
__insertUpdateOne(dto,isUpdate)
db.session.commit()
except Exception:
traceback.print_exc()
db.session.rollback()
abort(500)
finally:
db.session.close()
def tookDayOff(year, month, day):
    """Calls the mapper that registers the selected day as a day off.
    Exceptions are caught and handled here in the service layer.
    :param year: target year
    :param month: target month
    :param day: target day
"""
try:
__deleteDay(current_user.user_id, year, month, day, 0)
__insertDayOff(current_user.user_id, year, month, day)
db.session.commit()
except Exception:
traceback.print_exc()
db.session.rollback()
abort(500)
finally:
db.session.close()
def deleteClientWork(clientWorkId):
    """Calls the mapper that deletes a work detail record.
    Exceptions are caught and handled here in the service layer.
    :param clientWorkId: work detail ID
"""
try:
__deleteOne(clientWorkId)
db.session.commit()
except Exception:
traceback.print_exc()
db.session.rollback()
abort(500)
finally:
db.session.close()
```
#### File: service/mappers/clientWorkMapper.py
```python
from sqlalchemy import and_, Integer
from sqlalchemy.sql import func
from sqlalchemy.orm import aliased
from sqlalchemy.exc import IntegrityError
from ims import db
from ims.service.mappers.models.traClientWork import TraClientWork as __model
from ims.service.mappers.models.traOrderData import TraOrder, TraSubOrder
def selectWorkMonthDetails(userId, year, month, startDay, endDay):
    """DB query that returns each day of the selected month with its total working time.
    :param userId: user ID the records belong to
    :param year: target year
    :param month: target month
    :param startDay: first day of the month
    :param endDay: last day of the month
"""
subq1 = db.session.query(
func.generate_series(
func.date(startDay) - func.CURRENT_DATE(), func.date(endDay) - func.CURRENT_DATE()
).label('i')
).subquery()
subq2 = db.session.query(
func.cast(func.date_part('day', func.CURRENT_DATE() + subq1.c.i ), Integer).label('day')
).subquery()
monthDetails = db.session.query(
subq2.c.day,
__model.rest_flg,
db.func.to_char(db.func.sum(__model.work_time),'HH24:MI').label('workTime'),
).outerjoin(__model,
and_(
subq2.c.day == __model.work_day,
__model.user_id == userId,
__model.work_year == year,
__model.work_month == month
)
).group_by(
subq2.c.day,
__model.rest_flg
).order_by(
subq2.c.day
).all()
return monthDetails
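# Rough shape of the SQL generated by the query above (PostgreSQL, simplified,
# table name assumed; shown only as an aid to reading the ORM code):
#   SELECT days.day, w.rest_flg, to_char(sum(w.work_time), 'HH24:MI')
#   FROM (SELECT CAST(date_part('day', CURRENT_DATE + i) AS int) AS day
#         FROM generate_series(date(:startDay) - CURRENT_DATE,
#                              date(:endDay)   - CURRENT_DATE) AS i) AS days
#   LEFT OUTER JOIN tra_client_work AS w
#     ON days.day = w.work_day AND w.user_id = :userId
#    AND w.work_year = :year AND w.work_month = :month
#   GROUP BY days.day, w.rest_flg
#   ORDER BY days.day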
def selectTraClientWorkList(groupId, userId, year, month, day):
    """DB query that returns the work list for the selected day.
    :param groupId: group code the user belongs to
    :param userId: user ID the records belong to
    :param year: target year
    :param month: target month
    :param day: target day
"""
orderCd = aliased(TraOrder)
subOrderCd = aliased(TraSubOrder)
clientWorkList = db.session.query(
__model.client_work_id.label('clientWorkId'),
db.func.to_char(__model.work_time,'HH24:MI').label('workTime'),
orderCd.order_value.label('orderCd'),
__model.task_cd.label('taskCd'),
func.COALESCE(subOrderCd.sub_order_value, 'なし').label('subOrderCd')
).filter(
__model.user_id == userId,
__model.work_year == year,
__model.work_month == month,
__model.work_day == day,
__model.rest_flg == 0
).outerjoin(
(orderCd,
and_(orderCd.order_cd == __model.order_cd,
orderCd.group_id == groupId)),
(subOrderCd,
and_(subOrderCd.order_cd == __model.order_cd,
subOrderCd.sub_order_cd == __model.sub_order_cd,
subOrderCd.group_id == groupId))
).all()
return clientWorkList
def selectTraClientWorkDetails(clientWorkId):
    """DB query that returns the selected work detail record.
    :param clientWorkId: work detail ID
"""
clientWorkDetails = db.session.query(
__model.client_work_id.label('clientWorkId'),
__model.user_id.label('userId'),
__model.work_month.label('workMonth'),
__model.work_day.label('workDay'),
func.to_number(func.to_char((__model.work_time),'HH24'), '999999').label('workHours'),
func.to_number(func.to_char((__model.work_time),'MI'), '999999').label('workMinutes'),
__model.order_cd.label('orderCd'),
__model.task_cd.label('taskCd'),
__model.sub_order_cd.label('subOrderCd'),
__model.note
).filter_by(
client_work_id = clientWorkId
).first()
return clientWorkDetails
def insertUpdateTraClientWork(dto,isUpdate):
    """DB operation that inserts or updates a work detail record.
    :param dto: work detail data
    :param isUpdate: flag that selects update (True) or insert (False)
"""
model = __model()
model.user_id = dto['userId'],
model.work_year = dto['year'],
model.work_month = dto['month'],
model.work_day = dto['day'],
model.rest_flg = 0,
model.order_cd = dto['orderCd'],
model.task_cd = dto['taskCd'],
model.sub_order_cd = dto['subOrderCd'],
model.work_time = dto['workTime'],
model.note = dto['note'] or ""
if isUpdate:
model.client_work_id = dto['clientWorkId']
db.session.merge(model)
else:
db.session.add(model)
db.session.flush()
def insertDayOffFlg(userId, year, month, day):
    """DB operation that registers the selected day as a day off.
    :param userId: user ID the record belongs to
    :param year: target year
    :param month: target month
    :param day: target day
"""
model = __model()
model.user_id = userId,
model.work_year = year,
model.work_month = month,
model.work_day = day,
model.rest_flg = 1,
db.session.add(model)
db.session.flush()
def deleteDay(userId, year, month, day, restFlg=1):
    """DB operation that deletes the work records of the selected day.
    :param userId: user ID the records belong to
    :param year: target year
    :param month: target month
    :param day: target day
    :param restFlg: rest flag of the rows to delete (1 = day off, 0 = work)
"""
__model.query.filter_by(
user_id = userId,
work_year = year,
work_month = month,
work_day = day,
rest_flg = restFlg
).delete()
db.session.flush()
def deleteTraClientWork(clientWorkId):
    """DB operation that deletes a work detail record.
    :param clientWorkId: work detail ID
"""
__model.query.filter_by(client_work_id = clientWorkId).delete()
db.session.flush()
```
#### File: service/mappers/comItemMapper.py
```python
from ims import db
from ims.service.mappers.models.comItem import ComItem as __model
def selectComItemList(category):
    """DB query that returns the list of master data for a category.
    :param category: data category
"""
dto = __model.query.filter_by(
item_category = category
).order_by(
__model.display_order
).all()
return dto
def selectComItem(itemId):
    """DB query that returns a single master data record.
    :param itemId: master data ID
"""
dto = __model.query.filter_by(
item_id = itemId
).first()
return dto
def insertUpdateComItem(dto, isUpdate):
    """DB operation that inserts or updates a master data record.
    :param dto: master data details
    :param isUpdate: flag that selects update (True) or insert (False)
"""
model = __model()
model.item_category = dto['itemCategory'],
model.item_cd = dto['itemCD'],
model.item_value = dto['itemValue'],
model.display_order = dto['displayOrder'],
if dto['isActive'] == True:
model.is_active = True
else:
model.is_active = False
model.update_user = dto['updateUser']
if isUpdate:
model.item_id = dto['itemId']
db.session.merge(model)
else:
db.session.add(model)
db.session.flush()
def deleteComItem(itemId):
    """DB operation that deletes a master data record.
    :param itemId: master data ID
"""
__model.query.filter_by(item_id = itemId).delete()
db.session.flush()
```
#### File: service/mappers/orderDataMapper.py
```python
from sqlalchemy import and_
from sqlalchemy.orm import aliased
from ims import db
from ims.service.mappers.models.comItem import ComItem
from ims.service.mappers.models.traOrderData import TraOrder as __orderModel
from ims.service.mappers.models.traOrderData import TraSubOrder as __subOrderModel
def selectOrederList(groupId):
    """DB query that returns the order (major category) list.
    :param groupId: group code
"""
client_name = aliased(ComItem)
orderList = db.session.query(
__orderModel.order_id.label('orderId'),
client_name.item_value.label('clientName'),
__orderModel.order_cd.label('orderCd'),
__orderModel.order_value.label('orderValue'),
__orderModel.display_order.label('displayOrder'),
__orderModel.is_active.label('isActive'),
).filter(
__orderModel.group_id == groupId
).outerjoin(
(client_name,
and_(client_name.item_cd == __orderModel.client_cd,
client_name.item_category =='client_cd'))
).all()
return orderList
def selectOreder(orderId):
    """DB query that returns a single order (major category) record.
    :param orderId: order ID
"""
dto = __orderModel.query.filter_by(
order_id = orderId
).first()
return dto
def insertUpdateOreder(dto, isUpdate):
    """DB operation that inserts or updates an order (major category) record.
    :param dto: order (major category) details
    :param isUpdate: flag that selects update (True) or insert (False)
"""
model = __orderModel()
model.client_cd = dto['clientCd'],
model.group_id = dto['groupId'],
model.order_cd = dto['orderCd'],
model.order_value = dto['orderValue'],
model.display_order = dto['displayOrder'],
if dto['isActive'] == True:
model.is_active = True
else:
model.is_active = False
model.update_user = dto['updateUser']
if isUpdate:
model.order_id = dto['orderId']
db.session.merge(model)
else:
db.session.add(model)
db.session.flush()
def deleteOreder(orderId):
    """DB operation that deletes an order (major category) record.
    :param orderId: order (major category) record ID
"""
__orderModel.query.filter_by(order_id = orderId).delete()
db.session.flush()
def selectSubOrederList(groupId, orderCd):
    """DB query that returns the sub order (minor category) list.
    :param groupId: group code
    :param orderCd: order code
"""
client_name = aliased(ComItem)
orderList = db.session.query(
__subOrderModel.sub_order_id.label('orderId'),
client_name.item_value.label('clientName'),
__subOrderModel.sub_order_cd.label('subOrderCd'),
__subOrderModel.sub_order_value.label('subOrderValue'),
__subOrderModel.display_order.label('displayOrder'),
__subOrderModel.is_active.label('isActive'),
).filter(
__subOrderModel.group_id == groupId,
__subOrderModel.order_cd == orderCd
).outerjoin(
(client_name,
and_(client_name.item_cd == __subOrderModel.client_cd,
client_name.item_category =='client_cd'))
).all()
return orderList
def selectSubOreder(subOrderId):
    """DB query that returns a single sub order (minor category) record.
    :param subOrderId: sub order ID
"""
dto = __subOrderModel.query.filter_by(
sub_order_id = subOrderId
).first()
return dto
def insertUpdateSubOreder(dto, isUpdate):
    """DB operation that inserts or updates a sub order (minor category) record.
    :param dto: sub order (minor category) details
    :param isUpdate: flag that selects update (True) or insert (False)
"""
model = __subOrderModel()
model.client_cd = dto['clientCd'],
model.group_id = dto['groupId'],
model.order_cd = dto['orderCd'],
model.sub_order_cd = dto['subOrderCd'],
model.sub_order_value = dto['subOrderValue'],
model.display_order = dto['displayOrder'],
if dto['isActive'] == True:
model.is_active = True
else:
model.is_active = False
model.update_user = dto['updateUser']
if isUpdate:
model.sub_order_id = dto['subOrderId']
db.session.merge(model)
else:
db.session.add(model)
db.session.flush()
def deleteSubOreder(subOrderId):
    """DB operation that deletes a sub order (minor category) record.
    :param subOrderId: sub order (minor category) record ID
"""
__subOrderModel.query.filter_by(sub_order_id = subOrderId).delete()
db.session.flush()
```
#### File: ims/service/travelExpensesServ.py
```python
import traceback
from flask import abort
from flask_login import current_user
from ims import db
from ims.service.mappers.comUserMapper import selectComUser as __getUser
from ims.service.mappers.travelExpensesMapper import selectTraTravelExpensesList as __getList
from ims.service.mappers.travelExpensesMapper import selectTraTravelExpensesDetails as __getDetails
from ims.service.mappers.travelExpensesMapper import insertUpdateTraTravelExpenses as __insertUpdateOne
from ims.service.mappers.travelExpensesMapper import deleteTraTravelExpenses as __deleteOne
def getTravelExpensesList(userId, year, month):
    """Calls the mapper that retrieves one month of travel expense records.
    :param userId: user ID the records belong to
    :param year: target year
    :param month: target month
"""
dtoList = __getList(userId, year, month)
return dtoList
def getTravelExpensesDetails(travelExpensesId):
    """Calls the mapper that retrieves the selected travel expense record.
    :param travelExpensesId: travel expense ID
"""
try:
Id = int(travelExpensesId)
dto = __getDetails(Id)
if dto:
user = __getUser(dto.user_id)
if user.group_id == current_user.group_id:
return dto
else:
return None
except:
return None
def insertUpdateTravelExpenses(dto, isUpdate):
    """Calls the mapper that inserts or updates a travel expense record.
    Exceptions are caught and handled here in the service layer.
    :param dto: travel expense details
    :param isUpdate: flag that selects update (True) or insert (False)
"""
try:
__insertUpdateOne(dto,isUpdate)
db.session.commit()
except Exception:
traceback.print_exc()
db.session.rollback()
abort(500)
finally:
db.session.close()
def deleteTravelExpenses(travelExpensesId):
    """Calls the mapper that deletes a travel expense record.
    Exceptions are caught and handled here in the service layer.
    :param travelExpensesId: travel expense ID
"""
try:
__deleteOne(travelExpensesId)
db.session.commit()
except Exception:
traceback.print_exc()
db.session.rollback()
abort(500)
finally:
db.session.close()
```
#### File: ims/views/masterData.py
```python
from flask import Blueprint, flash, jsonify, request, redirect, render_template, session, url_for
from flask_login import login_required, current_user
from ims.common.ComboBoxUtil import getComCategoryList
from ims.common.Messages import Messages
from ims.common.RoleUtil import admin_required
from ims.contents.comCont import MasterDataList as listCont
from ims.contents.comCont import MasterDetails as detailsCont
from ims.form.masterDataForm import MasterDataForm
from ims.service.comServ import insertUpdateMasterData as insertUpdateDto
from ims.service.comServ import getComItemList as getDtoList
from ims.service.comServ import getComItem as getDto
from ims.service.comServ import deleteMasterData as deleteDto
masterData = Blueprint('masterData', __name__)
@masterData.route('/list/')
@admin_required
def master_list():
    """Initial display of the master data list; accepts GET requests.
    Returns the HTML template and the contents for the screen.
"""
categoryList = getDtoList('master_combo')
comboList = getComCategoryList(categoryList)
cont = listCont(comboList)
return render_template('master_data_management/master-list.html', cont=cont)
@masterData.route('/list/getData/', methods = ['POST'])
@admin_required
def master_post_data():
    """Returns data for the master data list; accepts POST requests.
    Fetches the records of the category selected on the list screen and returns them as JSON.
    :param category: the selected category
"""
    dataset = []
    try:
        category = request.json['category']
        models = getDtoList(category)
for model in models:
data = {}
data["itemId"] = model.item_id
data["itemCd"] = model.item_cd
data["itemValue"] = model.item_value
data["displayOrder"] = model.display_order
data["isActive"] = model.is_active
dataset.append(data)
except:
pass
return jsonify(dataset)
@masterData.route('/create/')
@admin_required
def master_create():
    """Master data creation.
    Accepts the GET request issued when "New" is pressed on the list screen.
    Returns the HTML template and the contents for the screen.
"""
categoryList = getDtoList('master_combo')
comboList = getComCategoryList(categoryList)
form = MasterDataForm()
form.itemCategory.choices = [(i.key, i.value) for i in comboList]
cont = detailsCont(form)
return render_template('master_data_management/master-details.html', cont=cont)
@masterData.route('/<int:itemId>/edit/')
@admin_required
def master_edit(itemId):
    """Master data edit.
    Accepts the GET request issued when a record's code is pressed on the list screen.
    Returns the HTML template and the contents for the screen.
    :param itemId: ID of the target record
"""
dto = getDto(itemId)
if not dto:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('masterData.master_list'))
categoryList = getDtoList('master_combo')
form = MasterDataForm()
form.itemCategory.choices = [(i.item_cd, i.item_value) for i in categoryList]
form.itemId.data = dto.item_id
form.itemCategory.data = dto.item_category
form.itemCD.data = dto.item_cd
form.itemValue.data = dto.item_value
form.displayOrder.data = dto.display_order
form.isActive.data = dto.is_active
cont = detailsCont(form)
return render_template('master_data_management/master-details.html', cont=cont)
@masterData.route('/details/save/', methods=['POST'])
@admin_required
def master_save():
    """Save handler for the master data details screen.
    Saves the form data to the DB.
    Redirects to the master data list screen when finished.
"""
categoryList = getDtoList('master_combo')
comboList = getComCategoryList(categoryList)
form = MasterDataForm()
form.itemCategory.choices = [(i.key, i.value) for i in comboList]
if form.validate_on_submit():
if form.itemId.data:
isUpdate = True
dto = getDto(form.itemId.data)
if dto:
pass
else:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('masterData.master_list'))
else:
isUpdate = False
data = form.data
data['updateUser'] = current_user.user_id
data['isActive'] = bool(form.isActive.data)
try:
insertUpdateDto(data, isUpdate)
except Exception:
return redirect(url_for('masterData.master_list'))
if isUpdate:
flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS)
else:
flash(Messages.SUCCESS_INSERTED, Messages.SUCCESS_CSS)
return redirect(url_for('masterData.master_list'))
for error in form.errors.values():
flash(error[0],Messages.DANGER_CSS)
cont = detailsCont(form)
return render_template('master_data_management/master-details.html', cont=cont)
@masterData.route('/details/<int:itemId>/delete/')
@admin_required
def master_delete(itemId):
    """Delete handler for the master data details screen.
    Physically deletes the record.
    Redirects to the master data list screen when finished.
    :param itemId: ID of the record to delete
"""
dto = getDto(itemId)
if dto:
pass
else:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('masterData.master_list'))
deleteDto(itemId)
flash(Messages.SUCCESS_DELETED, Messages.SUCCESS_CSS)
return redirect(url_for('masterData.master_list'))
```
#### File: ims/views/orderData.py
```python
from flask import Blueprint, flash, jsonify, request, redirect, render_template, url_for
from flask_login import current_user
from ims.common.ComboBoxUtil import getOrderComBoList
from ims.common.Messages import Messages
from ims.common.RoleUtil import general_manager_required
from ims.contents.orderDataCont import OrderDataList as orderListCont
from ims.contents.orderDataCont import Details as detailsCont
from ims.contents.orderDataCont import SubOrderDataList as subOrderListCont
from ims.form.orderDataForm import OrderDataForm, SubOrderDataForm
from ims.service.comServ import getComItemList
from ims.service.orderDataServ import getOrderList
from ims.service.orderDataServ import getSubOrderList
from ims.service.orderDataServ import getOrderDetails
from ims.service.orderDataServ import getSubOrderDetails
from ims.service.orderDataServ import insertUpdateOrder
from ims.service.orderDataServ import insertUpdateSubOrder
from ims.service.orderDataServ import deleteOrder
from ims.service.orderDataServ import deleteSubOrder
orderData = Blueprint('orderData', __name__)
@orderData.route('/order_list/')
@general_manager_required
def order_list():
    """Initial display of the order (major category) list; accepts GET requests.
    Returns the HTML template and the contents for the screen.
"""
data = getOrderList(current_user.group_id)
cont = orderListCont(data)
return render_template('order_data_management/order-list.html', cont=cont)
@orderData.route('/order_create/')
@general_manager_required
def order_create():
    """Order (major category) creation.
    Accepts the GET request issued when "New" is pressed on the order list screen.
    Returns the HTML template and the contents for the screen.
"""
clientList = getComItemList('client_cd')
form = OrderDataForm()
form.clientCd.choices = [(i.item_cd, i.item_value) for i in clientList]
cont = detailsCont(form)
return render_template('order_data_management/order-details.html', cont=cont)
@orderData.route('/order/<int:orderId>/edit/')
@general_manager_required
def order_edit(orderId):
    """Order (major category) edit.
    Accepts the GET request issued when a record's code is pressed on the list screen.
    Returns the HTML template and the contents for the screen.
    :param orderId: ID of the selected order (major category) record
"""
dto = getOrderDetails(orderId)
if not dto:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('orderData.order_list'))
clientList = getComItemList('client_cd')
form = OrderDataForm()
form.clientCd.choices = [(i.item_cd, i.item_value) for i in clientList]
form.orderId.data = dto.order_id
form.clientCd.data = dto.client_cd
form.orderCd.data = dto.order_cd
form.orderValue.data = dto.order_value
form.displayOrder.data = dto.display_order
form.isActive.data = dto.is_active
cont = detailsCont(form)
return render_template('order_data_management/order-details.html', cont=cont)
@orderData.route('/order_details/save/', methods=['POST'])
@general_manager_required
def order_save():
    """Save handler for the order (major category) details screen.
    Saves the form data to the DB.
    Redirects to the order list screen when finished.
"""
clientList = getComItemList('client_cd')
form = OrderDataForm()
form.clientCd.choices = [(i.item_cd, i.item_value) for i in clientList]
if form.validate_on_submit():
if form.orderId.data:
isUpdate = True
dto = getOrderDetails(form.orderId.data)
if dto:
pass
else:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('orderData.order_list'))
else:
isUpdate = False
data = form.data
data['groupId'] = current_user.group_id
data['updateUser'] = current_user.user_id
data['isActive'] = bool(form.isActive.data)
try:
insertUpdateOrder(data, isUpdate)
except Exception:
return redirect(url_for('orderData.order_list'))
if isUpdate:
flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS)
else:
flash(Messages.SUCCESS_INSERTED, Messages.SUCCESS_CSS)
return redirect(url_for('orderData.order_list'))
for error in form.errors.values():
flash(error[0],Messages.DANGER_CSS)
cont = detailsCont(form)
return render_template('order_data_management/order-details.html', cont=cont)
@orderData.route('/order_details/<int:orderId>/delete/')
@general_manager_required
def order_delete(orderId):
    """Delete handler for the order (major category) details screen.
    Physically deletes the record.
    Redirects to the order list screen when finished.
    :param orderId: ID of the record to delete
"""
dto = getOrderDetails(orderId)
if dto:
pass
else:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('orderData.order_list'))
deleteOrder(orderId)
flash(Messages.SUCCESS_DELETED, Messages.SUCCESS_CSS)
return redirect(url_for('orderData.order_list'))
@orderData.route('/sub_order_list/')
@general_manager_required
def sub_order_list():
    """Initial display of the sub order (minor category) list; accepts GET requests.
    Returns the HTML template and the contents for the screen.
"""
orderList = getOrderList(current_user.group_id)
comboList = getOrderComBoList(orderList)
if not comboList:
flash(Messages.NO_FOUND_ORDER, Messages.WARNING_CSS)
return redirect(url_for('orderData.order_list'))
cont = subOrderListCont(comboList)
return render_template('order_data_management/sub-order-list.html', cont=cont)
@orderData.route('/list/getData/', methods = ['POST'])
@general_manager_required
def sub_order_post_data():
    """Returns data for the sub order (minor category) list; accepts POST requests.
    Fetches the records of the order selected on the list screen and returns them as JSON.
    :param orderCd: the selected order
"""
    dataset = []
    try:
        orderCd = request.json['orderCd']
        models = getSubOrderList(current_user.group_id, orderCd)
for model in models:
data = {}
data["subOrderId"] = model.orderId
data["clientName"] = model.clientName
data["subOrderCd"] = model.subOrderCd
data["subOrderValue"] = model.subOrderValue
data["displayOrder"] = model.displayOrder
data["isActive"] = model.isActive
dataset.append(data)
except:
pass
return jsonify(dataset)
@orderData.route('/sub_order_create/')
@general_manager_required
def sub_order_create():
    """Sub order (minor category) creation.
    Accepts the GET request issued when "New" is pressed on the sub order list screen.
    Returns the HTML template and the contents for the screen.
"""
clientList = getComItemList('client_cd')
orderList = getOrderList(current_user.group_id)
form = SubOrderDataForm()
form.clientCd.choices = [(i.item_cd, i.item_value) for i in clientList]
form.orderCd.choices = [(i.orderCd, i.orderValue) for i in orderList]
cont = detailsCont(form)
return render_template('order_data_management/sub-order-details.html', cont=cont)
@orderData.route('/sub_order/<int:subOrderId>/edit/')
@general_manager_required
def sub_order_edit(subOrderId):
    """Sub order (minor category) edit.
    Accepts the GET request issued when a record's code is pressed on the list screen.
    Returns the HTML template and the contents for the screen.
    :param subOrderId: ID of the selected sub order (minor category) record
"""
dto = getSubOrderDetails(subOrderId)
if not dto:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('orderData.sub_order_list'))
clientList = getComItemList('client_cd')
orderList = getOrderList(current_user.group_id)
form = SubOrderDataForm()
form.clientCd.choices = [(i.item_cd, i.item_value) for i in clientList]
form.orderCd.choices = [(i.orderCd, i.orderValue) for i in orderList]
form.subOrderId.data = dto.sub_order_id
form.clientCd.data = dto.client_cd
form.orderCd.data = dto.order_cd
form.subOrderCd.data = dto.sub_order_cd
form.subOrderValue.data = dto.sub_order_value
form.displayOrder.data = dto.display_order
form.isActive.data = dto.is_active
cont = detailsCont(form)
return render_template('order_data_management/sub-order-details.html', cont=cont)
@orderData.route('/sub_order_details/save/', methods=['POST'])
@general_manager_required
def sub_order_save():
    """Save handler for the sub order (minor category) details screen.
    Saves the form data to the DB.
    Redirects to the sub order list screen when finished.
"""
clientList = getComItemList('client_cd')
orderList = getOrderList(current_user.group_id)
form = SubOrderDataForm()
form.clientCd.choices = [(i.item_cd, i.item_value) for i in clientList]
form.orderCd.choices = [(i.orderCd, i.orderValue) for i in orderList]
if form.validate_on_submit():
if form.subOrderId.data:
isUpdate = True
dto = getSubOrderDetails(form.subOrderId.data)
if dto:
pass
else:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('orderData.sub_order_list'))
else:
isUpdate = False
data = form.data
data['groupId'] = current_user.group_id
data['updateUser'] = current_user.user_id
data['isActive'] = bool(form.isActive.data)
try:
insertUpdateSubOrder(data, isUpdate)
except Exception:
return redirect(url_for('orderData.sub_order_list'))
if isUpdate:
flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS)
else:
flash(Messages.SUCCESS_INSERTED, Messages.SUCCESS_CSS)
return redirect(url_for('orderData.sub_order_list'))
for error in form.errors.values():
flash(error[0],Messages.DANGER_CSS)
cont = detailsCont(form)
return render_template('order_data_management/sub-order-details.html', cont=cont)
@orderData.route('/sub_order_details/<int:subOrderId>/delete/')
@general_manager_required
def sub_order_delete(subOrderId):
    """Delete handler for the sub order (minor category) details screen.
    Physically deletes the record.
    Redirects to the sub order list screen when finished.
    :param subOrderId: ID of the record to delete
"""
dto = getSubOrderDetails(subOrderId)
if dto:
pass
else:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('orderData.sub_order_list'))
deleteSubOrder(subOrderId)
flash(Messages.SUCCESS_DELETED, Messages.SUCCESS_CSS)
return redirect(url_for('orderData.sub_order_list'))
```
#### File: ims/views/userManagement.py
```python
from flask import Blueprint, flash, redirect, render_template, url_for, session
from flask_login import login_required, current_user
from ims import bcrypt
from ims.common.Messages import Messages
from ims.common.RoleUtil import admin_required
from ims.service.comServ import getAllUserList, getComUser, getComItemList, insertUpdateComUser
from ims.contents.userCont import UserListCont as listCont
from ims.contents.userCont import UserDetailsCont as detailsCont
from ims.form.userForm import UserForm, MyPageForm
userManagement = Blueprint('userManagement', __name__)
@userManagement.route('/list/')
@admin_required
def user_list():
    """Initial display of the user list; accepts GET requests.
    Returns the HTML template and the contents for the screen.
"""
userList = getAllUserList()
cont = listCont(userList)
return render_template('user_management/user-list.html', cont=cont)
@userManagement.route('/details/create')
@admin_required
def user_create():
    """User creation.
    Accepts the GET request issued when "New" is pressed on the list screen.
    Returns the HTML template and the contents for the screen.
"""
groupIdList = getComItemList('group_id')
form = UserForm()
form.groupId.choices = [(i.item_cd, i.item_value) for i in groupIdList]
cont = detailsCont(form)
return render_template('user_management/user-details.html', cont=cont)
@userManagement.route('/details/<string:userId>/edit')
@admin_required
def user_edit(userId):
    """User edit.
    Accepts the GET request issued when a user is selected on the list screen.
    Returns the HTML template and the contents for the screen.
    :param userId: ID of the user to edit
"""
dto = getComUser(userId)
if not dto:
flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED,
Messages.WARNING_CSS)
return redirect(url_for('userManagement.user_list'))
groupIdList = getComItemList('group_id')
form = UserForm()
form.groupId.choices = [(i.item_cd, i.item_value) for i in groupIdList]
form.userId.data = dto.user_id
form.userName.data = dto.user_name
form.groupId.data = dto.group_id
form.role.data = dto.role
form.email.data = dto.email
cont = detailsCont(form)
return render_template('user_management/user-details.html', cont=cont)
@userManagement.route('/details/save/', methods=['POST'])
@admin_required
def user_save():
"""ユーザー情報詳細画面登録処理
formのデータをDBに保存します。
処理終了後はマスタユーザー一覧画面へ遷移します。
"""
groupIdList = getComItemList('group_id')
form = UserForm()
form.groupId.choices = [(i.item_cd, i.item_value) for i in groupIdList]
if form.validate_on_submit():
data = form.data
        data['password'] = bcrypt.generate_password_hash(form.password.data).decode(encoding='utf-8')
isUpdate = False
dto = getComUser(data['userId'])
if dto:
isUpdate = True
insertUpdateComUser(data, isUpdate)
if isUpdate:
flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS)
else:
flash(Messages.SUCCESS_INSERTED, Messages.SUCCESS_CSS)
return redirect(url_for('userManagement.user_list'))
for error in form.errors.values():
flash(error[0], Messages.DANGER_CSS)
cont = detailsCont(form)
return render_template('user_management/user-details.html', cont=cont)
@userManagement.route('/mypage')
@login_required
def user_mypage():
"""マイアカウントページ処理
画面から「自分のユーザ名」を押下後、GETのrequestを受付します。
htmlテンプレート及び画面用コンテンツを返します。
"""
dto = getComUser(current_user.user_id)
form = MyPageForm()
form.userName.data = dto.user_name
form.email.data = dto.email
cont = detailsCont(form)
return render_template('user_management/mypage.html', cont=cont)
@userManagement.route('/mypage_save', methods=['POST'])
@login_required
def user_mypage_save():
"""マイアカウント登録処理
変更後の情報を保存します。
"""
dto = getComUser(current_user.user_id)
form = MyPageForm()
if form.validate_on_submit():
data = form.data
data['password'] = bcrypt.generate_password_hash(form.password.data).decode(encoding='utf-8')
data['userId'] = current_user.user_id
data['groupId'] = current_user.group_id
data['role'] = dto.role
insertUpdateComUser(data, True)
flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS)
return redirect(url_for('home.index'))
else:
for error in form.errors.values():
flash(error[0], Messages.DANGER_CSS)
cont = detailsCont(form)
return render_template('user_management/mypage.html', cont=cont)
```
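The save handlers above never store plain-text passwords; they run them through flask-bcrypt first. A minimal sketch (not part of the project, the literal password is only for illustration) of that round trip:

```python
from flask import Flask
from flask_bcrypt import Bcrypt

app = Flask(__name__)
bcrypt = Bcrypt(app)

# generate_password_hash returns bytes, hence the decode() used in the views above
hashed = bcrypt.generate_password_hash("secret").decode(encoding='utf-8')
print(bcrypt.check_password_hash(hashed, "secret"))   # True
print(bcrypt.check_password_hash(hashed, "wrong"))    # False
```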
#### File: 70ucanbin/IMS/init_script.py
```python
from flask import Flask
import click
from flask.cli import with_appcontext
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.schema import Sequence, CreateSequence
from flask_bcrypt import Bcrypt
from config import AppConfig as __Config
from datetime import datetime
app = Flask(__name__)
app.config.from_object(__Config)
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
class User(db.Model):
__tablename__ = 'com_user'
user_id = db.Column(db.String(20), primary_key=True)
group_id = db.Column(db.String(20))
role = db.Column(db.SMALLINT, nullable=False)
user_name = db.Column(db.String(20))
password = db.Column(db.String(150))
email = db.Column(db.String(50))
is_active = db.Column(db.Boolean, server_default=u'True')
update_user = db.Column(db.String(20))
update_date = db.Column(db.DateTime, default=datetime.now)
class ComItem(db.Model):
__tablename__ = 'com_item'
item_id = db.Column(db.Integer, db.Sequence('com_item_seq'), unique=True, nullable=False)
item_category = db.Column(db.String(20), primary_key=True)
item_cd = db.Column(db.String(20), primary_key=True)
item_value = db.Column(db.String(100))
display_order = db.Column(db.Integer)
is_active = db.Column(db.Boolean, server_default=u'True')
update_user = db.Column(db.String(20))
update_date = db.Column(db.DateTime, default=datetime.now)
class TraClientWork(db.Model):
__tablename__ = 'tra_client_work'
client_work_id = db.Column(db.Integer, db.Sequence('tra_client_work_seq'), primary_key=True)
user_id = db.Column(db.String(20), primary_key=True)
work_year = db.Column(db.SMALLINT , primary_key=True)
work_month = db.Column(db.SMALLINT , primary_key=True)
work_day = db.Column(db.SMALLINT)
rest_flg = db.Column(db.SMALLINT)
order_cd = db.Column(db.String(20))
task_cd = db.Column(db.String(20))
sub_order_cd = db.Column(db.String(20))
work_time = db.Column(db.Time)
note = db.Column(db.String(200))
class TraMonthlyReport(db.Model):
__tablename__ = 'tra_monthly_report'
user_id = db.Column(db.String(20), primary_key=True)
work_year = db.Column(db.Integer, primary_key=True)
work_month = db.Column(db.Integer, primary_key=True)
work_day = db.Column(db.Integer, primary_key=True)
rest_flg = db.Column(db.Integer)
work_details = db.Column(db.String(100))
start_work_time = db.Column(db.DateTime)
end_work_time = db.Column(db.DateTime)
normal_working_hours = db.Column(db.Numeric(4,2))
overtime_hours = db.Column(db.Numeric(4,2))
holiday_work_hours = db.Column(db.Numeric(4,2))
note = db.Column(db.String(200))
class TraTravelExpenses(db.Model):
__tablename__ = 'tra_travel_expenses'
travel_expenses_id = db.Column(db.Integer, db.Sequence('tra_travel_expenses_seq'), primary_key=True)
user_id = db.Column(db.String(20), nullable=False)
entry_year = db.Column(db.Integer, nullable=False)
entry_month = db.Column(db.Integer, nullable=False)
expense_date = db.Column(db.String(30))
expense_item = db.Column(db.String(50))
route = db.Column(db.String(50))
transit = db.Column(db.String(50))
payment = db.Column(db.Integer)
file_name = db.Column(db.String(100))
note = db.Column(db.String(200))
class TraOrder(db.Model):
__tablename__ = 'tra_order'
order_id = db.Column(db.Integer, db.Sequence('tra_order_seq'), unique=True, nullable=False)
client_cd = db.Column(db.String(20), primary_key=True)
group_id = db.Column(db.String(20), primary_key=True)
order_cd = db.Column(db.String(20), primary_key=True)
order_value = db.Column(db.String(100))
display_order = db.Column(db.Integer)
is_active = db.Column(db.Boolean, server_default=u'True')
update_user = db.Column(db.String(20))
update_date = db.Column(db.DateTime, default=datetime.now)
class TraSubOrder(db.Model):
__tablename__ = 'tra_sub_order'
sub_order_id = db.Column(db.Integer, db.Sequence('tra_sub_order_seq'), unique=True, nullable=False)
client_cd = db.Column(db.String(20), primary_key=True)
group_id = db.Column(db.String(20), primary_key=True)
order_cd = db.Column(db.String(20), primary_key=True)
sub_order_cd = db.Column(db.String(20), primary_key=True)
sub_order_value = db.Column(db.String(100))
display_order = db.Column(db.Integer)
is_active = db.Column(db.Boolean, server_default=u'True')
update_user = db.Column(db.String(20))
update_date = db.Column(db.DateTime, default=datetime.now)
@click.command(name='setup')
@with_appcontext
def setup():
db.drop_all()
db.create_all()
user = User()
user.user_id = 'admin'
user.user_name = 'Admin'
user.password = bcrypt.generate_password_hash('password').decode(encoding='utf-8')
user.group_id = 'manager'
user.role = 3
user.email = '<EMAIL>'
user.update_user = 'admin'
db.session.add(user)
master_combo1 = ComItem()
master_combo1.item_category = 'master_combo'
master_combo1.item_cd = 'group_id'
master_combo1.item_value = '所属部署'
master_combo1.display_order = 1
master_combo1.update_user = 'Admin'
db.session.add(master_combo1)
master_combo2 = ComItem()
master_combo2.item_category = 'master_combo'
master_combo2.item_cd = 'client_cd'
master_combo2.item_value = '客先'
master_combo2.display_order = 2
master_combo2.update_user = 'Admin'
db.session.add(master_combo2)
client_cd = ComItem()
client_cd.item_category = 'client_cd'
client_cd.item_cd = '999999999'
client_cd.item_value = '自社作業・応援'
client_cd.display_order = 1
client_cd.update_user = 'Admin'
db.session.add(client_cd)
group_id = ComItem()
group_id.item_category = 'group_id'
group_id.item_cd = 'manager'
group_id.item_value = '管理者グループ'
group_id.display_order = 1
group_id.update_user = 'Admin'
db.session.add(group_id)
order = TraOrder()
order.client_cd = '999999999'
order.group_id = 'manager'
order.order_cd = '999999999'
order.order_value = '自社作業'
order.display_order = 1
order.update_user = 'Admin'
db.session.add(order)
db.session.commit()
app.cli.add_command(setup)
``` |
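The script registers `setup` as a Flask CLI command, so it is normally run as `flask setup` with `FLASK_APP=init_script.py`. A hedged alternative is to drive it from Python through Flask's test CLI runner (this assumes the file is importable as `init_script`); note that the command drops and recreates every table:

```python
# Hypothetical programmatic invocation of the `setup` command registered above.
# WARNING: it calls db.drop_all(), so only run it against a disposable database.
from init_script import app, setup

runner = app.test_cli_runner()
result = runner.invoke(setup)          # same effect as `flask setup`
print(result.exit_code, result.output)
```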
{
"source": "70ucanbin/line_todo",
"score": 2
} |
#### File: line_todo/todo/__init__.py
```python
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import timedelta, datetime
from fastapi import FastAPI, Request, Body
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import MessageEvent, TextMessage, TextSendMessage
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import logging, json, uvicorn
from config import Settings, Messages
from .model import Task
from .util import time_conv, text_conv
app = FastAPI()
scheduler = BackgroundScheduler()
bot_name = Settings.bot_name
# configure logging
formatter = '%(levelname)s : %(asctime)s : %(message)s'
logging.basicConfig(filename=Settings.log_file_path, level=logging.DEBUG, format=formatter)
# configure database
engine = sqlalchemy.create_engine(Settings.db_path, echo=True)
# configure line api
line_bot_api = LineBotApi(Settings.channel_access_token)
handler = WebhookHandler(Settings.channel_secret)
# configure reply message
Invalid_formard = Messages.Invalid_formard
Accepted = Messages.Accepted
Task_done = Messages.Task_done
@app.on_event("startup")
async def startup_event():
logging.info("app startup")
@app.on_event("shutdown")
def shutdown_event():
logging.info("app shutdown")
@app.post("/")
def callback(Request: Request, body: dict = Body(None)):
# get X-Line-Signature header value
signature = Request.headers['X-Line-Signature']
# handle webhook body
body = json.dumps(body,ensure_ascii=False,separators=(',', ':'))
try:
handler.handle(body, signature)
except InvalidSignatureError:
logging.error("Invalid signature. Please check your channel access token/channel secret.")
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
if bot_name in event.message.text:
if event.message.type == 'text':
create_todo(event)
return 'OK'
def create_todo(event):
task = Task()
user_id = event.source.user_id
if event.source.type == 'group':
group_id = event.source.group_id
profile = line_bot_api.get_group_member_profile(group_id, user_id)
task.push_id = group_id
else:
profile = line_bot_api.get_profile(user_id)
task.push_id = user_id
try:
session = sessionmaker(bind=engine)()
_, to_user, time, *details, by_user = text_conv(event.message.text).split(',')
if details:
pass
else:
details, by_user = ''.join(by_user), profile.display_name
task.deadline = time_conv(time)
task.to_user = to_user
task.task_details = ''.join(details)
task.by_user = by_user
session.add(instance=task)
session.flush()
id : int = task.id
run_date: str = task.deadline.strftime('%Y-%m-%d %H:%M:%S')
session.commit()
handle_scheduler(id, run_date)
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(Accepted)
)
except ValueError:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(Invalid_formard)
)
logging.error("valueError")
finally:
session.close()
def tick(id: int) -> None:
try:
session = sessionmaker(bind=engine)()
task = session.query(Task).filter(Task.id==id).first()
message = Task_done % (task.deadline + timedelta(hours=9), task.to_user, ''.join(task.task_details), task.by_user)
line_bot_api.push_message(task.push_id, TextSendMessage(text=message))
logging.info("task done: {}...".format(''.join(task.task_details[:10])))
task.is_active = False
session.commit()
except:
logging.error("databaseError")
finally:
session.close()
def handle_scheduler(id: int, run_date: str) -> None:
global scheduler
scheduler.pause()
scheduler.add_job(tick, 'date', run_date=run_date, args=[id])
scheduler.resume()
def main():
try:
global scheduler
session = sessionmaker(bind=engine)()
tasks = session.query(Task).filter_by(is_active=True).order_by(Task.deadline)
for task in tasks:
id : int = task.id
run_date: str = task.deadline.strftime('%Y-%m-%d %H:%M:%S')
if task.deadline > datetime.now():
scheduler.add_job(tick, 'date', run_date=run_date, args=[id])
else:
task.is_active = False
else:
session.commit()
session.close()
scheduler.start()
uvicorn.run(app, host="0.0.0.0", port=8080)
except:
logging.error("startup failed")
finally:
scheduler.shutdown()
``` |
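handle_scheduler() above relies on APScheduler's one-shot `'date'` trigger. A self-contained sketch of that pattern (the delay and task id are illustrative):

```python
from datetime import datetime, timedelta
import time

from apscheduler.schedulers.background import BackgroundScheduler

def remind(task_id):
    print(f"task {task_id} is due")

scheduler = BackgroundScheduler()
scheduler.start()
# fire exactly once, two seconds from now
scheduler.add_job(remind, 'date', run_date=datetime.now() + timedelta(seconds=2), args=[42])
time.sleep(3)            # keep the main thread alive long enough to see the job fire
scheduler.shutdown()
```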
{
"source": "711e/deep_learn_recommender",
"score": 2
} |
#### File: 711e/deep_learn_recommender/test.py
```python
import logging
import os
import pickle
import tensorflow as tf
from dataset import Dataset, decompression_feature
from inference import full_network
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
BATCH_SIZE = 256
DROPOUT_PROB = 1
def test(test_X, test_y, model_path):
user_id = tf.placeholder(tf.int32, [None, 1], name='user_id')
user_gender = tf.placeholder(tf.int32, [None, 1], name='user_gender')
user_age = tf.placeholder(tf.int32, [None, 1], name='user_age')
user_job = tf.placeholder(tf.int32, [None, 1], name='user_job')
movie_id = tf.placeholder(tf.int32, [None, 1], name='movie_id')
movie_genres = tf.placeholder(tf.float32, [None, 18], name='movie_categories')
movie_titles = tf.placeholder(tf.int32, [None, 15], name='movie_titles')
movie_title_length = tf.placeholder(tf.float32, [None], name='movie_title_length')
targets = tf.placeholder(tf.int32, [None, 1], name='targets')
dropout_keep_prob = tf.constant(DROPOUT_PROB, dtype=tf.float32, name='dropout_keep_prob')
_, _, predicted = full_network(user_id, user_gender, user_age, user_job, movie_id,
movie_genres, movie_titles, movie_title_length,
dropout_keep_prob)
with tf.name_scope('loss'):
        # MSE loss: regress the predicted values onto the ratings
loss = tf.losses.mean_squared_error(targets, predicted)
loss_mae = tf.losses.absolute_difference(targets, predicted)
tf.summary.scalar('loss_mae', loss_mae)
# tf.summary.scalar('loss', loss)
dataset = Dataset(test_X.values, test_y.values)
batch_per_epcho = (len(test_X) + BATCH_SIZE - 1) // BATCH_SIZE
saver = tf.train.Saver()
summaries_merged = tf.summary.merge_all()
with tf.Session() as sess:
train_summary_dir = os.path.join('./data', 'summaries', 'test')
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
sess.run(tf.global_variables_initializer())
cpkt = tf.train.get_checkpoint_state(model_path)
saver.restore(sess, cpkt.model_checkpoint_path)
avg_loss = 0
for batch_i in range(batch_per_epcho):
Xs, ys = dataset.next_batch(BATCH_SIZE)
users, movies = decompression_feature(Xs)
feed = {
user_id: users.id,
user_gender: users.gender,
user_age: users.age,
user_job: users.job,
movie_id: movies.id,
movie_genres: movies.genres,
movie_titles: movies.titles,
movie_title_length: movies.title_length,
targets: ys}
# test_loss, summaries = sess.run([loss, summaries_merged], feed)
test_loss, summaries = sess.run([loss_mae, summaries_merged], feed)
train_summary_writer.add_summary(summaries, batch_i)
show_message = 'Batch {:>4}/{} test_loss = {:.3f}'.format(batch_i, batch_per_epcho, test_loss)
logging.info(show_message)
avg_loss = avg_loss + test_loss * len(users.id)
avg_loss = avg_loss / dataset.size
logging.info('Loss on test is {:.3f}'.format(avg_loss))
if __name__ == '__main__':
with open('./data/data.p', 'rb') as data:
_, _, test_X, test_y = pickle.load(data, encoding='utf-8')
test(test_X, test_y, './data/model')
``` |
{
"source": "7125messi/flask-web-",
"score": 3
} |
#### File: 7125messi/flask-web-/05_flask_return_json.py
```python
import json
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
def read_pvuv_data():
"""
read pv uv data
:return:list,ele = (date,pv,uv)
"""
data = []
with open('./data/pvuv.txt') as fo:
linenum = 0
for row in fo:
if linenum == 0:
linenum += 1
continue
date, pv, uv = row.strip().split("\t")
data.append((date, pv, uv))
return data
@app.route('/getjson')
def getjson():
# read file
data = read_pvuv_data()
# return json
return json.dumps(data)
if __name__ == '__main__':
app.run()
```
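A quick way to exercise the `/getjson` route without starting a server is Flask's built-in test client. This sketch is meant to be appended to the script above (so `app` is in scope) and assumes `./data/pvuv.txt` exists with a header row plus tab-separated date/pv/uv rows:

```python
with app.test_client() as client:
    resp = client.get('/getjson')
    print(resp.status_code)               # 200
    print(resp.get_data(as_text=True))    # e.g. [["2019-09-01", "15000", "7000"], ...]
```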
#### File: flask_model_settle_demo/flask_api/Flask_Model_Trainning.py
```python
import pandas as pd
import tensorflow as tf
import keras
from keras import models, layers
# Load the sample dataset and split it into feature (x) and label (y) DataFrames
df = pd.read_csv("https://github.com/bgweber/Twitch/raw/master/Recommendations/games-expand.csv")
x = df.drop(['label'], axis=1)
y = df['label']
# Define the Keras model
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10,)))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.1))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
# Custom AUC metric
def auc(y_true, y_pred):
auc = tf.metrics.auc(y_true, y_pred)[1]
keras.backend.get_session().run(tf.local_variables_initializer())
return auc
# Compile and fit the model
model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=[auc]
)
history = model.fit(
x,
y,
epochs=100,
batch_size=100,
validation_split = .2,
verbose=0
)
# Save the model in H5 format
model.save("games.h5")
``` |
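Because the model is compiled with the custom `auc` metric, reloading `games.h5` later requires passing that function through `custom_objects`. A short sketch, assuming it runs in the same session so `auc` and `x` are still defined:

```python
from keras import models

reloaded = models.load_model("games.h5", custom_objects={"auc": auc})
print(reloaded.predict(x.iloc[:5].values))   # sigmoid probabilities in [0, 1]
```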
{
"source": "7125messi/ner_action",
"score": 3
} |
#### File: ner_action/code/processing.py
```python
import pandas as pd
df = pd.read_excel('../data/index.xlsx',header=None)
df.columns = ['pitfall_c1','pitfall_c2','pitfall_c3','pitfall_c4','pitfall_c5','pitfall_name','pitfall_class','pitfall_nums_man','pitfall_prob','influence_class','season_class','pitfall_code']
print(df.head())
def dataProcessing(row):
if row['pitfall_name'] == row['pitfall_c1']:
ind = 1
elif row['pitfall_name'] == row['pitfall_c2']:
ind = 1
elif row['pitfall_name'] == row['pitfall_c3']:
ind = 1
elif row['pitfall_name'] == row['pitfall_c4']:
ind = 1
elif row['pitfall_name'] == row['pitfall_c5']:
ind = 1
else:
ind = 0
return ind
df['ind'] = df.apply(dataProcessing,axis=1)
print(len(df))
df_txt = df.loc[df['ind'] == 0,'pitfall_name']
print(len(df_txt))
df_txt.to_csv('../data/result.txt',sep='\t',index=False)
``` |
{
"source": "7125messi/nlp",
"score": 2
} |
#### File: code/module/data_prepare.py
```python
import os
import time
import datetime
import random
import json
from collections import Counter
from math import sqrt
import gensim
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import logging
from gensim.models import word2vec
import multiprocessing
import yaml
import jieba
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import tensorflow as tf
from tensorflow.keras.layers import GlobalAveragePooling1D
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import GRU
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import Flatten
from tensorflow.keras import backend as K
from tensorflow.keras import Sequential
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import regularizers
from tensorflow.keras import initializers
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
class Dataset(object):
def __init__(self, config):
self.dataSource = config.dataSource
self.stopWordSource = config.stopWordSource
        # every input sequence is processed to a fixed length
self.sequenceLength = config.sequenceLength
self.embeddingSize = config.embeddingSize
self.batchSize = config.batchSize
self.rate = config.rate
self.miniFreq = config.miniFreq
self.stopWordDict = {}
self.trainReviews = []
self.trainLabels = []
self.evalReviews = []
self.evalLabels = []
self.wordEmbedding = None
self.n_symbols = 0
self.wordToIndex = {}
self.indexToWord = {}
def readData(self, filePath):
with open(filePath, mode='r', encoding='utf-8') as f:
text = []
label = []
for line in f:
temp = line.replace('\n', '').split(',,')
text.append(temp[0])
label.append(temp[1])
print('data:the text number is {},the label number is {}'.format(len(text), len(label)))
texts = [jieba.lcut(document.replace('\n', '')) for document in text]
return texts, label
def readStopWord(self, stopWordPath):
"""
读取停用词
"""
with open(stopWordPath, mode='r', encoding='utf-8') as f:
stopWordList = f.read().splitlines()
        # build a stop-word lookup dict so later membership checks are fast
self.stopWordDict = dict(zip(stopWordList, list(range(len(stopWordList)))))
def getWordEmbedding(self, words):
"""
按照我们的数据集中的单词取出预训练好的word2vec中的词向量
"""
        # Chinese word2vec model
model = gensim.models.Word2Vec.load('../data/word2VecModel')
vocab = []
wordEmbedding = []
# 添加 "pad" 和 "UNK",
vocab.append("pad")
wordEmbedding.append(np.zeros(self.embeddingSize))
vocab.append("UNK")
wordEmbedding.append(np.random.randn(self.embeddingSize))
for word in words:
try:
                # Chinese word vector
vector = model[word]
vocab.append(word)
wordEmbedding.append(vector)
except:
print(word + " : 不存在于词向量中")
return vocab, np.array(wordEmbedding)
def genVocabulary(self, reviews):
"""
生成词向量和词汇-索引映射字典,可以用全数据集
"""
allWords = [word for review in reviews for word in review]
        # drop stop words
subWords = [word for word in allWords if word not in self.stopWordDict]
        wordCount = Counter(subWords) # count word frequencies (sorted next)
sortWordCount = sorted(wordCount.items(), key=lambda x: x[1], reverse=True)
        # drop low-frequency words
words = [item[0] for item in sortWordCount if item[1] >= self.miniFreq]
        # get the vocab list and the pre-trained embedding matrix in matching order
vocab, wordEmbedding = self.getWordEmbedding(words)
self.wordEmbedding = wordEmbedding
self.wordToIndex = dict(zip(vocab, list(range(len(vocab)))))
self.indexToWord = dict(zip(list(range(len(vocab))), vocab))
self.n_symbols = len(self.wordToIndex) + 1
        # save the word-index mappings as JSON so inference can load them directly
with open('../data/wordJson/wordToIndex.json', "w", encoding="utf-8") as f:
json.dump(self.wordToIndex, f)
with open('../data/wordJson/indexToWord.json', "w", encoding="utf-8") as f:
json.dump(self.indexToWord, f)
def reviewProcess(self, review, sequenceLength, wordToIndex):
"""
将数据集中的每条评论里面的词,根据词表,映射为index表示
每条评论 用index组成的定长数组来表示
"""
reviewVec = np.zeros((sequenceLength))
sequenceLen = sequenceLength
        # if the review is shorter than the fixed length, only fill the part that exists
if len(review) < sequenceLength:
sequenceLen = len(review)
for i in range(sequenceLen):
if review[i] in wordToIndex:
reviewVec[i] = wordToIndex[review[i]]
else:
reviewVec[i] = wordToIndex["UNK"]
return reviewVec
def genTrainEvalData(self, x, y, rate):
"""
生成训练集和验证集
"""
reviews = []
labels = []
        # convert every review into its index representation
for i in range(len(x)):
reviewVec = self.reviewProcess(x[i], self.sequenceLength, self.wordToIndex)
reviews.append(reviewVec)
labels.append([y[i]])
trainIndex = int(len(x) * rate)
trainReviews = sequence.pad_sequences(reviews[:trainIndex], maxlen=self.sequenceLength)
trainReviews = np.asarray(reviews[:trainIndex], dtype="int64")
trainLabels = np.array(labels[:trainIndex], dtype="float32")
trainLabels = to_categorical(trainLabels, num_classes=2)
evalReviews = sequence.pad_sequences(reviews[trainIndex:], maxlen=self.sequenceLength)
evalReviews = np.asarray(reviews[trainIndex:], dtype="int64")
evalLabels = np.array(labels[trainIndex:], dtype="float32")
evalLabels = to_categorical(evalLabels, num_classes=2)
return trainReviews, trainLabels, evalReviews, evalLabels
def dataGen(self):
"""
初始化训练集和验证集
"""
        # read the stop words
self.readStopWord(self.stopWordSource)
        # read the dataset
reviews, labels = self.readData(self.dataSource)
        # tokenise and drop stop words
        # build and save the word-index mappings and the pre-trained embedding matrix
self.genVocabulary(reviews)
        # build the training and test sets
trainReviews, trainLabels, evalReviews, evalLabels = self.genTrainEvalData(reviews, labels, self.rate)
self.trainReviews = trainReviews
self.trainLabels = trainLabels
self.evalReviews = evalReviews
self.evalLabels = evalLabels
``` |
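A hypothetical driver for the `Dataset` class above; the `Config` fields mirror exactly the attributes read in `__init__`. Paths and sizes are made-up placeholders, and the word2vec model plus the `../data/wordJson/` directory referenced above must already exist:

```python
class Config:
    dataSource = '../data/train.txt'        # one "text,,label" pair per line
    stopWordSource = '../data/stopwords.txt'
    sequenceLength = 200
    embeddingSize = 100
    batchSize = 128
    rate = 0.8                              # train/eval split ratio
    miniFreq = 2

data = Dataset(Config())
data.dataGen()
print(data.trainReviews.shape, data.trainLabels.shape)
```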
{
"source": "7125messi/user_profile_case",
"score": 2
} |
#### File: user_profile_case/code/middletable_build.py
```python
import os
import sys
import time
import datetime
import logging
from pyspark.sql import SparkSession
from pyspark import SparkConf, SparkContext
ONE_DAY = datetime.timedelta(days=1)
class Logger(object):
"""docstring for Logger"""
def __init__(self, path, clevel=logging.DEBUG, flevel=logging.DEBUG):
super(Logger, self).__init__()
self.logger = logging.getLogger(path)
self.logger.setLevel(logging.DEBUG)
fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S')
        # console (stream) handler
sh = logging.StreamHandler()
sh.setFormatter(fmt)
sh.setLevel(clevel)
        # file handler
fh = logging.FileHandler(path)
fh.setFormatter(fmt)
fh.setLevel(flevel)
self.logger.addHandler(sh)
self.logger.addHandler(fh)
def debug(self, msg):
self.logger.debug(msg)
def info(self, msg):
self.logger.info(msg)
def war(self, msg):
self.logger.warn(msg)
def error(self, msg):
self.logger.error(msg)
def cri(self, msg):
self.logger.critical(msg)
def exec_time_utils(start_exec_time):
last_time = time.time() - start_exec_time
m, s = divmod(last_time, 60)
h, m = divmod(m, 60)
return "Elapse Times: %02d:%02d:%02d" % (h, m, s)
def get_before_day_str(olddate, format="%Y%m%d", days=1):
    # accept either a 'YYYYMMDD' string or a datetime and return the date `days` earlier
    if isinstance(olddate, str):
        olddate = datetime.datetime.strptime(olddate, format)
    before_date = olddate - datetime.timedelta(days=days)
    return before_date.strftime("%Y%m%d")
def buid_query_sql(data_date):
    '''
    Build the daily query SQL strings.
    --------
    data_date: str
        e.g. '20180801'; spliced into each SQL so that only that day's partition is read
    return
    --------
    cookie_dau_goods_rela_sql: str
        metric query against the intermediate table cookie_dau_goods_relation_event
    goods_detail_duration_sql: str
        goods-detail visit duration query against the ods_page_view_log log table
    goods_paid_order_sql: str
        order count / purchase count query against the intermediate table cookie_order
    goods_imp_sql: str
        goods impression query against the intermediate table dw_cookie_dau_goods_relation_imp
    goods_click_gd_event_sql: str
        goods_click and other goods-detail event metric query against the ods_event_log event table
    '''
# 获取中间表指标 cookie_dau_goods_relation_event
cookie_dau_goods_rela_sql = "\
select\
cookie_id as cookieid\
,goods_id as goodsid\
,site_id as siteid\
,visit_cnt as visit_cnt\
,cart_cnt as cart_cnt\
,fav_cnt as fav_cnt\
,share_cnt as share_cnt\
,checkout_cnt as checkout_cnt\
,pay_type_cnt as pay_type_cnt\
,signed_cnt as signed_cnt\
,reg_cnt as reg_cnt\
,trytopay_cnt as trytopay_cnt\
,0 as gd_visit_duration\
,0 as paid_cnt\
,0 as order_cnt\
,0 as goods_impression_cnt\
,0 as goods_click_cnt\
,0 as gd_reviews_click_cnt\
,0 as gd_reviews_more_click_cnt\
,0 as gd_details_click_cnt\
,0 as gd_select_click_cnt\
,0 as gd_share_click_cnt\
,0 as gd_shopgoods_click_cnt\
,0 as cart_click_cnt\
,0 as cart_submit_click_cnt\
,0 as fav_recommend_cnt\
,0 as fav_main_cnt\
,0 as sizeguide_click_cnt\
,0 as gd_shop_tips_click_cnt\
,0 as gd_brand_click_cnt\
,0 as gd_brand_recommend_cnt\
,0 as gd_coupon_click_cnt\
,0 as gd_coupon_receive_cnt\
from dw.cookie_dau_goods_relation_event\
where data_date = '"+ data_date +"'\
and site_id not in(400, 700)\
and goods_id > 0\
and regexp_replace(cookie_id, '[a-zA-Z0-9\-]', '') = ''\
"
    # goods-detail page visit duration from the page-view log table ods_page_view_log
goods_detail_duration_sql = "\
select\
a.cookieid as cookieid\
,b.gid as goodsid\
,a.siteid as siteid\
,0 as visit_cnt\
,0 as cart_cnt\
,0 as fav_cnt\
,0 as share_cnt\
,0 as checkout_cnt\
,0 as pay_type_cnt\
,0 as signed_cnt\
,0 as reg_cnt\
,0 as trytopay_cnt\
,sum(a.pageview_duration) as gd_visit_duration\
,0 as paid_cnt\
,0 as order_cnt\
,0 as goods_impression_cnt\
,0 as goods_click_cnt\
,0 as gd_reviews_click_cnt\
,0 as gd_reviews_more_click_cnt\
,0 as gd_details_click_cnt\
,0 as gd_select_click_cnt\
,0 as gd_share_click_cnt\
,0 as gd_shopgoods_click_cnt\
,0 as cart_click_cnt\
,0 as cart_submit_click_cnt\
,0 as fav_recommend_cnt\
,0 as fav_main_cnt\
,0 as sizeguide_click_cnt\
,0 as gd_shop_tips_click_cnt\
,0 as gd_brand_click_cnt\
,0 as gd_brand_recommend_cnt\
,0 as gd_coupon_click_cnt\
,0 as gd_coupon_receive_cnt\
from\
(\
select cookieid\
,siteid\
,get_json_object(segment, '$.pvid2') as pvid2\
,max(pageview_duration) as pageview_duration\
from ods.ods_page_view_log\
where data_date = '"+ data_date +"'\
and pagename = 'GoodsDetail'\
and siteid not in(300, 400)\
and regexp_replace(cookieid, '[a-zA-Z0-9\-]', '') = ''\
and get_json_object(segment, '$.pvid2') is not null\
group by cookieid, siteid, get_json_object(segment, '$.pvid2')\
) a\
inner join\
(\
select\
cookieid\
,siteid\
,pvid2\
,gid\
from\
(\
select cookieid\
,siteid\
,get_json_object(segment, '$.pvid2') as pvid2\
,get_json_object(segment, '$.gid') as gid\
,row_number() over(partition by cookieid, siteid, get_json_object(segment, '$.pvid2') order by servertime desc) as desc_no\
from ods.ods_page_view_log\
where data_date = '"+ data_date +"'\
and pagename = 'GoodsDetail'\
and siteid not in(400, 700)\
and regexp_replace(cookieid, '[a-zA-Z0-9\-]', '') = ''\
and get_json_object(segment, '$.pvid2') is not null\
and get_json_object(segment, '$.gid') is not null\
) p\
where desc_no = 1\
) b\
on a.cookieid = b.cookieid and a.pvid2 = b.pvid2 and a.siteid = b.siteid\
where b.gid > 0\
group by a.cookieid, b.gid, a.siteid\
"
    # order and purchase metrics from the intermediate table cookie_order
goods_paid_order_sql = "\
select\
cookieid as cookieid\
,goodsid as goodsid\
,site_id as siteid\
,0 as visit_cnt\
,0 as cart_cnt\
,0 as fav_cnt\
,0 as share_cnt\
,0 as checkout_cnt\
,0 as pay_type_cnt\
,0 as signed_cnt\
,0 as reg_cnt\
,0 as trytopay_cnt\
,0 as gd_visit_duration\
,sum(is_paid) as paid_cnt\
,sum(is_order) as order_cnt\
,0 as goods_impression_cnt\
,0 as goods_click_cnt\
,0 as gd_reviews_click_cnt\
,0 as gd_reviews_more_click_cnt\
,0 as gd_details_click_cnt\
,0 as gd_select_click_cnt\
,0 as gd_share_click_cnt\
,0 as gd_shopgoods_click_cnt\
,0 as cart_click_cnt\
,0 as cart_submit_click_cnt\
,0 as fav_recommend_cnt\
,0 as fav_main_cnt\
,0 as sizeguide_click_cnt\
,0 as gd_shop_tips_click_cnt\
,0 as gd_brand_click_cnt\
,0 as gd_brand_recommend_cnt\
,0 as gd_coupon_click_cnt\
,0 as gd_coupon_receive_cnt\
from dw.cookie_order\
where data_date ='"+ data_date +"'\
and site_id not in(400, 700)\
and goodsid > 0\
and regexp_replace(cookieid, '[a-zA-Z0-9\-]', '') = ''\
group by cookieid\
,goodsid\
,site_id\
"
    # goods impression metrics from the intermediate table dw_cookie_dau_goods_relation_imp
goods_imp_sql = "\
select\
cookie_id as cookieid\
,goods_id as goodsid\
,site_id as siteid\
,0 as visit_cnt\
,0 as cart_cnt\
,0 as fav_cnt\
,0 as share_cnt\
,0 as checkout_cnt\
,0 as pay_type_cnt\
,0 as signed_cnt\
,0 as reg_cnt\
,0 as trytopay_cnt\
,0 as gd_visit_duration\
,0 as paid_cnt\
,0 as order_cnt\
,imp_cnt as goods_impression_cnt\
,0 as goods_click_cnt\
,0 as gd_reviews_click_cnt\
,0 as gd_reviews_more_click_cnt\
,0 as gd_details_click_cnt\
,0 as gd_select_click_cnt\
,0 as gd_share_click_cnt\
,0 as gd_shopgoods_click_cnt\
,0 as cart_click_cnt\
,0 as cart_submit_click_cnt\
,0 as fav_recommend_cnt\
,0 as fav_main_cnt\
,0 as sizeguide_click_cnt\
,0 as gd_shop_tips_click_cnt\
,0 as gd_brand_click_cnt\
,0 as gd_brand_recommend_cnt\
,0 as gd_coupon_click_cnt\
,0 as gd_coupon_receive_cnt\
from dw.dw_cookie_dau_goods_relation_imp\
where data_date = '"+ data_date +"'\
and site_id not in(400, 700)\
and goods_id > 0\
and regexp_replace(cookie_id, '[a-zA-Z0-9\-]', '') = ''\
"
    # goods_click and other goods-detail event metrics from the event log table ods_event_log
goods_click_gd_event_sql = "\
select\
cookieid\
,goodsid\
,siteid\
,0 as visit_cnt\
,0 as cart_cnt\
,0 as fav_cnt\
,0 as share_cnt\
,0 as checkout_cnt\
,0 as pay_type_cnt\
,0 as signed_cnt\
,0 as reg_cnt\
,0 as trytopay_cnt\
,0 as gd_visit_duration\
,0 as paid_cnt\
,0 as order_cnt\
,0 as goods_impression_cnt\
,sum(goods_click) as goods_click_cnt\
,sum(gd_reviews_click) as gd_reviews_click_cnt\
,sum(gd_reviews_more_click) as gd_reviews_more_click_cnt\
,sum(gd_details_click) as gd_details_click_cnt\
,sum(gd_select_click) as gd_select_click_cnt\
,sum(gd_share_click) as gd_share_click_cnt\
,sum(gd_shopgoods_click) as gd_shopgoods_click_cnt\
,sum(cart_click) as cart_click_cnt\
,sum(cart_submit_click) as cart_submit_click_cnt\
,sum(fav_recommend) as fav_recommend_cnt\
,sum(fav_main) as fav_main_cnt\
,sum(sizeguide_click) as sizeguide_click_cnt\
,sum(gd_shopping_tips_click) as gd_shop_tips_click_cnt\
,sum(gd_brand_click) as gd_brand_click_cnt\
,sum(gd_brandgoods_click) as gd_brand_recommend_cnt\
,sum(gd_coupon_click) as gd_coupon_click_cnt\
,sum(gd_coupon_receive) as gd_coupon_receive_cnt\
from\
(\
select\
cookieid\
,goodsid\
,siteid\
,case when lower(eventkey)='goods_click' then 1 else 0 end as goods_click\
,case when lower(eventkey)='goodsdetail_reviews_click' then 1 else 0 end as gd_reviews_click\
,case when lower(eventkey)='goodsdetail_reviews_more_click' then 1 else 0 end as gd_reviews_more_click\
,case when lower(eventkey)='goodsdetail_details_click' then 1 else 0 end as gd_details_click\
,case when lower(eventkey)='goodsdetail_select_click' then 1 else 0 end as gd_select_click\
,case when lower(eventkey)='goodsdetail_share_click' then 1 else 0 end as gd_share_click\
,case when lower(eventkey)='goodsdetail_shopgoods_click' then 1 else 0 end as gd_shopgoods_click\
,case when lower(eventkey)='addtobag_click' then 1 else 0 end as cart_click\
,case when lower(eventkey)='addtobag_submit_click' then 1 else 0 end as cart_submit_click\
,case when lower(eventkey)='wishlist_add' and lable='1' then 1 else 0 end as fav_recommend\
,case when lower(eventkey)='wishlist_add' and lable='1' then 1 else 0 end as fav_main\
,case when lower(eventkey)='addtobag_sizeguide_click' then 1 else 0 end as sizeguide_click\
,case when lower(eventkey)='goodsdetail_shopping_tips_click' then 1 else 0 end as gd_shopping_tips_click\
,case when lower(eventkey)='goodsdetail_brand_click' then 1 else 0 end as gd_brand_click\
,case when lower(eventkey)='goodsdetail_brandgoods_click' then 1 else 0 end as gd_brandgoods_click\
,case when lower(eventkey)='goodsdetail_coupon_click' then 1 else 0 end as gd_coupon_click\
,case when lower(eventkey)='goodsdetail_coupon_receive_result' and result='1' then 1 else 0 end as gd_coupon_receive\
from ods.ods_event_log\
where data_date = '"+ data_date +"'\
and siteid not in(400, 700)\
and goodsid > 0\
and regexp_replace(cookieid, '[a-zA-Z0-9\-]', '') = ''\
and lower(eventkey) in(\
'goods_click'\
,'goodsdetail_reviews_click'\
,'goodsdetail_reviews_more_click'\
,'goodsdetail_details_click'\
,'goodsdetail_select_click'\
,'goodsdetail_share_click'\
,'goodsdetail_shopgoods_click'\
,'addtobag_click'\
,'addtobag_submit_click'\
,'wishlist_add'\
,'addtobag_sizeguide_click' \
,'goodsdetail_shopping_tips_click' \
,'goodsdetail_brand_click' \
,'goodsdetail_brandgoods_click' \
,'goodsdetail_coupon_click' \
,'goodsdetail_coupon_receive_result')\
) p\
group by cookieid\
,goodsid\
,siteid\
"
return cookie_dau_goods_rela_sql, goods_detail_duration_sql, goods_paid_order_sql, goods_imp_sql, goods_click_gd_event_sql
def main():
if 1 == len(sys.argv):
today = datetime.datetime.today()
yesterday_str = get_before_day_str(today)
start_date_in = yesterday_str
end_date_in = start_date_in
elif 2 == len(sys.argv):
start_date_in = sys.argv[1]
end_date_in = start_date_in
elif 3 == len(sys.argv):
start_date_in = sys.argv[1]
end_date_in = sys.argv[2]
else:
print "Illegal Parameters!"
return
    # record the start time so elapsed time can be reported
start_time = time.time()
start_date_str = str(start_date_in)
end_date_str = str(end_date_in)
start_date = datetime.datetime.strptime(start_date_str, "%Y%m%d")
end_date = datetime.datetime.strptime(end_date_str, "%Y%m%d")
date_timedelta = end_date - start_date
epochs = date_timedelta.days+1
#logger.info('time range:'+start_date_str+'to'+end_date_str+' cycle:'+str(epochs))
    # process one daily partition per loop iteration
spark = SparkSession.builder.appName('userprofile_features_build').enableHiveSupport().getOrCreate()
partition_date_str = start_date_str
partition_date = start_date
for epoch in range(epochs):
epoch_start_time = time.time()
        # pass in the date and get back the SQL strings to execute
cookie_dau_goods_rela_sql, goods_detail_duration_sql, goods_paid_order_sql, goods_imp_sql, goods_click_gd_event_sql = buid_query_sql(partition_date_str)
df_cookie_dau_goods_rela = spark.sql(cookie_dau_goods_rela_sql)
df_goods_detail_duration = spark.sql(goods_detail_duration_sql)
df_goods_paid_order_duration = spark.sql(goods_paid_order_sql)
df_goods_imp = spark.sql(goods_imp_sql)
df_goods_click_gd_event = spark.sql(goods_click_gd_event_sql)
        # union all the per-dimension behaviour features together
df_cookie_goods_features = df_cookie_dau_goods_rela.unionAll(df_goods_detail_duration).unionAll(df_goods_paid_order_duration).unionAll(df_goods_imp).unionAll(df_goods_click_gd_event)
df_cookie_goods_features_sum = df_cookie_goods_features.groupBy('cookieid', 'goodsid', 'siteid').sum(*df_cookie_goods_features.columns[3:])
        # create a temporary view
df_cookie_goods_features_sum.createOrReplaceTempView('tmp_view_df_cookie_goods_features_sum')
        # insert into the target partitioned table
insert_sql = "\
insert overwrite table dw.dw_cookie_goods_log_day partition(data_date='"+ partition_date_str +"')\
select * from tmp_view_df_cookie_goods_features_sum\
"
spark.sql(insert_sql)
partition_elapse = exec_time_utils(epoch_start_time)
#logger.info(partition_date_str+'daily data finished,'+partition_elapse)
        # advance the loop date
partition_date = datetime.datetime.strptime(partition_date_str, "%Y%m%d")+ONE_DAY
partition_date_str = partition_date.strftime("%Y%m%d")
all_elapse = exec_time_utils(start_time)
if __name__ == '__main__':
main()
```
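A toy illustration of the `unionAll` + `groupBy().sum()` pattern used above to collapse the per-source metric frames into one row per (cookieid, goodsid, siteid); the data and the column subset are made up:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('union_sum_demo').getOrCreate()
cols = ['cookieid', 'goodsid', 'siteid', 'visit_cnt', 'order_cnt']
visits = spark.createDataFrame([('c1', 1, 100, 2, 0)], cols)
orders = spark.createDataFrame([('c1', 1, 100, 0, 1)], cols)

merged = visits.unionAll(orders)
merged.groupBy('cookieid', 'goodsid', 'siteid').sum(*merged.columns[3:]).show()
# one row: sum(visit_cnt)=2, sum(order_cnt)=1
```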
#### File: user_profile_case/code/userprofile_userid_paidinfo.py
```python
from pyspark import SparkContext,SparkConf
from pyspark.sql import SparkSession
import sys
import datetime
def main():
start_date = sys.argv[1]
start_date_str = str(start_date)
format_1 = "%Y%m%d"
format_2 = "%Y-%m-%d"
strptime, strftime = datetime.datetime.strptime, datetime.datetime.strftime
old_date_partition = strftime(strptime(start_date_str, format_1), format_2)
target_table = 'dw.profile_tag_user'
    # cumulative purchase amount
insert_all_paid_money = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_all_paid_money') \
select 'A220U083_001' as tagid, \
user_id as userid, \
sum(order_total_amount) as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from dw.dw_order_fact \
where pay_status in (1,3) \
group by 'A220U083_001',user_id "
    # cumulative number of purchases
insert_all_paid_times = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_all_paid_times') \
select 'A220U087_001' as tagid, \
user_id as userid, \
count(distinct order_id) as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from dw.dw_order_fact \
where pay_status in (1,3) \
group by 'A220U087_001',user_id "
    # days since the most recent purchase
insert_last_paid_days = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_last_paid') \
select 'A220U084_001' as tagid, \
t.user_id as userid, \
datediff(to_date("+"'"+old_date_partition+"'"+"),concat(substr(t.result_pay_time,1,10))) as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from ( \
select user_id, \
result_pay_time, \
row_number() over(partition by user_id order by result_pay_time desc) as rank \
from dw.dw_order_fact \
where pay_status in (1,3) \
) t \
where t.rank =1 \
group by 'A220U084_001',t.user_id, \
datediff(to_date("+"'"+old_date_partition+"'"+"),concat(substr(t.result_pay_time,1,10)))"
    # registered but never purchased
regist_notpaid = " insert overwrite table " + target_table + " partition(data_date="+"'"+start_date_str+"'"+",tagtype='userid_regist_notpaid') \
select 'A220U088_001' as tagid, \
user_id as userid, \
'' as tagweight, \
'' as tagranking, \
'' as reserve, \
'' as reserve1 \
from dim.dim_user_info \
where data_date = "+"'"+start_date_str+"'"+" \
and (paid_order_amount = 0 or paid_order_amount is null ) \
group by 'A220U088_001', user_id "
spark = SparkSession.builder.appName("userid_paidinfo").enableHiveSupport().getOrCreate()
spark.sql(insert_all_paid_money)
spark.sql(insert_all_paid_times)
spark.sql(insert_last_paid_days)
spark.sql(regist_notpaid)
if __name__ == '__main__':
main()
``` |
{
"source": "7131HDMC/Pioneiras",
"score": 3
} |
#### File: database/script/currentData.py
```python
import json
class CurrentData:
data_file = '../database_pioneiras.js'
def getData(self):
with open(self.data_file) as arq:
arq_str = arq.read() #return file as string
arq_str = arq_str.replace("\'","\"")
arq_str = self.removeJS(arq_str)
pioneers = json.loads(arq_str)
return pioneers
"""
file is a string
"""
def removeJS(self,file):
#remove all before the first '{'
key = file.find('{')
rest = file[:key]
file = file.replace(rest,' ')
#remove all after ';', with it
key = file.find(';')
rest = file[key:]
file = file.replace(rest,' ')
file = file.replace("require(\"","\"<")
file = file.replace("\"),", ">\",\n")
return file
def format(self,file):
file = file.replace("{", "{ \n")
file = file.replace("\"require","require")
file = file.replace(")\"", ")")
return file
"""
file is a string
"""
def putJS(self,file):
file = file.replace("\'<","require(\"")
file = file.replace( ">\'" ,"\")")
#key = file.find('{')
#rest = file[:key]
file = 'var res = ' + file
file = file + '; \n export default res;'
file = self.format(file)
#file = file.replace("\'","\"")
return file
``` |
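Hypothetical usage of the helper above (it assumes `../database_pioneiras.js` exists relative to the script, as hard-coded in `data_file`):

```python
cd = CurrentData()
pioneers = cd.getData()              # JS object literal parsed into Python structures
print(type(pioneers), len(pioneers))
```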
{
"source": "7134g/mySpiderAll",
"score": 3
} |
#### File: common/async_using/async_for.py
```python
import asyncio
class AsyncIteratorWrapper:
def __init__(self, obj):
self._it = iter(obj)
def __aiter__(self):
return self
async def __anext__(self):
try:
value = next(self._it)
except StopIteration:
raise StopAsyncIteration
return value
async def run_for(string):
async for letter in AsyncIteratorWrapper(string):
print(letter)
def main():
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(each) for each in [run_for("abcd"), ]]
loop.run_until_complete(asyncio.wait(tasks))
if __name__ == '__main__':
main()
```
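The wrapper above implements the async-iterator protocol (`__aiter__`/`__anext__`) by hand; on Python 3.6+ the same loop can be written with a native async generator, sketched here:

```python
import asyncio

async def aiter_wrapper(obj):
    for item in obj:
        yield item                    # an async def containing yield is an async generator

async def run_for_gen(string):
    async for letter in aiter_wrapper(string):
        print(letter)

asyncio.get_event_loop().run_until_complete(run_for_gen("abcd"))
```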
#### File: common/design_patterns/Modele.py
```python
class User:
def __init__(self, name, shop, times, number):
self.name = name
self.shop = shop
self.times = times
self.number = number
class Handle:
def __init__(self, user=None):
self.user = user
def invoicen(self):
"""打印小票"""
string = "打印小票" \
"客户:{}" \
"商品:{}" \
"数量:{}" \
"时间:{}".format(self.user.code, self.user.shop, self.user.number, self.user.times)
print(string)
def make(self):
"""开始制作"""
print("制作完成:{} 数量:{}".format(self.user.shop, self.user.number))
def run(self):
self.invoicen()
self.make()
if __name__ == '__main__':
test = Handle()
xiaoming = User("小明", "汉堡", "17:50", "5")
test.user = xiaoming
test.run()
xiaohong = User("小红", "北京卷", "18:00", "2")
test.user = xiaohong
test.run()
```
#### File: common/design_patterns/Status.py
```python
class Base:
"""当状态是CPU使用率,在不同状态下的自动化运维脚本执行不同的操作"""
def executor(self, value):
self.run(value)
def run(self, value):
pass
class Low(Base):
def __init__(self):
self.name = "较低占用率状态"
def run(self, value):
print("当前:{} 值:{}".format(self.name, value))
print("无应急情况执行")
class Large(Base):
def __init__(self):
self.name = "较高占用率状态"
def run(self, value):
print("当前:{} 值:{}".format(self.name, value))
print("发送警报邮件")
class Statu:
def __init__(self):
self.value = 0.1
self.low = Low()
self.large = Large()
self.ststu = None
def monitor(self):
if self.value <0.5:
self.ststu = self.low
else:
self.ststu = self.large
self.ststu.executor(self.value)
if __name__ == '__main__':
test = Statu()
test.monitor()
test.value = 0.9
test.monitor()
```
#### File: common/design_patterns/Visitor.py
```python
class Finance:
"""财务数据结构类"""
def __init__(self):
        self.salesvolume = None # sales revenue
        self.cost = None # cost
        self.history_salesvolume = None # previous-period sales revenue
        self.history_cost = None # previous-period cost
def set_salesvolume(self, value):
self.salesvolume = value
def set_cost(self, value):
self.cost = value
def set_history_salesvolume(self, value):
self.history_salesvolume = value
def set_history_cost(self, value):
self.history_cost = value
def accept(self, visitor):
pass
class Finance_year(Finance):
"""2018年财务数据类"""
def __init__(self, year):
Finance.__init__(self)
        self.work = [] # list of staff assigned to review this year's data
self.year = year
def add_work(self, work):
self.work.append(work)
def accept(self):
for obj in self.work:
obj.visit(self)
class Accounting:
"""会计类"""
def __init__(self):
self.ID = "会计"
self.Duty = "计算报表"
def visit(self, table):
print('会计年度: {}'.format(table.year))
print("我的身份是: {} 职责: {}".format(self.ID, self.Duty))
print('本年度纯利润: {}'.format(table.salesvolume - table.cost))
print('------------------')
class Audit:
"""财务总监类"""
def __init__(self):
self.ID = "财务总监"
self.Duty = "分析业绩"
def visit(self, table):
print('会计总监年度: {}'.format(table.year))
print("我的身份是: {} 职责: {}".format(self.ID, self.Duty))
if table.salesvolume - table.cost > table.history_salesvolume - table.history_cost:
msg = "较同期上涨"
else:
msg = "较同期下跌"
print('本年度公司业绩: {}'.format(msg))
print('------------------')
class Adviser:
"""战略顾问"""
def __init__(self):
self.ID = "战略顾问"
self.Duty = "制定明年战略"
def visit(self, table):
print('战略顾问年度: {}'.format(table.year))
print("我的身份是: {} 职责: {}".format(self.ID, self.Duty))
if table.salesvolume > table.history_salesvolume:
msg = "行业上行,扩大生产规模"
else:
msg = "行业下行,减小生产规模"
print('本年度公司业绩: {}'.format(msg))
print('------------------')
class Work:
"""工作类"""
def __init__(self):
        self.works = [] # list of yearly data objects to process
def add_work(self, obj):
self.works.append(obj)
def remove_work(self, obj):
self.works.remove(obj)
def visit(self):
for obj in self.works:
obj.accept()
if __name__ == '__main__':
    work = Work() # plan: have the accountant, director and adviser process the 2018 data
    # instantiate the 2018 data structure
finance_2018 = Finance_year(2018)
finance_2018.set_salesvolume(200)
finance_2018.set_cost(100)
finance_2018.set_history_salesvolume(180)
finance_2018.set_history_cost(90)
    accounting = Accounting() # instantiate the accountant
    audit = Audit() # instantiate the finance director
    adviser = Adviser() # instantiate the strategy adviser
    finance_2018.add_work(accounting) # put the accountant on the 2018 review schedule
    finance_2018.add_work(audit) # put the director on the 2018 review schedule
    finance_2018.add_work(adviser) # put the adviser on the 2018 review schedule
    work.add_work(finance_2018) # add the 2018 financial work plan
work.visit()
```
#### File: common/log_design/decorator.py
```python
import time
import os
import logging
import traceback
# timer decorator
def timer(func):
def wrapper(*args, **kwargs):
t1 = time.time()
v = func(*args, **kwargs)
t2 = time.time()
z_time = t2-t1
print("本次操作耗费{}秒".format(z_time))
return v
return wrapper
# log-writing decorator (function style)
def write_log(name):
def wrapper(func):
def get_log(*args, **kwargs):
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
path = os.path.split(os.path.realpath(__file__))[0]
handler = logging.FileHandler(path + '\\log\\' + name + ".txt", encoding="utf-8")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
            try:
                result = func(*args, **kwargs)
            except Exception:
                logger.error(traceback.format_exc())
                raise
            logger.info(result)
            return result
return get_log
return wrapper
# log-writing decorator (class-based version)
class LoggerManage:
def __init__(self, name):
self.name = name
def __call__(self, func):
def wrapper(*args, **kwargs):
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
path = os.path.split(os.path.realpath(__file__))[0]
handler = logging.FileHandler(path + '\\log\\' + self.name + ".txt", encoding="utf-8")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
            try:
                result = func(*args, **kwargs)
            except Exception:
                logger.error(traceback.format_exc())
                raise
            logger.info(result)
            return result
return wrapper
if __name__ == '__main__':
@LoggerManage("error")
def test():
print("do something ...")
return "gogogo_finish"
test()
```
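Only the class-based decorator has a demo above; a hypothetical use of the function-style decorators, stacked so the logged call is also timed (it assumes a `log/` directory exists next to the script, which both log decorators require):

```python
@timer
@write_log("demo")
def crunch():
    return sum(range(1000))

crunch()   # prints the elapsed time and appends the result to .\log\demo.txt
```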
#### File: common/pdf/ocr_pdf.py
```python
from sys import stdout
from io import TextIOWrapper
from warnings import filterwarnings
filterwarnings("ignore")
stdout = TextIOWrapper(stdout.buffer, encoding='utf-8')
from pdfminer.high_level import *
from os import path as os_path
from os import remove as os_remove
from cnocr import CnOcr
from re import sub as re_sub
import fitz
def pdf_to_img(pdfPath):
img_paths = []
pdf_doc = fitz.open(pdfPath)
for pg in range(pdf_doc.pageCount):
page = pdf_doc[pg]
rotate = int(0)
zoom_x = 1.3 # (1.33333333-->1056x816) (2-->1584x1224)
zoom_y = 1.3
mat = fitz.Matrix(zoom_x, zoom_y).prerotate(rotate)
pix = page.get_pixmap(matrix=mat, alpha=False)
temp_dir, _ = os_path.split(pdfPath)
name, ext = os_path.splitext(pdfPath)
image_path = os_path.join(temp_dir, "{}_img_{}.png".format(name, (pg + 1)))
pix.save(image_path)
img_paths.append(image_path)
pdf_doc.close()
return img_paths
def extract_img(path: str):
ocr = CnOcr()
res = ocr.ocr(path)
lines = []
for obj in res:
line = "".join(obj[0])
lines.append(line)
data = "\n".join(lines)
os_remove(path)
return data
def extract_pdf(pdf_path):
text = extract_text(pdf_path)
text = re_sub("\n+", "\n", text)
return text
def main():
pdf_path = sys.argv[1]
text = extract_pdf(pdf_path)
if len(text) < 10:
        # the PDF is a scan (pages are images), so fall back to OCR
paths = pdf_to_img(pdf_path)
text: str = ""
for p in paths:
text += extract_img(p)
print(text)
else:
print(text)
if __name__ == '__main__':
import datetime
n = datetime.datetime.now()
main()
print(datetime.datetime.now() - n)
```
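An illustrative check of the fallback logic above on a single file (the path is a placeholder; pdfminer.six, cnocr and PyMuPDF must be installed):

```python
text = extract_pdf("sample.pdf")
if len(text) < 10:                     # practically no text layer -> treat as a scanned PDF
    text = "".join(extract_img(p) for p in pdf_to_img("sample.pdf"))
print(text[:200])
```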
#### File: common/pdf/parse_pdf.py
```python
import io
from pdfminer.high_level import *
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
def return_txt():
name = sys.argv[1]
text = extract_text(name)
print(text)
if __name__ == '__main__':
return_txt()
```
#### File: common/PTA/threading_general.py
```python
import random
import threading
import time
from concurrent.futures import wait
from concurrent.futures.thread import ThreadPoolExecutor
# Thread pool, non-blocking submission
class MThreadPool:
def __init__(self):
self.db = []
def start(self, params, params2):
pass
def run(self):
        count = 0  # task counter, used to report progress on pool submissions
        max_workers = 16  # at most 16 workers operate at the same time
with ThreadPoolExecutor(max_workers=max_workers) as thread_pool:
tasks = []
while True:
for each in self.db:
params = each
params2 = each
futrue = thread_pool.submit(self.start, params, params2)
count += 1
tasks.append(futrue)
if count % 100 == 0:
print(f"此时操作了 {count}", flush=True)
                wait(tasks)  # wait for the submitted tasks to finish
tasks = []
# Plain threads; join() below blocks until all of them finish
class MThread:
def start(self, params, params2):
time.sleep(params)
print(threading.current_thread(), params, params2)
def run(self):
ts = []
        # build the threads
temp = list(range(5))
random.shuffle(temp)
for i in temp:
params, params2 = i, i + 100
ts.append(threading.Thread(target=self.start, args=(params, params2)))
        # start the threads
for t in ts:
t.start()
        # wait for all of them to finish
for t in ts:
t.join()
if __name__ == '__main__':
q = MThread()
q.run()
```
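A self-contained sketch of the submit/wait pattern that `MThreadPool.run()` above relies on (toy workload, four workers):

```python
from concurrent.futures import ThreadPoolExecutor, wait

def job(n):
    return n * n

with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(job, n) for n in range(8)]
    wait(futures)                               # block until every future completes
    print([f.result() for f in futures])        # [0, 1, 4, ..., 49]
```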
#### File: py/network/add_net_card.py
```python
import sys,os,re
# functions
def pro_continue():
input("按Enter键退出")
def nic_count(x):
if x<2:
print("网络叠加需要两块或两块以上网卡")
exit()
elif x>4:
print("该程序最多支持叠加四块网卡")
exit()
def add_routetables2(i,g):
net_1=[1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63,65,67,69,71,73,75,77,79,81,83,85,87,89,91,93,95,97,99,101,103,105,107,109,111,113,115,117,119,121,123,125,129,131,133,135,137,139,141,143,145,147,149,151,153,155,157,159,161,163,165,167,171,173,175,177,179,181,183,185,187,189,191,193,195,197,199,201,203,205,207,209,211,213,215,217,219,221,223]
net_2=[2,4,6,8,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158,160,162,164,166,168,170,174,176,178,180,182,184,186,188,190,194,196,198,200,202,204,206,208,210,212,214,216,218,220,222]
print("开始负载均衡")
os.system("route delete 0.0.0.0")
os.system("route add 0.0.0.0 mask 0.0.0.0 " + str(g[0]) + " metric 30 if " + str(i[0]))
a=0
for x in net_1:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[0]) +" metric 25 if " + str(i[0]))
for x in net_2:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[1]) +" metric 25 if " + str(i[1]))
print("双网卡叠加成功")
def add_routetables3(i,g):
net_1=[1,4,7,13,16,19,22,25,28,31,34,37,40,43,46,49,52,55,58,61,64,67,70,73,76,79,82,85,88,91,94,97,100,103,106,109,112,115,118,121,124,130,133,136,139,142,145,148,151,154,157,160,163,166,175,178,181,184,187,190,193,196,199,202,205,208,211,214,217,220,223]
net_2=[2,5,8,11,14,17,20,23,26,29,32,35,38,41,44,47,50,53,56,59,62,65,68,71,74,77,80,83,86,89,92,95,98,101,104,107,110,113,116,119,122,125,128,131,134,137,140,143,146,149,152,155,158,161,164,167,170,173,176,179,182,185,188,191,194,197,200,203,206,209,212,215,218,221]
net_3=[3,6,9,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57,60,63,66,69,72,75,78,81,84,87,90,93,96,99,102,105,108,111,114,117,120,123,126,129,132,135,138,141,144,147,150,153,156,159,162,165,168,171,174,177,180,183,186,189,195,198,201,204,207,210,213,216,219,222]
print("开始负载均衡")
os.system("route delete 0.0.0.0")
os.system("route add 0.0.0.0 mask 0.0.0.0 " + str(g[0]) + " metric 30 if " + str(i[0]))
a=0
for x in net_1:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[0]) +" metric 25 if " + str(i[0]))
for x in net_2:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[1]) +" metric 25 if " + str(i[1]))
for x in net_3:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[2]) +" metric 25 if " + str(i[2]))
print("三网卡叠加成功")
def add_routetables4(i,g):
net_1=[1,5,9,13,17,21,25,29,33,37,41,45,49,53,57,61,65,69,73,77,81,85,89,93,97,101,105,109,113,117,121,125,129,133,137,141,145,149,153,157,161,165,173,177,181,185,189,193,197,201,205,209,213,217,221]
net_2=[2,6,14,18,22,26,30,34,38,42,46,50,54,58,62,66,70,74,78,82,86,90,94,98,102,106,110,114,118,122,126,130,134,138,142,146,150,154,158,162,166,170,174,178,182,186,190,194,198,202,206,210,214,218,222]
net_3=[3,7,11,15,19,23,27,31,35,39,43,47,51,55,59,63,67,71,75,79,83,87,91,95,99,103,107,111,115,119,123,131,135,139,143,147,151,155,159,163,167,171,175,179,183,187,191,195,199,203,207,211,215,219,223]
net_4=[4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,96,100,104,108,112,116,120,124,128,132,136,140,144,148,152,156,160,164,168,176,180,184,188,196,200,204,208,212,216,220]
print("开始负载均衡")
os.system("route delete 0.0.0.0")
os.system("route add 0.0.0.0 mask 0.0.0.0 " + str(g[0]) + " metric 30 if " + str(i[0]))
a=0
for x in net_1:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[0]) +" metric 25 if " + str(i[0]))
for x in net_2:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[1]) +" metric 25 if " + str(i[1]))
for x in net_3:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[2]) +" metric 25 if " + str(i[2]))
for x in net_4:
os.system ("route add " + str(x) + ".0.0.0 mask 255.0.0.0 "+ str(g[3]) +" metric 25 if " + str(i[3]))
print("四网卡叠加成功")
def check_ip(ip_str):
pattern = r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b"
if re.match(pattern, ip_str):
return True
else:
return False
# main program
os.system("title 网卡叠加-www.slll.info&&color 18")
net_count=int(input("请输入网卡数量(MAX:4,Min:2): "))
nic_count(net_count)
arr_1=[]
arr_2=[]
for x in range(1,net_count+1):
temp=input("请输入第"+str(x)+"块需要叠加的网卡索引号 (cmd下面利用该命令查看:route print | find \"...\"[第一列即索引号]): ")
arr_1.append(temp)
temp=input("请输入网卡(" +str(x)+") 的网关: ")
while True:
if check_ip(temp):
arr_2.append(temp)
break
else:
temp=input("输入错误,请重新输入网卡(" +str(x)+") 的网关: ")
if net_count==2:
add_routetables2(arr_1,arr_2)
elif net_count==3:
add_routetables3(arr_1,arr_2)
elif net_count==4:
add_routetables4(arr_1,arr_2)
pro_continue()
```
#### File: spider/caricature/dldl.py
```python
import requests
import os
import shutil
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor
from threading import currentThread
import traceback
from PIL import Image
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def save_image(input_name):
im = Image.open(input_name)
if im.mode=="RGBA":
im.load() # required for png.split()
background = Image.new("RGB", im.size, (255, 255, 255))
background.paste(im, mask=im.split()[3]) # 3 is the alpha channel
im = background
im.save(input_name.replace('.webp', '.jpg'),'JPEG')
os.remove(input_name)
def get_task():
url = 'https://www.manhuatai.com/api/getComicInfoBody?product_id=2&productname=mht&platformname=pc&comic_id=25933'
response = requests.get(url, verify=False)
response_json = response.json()
tasks = response_json['data']['comic_chapter']
with ProcessPoolExecutor() as pool:
for index,task in enumerate(tasks[::-1]):
pool.submit(deal_task, index, task)
def deal_task(index, task):
# for index,task in enumerate(tasks[::-1]):
print(currentThread())
page_count = task['end_num']
page_name = task['chapter_name']
path = "../download/dldl/{}".format(page_name)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
for count in range(page_count):
img_url = "https://mhpic.manhualang.com/comic/D/%E6%96%97%E7%BD%97%E5%A4%A7%E9%99%86%E6%8B%86%E5%88%86%E7%89%88/{}话/{}.jpg-mht.middle.webp".format(index+1,count+1)
response_img = requests.get(img_url)
img_path = path + '/{}_{}.webp'.format(page_name,count)
with open(img_path, 'wb') as f:
f.write(response_img.content)
f.close()
save_image(img_path)
print(page_name, '已经完成')
if __name__ == '__main__':
get_task()
```
#### File: spider/miscellany/Airlines_xiecheng_pyppeteer.py
```python
import asyncio
from pyppeteer import launch
async def main(tasks):
try:
for task in tasks:
browser = await creat()
page = await browser.newPage()
task["msg"] = "0000"
airline = task['company']
price = 0
# https://flights.ctrip.com/international/search/oneway-{start}-{end}?depdate={date}directflight=1
# https://flights.ctrip.com/international/search/oneway-{start}-{end}?depdate={date}&cabin=y_s&adult=1&child=0&infant=0&directflight=1
url = "https://flights.ctrip.com/international/search/oneway-{start}-{end}?depdate={date}&cabin=y_s&adult=1&child=0&infant=0&directflight=1&airline={airline}".format(
start=task['start'],
end=task['end'],
date=task['date'],
airline=airline[:2])
await page.setViewport({'width': 1366, 'height': 768})
await page.goto(url)
while not await page.xpath("//div[contains(@id, 'comfort-{}')]".format(airline)):
pass
tag_index = -1 # used to check whether this flight exists in the result list
place_list = await page.xpath('//span[@class="plane-No"]')
for index, place in enumerate(place_list):
place = await (await (place.getProperty("textContent"))).jsonValue()
print(place)
if task['company'] in str(place):
tag_index = index
break
if tag_index != -1:
# price
while not await page.xpath('//div[@class="price-box"]/div'):
pass
price_list = await page.xpath('//div[@class="price-box"]/div')
price = (await (await price_list[tag_index].getProperty("textContent")).jsonValue())[1:]
print("当前价格{},价格区间{}-{}".format(price, task['min_price'], task['max_price']))
# check whether the price stays within the configured range
if not (int(task['min_price']) < int(price) < int(task['max_price'])):
break
if price:
pay_id = "#{}_0".format(tag_index)
await page.click(pay_id)
await asyncio.gather(
page.waitForNavigation(),
page.click(pay_id),
)
finally:
await browser.close()
print("close")
async def creat():
launch_kwargs = {
# whether to run in headless mode
"headless": False,
# chrome command line arguments
"args": [
'--window-size=1366,850',
# hide the infobar, e.g. "chrome is being controlled by automated test software"
"--disable-infobars",
# log level; on some stripped-down systems the default level prints a flood of warnings
"--log-level=3",
# set the user agent
"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
],
# user data directory; it is best to specify one yourself
# if not specified, chrome creates a temporary directory and deletes it when the browser exits
# that deletion can fail (permission issues, observed on windows) and make the browser exit fail
# the chrome process then never exits and CPU usage spikes to 99%
"userDataDir": r"D:\FXJprograme\FXJ\test\temp",
}
browser = await launch(launch_kwargs)
return browser
params = [
{
'up': 1234567890,
'type': 1,
"modele_name": '1.1.1',
'start': 'tpe',
'end': 'osa',
'company': 'D7370',
'date': '2019-08-09',
'min_price': 0,
'max_price': 40000,
'user': {
'surnames': 'LAO',
'name': 'WANG',
'gender': 'M',
'country': '中国大陆',
'passport': 'XS1245378',
'born': '1996-12-30',
'passport_time': '2029-11-11',
'phone': '16644663659',
'email': '<EMAIL>',
}
},
{
'up': 1234567890,
'type': 1,
'is_stop': 0,
"modele_name": '1.1.1',
'start': 'tpe',
'end': 'osa',
'company': 'D7370',
'date': '2019-08-09',
'min_price': 0,
'max_price': 40000,
'user': {
'surnames': 'LAO',
'name': 'WANG',
'gender': 'M',
'country': '中国大陆',
'passport': 'XS1245378',
'born': '1996-12-30',
'passport_time': '2029-11-11',
'phone': '16644663659',
'email': '<EMAIL>',
}
},
]
asyncio.get_event_loop().run_until_complete(main(params))
```
#### File: spider/miscellany/bsbdj_vedio.py
```python
import requests
from bs4 import BeautifulSoup
import os
import asyncio
import aiohttp
import aiofiles
# fetch a url and return the raw response body
async def open_url(url):
# add request headers to avoid basic anti-crawling checks
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36"}
connector = aiohttp.TCPConnector(verify_ssl=False)
async with aiohttp.ClientSession(connector=connector,headers=header) as session:
async with session.get(url=url) as response:
# read the body before the session closes, otherwise the response is unusable
return await response.read()
# collect the urls of the individual video pages
async def get_url(url):
page_url = []
html = await open_url(url)
soup = BeautifulSoup(html,"html.parser")
for href in soup.select(".j-r-list-c .j-r-list-c-desc a"):
page = href['href']
print(page)
#print(page)
href_url = "".join(["http://www.budejie.com" , page])
page_url.append(href_url)
#print(page_url)
return page_url
# resolve the video addresses and download each video to a file
async def down_video(url):
# make sure the download directory used below exists
os.makedirs('../download/dldl', exist_ok=True)
page_url = await get_url(url)
for addres in page_url:
html = await open_url(addres)
soup = BeautifulSoup(html,"html.parser")
#print(soup)
# the video address
mp4 = soup.select('.j-r-list-c .j-video-c .j-video')[0]['data-mp4']
#print(mp4)
file = "".join(["../download/dldl/",mp4.split('/')[-1]])
async with aiofiles.open(file,"wb") as f:
# download the raw bytes of the video, not the decoded text
video = await open_url(mp4)
await f.write(video)
if __name__ == "__main__":
count = 10
loop = asyncio.get_event_loop()
urls = ["http://www.budejie.com/video/{}".format(index) for index in range(1,count)]
future = [asyncio.ensure_future(down_video(url)) for url in urls]
loop.run_until_complete(asyncio.wait(future))
```
#### File: spider/miscellany/city2017.py
```python
import requests
import bs4
import openpyxl
import re
def open_url(url):
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'}
html = requests.get(url, headers = headers)
return html
def find_data(html):
content = bs4.BeautifulSoup(html.text,'html.parser')
paragraph = content.find(id="Cnt-Main-Article-QQ")
target = paragraph.find_all('p',style="TEXT-INDENT: 2em")
for each in target:
print(each)
target = iter(target)
data = []
for each in target:
if each.text.isnumeric():
data.append([
re.search(r'\[(.+)\]',next(target).text).group(1),
re.search(r'\d.*',next(target).text).group(0),
re.search(r'\d.*',next(target).text).group(0),
re.search(r'\d.*',next(target).text).group(0)
])
return data
def save_excel(data):
wb = openpyxl.Workbook()
wb.guess_types = True
ws = wb.active
ws.append(['城市','平均房价','平均工资','房价工资比'])
for each in data:
ws.append(each)
ws.column_dimensions['B'].width = 15
ws.column_dimensions['C'].width = 15
ws.freeze_panes = 'A2'
wb.save('2017年中国主要城市房价工资比排行榜.xlsx')
def main():
url = 'http://news.house.qq.com/a/20170702/003985.htm'
html = open_url(url)
data = find_data(html)
save_excel(data)
if __name__ == '__main__':
main()
```
#### File: spider/miscellany/db250.py
```python
import requests
import bs4
import re
# open the page
def open_url(url):
header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
res = requests.get(url, headers = header)
return res
# extract the needed content
def find_movies(res):
soup = bs4.BeautifulSoup(res.text, 'html.parser')
# movie names
movies = []
targets = soup.find_all('div',class_='hd')
for each in targets:
movies.append(each.a.span.text)
# ratings
ranks = []
targets = soup.find_all('span',class_='rating_num')
for each in targets:
ranks.append(' 评分:%s' % each.text)
# extra info
messages = []
targets = soup.find_all('div',class_='bd')
for each in targets:
try:
messages.append(each.p.text.split('\n')[1].strip() + each.p.text.split('\n')[2].strip())
except:
continue
result = []
length = len(movies)
for i in range(length):
result.append(movies[i] + ranks[i] + messages[i] + '\n')
return result
# find out how many pages there are in total
def find_depth(res):
soup = bs4.BeautifulSoup(res.text,'html.parser')
depth = soup.find('span',class_='next').previous_sibling.previous_sibling.text
return int(depth)
def main():
host = "https://movie.douban.com/top250"
res = open_url(host)
depth = find_depth(res)
result = []
for i in range(depth):
url = host + '?start='+ str(25 * i) +'&filter='
res = open_url(url)
result.extend(find_movies(res))
with open('豆瓣TOP250电影.txt','w',encoding = 'utf-8') as f:
for each in result:
f.write(each)
if __name__ == '__main__':
main()
```
#### File: scrapyTemplates/Middleware/ip_pool_middlewares.py
```python
import requests
import base64
class Get_ip(object):
def __init__(self):
self.PROXY_POOL_URL = 'http://localhost:5555/random'
def get_proxy(self):
try:
response = requests.get(self.PROXY_POOL_URL)
if response.status_code == 200:
return response.text
except ConnectionError:
return None
class Ip_Pool_Middlewares(object):
def __init__(self):
self.PROXIES = Get_ip().get_proxy()
# handle proxies that come with credentials
def process_request(self,request,spider):
if isinstance(self.PROXIES, dict) and self.PROXIES.get('user_password') is not None:
base64_userpasswd = base64.b64encode(self.PROXIES['user_password'].encode()).decode()
request.meta['proxy'] = "http://" + self.PROXIES['proxy']
request.headers['Proxy-Authorization'] = 'Basic ' + base64_userpasswd
else:
request.meta['proxy'] = "http://"+ self.PROXIES
```
#### File: scrapyTemplates/Middleware/random_ip_mysql.py
```python
import MySQLdb
import MySQLdb.cursors
import requests
class GetIP(object):
def __init__(self, crawler):
self.test_ip = crawler.settings.get('TEST_IP_HTML', '')
@classmethod
def from_setting(cls, crawler):
return cls(crawler)
def delete_ip(self, ip):
delete_sql ='''
delete from proxy_ip where ip='{}'
'''.format(ip)
cursor.execute(delete_sql)
conn.commit()
return True
def judge_ip(self, ip, port):
# check whether the proxy ip:port is usable
try:
proxies = {'http': '{0}:{1}'.format(ip, port)}
response = requests.get(self.test_ip, proxies=proxies)
except Exception as e:
print('request failed')
self.delete_ip(ip)
return False
else:
code = response.status_code
if code>=200 and code <= 300:
print('valid ip')
return True
else:
print('invalid ip')
return False
def get_random_ip(self):
random_sql_ip ='''
SELECT ip,port FROM proxy_ip
ORDER BY RAND()
LIMIT 1
'''
result = cursor.execute(random_sql_ip)
for ip_info in cursor.fetchall():
ip = ip_info[0]
port = ip_info[1]
judge_result = self.judge_ip(ip,port)
if judge_result:
return 'http://{0}:{1}'.format(ip, port)
else:
return self.get_random_ip()
# in settings.py
TEST_IP_HTML = 'the url used to test whether a proxy works'
# in middlewares.py
from jingdong.tools.random_ip_mysql import GetIP
class RandomIpMiddleware(object):
def process_request(self, request, spider):
get_ip = GetIP.from_setting(spider.crawler)
request.meta['proxy'] = get_ip.get_random_ip()
```
#### File: scrapyTemplates/tools/DataStruct.py
```python
import re
from datetime import datetime, timedelta
from dateparser import parse as d_parse
from dateutil.parser import parse
class EumeOrigin(object):
producers = "producers"
Consumers = "Consumers"
middlemen = "middlemen"
class JudgeField:
NUMTYPE = ["count"]
STRTYPE = ["content"]
NONETYPE = ["task_name"]
TIMETPYE = ["publish_time"]
ENUMTPYE = ["category"]
PHONE = ["phone", "landline"]
class ResultInfo:
task_name = None # task name
content = "" # content
count = "" # count
publish_time = "" # publish time
category = "" # category
phone = "" # mobile phone
landline = "" # landline phone
def __init__(self, spider_name=""):
self._spiderName = spider_name
def __getattribute__(self, item):
if item in JudgeField.NUMTYPE:
return self.deal_int(item) # numeric
elif item in JudgeField.STRTYPE:
return self.deal_str(item, signal=1) # string
elif item in JudgeField.NONETYPE:
return self.deal_str(item, signal=2) # may be empty / None
# elif item in SalesJudgeField.ENUMTPYE:
# return self.deal_enum(item) # enum
return object.__getattribute__(self, item)
def __setattr__(self, key, value):
if key in JudgeField.TIMETPYE:
return self.deal_datetime(key, value)
elif key in JudgeField.PHONE:
return self.deal_phone(key, value)
object.__setattr__(self, key, value)
def deal_int(self, name):
msg = "传入值有误, 此时数据包为: {}, {} 应该为 int ".format(self.__dict__, name)
value = self.__dict__.get(name)
if value is None:
value = 0
_type = type(value)
if _type is int:
return value
elif _type is str:
try:
return int(value)
except ValueError:
pass
else:
raise ValueError(msg)
def deal_str(self, name, signal):
value = self.__dict__.get(name)
if 1 == signal:
if not value:
return ""
elif 2 == signal:
if not value:
return None
try:
result = re.sub(r"\s", "", value)
result = result.replace(" ", "")
head_tail_removed_ch = [";", ":", " ", ":", ";", " :"]
for ch in head_tail_removed_ch:
result = result.strip(ch)
return result
except TypeError:
print("ERROR TypeError: commons.SalesConstants.deal_str() 参数 {name}: {value} ".format(
name=name, value=value))
def deal_datetime(self, key, value):
# pre-processing
if int is type(value) and (1900 < value < 2100 or value > 1000000000):
value = str(value)
if str is type(value) and value:
if re.search(".*?([\u4E00-\u9FA5]+).*?", value):
value = d_parse(value)
else:
value = parse(value)
if value > datetime.now():
value = value.replace(year=datetime.now().year - 1)
value = value - timedelta(hours=8)
# validate the parsed value
if type(value) is datetime:
self.__dict__[key] = value
else:
print(f"{key} 无法parse成 datetime 对象, 来源: {self.spiderName}")
return ""
def deal_phone(self, key, value):
phone_three = [
'130', '131', '132', '133', '134', '135', '136', '137',
'138', '139', '145', '146', '147', '148', '149', '150',
'151', '152', '153', '154', '155', '156', '157', '158',
'159', '162', '165', '166', '167', '170', '170', '170',
'171', '172', '173', '175', '176', '177', '178', '180',
'181', '182', '183', '184', '185', '186', '187', '188',
'189', '190', '191', '192', '193', '195', '196', '197',
'198', '199']
_type = type(value)
# if not hasattr(self, key):
# self.__dict__[key] = []
if _type == list:
value = [each for each in value if each]
self.__dict__[key] = value
elif _type == str:
if not value:
return
if len(value) < 5:
return
# clean up the raw value
if re.search(r'\D\d*\D', value):
if key == "phone":
_v = re.sub(r"\D", "", value)
if len(_v) != 11: # give up unless the digit-only string is exactly 11 characters long
return
_v = re.findall(r'\d{11}', _v)
_v = _v[0] if _v else ""
elif key == "landline":
_v = re.findall(r'\d+.\d+.\d+|86.{0,1}\d+.\d+|\d+.\d+', value, re.S)
if _v:
_v = _v[-1]
if r"\n" in _v:
_v = _v.replace("\n", "-")
else:
_v = ""
# key is neither phone nor landline
else:
_v = value
# no cleaning needed
else:
_v = value
# nothing left to store
if not _v:
return
_v1 = re.sub(r"\s|-", "", _v)
# de-duplicate
for each in JudgeField.PHONE:
is_exist = self.__dict__.get(each, [])
if not is_exist:
self.__dict__[each] = []
if _v in is_exist or _v1 in is_exist:
return
if len(_v1) < 5 or len(_v) < 5:
return
# store the value
# decide whether it looks like a mobile number
if _v1[:3] in phone_three:
self.__dict__["phone"].append(_v1)
else:
self.__dict__["landline"].append(_v)
def get_dict(self):
pr = {}
for name in dir(self):
value = getattr(self, name)
if not name.startswith('__') and not callable(value) and not name.startswith('_'):
if value is None or isinstance(value, EumeOrigin):
msg = "{} 的值为空, 来源于: {}".format(name, getattr(self, "source"))
raise ValueError(msg)
pr[name] = value
return pr
```
#### File: sql_and_nosql/sql_async/mysql_aio_sa.py
```python
import asyncio
import sqlalchemy as sa
from aiomysql.sa import create_engine
async def go(loop):
engine = await create_engine(user='root', db='test_pymysql',
host='127.0.0.1', password='<PASSWORD>', loop=loop
# , autocommit=True
)
async with engine.acquire() as conn:
await conn.execute(user.insert().values(val=40))
await conn.execute(user.insert().values(val=50))
row = await conn.execute(user.select().where(user.columns.val > 20))
a = await row.fetchall()
# async for i in row.fetchall():
# print(i)
for i in a:
print(i)
# print(type(a))
# print(a[0].val)
# print(dict(a[0]))
engine.close()
await engine.wait_closed()
if __name__ == '__main__':
metadata = sa.MetaData()
user = sa.Table('user', metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('val', sa.String(255)))
loop = asyncio.get_event_loop()
loop.run_until_complete(go(loop))
```
#### File: sql_and_nosql/sql_sqlalchemy/mysqldb.py
```python
import datetime
from dateutil.parser import parse
from random import randint
from sqlalchemy import MetaData, Table, select, and_
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
MYSQL_URL = ""
# TODO: refactor later so that a parent class initializes MYSQL_URL
class MysqlClient(object):
def __init__(self):
# find all subclasses of Base and create the corresponding tables in the database according to their structure
self.engine = create_engine(MYSQL_URL, encoding='utf-8', pool_size=100)
# self.connection = self.engine.connect()
def conn_table(self, table_name):
"""
Connect to a table via the core (Table/MetaData) API.
:param table_name:
:return:
"""
metadata = MetaData(self.engine)
census = Table(table_name, metadata, autoload=True, autoload_with=self.engine)
connection = self.engine.connect()
return census, connection
def creat_session(self):
"""
Connect via the ORM session API.
:return:
"""
# create the session factory
DBSession = sessionmaker(bind=self.engine)
# create a session instance
session = DBSession()
return session
def data_return(self, data):
"""
Convert mysql result objects into plain lists.
:param data: mysql result row(s)
:return: list[list],str
"""
if isinstance(data, list):
for index, value in enumerate(data):
data[index] = list(value)
return data
else:
return list(data)
def count(self, table_name):
"""
Total number of rows.
:param table_name: table name
:return: int
"""
census, connection = self.conn_table(table_name)
stmt = select([census])
count = len(connection.execute(stmt).fetchall())
connection.close()
return count
def index(self, className, session, indexParams):
obtain = session.query(className).filter(and_(
*indexParams
))
return obtain
def updata(self, className, indexParams, pack_dict=None):
session = self.creat_session()
obtain = self.index(className, session, indexParams)
if pack_dict:
result = obtain.update(pack_dict)
session.commit()
else:
result = 0
session.close()
return result
def r_timestamp(self, table_name, timer=0):
"""
Read mysql_seatpost tasks.
:param table_name: table name
:param timer: timestamp threshold
:return: list[list],str
"""
task_result = []
census, connection = self.conn_table(table_name)
stmt = select([census])
stmt = stmt.where(census.columns.ts > timer)
data_list = connection.execute(stmt).fetchall()
# keep only departure dates later than now
for data in data_list:
start_time = parse(data[3])
now_time = datetime.datetime.now()
if now_time < start_time:
task_result.append(data)
connection.close()
return self.data_return(task_result)
def r_all(self, table_name):
"""
Return all rows.
table_name: table name
:return: list[list],str
"""
census, connection = self.conn_table(table_name)
stmt = select([census])
results = connection.execute(stmt).fetchall()
connection.close()
return self.data_return(results)
def r_area(self, table_name, area: int):
"""
Randomly fetch a slice of rows within the table size.
:param table_name: table name
:param area: number of rows to fetch
:return: list[list],str
"""
census, connection = self.conn_table(table_name)
stmt = select([census])
lenght = self.count(table_name)
helf_lenght = int(lenght / 2)
if area < helf_lenght:
index = randint(area+1, helf_lenght)
elif area == lenght:
return self.r_all(table_name)
else:
index = randint(area+1, lenght)
stmt = stmt.where(and_(census.columns.id >= index-area, census.columns.id < index))
results = connection.execute(stmt).fetchall()
# print(" 起{} -终{} -得{} -求{}".format(index-area, index, str(len(results)), area))
connection.close()
return self.data_return(results)
def r_absolute_area(self, table_name, area: int):
"""
Fetch exactly `area` rows, duplicating rows when the table has fewer.
:param table_name: table name
:param area: number of rows requested
:return: list[list],str
"""
lenght = self.count(table_name) # total number of rows in the table
residue = area % lenght
# census = self.conn_table(table_name)
# stmt = select([census])
if area <= lenght:
return self.r_area(table_name, area)
elif area > lenght and area < lenght*2:
first_part = self.r_all(table_name)
second_part = first_part[:residue]
return first_part + second_part
else:
copy_count = area // lenght
data1 = []
all_area = self.r_all(table_name)
for _count in range(copy_count):
data1 += all_area
data2 = all_area[:residue]
return data1+data2
def r_choice_list(self, table_name, count: int):
"""
Randomly pick a batch of rows.
:param table_name:
:param count:
:return: list[list],str
"""
lenght = self.count(table_name)
num_list = []
census, connection = self.conn_table(table_name)
for x in range(count):
stmt = select([census])
num = randint(0, lenght)
stmt = stmt.where(census.columns.id == num)
result = connection.execute(stmt).fetchall()
num_list += self.data_return(result)
connection.close()
return num_list
def r_index_ts(self, table_name, num: int):
"""
Select one row by its ts value.
:param table_name: table name
:return:
"""
# lenght = self.count(table_name)
census, connection = self.conn_table(table_name)
stmt = select([census])
stmt = stmt.where(census.columns.ts == num)
result = connection.execute(stmt).fetchall()
connection.close()
return self.data_return(result)
def w_data(self, data):
"""
SeatPost(start=start,...)
:param data:
:return:
"""
session = self.creat_session()
session.add(data)
session.commit()
session.close()
def d_once(self, table_name, base_str, num: int):
connection = self.engine.connect()
meta = MetaData(bind=self.engine)
tb_user = Table(table_name, meta, autoload=True, autoload_with=self.engine)
dlt = tb_user.delete().where(tb_user.columns[base_str] == num)
# execute the delete
result = connection.execute(dlt)
# return the number of deleted rows
connection.close()
return result.rowcount
def d_SeatCount(self, className, indexParams):
session = self.creat_session()
obtain = self.index(className, session, indexParams).delete()
if obtain:
session.commit()
session.close()
else:
print("没有seatCount为0的数据")
def r_seleter_once(self, className, indexParams):
"""
Select rows matching the given filter conditions.
:param className: model class
:param indexParams: filters, e.g. indexParams = [ViewData.start == start, ...]
:return: query result
"""
# lenght = self.count(table_name)
session = self.creat_session()
obtain = self.index(className, session, indexParams).all()
session.close()
return obtain
class SeatPost:
def __init__(self,start, end, company, date):
self.start = start
self.end = end
self.company = company
self.date = date
pass
if __name__ == '__main__':
# [103, 'ZHANG/YIDI ', '男', '中国 ', 'E57013072', datetime.datetime(1992, 10, 2, 0, 0), datetime.datetime(2025, 8, 10, 0, 0), '中国 ']
import time
timer = int(time.time()*1000)
num = 2
m = MysqlClient()
user = SeatPost(start="ctu",end="tyo",company="CZ4081",date="2019-07-19")
pack = {"seatcount":200}
result = m.r_seleter_once(SeatPost,)
print(result)
``` |
{
"source": "714627034/Paddle-Lite",
"score": 2
} |
#### File: demo/python/mobilenetv1_full_api.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from paddlelite.lite import *
import numpy as np
import platform
# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
parser.add_argument("--model_file", default="", type=str, help="Model file")
parser.add_argument(
"--param_file", default="", type=str, help="Combined model param file")
parser.add_argument(
"--input_shape",
default=[1, 3, 224, 224],
nargs='+',
type=int,
required=False,
help="Model input shape, eg: 1 3 224 224. Defalut: 1 3 224 224")
parser.add_argument(
"--backend",
default="",
type=str,
help="To use a particular backend for execution. Should be one of: arm|opencl|x86|x86_opencl|metal|nnadapter"
)
parser.add_argument(
"--image_path", default="", type=str, help="The path of test image file")
parser.add_argument(
"--label_path", default="", type=str, help="The path of label file")
parser.add_argument(
"--print_results",
type=bool,
default=False,
help="Print results. Default: False")
parser.add_argument(
"--nnadapter_device_names",
default="",
type=str,
help="Set nnadapter device names")
parser.add_argument(
"--nnadapter_context_properties",
default="",
type=str,
help="Set nnadapter context properties")
parser.add_argument(
"--nnadapter_model_cache_dir",
default="",
type=str,
help="Set nnadapter model cache dir")
parser.add_argument(
"--nnadapter_subgraph_partition_config_path",
default="",
type=str,
help="Set nnadapter subgraph partition config path")
parser.add_argument(
"--nnadapter_mixed_precision_quantization_config_path",
default="",
type=str,
help="Set nnadapter mixed precision quantization config path")
def RunModel(args):
# 1. Set config information
config = CxxConfig()
if args.model_file != '' and args.param_file != '':
config.set_model_file(args.model_file)
config.set_param_file(args.param_file)
else:
config.set_model_dir(args.model_dir)
if platform.machine() in ["x86_64", "x64", "AMD64"]:
platform_place = Place(TargetType.X86, PrecisionType.FP32)
else:
platform_place = Place(TargetType.ARM, PrecisionType.FP32)
if args.backend.upper() in ["ARM"]:
places = [Place(TargetType.ARM, PrecisionType.FP32)]
elif args.backend.upper() in ["X86"]:
places = [Place(TargetType.X86, PrecisionType.FP32)]
elif args.backend.upper() in ["OPENCL", "X86_OPENCL"]:
places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
platform_place, Place(TargetType.Host, PrecisionType.FP32)
]
'''
Set opencl kernel binary.
A large additional prepare time is spent on selecting algorithms and
building kernels from source code.
Prepare time can be reduced dramatically once the algorithm file and
the OpenCL kernel binary have been built on the first run.
The first run will take a bit longer because of the compiling time if
you don't call `set_opencl_binary_path_name` explicitly,
so calling `set_opencl_binary_path_name` explicitly is strongly
recommended.
Make sure you have write permission on the binary path.
We strongly recommend that each model has a unique binary name.
'''
bin_path = "./"
bin_name = "lite_opencl_kernel.bin"
config.set_opencl_binary_path_name(bin_path, bin_name)
'''
opencl tune option:
CL_TUNE_NONE
CL_TUNE_RAPID
CL_TUNE_NORMAL
CL_TUNE_EXHAUSTIVE
'''
tuned_path = "./"
tuned_name = "lite_opencl_tuned.bin"
config.set_opencl_tune(CLTuneMode.CL_TUNE_NORMAL, tuned_path,
tuned_name, 4)
'''
opencl precision option:
CL_PRECISION_AUTO, first fp16 if valid, default
CL_PRECISION_FP32, force fp32
CL_PRECISION_FP16, force fp16
'''
config.set_opencl_precision(CLPrecisionType.CL_PRECISION_AUTO)
elif args.backend.upper() in ["METAL"]:
# set metallib path
import paddlelite, os
module_path = os.path.dirname(paddlelite.__file__)
config.set_metal_lib_path(module_path + "/libs/lite.metallib")
config.set_metal_use_mps(True)
# set places for Metal
places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
elif args.backend.upper() in ["NNADAPTER"]:
places = [
Place(TargetType.NNAdapter, PrecisionType.FP32), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
if args.nnadapter_device_names == "":
print(
"Please set nnadapter_device_names when backend = nnadapter!")
return
config.set_nnadapter_device_names(
args.nnadapter_device_names.split(","))
config.set_nnadapter_context_properties(
args.nnadapter_context_properties)
config.set_nnadapter_model_cache_dir(args.nnadapter_model_cache_dir)
config.set_nnadapter_subgraph_partition_config_path(
args.nnadapter_subgraph_partition_config_path)
config.set_nnadapter_mixed_precision_quantization_config_path(
args.nnadapter_mixed_precision_quantization_config_path)
else:
raise ValueError("Unsupported backend: %s." % args.backend)
config.set_valid_places(places)
# 2. Create paddle predictor
predictor = create_paddle_predictor(config)
optimized_model_dir = "opt_" + args.backend
predictor.save_optimized_model(optimized_model_dir)
# 3. Set input data
input_tensor = predictor.get_input(0)
c, h, w = args.input_shape[1], args.input_shape[2], args.input_shape[3]
read_image = len(args.image_path) != 0 and len(args.label_path) != 0
if read_image == True:
import cv2
with open(args.label_path, "r") as f:
label_list = f.readlines()
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
image_data = cv2.imread(args.image_path)
image_data = cv2.resize(image_data, (w, h))  # cv2.resize expects (width, height)
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = image_data.transpose((2, 0, 1)) / 255.0
image_data = (image_data - np.array(image_mean).reshape(
(3, 1, 1))) / np.array(image_std).reshape((3, 1, 1))
image_data = image_data.reshape([1, c, h, w]).astype('float32')
input_tensor.from_numpy(image_data)
else:
input_tensor.from_numpy(np.ones((1, c, h, w)).astype("float32"))
# 4. Run model
predictor.run()
# 5. Get output data
output_tensor = predictor.get_output(0)
output_data = output_tensor.numpy()
if args.print_results == True:
print("result data:\n{}".format(output_data))
print("mean:{:.6e}, std:{:.6e}, min:{:.6e}, max:{:.6e}".format(
np.mean(output_data),
np.std(output_data), np.min(output_data), np.max(output_data)))
# 6. Post-process
if read_image == True:
output_data = output_data.flatten()
class_id = np.argmax(output_data)
class_name = label_list[class_id]
score = output_data[class_id]
print("class_name: {} score: {}".format(class_name, score))
if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
```
#### File: unittest_py/op/test_sequence_pad_op.py
```python
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
from functools import partial
import numpy as np
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
class TestSequencePadOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
def generate_input(*args, **kwargs):
if kwargs["type"] == "int32":
return np.random.randint(kwargs["low"], kwargs["high"],
kwargs["shape"]).astype(np.int32)
elif kwargs["type"] == "int64":
return np.random.randint(kwargs["low"], kwargs["high"],
kwargs["shape"]).astype(np.int64)
elif kwargs["type"] == "float32":
return (kwargs["high"] - kwargs["low"]) * np.random.random(
kwargs["shape"]).astype(np.float32) + kwargs["low"]
out_dtype_dict = {
"int32": np.int32,
"int64": np.int64,
"float32": np.float32
}
input_type = draw(st.sampled_from(["int32", "int64", "float32"]))
x_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=10), min_size=2, max_size=7))
x_len_lod = generate_input(
type="int64", low=0, high=10, shape=[1, len(x_shape)])
x_len_lod = np.sort(x_len_lod)
x_len_lod[-1] = x_shape[0]
padded_length = len(x_shape)
pad_value_shape = [1]
# assume
time_step_shape = x_shape[1:]
assume(len(x_shape) >= 2)
assume(len(pad_value_shape) == 1 or pad_value_shape == time_step_shape)
assume(len(np.array(x_len_lod).shape) >= 2)
# assume
seq_num = len(x_len_lod[0]) - 1
max_seq_len = 0
for i in range(0, seq_num):
max_seq_len = max(max_seq_len,
x_len_lod[0][i + 1] - x_len_lod[0][i])
real_padded_length = padded_length
if real_padded_length == -1:
real_padded_length = max_seq_len
assume(real_padded_length >= max_seq_len)
sequence_pad_op = OpConfig(
type="sequence_pad",
inputs={"X": ["x_data"],
"PadValue": ["pad_value"]},
outputs={"Out": ["output_data"],
"Length": ["length_data"]},
attrs={"padded_length": padded_length},
outputs_dtype={
"output_data": out_dtype_dict[input_type],
"length_data": np.int64
})
program_config = ProgramConfig(
ops=[sequence_pad_op],
weights={},
inputs={
"x_data": TensorConfig(
data_gen=partial(
generate_input,
type=input_type,
low=-10,
high=10,
shape=x_shape),
lod=x_len_lod),
"pad_value": TensorConfig(data_gen=partial(
generate_input,
type=input_type,
low=0,
high=10,
shape=pad_value_shape))
},
outputs=["output_data", "length_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["sequence_expand_pad"], (1e-5,
1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
```
#### File: unittest_py/pass/test_conv_elementwise_fuser_pass.py
```python
import sys
sys.path.append('..')
sys.path.append('.')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize, ConvTransposeOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestConvElementwiseFuse(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.X86, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
#conv or conv_transpose
Transpose = draw(st.sampled_from([True, False]))
#conv param or conv_transpose param
in_shape = draw(
st.lists(
st.integers(
min_value=3, max_value=128),
min_size=3,
max_size=3))
in_shape = [draw(st.integers(min_value=1, max_value=4))] + in_shape
weight_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=4, max_size=4))
paddings = draw(
st.lists(
st.integers(
min_value=0, max_value=2), min_size=2, max_size=2))
dilations = draw(st.sampled_from([[2, 2]]))
groups = draw(st.sampled_from([1, 2, in_shape[1]]))
padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
strides = draw(st.sampled_from([[1, 1], [2, 2]]))
output_padding = draw(
st.sampled_from([[], [
draw(
st.integers(
min_value=0,
max_value=max(strides[0], dilations[0]) - 1)), draw(
st.integers(
min_value=0,
max_value=max(strides[1], dilations[1]) - 1))
]]))
scale_in = draw(st.floats(min_value=0.001, max_value=0.1))
scale_out = draw(st.floats(min_value=0.001, max_value=0.1))
if Transpose:
bias_sample_shape = weight_shape[1] * groups
else:
bias_sample_shape = weight_shape[0]
elementwise_bias_shape = [bias_sample_shape]
conv_out_shape = []
paddings_, dilations_ = UpdatePaddingAndDilation(
in_shape, weight_shape, paddings, dilations, groups,
padding_algorithm, strides)
if Transpose:
assume(in_shape[1] == weight_shape[0])
assume(in_shape[1] % groups == 0) #TODO
if len(output_padding):
assume(output_padding[0] < max(strides[0], dilations_[0]))
assume(output_padding[1] < max(strides[1], dilations_[1]))
conv_out_shape = [in_shape[0], weight_shape[1] * groups]
oh, ow = ConvTransposeOutputSize(in_shape, weight_shape,
dilations_, paddings_, strides)
if len(output_padding):
oh = oh + output_padding[0]
ow = ow + output_padding[1]
conv_out_shape = conv_out_shape + [int(oh), int(ow)]
assume(oh > 0 and ow > 0)
if len(output_padding):
conv_output_h = (oh + output_padding[0] + paddings[0] +
paddings[1] -
(dilations[0] *
(weight_shape[2] - 1) + 1)) / strides[0] + 1
conv_output_w = (oh + output_padding[1] + paddings[0] +
paddings[1] -
(dilations[1] *
(weight_shape[3] - 1) + 1)) / strides[1] + 1
assume(in_shape[2] == (int)(conv_output_h))
assume(in_shape[3] == (int)(conv_output_w))
else:
assume(in_shape[1] == weight_shape[1] * groups)
assume(weight_shape[0] % groups == 0)
conv_out_shape = [in_shape[0], weight_shape[0]]
oh, ow = ConvOutputSize(in_shape, weight_shape, dilations_,
paddings_, strides)
conv_out_shape = conv_out_shape + [int(oh), int(ow)]
assume(oh > 0 and ow > 0)
conv_type = ""
conv_attrs = {}
if Transpose:
conv_type = "conv2d_transpose"
conv_attrs = {
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"Scale_in": scale_in,
"Scale_out": scale_out,
"output_size": [],
"output_padding": output_padding
}
else:
conv_type = "conv2d"
conv_attrs = {
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"Scale_in": scale_in,
"Scale_out": scale_out
}
conv_op = OpConfig(
type=conv_type,
inputs={"Input": ["input_data"],
"Filter": ["filter_data"]},
outputs={"Output": ["conv_output_data"]},
attrs=conv_attrs)
elementwise_add_op = OpConfig(
type="elementwise_add",
inputs={"X": ["conv_output_data"],
"Y": ["add_bias_data"]},
outputs={"Out": ["output_data"]},
attrs={"axis": 1})
ops = [conv_op, elementwise_add_op]
self.ops = ops
program_config = ProgramConfig(
ops=ops,
weights={
"filter_data": TensorConfig(shape=weight_shape),
"add_bias_data": TensorConfig(shape=elementwise_bias_shape)
},
inputs={"input_data": TensorConfig(shape=in_shape)},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
config = CxxConfig()
return self.get_predictor_configs(), [self.ops[0].type], (1e-4, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(
quant=False,
max_examples=500,
passes=["lite_conv_elementwise_fuser_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
```
#### File: unittest_py/pass/test_elementwise_activation_fuse_pass.py
```python
import sys
sys.path.append('..')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
from test_elementwise_util import trim_trailing_singular_dims, check_input_shape_available
import hypothesis.strategies as st
class TestElementwiseActivationFuse(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.X86, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
if predictor_config.target() == TargetType.OpenCL:
input_shape_x = list(program_config.inputs["input_data_x"].shape)
input_shape_y = list(program_config.inputs["input_data_y"].shape)
if len(input_shape_x) > 4 or len(input_shape_y) > 4:
return False
return True
def sample_program_configs(self, draw):
elementwise_type = draw(
st.sampled_from(
["elementwise_add", "elementwise_sub", "elementwise_mul"]))
in_shape_x = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=2, max_size=5))
in_shape_y = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=2, max_size=5))
axis = draw(
st.integers(
min_value=-1, max_value=max(len(in_shape_x), len(in_shape_y))))
assume(
check_input_shape_available(
in_shape_x=in_shape_x, in_shape_y=in_shape_y, axis=axis) ==
True)
elementwise_op = OpConfig(
type=elementwise_type,
inputs={"X": ["input_data_x"],
"Y": ["input_data_y"]},
outputs={"Out": ["elementwise_output_data"]},
attrs={"data_format": 'nchw',
"axis": axis})
target_str = self.get_target()
act_list = ['relu']
if target_str == "OpenCL":
act_list.append("relu6")
act_list.append("gelu")
act_type = draw(st.sampled_from(act_list))
def generate_act_attrs(act_type_str):
attrs = {}
if act_type_str == 'relu':
attrs = {}
return attrs
active_op = OpConfig(
type=act_type,
inputs={"X": ["elementwise_output_data"]},
outputs={"Out": ["output_data"]},
attrs=generate_act_attrs(act_type))
ops = [elementwise_op, active_op]
self.ops = ops
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"input_data_x": TensorConfig(shape=in_shape_x),
"input_data_y": TensorConfig(shape=in_shape_y)
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
config = CxxConfig()
return self.get_predictor_configs(
), ["fusion_" + self.ops[0].type + "_activation"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(
quant=False,
max_examples=300,
passes=["lite_elementwise_activation_fuse_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
```
#### File: unittest_py/pass/test_squeeze2_matmul_fuse_pass.py
```python
import sys
sys.path.append('..')
from auto_scan_test import FusePassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestSqueeze2MatmulFusePass(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
#x86
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
#Metal
metal_places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.ARM, PrecisionType.FP32),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=metal_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
target_type = predictor_config.target()
in_shape = list(program_config.inputs["squeeze2_input_x"].shape)
if target_type in [TargetType.Metal]:
if in_shape[1] != 1:
return False
return True
def sample_program_configs(self, draw):
alpha = draw(st.floats(min_value=1, max_value=1)) #required in pass
x_num_col_dims = draw(st.floats(min_value=0, max_value=1))
y_num_col_dims = draw(st.floats(min_value=0, max_value=1))
int32_values_1 = draw(st.integers(min_value=1, max_value=40))
int32_values_2 = draw(st.integers(min_value=1, max_value=40))
int32_values_3 = draw(st.integers(min_value=1, max_value=40))
squeeze2_input_shape = [int32_values_1, int32_values_2, 1, 1]
matmul_input_shape = [squeeze2_input_shape[1], int32_values_3]
scale_x = draw(st.sampled_from([0.1, 1.1]))
scale_y = draw(st.sampled_from([0.1, 1.1]))
scale_out = draw(st.sampled_from([0.1, 1.1]))
force_fp32_output = draw(st.booleans())
squeeze2_op = OpConfig(
type="squeeze2",
inputs={"X": ["squeeze2_input_x"]},
outputs={
"Out": ["squeeze2_output"],
"XShape": ["squeeze2_output_XShape"]
},
attrs={
"axes": [2, 3] #required in pass
})
matmul_op = OpConfig(
type="matmul",
inputs={"X": ["squeeze2_output"],
"Y": ["matmul_input"]},
outputs={"Out": ["output_data"]},
attrs={
"transpose_X": False, #required in pass
"transpose_Y": False, #required in pass
"x_num_col_dims": x_num_col_dims,
"y_num_col_dims": y_num_col_dims,
"Scale_x": scale_x,
"Scale_y": scale_y,
"Scale_out": scale_out,
"force_fp32_output": force_fp32_output,
"alpha": alpha,
"fused_reshape_X": [],
"fused_transpose_X": [],
"fused_reshape_Y": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": [],
"head_number": int(1)
})
ops = [squeeze2_op, matmul_op]
program_config = ProgramConfig(
ops=ops,
weights={},
inputs={
"squeeze2_input_x": TensorConfig(shape=squeeze2_input_shape),
"matmul_input": TensorConfig(shape=matmul_input_shape)
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
atol, rtol = 1e-5, 1e-5
config_lists = self.get_predictor_configs()
for config in config_lists:
if config.target() in [TargetType.Metal]:
atol, rtol = 1e-2, 1e-2
return self.get_predictor_configs(), ["mul"], (atol, rtol)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
target_str = self.get_target()
max_examples = 25
if target_str in ["Metal"]:
# Make sure to generate enough valid cases for specific targets
max_examples = 500
self.run_and_statis(
quant=False,
max_examples=max_examples,
passes=["lite_squeeze2_matmul_fuse_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
```
#### File: tools/cmake_tools/ast.py
```python
import logging
import sys
class SyntaxParser(object):
def __init__(self, str):
self.str = str
self.cur_pos = 0
self.N = len(self.str)
self.token = ''
def eat_char(self):
self.cur_pos += 1
def eat_str(self):
'''
"xx"
'''
self.token = ''
assert self.cur == '"'
self.cur_pos += 1
assert self.cur_pos < self.N
while self.cur != '"':
self.token += self.cur
self.cur_pos += 1
assert self.cur_pos < self.N
assert self.cur == '"'
self.cur_pos += 1
#logging.warning('get: %s' % self.token)
def eat_word(self):
self.token = ''
str = ''
while self.cur.isalnum() or self.cur in (
'_',
':', ):
self.token += self.cur
self.forward()
#logging.warning('get: %s' % self.token)
def eat_left_parentheses(self):
'''
(
'''
self.assert_is('(')
self.token = '('
self.forward()
#logging.warning('get: %s' % self.token)
def eat_right_parentheses(self):
'''
)
'''
self.assert_is(')')
self.token = ')'
self.forward()
#logging.warning('get: %s' % self.token)
def eat_left_brace(self):
'''
{
'''
self.assert_is('{')
self.token = '{'
self.forward()
#logging.warning('get: %s' % self.token)
def eat_right_brace(self):
'''
}
'''
self.assert_is('}')
self.token = '}'
self.forward()
#logging.warning('get: %s' % self.token)
def eat_comma(self):
'''
,
'''
self.assert_is(',')
self.token = ','
self.forward()
#logging.warning('get: %s' % self.token)
def eat_spaces(self):
'''
eat space like string.
'''
while self.cur_pos < len(self.str):
if self.cur in (' ', '\t', '\n'):
self.forward()
else:
break
def eat_point(self):
'''
.
'''
self.assert_is('.')
self.token = '.'
self.forward()
#logging.warning('get: %s' % self.token)
def eat_any_but_brace(self):
'''
anything but {}
'''
start = self.cur_pos
while self.cur not in ('{', '}'):
self.cur_pos += 1
self.token = self.str[start:self.cur_pos]
#logging.warning('get: %s' % self.token)
def eat_semicolon(self):
'''
;
'''
self.assert_is(';')
self.token = ';'
self.forward()
#logging.warning('get: %s' % self.token)
def assert_is(self, w):
assert self.cur == w, "token should be %s, but get %s" % (w, self.cur)
@property
def cur(self):
assert self.cur_pos < self.N
return self.str[self.cur_pos]
#logging.warning('get: %s' % self.token)
def forward(self):
self.cur_pos += 1
class IO:
def __init__(self):
self.name = ''
self.type = ''
def __repr__(self):
return "- %s: %s" % (self.name, self.type)
class KernelRegistry:
def __init__(self):
self.op_type = ''
self.target = ''
self.precision = ''
self.data_layout = ''
self.class_ = ''
self.alias = ''
self.inputs = []
self.outputs = []
self.op_versions = []
def __repr__(self):
str = "Kernel({op_type}, {target}, {precision}, {data_layout}, {alias}):".format(
op_type=self.op_type,
target=self.target,
precision=self.precision,
data_layout=self.data_layout,
alias=self.alias, )
str += '\n' + '\n'.join(repr(io) for io in self.inputs)
str += '\n' + '\n'.join(repr(io) for io in self.outputs)
str += '\n'
return str
class SubgraphBridgeRegistry:
def __init__(self):
self.op_type = ''
self.target = ''
class RegisterLiteKernelParser(SyntaxParser):
KEYWORD = 'REGISTER_LITE_KERNEL'
def __init__(self, str):
super(RegisterLiteKernelParser, self).__init__(str)
self.kernels = []
def parse(self, with_extra, enable_arm_fp16):
find_registry_command = False
extra_command = []
arm_fp16_command = []
# Get the code location of extra kernels registry
# extra kernels registries are surrounded by
# "#ifdef LITE_BUILD_EXTRA" and "#endif // LITE_BUILD_EXTRA"
tmp_pos = self.cur_pos
while tmp_pos < len(self.str):
start = self.str.find("#ifdef LITE_BUILD_EXTRA", tmp_pos)
if start != -1:
tmp_pos = start
end = self.str.find("#endif // LITE_BUILD_EXTRA", tmp_pos)
if end != -1:
extra_command += list(range(start, end + 1))
tmp_pos = end + len("#endif // LITE_BUILD_EXTRA") - 1
else:
break
else:
break
# Get the code location of arm_fp16 kernels registry
# arm_fp16 kernels registries are surrounded by
# "#ifdef ENABLE_ARM_FP16" and "#endif"
tmp_pos = self.cur_pos
while tmp_pos < len(self.str):
start = self.str.find("#ifdef ENABLE_ARM_FP16", tmp_pos)
if start != -1:
tmp_pos = start
end = self.str.find("#endif // ENABLE_ARM_FP16", tmp_pos)
if end != -1:
arm_fp16_command += list(range(start, end + 1))
tmp_pos = end + len("#endif // ENABLE_ARM_FP16") - 1
else:
break
else:
break
self.cur_pos = 0
while self.cur_pos < len(self.str):
start = self.str.find(self.KEYWORD, self.cur_pos)
if start != -1:
#print 'str ', start, self.str[start-2: start]
if start != 0 and '/' in self.str[start - 2:start]:
'''
skip commented code
'''
self.cur_pos = start + 1
continue
# if with_extra == "OFF", extra kernels will not be parsed
if with_extra.upper() != "ON" and start in extra_command:
self.cur_pos = start + len(self.KEYWORD) - 1
continue
# if enable_arm_fp16 == "OFF", arm_fp16 kernels will not be parsed
if enable_arm_fp16.upper(
) != "ON" and start in arm_fp16_command:
self.cur_pos = start + len(self.KEYWORD) - 1
continue
self.cur_pos = start
k = KernelRegistry()
self.kernels.append(self.parse_register(k))
else:
break
def pick_kernel_class(self, op_name, device_target, data_type, layout_type,
alias_name, first_flag, file_path):
"""pick the actual used kernel on the basis of kernel attribute information.
self.str() stores the original source content. Kernel attributes include op_name,
device_target, data_type, layout_type and alias_name and these five attributes is
unique with regard to each kernel. We first divide the whole code into two sections,
one is the kernel class definition code and the other one is kernel register code
indentified by `REGISTER_LITE_KERNEL` and only class name alias indentified by
`using` or `typedef` keyword is allowed between them. We subtract the kernel class
definition code and class name alias code when first_flag is `True` and register
code is obtained whenever first_flag is `True` or `False`.
Args:
op_name: the 1st attribute of the kernel, such as `conv2d`.
device_target: the 2nd attribute of the kernel, such as `kARM`.
data_type: the 3rd attribute of the kernel, such as `kFloat`.
layout_type: the 4th attribute of the kernel, such as `kNCHW`.
alias_name: the 5th attribute of the kernel, such as `def`.
first_flag: whether this is the first time this kind of kernel is picked.
file_path: the path to store the tailored kernel result.
Returns:
nothing is returned; the `res_str` is written into file_path.
"""
f = open(file_path, 'a+')
dst = f.read()
res_str = ""
main_idx = self.str.find("} // namespace paddle", 0)
if main_idx != -1:
main_idx += len("} // namespace paddle")
else:
main_idx = self.str.find("} /* namespace paddle */", 0)
if main_idx != -1:
main_idx += len("} /* namespace paddle */")
else:
sys.exit(-1)
if first_flag == "True":
res_str += self.str[:main_idx] + "\n"
self.cur_pos = main_idx + 1
while self.cur_pos < len(self.str):
start = self.str.find("typedef", self.cur_pos)
if start != -1:
end = self.str.find(";", start)
if end != -1:
res_str += self.str[start:end + len(";")] + "\n"
self.cur_pos = end + len(";")
else:
break
else:
break
self.cur_pos = main_idx + 1
while self.cur_pos < len(self.str):
start = self.str.find("using", self.cur_pos)
if start != -1:
end = self.str.find(";", start)
if end != -1:
res_str += self.str[start:end + len(";")] + "\n"
self.cur_pos = end + len(";")
else:
break
else:
break
self.cur_pos = main_idx + 1
while self.cur_pos < len(self.str):
start = self.str.find(self.KEYWORD, self.cur_pos)
if start != -1:
end = self.str.find(".Finalize();", self.cur_pos)
if end != -1:
end += len(".Finalize();")
else:
break
left_brace = self.str.find("(", start)
pos = left_brace + 1
brace_num = 1
while True:
if self.str[pos] == ')':
brace_num -= 1
elif self.str[pos] == '(':
brace_num += 1
if brace_num == 0:
break
pos += 1
right_brace = pos
kernel_attr = self.str[left_brace + 1:right_brace].replace(
'\n', '').replace(' ', '').split(",")
if len(kernel_attr) != 6:
sys.exit(1)
op_name_ = kernel_attr[0]
device_target_ = kernel_attr[1]
data_type_ = kernel_attr[2]
layout_type_ = kernel_attr[3]
alias_name_ = kernel_attr[5]
if ((op_name_ == op_name) and
(device_target_ == device_target) and
(data_type_ == data_type) and
(layout_type_ == layout_type) and
(alias_name_ == alias_name)):
res_str += self.str[start:end] + "\n\n"
self.cur_pos = end + 1
else:
break
f.write(res_str)
f.close()
def eat_class(self):
start = self.cur_pos
self.eat_word()
stack = ''
if self.cur == '<':
stack = stack + '<'
self.forward()
while stack:
if self.cur == '<':
stack = stack + '<'
elif self.cur == '>':
stack = stack[1:]
else:
pass
self.forward()
self.token = self.str[start:self.cur_pos]
def parse_register(self, k):
self.eat_word()
assert self.token == self.KEYWORD
self.eat_spaces()
self.eat_left_parentheses()
self.eat_spaces()
self.eat_word()
k.op_type = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
k.target = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
k.precision = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
k.data_layout = self.token
self.eat_comma()
self.eat_spaces()
self.eat_class()
k.class_ = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
k.alias = self.token
self.eat_spaces()
self.eat_right_parentheses()
self.eat_spaces()
def eat_io(is_input, io):
self.eat_left_parentheses()
self.eat_str()
io.name = self.token
self.eat_comma()
self.eat_spaces()
self.eat_left_brace()
self.eat_any_but_brace()
io.type = self.token
self.eat_right_brace()
self.eat_spaces()
self.eat_right_parentheses()
self.eat_spaces()
def eat_op_version(io):
self.eat_left_parentheses()
self.eat_str()
io.name = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
io.version = self.token
self.eat_right_parentheses()
self.eat_spaces()
# eat input and output
while self.cur_pos < len(self.str):
self.eat_point()
self.eat_spaces()
self.eat_word()
assert self.token in ('BindInput', 'BindOutput', 'SetVersion',
'BindPaddleOpVersion', 'Finalize')
io = IO()
if self.token == 'BindInput':
eat_io(True, io)
k.inputs.append(io)
elif self.token == 'BindOutput':
eat_io(False, io)
k.outputs.append(io)
elif self.token == 'SetVersion':
self.eat_left_parentheses()
self.eat_str()
self.version = self.token
self.eat_right_parentheses()
self.eat_spaces()
# skip `BindPaddleOpVersion` command during parsing kernel registry
elif self.token == 'BindPaddleOpVersion':
# eg BindPaddleOpVersion("fill_constant", 1)
eat_op_version(io)
k.op_versions.append(io)
else:
self.eat_left_parentheses()
self.eat_right_parentheses()
self.eat_semicolon()
self.eat_spaces()
return k
break
class RegisterLiteOpParser(SyntaxParser):
KEYWORD = 'REGISTER_LITE_OP'
def __init__(self, str):
super(RegisterLiteOpParser, self).__init__(str)
self.ops = []
def parse(self, with_extra):
extra_command = []
while self.cur_pos < len(self.str):
start = self.str.find("#ifdef LITE_BUILD_EXTRA", self.cur_pos)
if start != -1:
self.cur_pos = start
end = self.str.find("#endif // LITE_BUILD_EXTRA",
self.cur_pos)
if end != -1:
extra_command += list(range(start, end + 1))
self.cur_pos = end + len("#endif // LITE_BUILD_EXTRA") - 1
else:
break
else:
break
self.cur_pos = 0
while self.cur_pos < len(self.str):
start = self.str.find(self.KEYWORD, self.cur_pos)
if start != -1:
#print 'str ', start, self.str[start-2: start]
if start != 0 and '/' in self.str[start - 2:start]:
'''
skip commented code
'''
self.cur_pos = start + 1
continue
# if with_extra == "OFF", extra kernels will not be parsed
if with_extra != "ON" and start in extra_command:
self.cur_pos = start + len(self.KEYWORD) - 1
continue
self.cur_pos = start
self.ops.append(self.__parse_register())
else:
break
return self.ops
def __parse_register(self):
self.eat_word()
assert self.token == self.KEYWORD
self.eat_spaces()
self.eat_left_parentheses()
self.eat_spaces()
self.eat_word()
return self.token
class RegisterSubgraphBridgeParser(SyntaxParser):
KEYWORD = 'REGISTER_SUBGRAPH_BRIDGE'
def __init__(self, str):
super(RegisterSubgraphBridgeParser, self).__init__(str)
self.subgraph_bridge = []
def parse(self):
self.cur_pos = 0
while self.cur_pos < len(self.str):
start = self.str.find(self.KEYWORD, self.cur_pos)
if start != -1:
#print 'str ', start, self.str[start-2: start]
if start != 0 and '/' in self.str[start - 2:start]:
'''
skip commented code
'''
self.cur_pos = start + 1
continue
self.cur_pos = start
k = SubgraphBridgeRegistry()
self.subgraph_bridge.append(self.parse_register(k))
else:
break
def parse_register(self, k):
self.eat_word()
assert self.token == self.KEYWORD
self.eat_spaces()
self.eat_left_parentheses()
self.eat_spaces()
self.eat_word()
k.op_type = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
k.target = self.token
self.eat_comma()
self.eat_spaces()
return k
class RegisterNNadapterBridgeParser(SyntaxParser):
KEYWORD = 'USE_SUBGRAPH_BRIDGE'
def __init__(self, str):
super(RegisterNNadapterBridgeParser, self).__init__(str)
self.subgraph_bridge = []
def parse(self):
self.cur_pos = 0
while self.cur_pos < len(self.str):
start = self.str.find(self.KEYWORD, self.cur_pos)
if start != -1:
#print 'str ', start, self.str[start-2: start]
if start != 0 and '/' in self.str[start - 2:start]:
'''
skip commented code
'''
self.cur_pos = start + 1
continue
self.cur_pos = start
for k in self.parse_register():
self.subgraph_bridge.append(k)
else:
break
def parse_register(self):
ks = list()
self.eat_word()
assert self.token == self.KEYWORD
self.eat_spaces()
self.eat_left_parentheses()
self.eat_spaces()
self.eat_word()
op_type = self.token
self.eat_comma()
self.eat_spaces()
self.eat_word()
self.eat_comma()
self.eat_spaces()
'''
"xx, yy"
'''
self.token = ''
assert self.cur == '"'
self.cur_pos += 1
assert self.cur_pos < self.N
while self.cur != ')':
if (self.cur == ','):
temp = SubgraphBridgeRegistry()
temp.op_type = op_type
temp.target = self.token
ks.append(temp)
self.token = ''
self.cur_pos += 1
else:
if (self.cur != '"' and self.cur != ' ' and self.cur != '\n'):
self.token += self.cur
self.cur_pos += 1
assert self.cur_pos < self.N
assert self.cur == ')'
temp = SubgraphBridgeRegistry()
temp.op_type = op_type
temp.target = self.token
ks.append(temp)
self.eat_right_parentheses()
self.eat_spaces()
self.eat_semicolon()
self.eat_spaces()
return ks
if __name__ == '__main__':
with open('/Paddle-Lite/lite/kernels/arm/conv_compute.cc') as f:
c = f.read()
kernel_parser = RegisterLiteKernelParser(c)
kernel_parser.pick_kernel_class(
"conv2d", "kARM", "kFloat", "kNCHW", "def", "True",
"/Paddle-Lite/build.lite.android.armv8.clang/conv_compute.cc")
```
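
The registry scan driven by `pick_kernel_class` above hinges on a balanced-parenthesis walk to find the end of each registry macro's argument list before splitting it on commas. A minimal standalone sketch of that idea follows; the helper name, keyword default and sample macro text are illustrative only, not part of the Paddle-Lite sources.

```python
def extract_macro_args(text, keyword="REGISTER_LITE_KERNEL"):
    """Return the comma-separated arguments of the first `keyword(...)` in text."""
    start = text.find(keyword)
    if start == -1:
        return []
    left = text.find("(", start)
    pos, depth = left + 1, 1
    while depth:                      # walk forward until the matching ')'
        if text[pos] == "(":
            depth += 1
        elif text[pos] == ")":
            depth -= 1
        pos += 1
    inner = text[left + 1:pos - 1]    # contents between the outer parentheses
    return [arg.strip() for arg in inner.replace("\n", " ").split(",")]

sample = "REGISTER_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, ConvCompute, def)"
print(extract_macro_args(sample))
# ['conv2d', 'kARM', 'kFloat', 'kNCHW', 'ConvCompute', 'def']
```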
#### File: tools/coverage/coverage_lines.py
```python
import os
import sys
def get_lines(info_file):
"""
:param info_file:
:return:
"""
hits = .0
total = .0
with open(info_file) as info_file:
for line in info_file:
line = line.strip()
if not line.startswith('DA:'):
continue
line = line[3:]
total += 1
if int(line.split(',')[1]) > 0:
hits += 1
if total == 0:
print 'no data found'
exit()
return hits / total
if __name__ == '__main__':
if len(sys.argv) < 3:
exit()
info_file = sys.argv[1]
expected = float(sys.argv[2])
if not os.path.isfile(info_file):
        print 'info file {} does not exist, ignored'.format(info_file)
exit()
actual = get_lines(info_file)
actual = round(actual, 3)
if actual < expected:
print 'expected >= {} %, actual {} %, failed'.format(
round(expected * 100, 1), round(actual * 100, 1))
exit(1)
print 'expected >= {} %, actual {} %, passed'.format(
round(expected * 100, 1), round(actual * 100, 1))
``` |
{
"source": "714627034/PaddlePaddle-MobileFaceNets",
"score": 2
} |
#### File: PaddlePaddle-MobileFaceNets/utils/arcmargin.py
```python
import paddle
import paddle.nn as nn
import math
class ArcNet(nn.Layer):
def __init__(self,
feature_dim,
class_dim,
margin=0.2,
scale=30.0,
easy_margin=False):
super().__init__()
self.feature_dim = feature_dim
self.class_dim = class_dim
self.margin = margin
self.scale = scale
self.easy_margin = easy_margin
self.weight = self.create_parameter(
shape=[self.feature_dim, self.class_dim],
is_bias=False,
default_initializer=paddle.nn.initializer.XavierNormal())
def forward(self, input, label):
input_norm = paddle.sqrt(paddle.sum(paddle.square(input), axis=1, keepdim=True))
input = paddle.divide(input, input_norm)
weight_norm = paddle.sqrt(paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
weight = paddle.divide(self.weight, weight_norm)
cos = paddle.matmul(input, weight)
sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
cos_m = math.cos(self.margin)
sin_m = math.sin(self.margin)
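        # Additive angular margin: cos(theta + m) = cos(theta)*cos(m) - sin(theta)*sin(m)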
phi = cos * cos_m - sin * sin_m
th = math.cos(self.margin) * (-1)
mm = math.sin(self.margin) * self.margin
if self.easy_margin:
phi = self._paddle_where_more_than(cos, 0, phi, cos)
else:
phi = self._paddle_where_more_than(cos, th, phi, cos - mm)
one_hot = paddle.nn.functional.one_hot(label, self.class_dim)
one_hot = paddle.squeeze(one_hot, axis=[1])
output = paddle.multiply(one_hot, phi) + paddle.multiply((1.0 - one_hot), cos)
output = output * self.scale
return output
def _paddle_where_more_than(self, target, limit, x, y):
mask = paddle.cast(x=(target > limit), dtype='float32')
output = paddle.multiply(mask, x) + paddle.multiply((1.0 - mask), y)
return output
```
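
The `ArcNet` head above takes a batch of embeddings plus integer labels and returns margin-adjusted, scaled logits, so it is normally followed by an ordinary softmax cross-entropy loss. A minimal smoke test along these lines (batch size, embedding width and class count are illustrative, and the `ArcNet` class defined above is assumed to be in scope):

```python
import paddle

arc_head = ArcNet(feature_dim=512, class_dim=10)      # 512-d embeddings, 10 identities
features = paddle.randn([4, 512], dtype='float32')    # batch of 4 embeddings
labels = paddle.randint(low=0, high=10, shape=[4, 1], dtype='int64')

logits = arc_head(features, labels)                   # shape [4, 10]
loss = paddle.nn.functional.cross_entropy(logits, paddle.squeeze(labels, axis=1))
print(logits.shape, loss.numpy())
```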
#### File: PaddlePaddle-MobileFaceNets/utils/utils.py
```python
import distutils.util
import cv2
import numpy as np
import paddle
from tqdm import tqdm
def print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(vars(args).items()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
type = distutils.util.strtobool if type == bool else type
argparser.add_argument("--" + argname,
default=default,
type=type,
                           help=help + ' Default: %(default)s.',
**kwargs)
# Get the full list of LFW image paths
def get_lfw_list(pair_list):
with open(pair_list, 'r') as fd:
pairs = fd.readlines()
data_list = []
for pair in pairs:
splits = pair.split()
if splits[0] not in data_list:
data_list.append(splits[0])
if splits[1] not in data_list:
data_list.append(splits[1])
return data_list
# Load an image and preprocess it
def load_image(img_path):
image = cv2.imread(img_path)
if image is None:
return None
image = cv2.resize(image, (112, 112))
image_flip = np.fliplr(image)
image = np.array([image, image_flip], dtype='float32')
image = image.transpose((0, 3, 1, 2))
image = image.astype(np.float32, copy=False)
image = (image - 127.5) / 127.5
return image
# Extract image features with the model
def get_features(model, test_list, batch_size=32):
images = None
features = None
for i, img_path in enumerate(tqdm(test_list)):
image = load_image(img_path)
        assert image is not None, '{} is not a valid image'.format(img_path)
if images is None:
images = image
else:
images = np.concatenate((images, image), axis=0)
if images.shape[0] % batch_size == 0 or i == len(test_list) - 1:
data = paddle.to_tensor(images, dtype='float32')
output = model(data)
output = output.numpy()
feature_1 = output[0::2]
feature_2 = output[1::2]
feature = np.hstack((feature_1, feature_2))
if features is None:
features = feature
else:
features = np.vstack((features, feature))
images = None
return features
# Pack the file paths and the model's output features into a dict
def get_feature_dict(test_list, features):
feature_dict = {}
for i, each in enumerate(test_list):
feature_dict[each] = features[i]
return feature_dict
# Compute the cosine similarity of two feature vectors
def cosin_metric(x1, x2):
return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))
# Compute accuracy from the cosine similarity scores
def cal_accuracy(y_score, y_true):
y_score = np.asarray(y_score)
y_true = np.asarray(y_true)
best_accuracy = 0
best_threshold = 0
for i in range(len(y_score)):
threshold = y_score[i]
y_test = (y_score >= threshold)
acc = np.mean((y_test == y_true).astype(int))
if acc > best_accuracy:
best_accuracy = acc
best_threshold = threshold
return best_accuracy, best_threshold
# Compute the similarity of every LFW pair
def test_performance(feature_dict, lfw_data_list):
with open(lfw_data_list, 'r') as fd:
pairs = fd.readlines()
sims = []
labels = []
for pair in pairs:
splits = pair.split()
feature_1 = feature_dict[splits[0]]
feature_2 = feature_dict[splits[1]]
label = int(splits[2])
sim = cosin_metric(feature_1, feature_2)
sims.append(sim)
labels.append(label)
accuracy, threshold = cal_accuracy(sims, labels)
return accuracy, threshold
``` |
{
"source": "71619997a/71619997a.github.io",
"score": 4
} |
#### File: 71619997a/71619997a.github.io/userdb.py
```python
import hashlib
def verify(u,p):
d={}
uname=str(u)
pwd=str(p)
f=open('database.txt','r').read().strip().split("\n")
for i in range(len(f)):
f[i] = f[i].split("|")
for i in f:
d[i[0]]=i[1]
if uname in d:
if d[uname]==password_hash(pwd):
return True
else:
print 'wrong pass'
return False
else:
print 'cant find uname'
return False
def add(uname, pwd):
f=open('database.txt','a')
f.write(uname + "|" + password_hash(pwd) + "\n")
f.close()
return True
def password_hash(password):
# Hash alg:
m = hashlib.md5()
m.update(password)
hashpass = m.hexdigest()
return hashpass
if __name__ == "__main__":
if str(raw_input("Add user? ")) == "YES":
username = str(raw_input("New username: "))
password = str(raw_input("New password: "))
f = open("database.txt", 'a')
add(username, password)
f.close()
``` |
{
"source": "718970079816800/TelegramCrawler",
"score": 3
} |
#### File: 718970079816800/TelegramCrawler/SearchGroupLinkInMessages.py
```python
from telethon.sync import TelegramClient
from telethon.tl.functions.messages import SearchRequest
from telethon.tl.types import InputMessagesFilterEmpty
from telethon.errors.rpcerrorlist import ChannelPrivateError
import re
LINK1 = re.compile('https://t.me/(?P<link>[A-Za-z0-9_]+)')
LINK2 = re.compile('https://t.me/joinchat/(?P<link>[A-Za-z0-9_-]{22})')
CHANNELID = re.compile('channel_id=(?P<channelid>\d+)')
PEERS = []
Q = 'https://t.me/'
RESULTS = []
GROUPLINKS = []
api_id = XXX
api_hash = 'XXX'
phone = '+XXX'
client = TelegramClient(phone, api_id, api_hash)
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
client.sign_in(phone, input('Enter the code: '))
def get_peer():
'Get all your Telegram chat groups'
global PEERS
dialogs = client.iter_dialogs()
for dialog in dialogs:
if CHANNELID.findall(str(dialog.message)):
PEERS.append(CHANNELID.search(str(dialog.message)).group('channelid'))
PEERS = list(set(PEERS))
PEERS.sort()
def search(q, peers):
'Search group messages using the key word Q'
global RESULTS
global Q
for peer in peers:
try:
result = client(SearchRequest(
peer = int(peer), # On which chat/conversation
q = Q, # What to search for
filter = InputMessagesFilterEmpty(), # Filter to use (maybe filter for media)
min_date = None, # Minimum date
max_date = None, # Maximum date
offset_id = 0, # ID of the message to use as offset
add_offset = 0, # Additional offset
limit = 99999, # How many results
max_id = 0, # Maximum message ID
min_id = 0, # Minimum message ID
hash = 0,
from_id = None # Who must have sent the message (peer)
))
RESULTS.append(result)
except ValueError as e:
print(e)
except ChannelPrivateError as e:
print('Channel: {}'.format(int(peer)))
print(e)
get_peer()
search(Q, PEERS)
for result in RESULTS:
'Extract the group links from the searching result'
if LINK1.findall(result.stringify()):
GROUPLINKS.append('https://t.me/' + LINK1.search(result.stringify()).group('link'))
if LINK2.findall(result.stringify()):
GROUPLINKS.append('https://t.me/joinchat/' + LINK2.search(result.stringify()).group('link'))
GROUPLINKS = list(set(GROUPLINKS))
GROUPLINKS.sort()
for grouplink in GROUPLINKS:
with open('grouplinks', 'a') as f:
f.write(grouplink)
f.write('\n')
``` |
{
"source": "719733328/drcom",
"score": 2
} |
#### File: 719733328/drcom/eappacket.py
```python
from struct import *
from zlib import crc32
## Constants
# Reference: http://tools.ietf.org/html/rfc3748
ETHERTYPE_PAE = 0x888e
PAE_GROUP_ADDR = "\x01\x80\xc2\x00\x00\x03" # same for all
BROADCAST_ADDR = "\xff\xff\xff\xff\xff\xff"
EAPOL_VERSION = 1
EAPOL_EAPPACKET = 0
# packet info for EAPOL_EAPPACKET
EAPOL_START = 1
EAPOL_LOGOFF = 2
EAPOL_KEY = 3
EAPOL_ASF = 4
EAP_REQUEST = 1
EAP_RESPONSE = 2
EAP_SUCCESS = 3
EAP_FAILURE = 4
# packet info followed by EAP_RESPONSE
# 1 Identity
# 2 Notification
# 3 Nak (Response only)
# 4 MD5-Challenge
# 5 One Time Password (OTP)
# 6 Generic Token Card (GTC)
# 254 Expanded Types
# 255 Experimental use
EAP_TYPE_ID = 1 # identity
EAP_TYPE_MD5 = 4 # md5 Challenge
### Packet builders
def get_crc32(data):
return pack("!i", crc32(data))
def get_EAPOL(type, payload=""):
return pack("!BBH", EAPOL_VERSION, type, len(payload))+payload
def get_EAP(code, id, type=0, data=""):
if code in [EAP_SUCCESS, EAP_FAILURE]:
return pack("!BBH", code, id, 4)
else:
return pack("!BBHB", code, id, 5+len(data), type)+data
def get_ethernet_header(src, dst, type):
return dst+src+pack("!H",type)
def get_identity_data(login_info, _ = []):
if not _:
_.append(True)
return login_info['username']
return login_info['username'][:-1] + chr(ord(login_info['username'][-1]) + 3)
def fill_bytes(data):
return data.ljust(96, '\x00')
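# Illustrative usage (not part of the original module): an EAPOL-Start frame is the
# EAPOL header with an empty body, prepended with an Ethernet header to the PAE group MAC:
#   start_frame = get_ethernet_header(my_mac, PAE_GROUP_ADDR, ETHERTYPE_PAE) + get_EAPOL(EAPOL_START)
# where `my_mac` stands for the 6-byte MAC address of the local interface.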
``` |
{
"source": "719Ben/BuffOneSwiper",
"score": 2
} |
#### File: 719Ben/BuffOneSwiper/main.py
```python
import bcrypt
import pymysql.cursors
import json
import os
import sys
from flask import Flask, request, session, redirect, render_template
from flask_api import status
from werkzeug.serving import run_simple
from datetime import timezone, datetime
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# open up the config file
with open("config.json") as config:
config = json.loads(config.read())
app = Flask(__name__, template_folder='templates')
app.debug = config["debug"]
app.secret_key = config["session-secret"]
# check if the user is logged in
def isLoggedin():
try:
if session["loggedin"] == True:
return True
else:
return False
# this happens if session["loggedin"] is undefined
except:
return False
# create the vars that we use for the sessions
def createSession(userID, chapterID):
session["loggedin"] = True
session["userID"] = userID
session["chapterID"] = chapterID
# wrapper to create DB connections
def createDBConnection():
return pymysql.connect(host=config["host"],
user=config["user"],
password=config["password"],
db=config["dbname"],
charset=config["charset"],
cursorclass=pymysql.cursors.DictCursor)
# wraper to hash passwords
def hashPassword(passwrd):
return bcrypt.hashpw(passwrd.encode(), bcrypt.gensalt())
# wraper to check hashed passwords, returns a bool
def checkPassword(passwrd, hashedPass):
return hashedPass.encode() == bcrypt.hashpw(passwrd.encode(),
hashedPass.encode())
@app.route("/", methods=["GET"])
def index():
if isLoggedin():
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT users.email, chapters.name, chapters.short_name FROM users, chapters WHERE " + \
"users.id=%s AND chapters.id = users.chapterID"
cursor.execute(sql, (session["userID"]))
results = cursor.fetchone()
sql = "SELECT id, name, chapterID FROM events WHERE chapterID = %s " + \
"ORDER BY time_stamp DESC"
cursor.execute(sql, (session["chapterID"]))
event_list = cursor.fetchall()
try:
eventID = request.args.get("event_id")
if eventID == None:
eventID = event_list[0]["id"]
except:
eventID = 0
for item in event_list:
if item["id"] == int(eventID):
item["selected"] = True
else:
item["selected"] = False
sql = "SELECT name, studentID, time_stamp FROM dataList WHERE " + \
"chapterID = %s AND eventID = %s ORDER BY time_stamp DESC"
cursor.execute(sql, (session["chapterID"], eventID))
dataList = cursor.fetchall()
for item in dataList:
item["time_stamp"] = item[
"time_stamp"].strftime("%I:%M %p %m/%d/%Y ")
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (item["studentID"]))
blacklist_names = cursor.fetchall()
blacklist_names_string = ""
item["self_blacklisted"] = False
item["blacklisted"] = False
for bl_item in blacklist_names:
item["blacklisted"] = True
if bl_item["id"] == session["chapterID"]:
item["self_blacklisted"] = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
item["blacklist"] = blacklist_names_string
cursor.close()
connection.close()
return render_template('index.html', chapter=results["name"],
email=results["email"], dataList=dataList,
event_list=event_list, eventID=eventID,
chapter_short=results["short_name"])
else:
return render_template('login.html')
@app.route("/add_event", methods=["POST"])
def createEvent():
connection = createDBConnection()
name = request.form["event_name"]
chapterID = session["chapterID"]
cursor = connection.cursor()
sql = "INSERT INTO events(name, chapterID) VALUES(%s, %s)"
cursor.execute(sql, (name, chapterID))
cursor.execute("SELECT LAST_INSERT_ID()")
new_id = cursor.fetchone()["LAST_INSERT_ID()"]
cursor.close()
connection.commit()
connection.close()
returnDic = {"name": name, "url": new_id}
return json.dumps(returnDic), status.HTTP_202_ACCEPTED
@app.route("/get_event", methods=["GET"])
def getEvent():
event_id = request.args.get("event_id")
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT name, studentID, time_stamp FROM dataList WHERE" + \
" eventID=%s AND chapterID=%s"
cursor.execute(sql, (event_id, session["chapterID"]))
dataList = cursor.fetchall()
for item in dataList:
item["time_stamp"] = item[
"time_stamp"].strftime("%I:%M %p %m/%d/%Y ")
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (item["studentID"]))
blacklist_names = cursor.fetchall()
blacklist_names_string = ""
item["self_blacklisted"] = False
item["blacklisted"] = False
for bl_item in blacklist_names:
item["blacklisted"] = True
if bl_item["id"] == session["chapterID"]:
item["self_blacklisted"] = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
item["blacklist"] = blacklist_names_string
cursor.close()
connection.close()
return json.dumps(dataList), status.HTTP_202_ACCEPTED
@app.route("/login", methods=["POST"])
def login():
email = request.form["email"]
password = request.form["password"]
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT id, password, chapterID FROM users WHERE email=%s"
cursor.execute(sql, (email))
results = cursor.fetchone()
cursor.close()
connection.close()
validCredentials = False
try:
if checkPassword(password, results["password"]):
validCredentials = True
createSession(results["id"], results["chapterID"])
except:
pass
if validCredentials:
return "", status.HTTP_202_ACCEPTED
else:
return "", status.HTTP_401_UNAUTHORIZED
@app.route("/logout", methods=["GET"])
def removeSession():
session["loggedin"] = False
session.clear()
return redirect("/", code=303)
@app.route("/blacklist", methods=["POST"])
def blacklist():
studentID = request.form["studentID"]
adminPassword = request.form["password"]
shouldBlacklist = not bool(request.form["shouldBlacklist"])
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT password FROM users WHERE id=%s"
cursor.execute(sql, (session["userID"]))
dbPassword = cursor.fetchone()["password"]
if not checkPassword(adminPassword, dbPassword):
return "", status.HTTP_401_UNAUTHORIZED
if shouldBlacklist == True:
sql = "INSERT INTO blacklist(studentID, chapterID) VALUES(%s, %s) " + \
" ON DUPLICATE KEY UPDATE blacklisted = %s"
cursor.execute(sql, (studentID, session["chapterID"], shouldBlacklist))
else:
sql = "UPDATE blacklist SET blacklisted = 0 WHERE studentID = %s AND chapterID = %s"
cursor.execute(sql, (studentID, session["chapterID"]))
cursor.close()
connection.commit()
connection.close()
return "", status.HTTP_202_ACCEPTED
@app.route("/card-reader", methods=["POST"])
def cardReader():
if isLoggedin() == False:
return "", status.HTTP_401_UNAUTHORIZED
try:
studentID = request.form["studentID"]
name = request.form["name"]
raw = request.form["raw"]
eventID = request.form["eventID"]
except:
return "", status.HTTP_400_BAD_REQUEST
userID = session["userID"]
chapterID = session["chapterID"]
connection = createDBConnection()
cursor = connection.cursor()
sql = "INSERT INTO dataList(name, studentID, card_text, userID, " + \
"chapterID, eventID) VALUES(%s, %s, %s, %s, %s, %s)"
cursor.execute(sql, (name, studentID, raw, userID, chapterID, eventID))
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (studentID))
blacklist_names = cursor.fetchall()
cursor.close()
connection.commit()
connection.close()
blacklist_names_string = ""
self_blacklisted = False
blacklisted = False
for bl_item in blacklist_names:
blacklisted = True
if bl_item["id"] == session["chapterID"]:
self_blacklisted = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
returnDic = {"name": name, "time": datetime.now(
).strftime("%I:%M %p %m/%d/%Y "), "blackList": blacklist_names_string,
"self_blacklisted": self_blacklisted, "blacklisted": blacklisted}
return json.dumps(returnDic), status.HTTP_202_ACCEPTED
# reload the templates without restarting
extra_dirs = ['templates']
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
if __name__ == "__main__":
run_simple(config["website"], config["port"], app,
use_reloader=True, use_debugger=True, use_evalex=True,
extra_files=extra_files)
``` |
{
"source": "71andy/kaiten-api-client-python",
"score": 2
} |
#### File: kaiten-api-client-python/kaiten/client.py
```python
import http.client
# import base64
import json
import weakref
import pprint
import urllib
from kaiten.exceptions import *
API_VERSION = 'v1'
USER_AGENT = "KaitenAPIClientPython"
class KaitenObject (object):
__parent = None
def __str__(self):
return pprint.PrettyPrinter( indent = 4 ).pformat( self.__dict__ )
def __init__( self, parent, data={} ):
self.__parent = weakref.ref( parent )
for key in data: setattr( self, key, data[key] )
def __get_parent__(self):
return self.__parent()
def __get_item_by_id__(self, path, item_class, id, params = {}):
item = self.__request__( 'GET', path + '/' + str(id), params)
return globals()[ item_class ](self, item)
def __get_items__(self, path, item_class, params = {}):
items = self.__request__('GET', path, params)
return [ globals()[ item_class ]( self, item ) for item in items ]
def __update__(self, item_class, params ):
data = self.__request__('PATCH', '', params)
for key in data: setattr( self, key, data[key] )
def __delete__(self, params = {}):
self.__request__('DELETE', '', params)
def __create_item__(self, path, item_class, params ):
item = self.__request__('POST', path, params)
return globals()[ item_class ](self, item)
def __request__(self, method, path, params = {}):
path = path if path and path[0] == '/' else ( self.__get_uri__() + '/' + path )
return self.__get_parent__().__request__( method, path, params )
def __get_uri__(self):
        raise NotImplementedError('You should implement method __get_uri__ in a descendant class')
def __deserialize_item__( self, field, item_class, data ):
if field in data :
setattr( self, field, globals()[ item_class ]( self, data.pop(field) ) )
def __deserialize_list__( self, field, item_class, data ):
setattr( self, field, [] )
if field in data :
for item in data.pop(field):
getattr(self, field).append( globals()[ item_class ](self, item) )
class Client (KaitenObject):
"""Performs requests to the Kaiten API service."""
END_POINT = '/api/' + API_VERSION
host = None
username = None
password = <PASSWORD>
debug = False
def __init__(self, host, bearer, debug=False ):
"""
:param host: IP or hostname of Kaiten server
:type host: string
        :param bearer: Bearer token for the connection, used instead of a username and password
        :type bearer: string
        :param debug: flag that enables printing debug information
        :type debug: bool
"""
self.host = host
self.bearer = bearer
self.debug = debug
def __request__(self, method, path, params = {}):
"""Performs HTTP request with credentials, returning the deserialized body json of request
:param method: Method name for HTTP request
:type method: string
        :param path: Absolute path after the API entry point ( /api/v1 )
:type path: string
:param params: Parameters for HTTP Request,
            which will be serialized to json and put in the request body
:type params: dict
"""
conn = http.client.HTTPSConnection( self.host )
request_body = ''
if method == 'GET' :
query_string = urllib.parse.urlencode(params)
if query_string:
path = '?'.join([ path, query_string ])
else :
request_body = json.dumps(params)
conn.request(
method,
self.__get_url_for__(path),
request_body,
self.__get_headers__(),
)
if self.debug :
print(
"Sending request to {} with method {}.\nRequest body:\n{}\n".format(
path, method, request_body
)
)
resp = conn.getresponse()
body = resp.read().decode()
if self.debug :
print(
"Response code: {}\nResponse body:\n{} \n".format(
resp.status, body
)
)
if resp.status == 200:
try:
return json.loads(body)
except json.decoder.JSONDecodeError:
raise InvalidResponseFormat( path, method, body )
elif resp.status == 401:
raise UnauthorizedAccess( 'bearer' )
elif resp.status == 403:
raise AccessDenied( 'bearer', path, method )
else:
raise UnexpectedError( resp.status, path, method, body )
def __get_url_for__(self, path):
"""Returns absolute path for request with entry point of API
        :param path: Absolute path after the API entry point ( /api/v1 )
:type path: string
"""
return self.END_POINT + ( path if path[0] == '/' else '/' + path )
def __get_headers__(self):
"""Returns HTTP headers for request"""
return {
'Authorization': self.__get_auth_key__(),
'Content-Type' : 'application/json',
'User-Agent' : USER_AGENT,
}
def __get_auth_key__(self):
"""Returns auth key for API"""
return "Bearer " + self.bearer
def get_spaces(self):
"""Returns a list of all avalible spaces"""
return self.__get_items__('/spaces', 'Space')
def get_space(self, id):
"""Returns a space with requested id
:param id: id of requested space
:type id: int
"""
return self.__get_item_by_id__('/spaces', 'Space', id)
def create_space(self, title):
"""Creates a new space and after that returns the space
:param title: name of a new space
:type method: string
"""
return self.__create_item__('/spaces', 'Space', { 'title': title })
def get_cards(self, params = {}):
"""Returns a list of all cards which fits to requested parameters.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-get
:type params: dict
"""
return self.__get_items__('/cards', 'Card', params)
def get_card(self, id):
"""Returns a card with requested id
:param id: id of requested card
:type id: int
"""
return self.__get_item_by_id__('/cards', 'Card', id)
def get_users(self):
"""Returns a list of all avalible users"""
return self.__get_items__('/users', 'User')
def get_user(self, id):
"""Returns a user with requested id
:param id: id of requested user
:type id: int
"""
return self.__get_item_by_id__('/users', 'User', id)
def get_tags(self):
"""Returns a list of all avalible tags"""
return self.__get_items__('/tags', 'Tag')
def get_card_types(self):
"""Returns a list of all avalible card types"""
return self.__get_items__('/card-types', 'CardType')
def create_card_type(self, letter, name, color):
"""Adds new card type
:param letter: Character that represents type
:type letter: string
:param name: Type name
:type name: string
:param color: Color number
:type color: int
"""
return self.__create_item__(
'/card-types',
'CardType',
{ 'letter': letter, 'name': name, 'color': color }
)
def get_custom_properties(self):
"""Returns a list of all avalible spaces"""
return self.__get_items__('/company/custom-properties', 'CustomProperty')
class Space (KaitenObject):
def __init__(self, parent, data={}):
self.__deserialize_list__('boards', 'Board', data)
KaitenObject.__init__( self, parent, data )
def __get_uri__(self):
return '/spaces/' + str(self.id)
def update(self, params={}):
"""Updates the space.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#spaces-patch
:type params: dict
"""
return self.__update__( 'Space', params )
def get_boards(self):
"""Returns a list of all avalible boards for the current space"""
return self.__get_items__('boards', 'Board')
def get_board(self, id):
"""Returns a board with requested id
:param id: id of requested board
:type id: int
"""
return self.__get_item_by_id__('boards', 'Board', id)
def create_board(self, title, params={}):
"""Creates a new board
:param title: Title of the new board
:type title: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#boards-post
:type params: dict
"""
params['title'] = title
return self.__create_item__('boards', 'Board', params)
def get_cards(self, params = {}):
"""Returns a list of all cards for that space which fits to requested parameters.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-get
:type params: dict
"""
params['space_id'] = self.id
return self.__get_parent__().get_cards(params)
def get_users(self):
"""Returns a list of all avalible users for the current space"""
return self.__get_items__('users', 'User')
def get_user(self, id):
"""Returns a user with requested id
:param id: id of requested user
:type id: int
"""
return self.__get_item_by_id__('users', 'User', id)
def create_card(self, board_id, column_id, lane_id, title, params={}):
"""Adds new card type in current space
:param title: Title of new card
:type title: string
:param board_id: Board ID
:type board_id: int
:param column_id: Column ID
:type column_id: int
:param lane_id: Lane ID
:type lane_id: int
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-post
:type params: dict
"""
params['space_id'] = self.id
params['board_id'] = board_id
params['column_id'] = column_id
params['lane_id'] = lane_id
params['title'] = title
return self.__create_item__('/cards', 'Card', params)
class Board (KaitenObject):
def __init__(self, parent, data={}):
self.__deserialize_list__('columns', 'Column', data)
self.__deserialize_list__('lanes', 'Lane', data)
self.__deserialize_list__('cards', 'Card', data)
KaitenObject.__init__( self, parent, data )
def __get_uri__(self):
return '/boards/' + str(self.id)
def update(self, params={}):
"""Updates the board.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#boards-patch
:type params: dict
"""
return self.__update__( 'Board', params )
def delete(self):
"""Deletes this board
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#boards-delete
:type params: dict
"""
return self.__delete__()
def create_column(self, title, params={}):
"""Creates a new column
:param title: Title of the new column
:type title: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#columns-post
:type params: dict
"""
params['title'] = title
return self.__create_item__('columns', 'Column', params)
def create_lane(self, title, params={}):
"""Creates a new lane
:param title: Title of the new lane
:type title: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#lanes-post
:type params: dict
"""
params['title'] = title
return self.__create_item__('lanes', 'Lane', params)
def get_cards(self, params = {}):
"""Returns a list of all cards for that board which fits to requested parameters.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-get
:type params: dict
"""
params['board_id'] = self.id
return self.__get_parent__().get_cards(params)
def create_card(self, column_id, lane_id, title, params={}):
"""Adds new card type in current board
:param title: Title of new card
:type title: string
:param column_id: Column ID
:type column_id: int
:param lane_id: Lane ID
:type lane_id: int
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-post
:type params: dict
"""
return self.__get_parent__().create_card(
board_id = self.id,
column_id = column_id,
lane_id = lane_id,
title = title,
params = params,
)
class Column (KaitenObject):
def __get_uri__(self):
return 'columns/' + str(self.id)
def update(self, params={}):
"""Updates the column.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#columns-patch
:type params: dict
"""
return self.__update__( 'Column', params )
def delete(self, params={}):
"""Deletes this column
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#columns-delete
:type params: dict
"""
return self.__delete__(params)
def get_cards(self, params = {}):
"""Returns a list of all cards for that column which fits to requested parameters.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-get
:type params: dict
"""
params['column_id'] = self.id
return self.__get_parent__().get_cards(params)
def create_card(self, lane_id, title, params={}):
"""Adds new card type in current column
:param title: Title of new card
:type title: string
:param lane_id: Lane ID
:type lane_id: int
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-post
:type params: dict
"""
return self.__get_parent__().create_card(
column_id = self.id,
lane_id = lane_id,
title = title,
params = params,
)
class Lane (KaitenObject):
def __get_uri__(self):
return 'lanes/' + str(self.id)
def update(self, params={}):
"""Updates the lane.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#lanes-patch
:type params: dict
"""
return self.__update__( 'Lane', params )
def delete(self, params={}):
"""Deletes this lane
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#lanes-delete
:type params: dict
"""
return self.__delete__(params)
def get_cards(self, params = {}):
"""Returns a list of all cards for that lane which fits to requested parameters.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-get
:type params: dict
"""
params['lane_id'] = self.id
return self.__get_parent__().get_cards(params)
def create_card(self, column_id, title, params={}):
"""Adds new card type in current lane
:param title: Title of new card
:type title: string
:param column_id: Column ID
:type column_id: int
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#cards-post
:type params: dict
"""
return self.__get_parent__().create_card(
lane_id = self.id,
column_id = column_id,
title = title,
params = params,
)
class User (KaitenObject):
pass
class TimeSheet (KaitenObject):
pass
class Card (KaitenObject):
def __init__(self, parent, data={}):
self.__deserialize_item__('type', 'CardType', data)
self.__deserialize_list__('tags', 'Tag', data)
self.__deserialize_list__('members', 'User', data)
self.__deserialize_item__('owner', 'User', data)
self.__deserialize_list__('parents', 'Card', data)
self.__deserialize_list__('children', 'Card', data)
self.__deserialize_list__('checklists', 'Checklist', data)
self.__deserialize_list__('files', 'CardFile', data)
if 'board' in data :
self.board = Board( self, data.pop('board') )
if 'column' in data :
self.column = Column( self.board, data.pop('column') )
if 'lane' in data :
self.lane = Lane( self.board, data.pop('lane') )
else :
if 'column' in data :
del data['column']
if 'lane' in data :
del data['lane']
KaitenObject.__init__( self, parent, data )
def __get_uri__(self):
return '/cards/' + str(self.id)
def update(self, params={}):
"""Updates the card.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://developers.kaiten.io/docs/api-v1.html#cards-patch
:type params: dict
"""
return self.__update__( 'Card', params )
def arhive(self):
"""Puts the card to arhive"""
return self.__update__( 'Card', { 'condition' : 2 } )
def unarhive(self):
"""Returns the card from arhive"""
return self.__update__( 'Card', { 'condition' : 1 } )
def unblock(self):
"""Unblocks card"""
return self.__update__( 'Card', { 'blocked' : False } )
def block(self, params={}):
"""Creates card blocker
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#lanes-post
:type params: dict
"""
return self.__create_item__('blockers', 'CardBlocker', params)
def add_tag(self, name):
"""Adds new tag to card
:param text: Tag's name
:type text: string
"""
return self.__create_item__('tags', 'Tag', { 'name' : name })
def get_comments(self):
"""Returns a list of comments"""
return self.__get_items__('comments', 'Comment')
def add_comment(self, text, params={}):
"""Adds new comment to card
:param text: Comment text
:type text: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-comments-post
:type params: dict
"""
params['text'] = text
return self.__create_item__('comments', 'Comment', params)
def add_external_link(self, url, params={}):
"""Adds new external link to card
:param url: URL
:type url: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-external-links-post
:type params: dict
"""
params['url'] = url
return self.__create_item__('external-links', 'ExternalLink', params)
def add_child(self, card_id):
"""Adds new child card
:param card_id: ID of child card
:type card_id: int
"""
        params = {'card_id': card_id}
        return self.__create_item__('children', 'CardChild', params)
def get_time_logs(self):
"""Returns a list of time logs"""
return self.__get_items__('time-logs', 'CardTimeLog')
def add_time_log(self, role_id, time_spent, for_date, params={}):
"""Adds new time log to card
:param role_id: Role id, predefined role is: -1 - Employee
:type role_id: int
:param time_spent: amount of time in minutes
:type time_spent: int
:param for_date: Log date in format YYYY-MM-DD, for example 2025-12-24
:type for_date: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-time-logs,-not-stable-yet-post
:type params: dict
"""
params['role_id'] = role_id
params['time_spent'] = time_spent
params['for_date'] = for_date
return self.__create_item__('time-logs', 'CardTimeLog', params)
def add_checklist(self, name, params={}):
"""Adds new check list to card
:param name: name of check list
:type name: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-checklists-post
:type params: dict
"""
params['name'] = name
return self.__create_item__('checklists', 'Checklist', params)
def add_definition_of_done(self, text, params={}):
"""Adds new definition of done to card
:param text: Content of item
:type text: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-definition-of-done-(acceptance-criteria)-post
:type params: dict
"""
params['text'] = text
return self.__create_item__('definition-of-done', 'CardDefinitionOfDone', params)
class Tag (KaitenObject):
def __get_uri__(self):
return 'tags/' + str(self.id)
def delete(self):
"""Deletes this tag"""
return self.__delete__()
class ExternalLink (KaitenObject):
def __get_uri__(self):
return 'external-links/' + str(self.id)
def update(self, params={}):
"""Updates the external link.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-external-links-patch
:type params: dict
"""
return self.__update__( 'ExternalLink', params )
def delete(self):
"""Deletes this external link"""
return self.__delete__()
class Comment (KaitenObject):
def __get_uri__(self):
return 'comments/' + str(self.id)
def update(self, params={}):
"""Updates the comment.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-comments-patch
:type params: dict
"""
return self.__update__( 'Comment', params )
def delete(self):
"""Deletes this comment"""
return self.__delete__()
class CardType (KaitenObject):
def __get_uri__(self):
return '/card-types/' + str(self.id)
def update(self, params={}):
"""Updates the card type.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-types-patch
:type params: dict
"""
return self.__update__( 'CardType', params )
def delete(self):
"""Deletes this card type"""
return self.__delete__()
class CardChild (KaitenObject):
def __get_uri__(self):
return 'children/' + str(self.id)
def delete(self):
"""Deletes this card child"""
return self.__delete__()
class CardBlocker (KaitenObject):
pass
class CardFile (KaitenObject):
def __init__(self, parent, data={}):
self.__deserialize_item__('author', 'User', data)
KaitenObject.__init__( self, parent, data )
class CardTimeLog (KaitenObject):
def __get_uri__(self):
return 'time-logs/' + str(self.id)
def update(self, params={}):
"""Updates the card time log.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-time-logs,-not-stable-yet-patch
:type params: dict
"""
return self.__update__( 'CardTimeLog', params )
def delete(self):
"""Deletes this time log"""
return self.__delete__()
class CardDefinitionOfDone (KaitenObject):
def __get_uri__(self):
return 'definition-of-done/' + str(self.id)
def update(self, params={}):
"""Updates the definition of done.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-definition-of-done-(acceptance-criteria)-patch
:type params: dict
"""
return self.__update__( 'CardDefinitionOfDone', params )
def delete(self):
"""Deletes this definition of done"""
return self.__delete__()
class Checklist (KaitenObject):
def __init__(self, parent, data={}):
self.__deserialize_list__('items', 'ChecklistItem', data)
KaitenObject.__init__( self, parent, data )
def __get_uri__(self):
return 'checklists/' + str(self.id)
def update(self, params={}):
"""Updates the check list.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-checklists-patch
:type params: dict
"""
return self.__update__( 'Checklist', params )
def delete(self):
"""Deletes this checklist"""
return self.__delete__()
def add_item(self, text, params={}):
"""Adds new item to check list
:param text: text for new item
:type text: string
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-checklist-items-post
:type params: dict
"""
params['text'] = text
return self.__create_item__('items', 'ChecklistItem', params)
class ChecklistItem (KaitenObject):
def __get_uri__(self):
return 'items/' + str(self.id)
def update(self, params={}):
"""Updates the check list item.
:param params: Dictionary with parametrs for request.
Full list of avalible parameters is avalible on
https://faq.kaiten.io/docs/api#card-checklist-items-patch
:type params: dict
"""
return self.__update__( 'ChecklistItem', params )
def delete(self):
"""Deletes this check list item"""
return self.__delete__()
class CustomProperty(KaitenObject):
def __get_uri__(self):
return 'company/custom-properties/' + str(self.id)
def get_select_values(self):
"""Returns a list of all avalible select values for the current custom property"""
select_values = self.__get_items__("select-values", "CustomPropertySelectValue")
setattr(self, 'select_values', select_values)
return select_values
def create_select_value(self, name, params={}):
"""Create custom property select value:
:param name: name for new select value
:type name: string
:param params: Dictionary with attributes for request.
Supported attributes:
value (String, min length 1, max length 128) [required]
color (Number from 1 to 16). (color of the chip) [optional]
"""
        params['value'] = name
return self.__create_item__(
'select-values', 'CustomPropertySelectValue', params
)
class CustomPropertySelectValue(KaitenObject):
def __get_uri__(self):
return 'select-values/' + str(self.id)
def update(self, name, params={}):
"""Update custom property select value:
:param name: new name for select value
:type name: string
:param params: Dictionary with attributes for request.
Supported attributes:
value (String, min length 1, max length 128) [optional]
color (Number from 1 to 16). (color of the chip) [optional]
"""
        params['value'] = name
return self.__update__('CustomPropertySelectValue', params)
```
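
Taken together, the classes above form a small object graph rooted at `Client`: spaces expose boards, boards expose cards, and so on. A minimal usage sketch is shown below; the host name and bearer token are placeholders for your own Kaiten credentials, and attribute names such as `title` simply mirror fields returned by the API.

```python
from kaiten.client import Client

# Placeholder credentials; substitute your own Kaiten domain and API key.
client = Client('example.kaiten.io', 'YOUR_BEARER_TOKEN', debug=True)

spaces = client.get_spaces()                 # GET /spaces
for space in spaces:
    for board in space.get_boards():         # GET /spaces/<id>/boards
        print(space.title, '/', board.title)

if spaces:
    cards = spaces[0].get_cards()            # scoped query: space_id is filled in automatically
    print(len(cards), 'cards in', spaces[0].title)
```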
#### File: kaiten-api-client-python/kaiten/exceptions.py
```python
class InvalidResponseFormat(Exception):
"""Error when the response which was gotten from api server couldn't be deserialize"""
def __init__(self, path, method, body):
self.path = path
self.method = method
self.body = body
Exception.__init__(self)
def __str__(self):
return "Can't parse response from {} with method {}".format(
self.path, self.method
)
class UnauthorizedAccess(Exception):
"""Error when username or/and password are incorrect"""
def __init__(self, username):
self.username = username
Exception.__init__(self)
def __str__(self):
return "Fail to get access for {}".format(self.username)
class AccessDenied(Exception):
"""Error when access for username for requested resource is denied"""
def __init__(self, username, path, method):
self.username = username
self.path = path
self.method = method
Exception.__init__(self)
def __str__(self):
return "For {} access denied to {} with method {}".format(
self.username, self.path, self.method
)
class UnexpectedError(Exception):
"""Error when response has unexpected response code"""
def __init__(self, status, path, method, body):
self.status = status
self.path = path
self.method = method
self.body = body
Exception.__init__(self)
def __str__(self):
return "For {} with method {} is got unexpected status code {}".format(
self.path, self.method, self.status
)
``` |
{
"source": "71unxv/Python_DTG",
"score": 4
} |
#### File: Python_DTG/4_function/doublePower.py
```python
def doublePower(x) :
hasil = x ** x
return hasil
result = doublePower(2)
print(result) # 4
result = doublePower(3)
print(result) # 27
result = doublePower(4)
print(result)
```
#### File: Python_DTG/4_function/PrintPrime.py
```python
def PrintPrime(numbers : list) :
for number in numbers :
factor = 0
for divider in range(1,number+1) :
if number % divider == 0 :
factor += 1
if factor == 2 :
print(f"The {number} is a prime number")
else :
print(f"The {number} is not a prime number")
numbers = []
for i in range (1,1001) :
numbers.append(i)
PrintPrime(numbers)
```
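
The `PrintPrime` helper above counts every divisor of each number, which is O(n) work per check. A common refinement, sketched below purely as an optional alternative (new names, not part of the course file), stops trial division at the integer square root and gives the same classification for the 1..1000 range:

```python
import math

def is_prime(number: int) -> bool:
    """Trial division up to sqrt(number); sufficient for small ranges like 1..1000."""
    if number < 2:
        return False
    for divider in range(2, math.isqrt(number) + 1):
        if number % divider == 0:
            return False
    return True

for number in range(1, 1001):
    label = "is" if is_prime(number) else "is not"
    print(f"The {number} {label} a prime number")
```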
#### File: Python_DTG/5_OOP/encapsulation.py
```python
class Computer:
def __init__(self):
self.__maxprice = 900
def sell(self):
print("Selling Price: {}".format(self.__maxprice))
def setMaxPrice(self, price):
self.__maxprice = price
c = Computer()
c.sell()
# change the price
c.__maxprice = 1000
c.sell()
# using setter function
c.setMaxPrice(1000)
c.sell()
```
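
The reason the direct assignment `c.__maxprice = 1000` above does not change what `sell()` prints is Python's name mangling of double-underscore attributes inside a class body. The stripped-down sketch below (illustrative class, same idea) shows what actually lands in the instance dictionary:

```python
class Computer:
    def __init__(self):
        self.__maxprice = 900    # stored under the mangled name _Computer__maxprice

c = Computer()
c.__maxprice = 1000              # outside the class there is no mangling: a new '__maxprice' key is added
print(c.__dict__)                # {'_Computer__maxprice': 900, '__maxprice': 1000}
print(c._Computer__maxprice)     # 900 -> the value sell() reads is unchanged
```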
#### File: Python_DTG/5_OOP/OOP.py
```python
class Bird():
def __init__(self) :
self.wings = True
self.fur = True
self.fly = True
def isFly(self) :
return self.fly
Parrot = Bird()
if Parrot.isFly() :
    print("Parrot must be able to fly")
else :
print("Why Parrot can't fly ?")
###### Inheritance #####
class Penguin(Bird) :
def __init__(self) :
self.fly = False
Penguin = Penguin()
if Penguin.isFly() == True :
print("No way, penguin can't fly")
else :
print("Penguin is swimming")
######################
``` |
{
"source": "71USERX/AzurLane_Up_Collect_Script",
"score": 3
} |
#### File: 71USERX/AzurLane_Up_Collect_Script/functions.py
```python
import requests
import json
import re
import time
import os
import threading
import variable as var
# ********************************************************
# Please install the requests module! The script will not run without it.
# You can install it with the following command:
# pip install requests
# ********************************************************
#Lambda Functions
compareIn = lambda new_uids,old_uids : [x for x in new_uids if x not in old_uids]
# Take the entries that are in new_uids but not in old_uids
# Arguments: two lists
#Standard Functions
# Send a request to fetch all results for the search keyword "碧蓝航线" (Azur Lane) in the light-novel category (you can change it in variable.py)
# Argument: int, which page of the search results
def sendGetRequest( page_num ):
response = requests.get(url = var.address + str(page_num))
return response.text
# Get the uid of the article's author
# Argument: string, a JSON text
# Normally you can just pass the result of sendGetRequest() as the argument
def getMid( text ):
temp = var.regeX.findall(text)
mids = []
i = 0
while i < len(temp):
mids.append(re.sub(r"\D+","",temp[i]))
mids[i] = int(mids[i])
i += 1
return mids
# Get the user's nickname from the uid
# Argument: int, the user's uid
def getName( uid ):
responses = requests.get(url = var.InfoAddr + str(uid))
temp = var.nameRegeX.findall(responses.text)
if temp == []:
return "[用户不存在]"
name = temp[0].replace('"name":"',"")
name = name.replace('",',"")
return name
# Add the uids collected by the reverse and forward threads to the uids list in variable.py and remove duplicates
# Arguments: none
def addToUids():
temp = var.thread_in + var.thread_seq
uids = delDuplicated(temp)
new_uids = compareIn(uids,var.uids)
var.uids.extend(new_uids)
# Remove duplicates from a list
# Argument: list, the list to deduplicate
def delDuplicated( list0 ):
return sorted(set(list0), key = list0.index)
def writeNames():
fo = open("names.listf",mode="w",encoding="utf-8")
var.finder_seq.reverse()
names = []
names.extend(var.finder_in)
names.extend(var.finder_seq)
i = 0
while i < len(names):
fo.write(names[i] + "\n")
i += 1
fo.close()
# Read all data from uids.listf and add it to the uids list in variable.py
# Arguments: none
def readFromFile():
fo = open("uids.listf",mode="r",encoding="utf-8")
temp = fo.readlines()
i = 0
while i < len(temp):
temp[i] = temp[i][0 : (len(temp[i]) - 1)]
temp[i] = int(temp[i])
i += 1
fo.close()
temp = delDuplicated(temp)
return temp
# Write all uids in variable.py to uids.listf
# Arguments: none
def writeToFile():
fo = open("uids.listf",mode="a+",encoding="utf-8")
fo.seek(0,0)
origin = fo.readlines()
i = 0
while i < len(origin):
origin[i] = origin[i][0 : (len(origin[i]) - 1)]
origin[i] = int(origin[i])
i += 1
temp = compareIn(var.uids,origin)
fo.seek(0,2)
i = 0
while i < len(temp):
fo.write(str(temp[i]) + "\n")
i += 1
fo.close()
# Merge uids.listf and names.listf into a markdown file
# Arguments: none
def makeMarkdown():
nfo = open("names.listf",mode="r",encoding="utf-8")
ufo = open("uids.listf",mode="r",encoding="utf-8")
mdfo = open("up.md",mode="w",encoding="utf-8")
names = nfo.readlines()
uids = ufo.readlines()
mdfo.write("**排名不分前后** \n")
i = 0
while i < len(names):
names[i] = names[i][0:(len(names[i])-1)]
i += 1
nfo.close()
i = 0
while i < len(uids):
uids[i] = uids[i][0:(len(uids[i])-1)]
i += 1
ufo.close()
i = 0
while i < len(uids):
mdfo.write("[" + names[i] + "]" + "(" + var.UsrPage + uids[i] + ") \n")
i += 1
mdfo.close()
# Main function of the forward-order thread: fetches all search results on pages 1-25
# Arguments: none
def inverted_thread():
i = 1
text = ""
while i <= 25:
text = sendGetRequest(i)
GotUid = getMid(text)
new_uid = compareIn(GotUid,var.thread_in)
var.thread_in.extend(new_uid)
print("Thread_in:page " + str(i) +" Processed.")
i += 1
time.sleep(var.waitingTime)
# Main function of the reverse-order thread: fetches all search results on pages 26-50
# Arguments: none
def sequential_thread():
i = 50
text = ""
while i > 25:
text = sendGetRequest(i)
GotUid = getMid(text)
new_uid = compareIn(GotUid,var.thread_seq)
var.thread_seq.extend(new_uid)
print("Thread_seq:page " + str(i) +" Processed.")
i -= 1
time.sleep(var.waitingTime)
# Main function of the name-lookup thread: matches every uid to a nickname and stores them in the finder_* lists in variable.py
# Arguments: none
# Do not call readFromFile() before this function! Otherwise names will be matched twice
def find_name_in_thread():
uids = readFromFile()
i = 0
while i <= len(uids)//2 :
name = getName(uids[i])
var.finder_in.append(name)
print("Finder_in:The no. "+ str(i) + " is " + name)
i += 1
time.sleep(var.waitingTime)
# Look up nicknames in reverse order
def find_name_seq_thread():
uids = readFromFile()
i = len(uids) - 1
while i > len(uids)//2 :
name = getName(uids[i])
var.finder_seq.append(name)
print("Finder_seq:The no. "+ str(i) + " is " + name)
i -= 1
time.sleep(var.waitingTime)
# Forward-order thread class
# Arguments: none
class inverted_thread_class(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
print("Thread Inverted Start!")
def __del__(self):
print("Thread Inverted Exit")
def run(self):
inverted_thread()
# Reverse-order thread class
# Arguments: none
class sequential_thread_class(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
print("Thread Sequential Start!")
def __del__(self):
print("Thread Sequential Exit")
def run(self):
sequential_thread()
# Name-lookup thread class
# Arguments: none
class find_name_in_thread_class(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
print("Find Name Inverted Thread Start!")
def __del__(self):
print("Find Name Inverted Thread Exit")
def run(self):
find_name_in_thread()
# Reverse-order name-lookup thread class
# Arguments: none
class find_name_seq_thread_class(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
print("Find Name Sequential Thread Strat")
def __del__(self):
print("Find Name Sequential Thread Exit")
def run(self):
find_name_seq_thread()
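# Illustrative driver (not part of the original script): start both collector threads,
# wait for them, then merge the results and write the output files, e.g.
#   t1, t2 = inverted_thread_class(), sequential_thread_class()
#   t1.start(); t2.start(); t1.join(); t2.join()
#   addToUids(); writeToFile()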
``` |
{
"source": "722C/django-prices-taxjar",
"score": 2
} |
#### File: management/commands/get_tax_rates.py
```python
from django.core.management.base import BaseCommand
from ... import utils
class Command(BaseCommand):
help = 'Get current tax rates in regions and saves to database'
def handle(self, *args, **options):
json_response_rates = utils.fetch_tax_rates()
utils.create_objects_from_json(json_response_rates)
json_response_types = utils.fetch_categories()
utils.save_tax_categories(json_response_types)
```
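
The command above simply chains the fetch-and-persist helpers, so besides running `python manage.py get_tax_rates` (for example from cron) it can also be triggered programmatically. A minimal sketch, assuming a configured Django project with valid TaxJar credentials:

```python
from django.core.management import call_command

# Equivalent to `python manage.py get_tax_rates`:
# refreshes both the summary rates and the tax categories tables.
call_command('get_tax_rates')
```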
#### File: django-prices-taxjar/django_prices_taxjar/models.py
```python
from jsonfield import JSONField
from django.db import models
from django.utils.translation import pgettext_lazy
DEFAULT_TYPES_INSTANCE_ID = 1
class Tax(models.Model):
country_code = models.CharField(
pgettext_lazy('Tax field', 'country code'), max_length=2,
db_index=True)
region_code = models.CharField(
pgettext_lazy('Tax field', 'region code'), max_length=2, db_index=True,
blank=True, null=True)
data = JSONField(pgettext_lazy('Tax field', 'data'))
def __str__(self):
return self.country_code
class TaxCategoriesQuerySet(models.QuerySet):
def singleton(self):
return self.filter(id=DEFAULT_TYPES_INSTANCE_ID).first()
class TaxCategories(models.Model):
types = JSONField(pgettext_lazy('Tax field', 'types'))
objects = TaxCategoriesQuerySet.as_manager()
```
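
With these models, a stored summary rate is keyed by country code plus an optional region code, and the category list lives on a single well-known `TaxCategories` row. An illustrative query session (assumes the tables were already populated by `get_tax_rates`; the US/CA codes are example values only):

```python
from django_prices_taxjar.models import Tax, TaxCategories

# Look up the stored summary-rate entry for California, United States.
tax = Tax.objects.filter(country_code='US', region_code='CA').first()
if tax is not None:
    print(tax.data.get('average_rate'))

# The category list is kept on one singleton row.
categories = TaxCategories.objects.singleton()
print(categories.types if categories else [])
```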
#### File: django-prices-taxjar/tests/test_taxjar.py
```python
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django_prices_taxjar import utils
from django_prices_taxjar.models import Tax, TaxCategories
from prices import Money, TaxedMoney
from django_prices_taxjar import LineItem
@pytest.fixture
def tax_country(db, json_success):
data = json_success['summary_rates'][0]
# Convert to strings to avoid potential issues with rates being floats.
try:
data['minimum_rate']['rate'] = str(data['minimum_rate']['rate'])
    except KeyError:
pass
try:
data['average_rate']['rate'] = str(data['average_rate']['rate'])
    except KeyError:
pass
return Tax.objects.create(country_code=data['country_code'], region_code=data['region_code'], data=data)
@pytest.fixture
def rate_type(db, json_types_success):
return TaxCategories.objects.create(
types=json_types_success['categories'])
@pytest.fixture
def fetch_tax_rates_success(monkeypatch, json_success):
monkeypatch.setattr(utils, 'fetch_tax_rates', lambda: json_success)
@pytest.fixture
def fetch_tax_rates_error(monkeypatch, json_error):
monkeypatch.setattr(utils, 'fetch_tax_rates', lambda: json_error)
@pytest.fixture
def fetch_categories_success(monkeypatch, json_types_success):
monkeypatch.setattr(utils, 'fetch_categories', lambda: json_types_success)
@pytest.fixture
def fetch_categories_error(monkeypatch, json_error):
monkeypatch.setattr(utils, 'fetch_categories', lambda: json_error)
@pytest.fixture
def fetch_tax_rate_for_address_success(monkeypatch, json_success_for_address):
monkeypatch.setattr(utils, 'fetch_tax_for_address',
lambda *args, **kwargs: json_success_for_address)
@pytest.fixture
def fetch_tax_rate_for_order_success(monkeypatch, json_success_for_order):
monkeypatch.setattr(utils, 'fetch_tax_for_order',
lambda *args, **kwargs: json_success_for_order)
def test_validate_data_invalid(json_error):
with pytest.raises(ImproperlyConfigured):
utils.validate_data(json_error)
def test_validate_data_valid(json_success):
assert utils.validate_data(json_success) is None
@pytest.mark.django_db
def test_create_objects_from_json_error(json_error, json_success):
tax_counts = Tax.objects.count()
with pytest.raises(ImproperlyConfigured):
utils.create_objects_from_json(json_error)
utils.create_objects_from_json(json_success)
assert tax_counts + 3 == Tax.objects.count()
@pytest.mark.django_db
def test_create_objects_from_json_success(json_success):
for json_dict in [json_success]:
utils.create_objects_from_json(json_dict)
assert Tax.objects.count() == 3
@pytest.mark.django_db
def test_save_tax_categories(json_types_success):
utils.save_tax_categories(json_types_success)
assert 1 == TaxCategories.objects.count()
utils.save_tax_categories(json_types_success)
assert 1 == TaxCategories.objects.count()
@pytest.mark.django_db
def test_get_tax_categories(rate_type):
categories = utils.get_tax_categories()
assert categories == rate_type.types
@pytest.mark.django_db
def test_get_tax_categories_no_categories():
categories = utils.get_tax_categories()
assert categories == []
def test_get_tax_rates_for_country_region(tax_country):
country_code = tax_country.country_code
region_code = tax_country.region_code
tax_rates = utils.get_tax_rates_for_region(country_code, region_code)
assert tax_rates['country'] == 'United States'
assert tax_rates['region'] == 'California'
assert tax_rates['minimum_rate']['rate'] == '0.065'
assert tax_rates['average_rate']['rate'] == '0.0827'
@pytest.mark.django_db
def test_get_tax_rates_for_country_invalid_code():
tax_rates = utils.get_tax_rates_for_region('XX')
assert tax_rates is None
def test_get_tax_rate_standard_rate(tax_country):
tax_rates = tax_country.data
standard_rate = utils.get_tax_rate(tax_rates)
assert standard_rate == tax_rates['average_rate']['rate']
def test_get_tax_rate_fallback_to_standard_rate(tax_country):
tax_rates = tax_country.data
hotels_rate = utils.get_tax_rate(tax_rates, 'hotels')
assert hotels_rate == tax_rates['average_rate']['rate']
def test_get_tax_for_rate_standard_rate(tax_country):
tax_rates = tax_country.data
standard_tax = utils.get_tax_for_rate(tax_rates)
assert standard_tax(Money(100, 'USD')) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('108.27', 'USD'))
assert standard_tax(Money(100, 'USD'), keep_gross=True) == TaxedMoney(
net=Money('92.36', 'USD'), gross=Money(100, 'USD'))
taxed_money = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
assert standard_tax(taxed_money) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('108.27', 'USD'))
assert standard_tax(taxed_money, keep_gross=True) == TaxedMoney(
net=Money('92.36', 'USD'), gross=Money(100, 'USD'))
def test_get_tax_for_rate_fallback_to_standard_rate(tax_country):
tax_rates = tax_country.data
hotels_tax = utils.get_tax_for_rate(tax_rates, 'hotels')
assert hotels_tax(Money(100, 'USD')) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('108.27', 'USD'))
assert hotels_tax(Money(100, 'USD'), keep_gross=True) == TaxedMoney(
net=Money('92.36', 'USD'), gross=Money(100, 'USD'))
taxed_money = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
assert hotels_tax(taxed_money) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('108.27', 'USD'))
assert hotels_tax(taxed_money, keep_gross=True) == TaxedMoney(
net=Money('92.36', 'USD'), gross=Money(100, 'USD'))
def test_get_tax_for_rate_reduced_rate(tax_country):
tax_rates = tax_country.data
books_tax = utils.get_tax_for_rate(tax_rates, 'books')
assert books_tax(Money(100, 'USD')) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('108.27', 'USD'))
assert books_tax(Money(100, 'USD'), keep_gross=True) == TaxedMoney(
net=Money('92.36', 'USD'), gross=Money(100, 'USD'))
taxed_money = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
assert books_tax(taxed_money) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('108.27', 'USD'))
assert books_tax(taxed_money, keep_gross=True) == TaxedMoney(
net=Money('92.36', 'USD'), gross=Money(100, 'USD'))
def test_get_tax_for_address(fetch_tax_rate_for_address_success):
tax_for_address = utils.get_tax_for_address(
'05495-2086', 'US', 'VT', 'Williston', '312 Hurricane Lane')
assert tax_for_address(Money(100, 'USD')) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('107.00', 'USD'))
assert tax_for_address(Money(100, 'USD'), keep_gross=True) == TaxedMoney(
net=Money('93.46', 'USD'), gross=Money(100, 'USD'))
taxed_money = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
assert tax_for_address(taxed_money) == TaxedMoney(
net=Money(100, 'USD'), gross=Money('107.00', 'USD'))
assert tax_for_address(taxed_money, keep_gross=True) == TaxedMoney(
net=Money('93.46', 'USD'), gross=Money(100, 'USD'))
def test_get_shipping_taxable_for_address(fetch_tax_rate_for_address_success):
shipping_taxable_for_address = utils.is_shipping_taxable_for_address(
'05495-2086', 'US', 'VT', 'Williston', '312 Hurricane Lane')
assert shipping_taxable_for_address == True
def test_get_taxes_for_order(fetch_tax_rate_for_order_success):
tax_for_order = utils.get_taxes_for_order(
Money('1.5', 'USD'), 'US', '90002', 'CA', 'Los Angeles',
'1335 E 103rd St', None,
[
LineItem('1', 1, Money(15, 'USD'), '20010')
]
)
assert tax_for_order(Money(15, 'USD')) == TaxedMoney(
net=Money(15, 'USD'), gross=Money('16.35', 'USD'))
assert tax_for_order(Money(15, 'USD'), keep_gross=True) == TaxedMoney(
net=Money('13.65', 'USD'), gross=Money(15, 'USD'))
taxed_money = TaxedMoney(net=Money(15, 'USD'), gross=Money(15, 'USD'))
assert tax_for_order(taxed_money) == TaxedMoney(
net=Money(15, 'USD'), gross=Money('16.35', 'USD'))
assert tax_for_order(taxed_money, keep_gross=True) == TaxedMoney(
net=Money('13.65', 'USD'), gross=Money(15, 'USD'))
``` |
{
"source": "722C/saleor-collection-extensions",
"score": 2
} |
#### File: collection_extensions/dashboard_views/filters.py
```python
from django.utils.translation import npgettext, pgettext_lazy
from django_filters import (CharFilter, OrderingFilter)
from saleor.core.filters import SortedFilterSet
from ..models import CollectionExtension
SORT_BY_FIELDS = {
'collection__name': pgettext_lazy('Collection list sorting option', 'name')}
class CollectionExtensionFilter(SortedFilterSet):
collection__name = CharFilter(
label=pgettext_lazy('Collection list filter label', 'Name'),
lookup_expr='icontains')
sort_by = OrderingFilter(
label=pgettext_lazy('Collection list filter label', 'Sort by'),
fields=SORT_BY_FIELDS.keys(),
field_labels=SORT_BY_FIELDS)
class Meta:
model = CollectionExtension
fields = []
def get_summary_message(self):
counter = self.qs.count()
return npgettext(
'Number of matching records in the dashboard collection extensions list',
'Found %(counter)d matching collection extension',
'Found %(counter)d matching collection extensions',
number=counter) % {'counter': counter}
```
#### File: collection_extensions/templatetags/collection_extensions.py
```python
from django import template
from ..models import CollectionExtension
register = template.Library()
@register.inclusion_tag('collection_extensions/dashboard/side_nav_inclusion.html',
takes_context=True)
def collection_extensions_side_nav(context):
return context
@register.filter
def collection_alternative_name(collection):
if collection:
ce = CollectionExtension.objects.filter(collection=collection).first()
if ce:
return ce.alternative_name
return ''
@register.filter
def collection_content(collection):
if collection:
ce = CollectionExtension.objects.filter(collection=collection).first()
if ce:
return ce.content
return ''
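# Usage sketch (standard Django template-filter syntax; the filter names come from
# the decorated functions above, and the load name from this module's file name):
#     {% load collection_extensions %}
#     {{ collection|collection_alternative_name }}
#     {{ collection|collection_content }}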
``` |
{
"source": "724686158/cisot_network_monitor",
"score": 2
} |
#### File: cisot_network_monitor/lib/measurement_repositories_test.py
```python
import unittest
import time
from lib.measurement_repositories import DatapathResponseTimeRepository, \
LinkLatencyRepository, BandwidthPortMeasurementData, PlrPortMeasurementData, PortStatsRepository
from lib.packets import ReceivedTestPacket
from lib.time_units import TimeStamp
class TestDatapathResponseTimeRepository(unittest.TestCase):
def test_get_response_time(self):
repo = DatapathResponseTimeRepository()
dpid = 1
repo.write_send_time(dpid)
repo.write_receive_time(dpid)
error = 1.0 # ms
self.assertLess(repo.get_response_time(dpid).milliseconds(), error)
def test_write_receive_time__no_such_dpid(self):
repo = DatapathResponseTimeRepository()
with self.assertRaisesRegex(
KeyError, r'dpid 1 is not in datapath time repository'):
repo.write_receive_time(1)
def test_get_response_time__no_such_dpid(self):
repo = DatapathResponseTimeRepository()
self.assertEqual(repo.get_response_time(1).milliseconds(), 0.0)
class TestLinkLatencyRepository(unittest.TestCase):
def test_get_latency_between(self):
repo = LinkLatencyRepository()
rpkt = ReceivedTestPacket(1, 2, TimeStamp(1586869012.1606))
self.assertEqual(repo.get_latency_between(1, 2).milliseconds(), 0.0)
repo.parse_test_packet(rpkt)
self.assertGreater(repo.get_latency_between(1, 2).milliseconds(), 0.0)
def test_get_latency_between__no_such_dpid(self):
repo = LinkLatencyRepository()
self.assertEqual(repo.get_latency_between(1, 2).milliseconds(), 0.0)
class TestBandwidthPortMeasurementData(unittest.TestCase):
def test__sub__(self):
d1 = BandwidthPortMeasurementData(1, 842000000, 13, 16)
d2 = BandwidthPortMeasurementData(2, 842000000, 20, 21)
self.assertEqual(d2 - d1, 96.0)
class TestPlrPortMeasurementData(unittest.TestCase):
def test__sub__(self):
d1 = PlrPortMeasurementData(40, 60, 10, 5)
d2 = PlrPortMeasurementData(80, 120, 20, 10)
self.assertEqual(d2 - d1, 15.0)
# no packets
d1 = PlrPortMeasurementData(40, 60, 10, 5)
d2 = PlrPortMeasurementData(40, 60, 10, 5)
self.assertEqual(d2 - d1, 0.0)
# no errors
d1 = PlrPortMeasurementData(40, 60, 10, 5)
d2 = PlrPortMeasurementData(80, 120, 10, 5)
self.assertEqual(d2 - d1, 0.0)
class TestPortStatsRepository(unittest.TestCase):
def test_add_stats__bandwidth(self):
repo = PortStatsRepository()
repo.add_stats(1, 2,
BandwidthPortMeasurementData(1, 842000000, 13, 16))
repo.add_stats(1, 2,
BandwidthPortMeasurementData(2, 842000000, 20, 21))
self.assertEqual(repo.get_stats(1, 2), 96.0)
repo.add_stats(1, 2,
BandwidthPortMeasurementData(3, 842000000, 25, 39))
self.assertEqual(repo.get_stats(1, 2), 184.0)
def test_add_stats__plr(self):
repo = PortStatsRepository()
repo.add_stats(1, 2, PlrPortMeasurementData(40, 60, 10, 5))
repo.add_stats(1, 2, PlrPortMeasurementData(80, 120, 20, 10))
self.assertEqual(repo.get_stats(1, 2), 15.0)
repo.add_stats(1, 2, PlrPortMeasurementData(100, 200, 20, 10))
self.assertEqual(repo.get_stats(1, 2), 0.0)
def test_add_stats__empty_stats(self):
repo = PortStatsRepository()
self.assertEqual(repo.get_stats(1, 2), 0.0)
repo.add_stats(1, 2,
BandwidthPortMeasurementData(1, 842000000, 13, 16))
self.assertEqual(repo.get_stats(1, 2), 0.0)
if __name__ == '__main__':
unittest.main()
```
#### File: cisot_network_monitor/lib/time_units.py
```python
class TimeDelta:
def __init__(self, delta_in_seconds):
self._delta_in_seconds = delta_in_seconds
def milliseconds(self, ndigits=3):
return round(self._delta_in_seconds * 1000, ndigits)
def seconds(self, ndigits=3):
return round(self._delta_in_seconds, ndigits)
def __str__(self):
return str(self._delta_in_seconds)
class TimeStamp:
def __init__(self, seconds):
if isinstance(seconds, float):
self._seconds = TimeStamp._normalize(seconds)
return
if isinstance(seconds, str):
self._seconds = TimeStamp._normalize(float(seconds))
return
raise ValueError('seconds must be float or string')
@staticmethod
def _normalize(t):
k = 10000
return int(t * k) / k
def __str__(self):
return str(self._seconds)
def __sub__(self, other):
return TimeDelta(self._seconds - other._seconds)
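# Usage sketch (derived from the classes above and mirrored by the tests below):
#     delta = TimeStamp(10.51) - TimeStamp(8.49)   # -> TimeDelta
#     delta.milliseconds()                         # -> 2020.0
#     delta.seconds(2)                             # -> 2.02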
```
#### File: cisot_network_monitor/lib/time_units_test.py
```python
import unittest
from lib.time_units import TimeStamp, TimeDelta
class TestTimeStamp(unittest.TestCase):
def test__init__(self):
# int arg
with self.assertRaises(ValueError):
TimeStamp(3)
# ok
TimeStamp(3.21)
TimeStamp('3.21')
def test__str__(self):
ts1 = TimeStamp(3.21)
self.assertEqual(str(ts1), '3.21')
ts2 = TimeStamp('12.456')
self.assertEqual(str(ts2), '12.456')
def test__sub__(self):
ts1 = TimeStamp(10.51)
ts2 = TimeStamp(8.49)
delta = ts1 - ts2
self.assertEqual(delta.milliseconds(), 2020.0)
class TestTimeDelta(unittest.TestCase):
def test_milliseconds(self):
td = TimeDelta(2.1234567)
self.assertEqual(td.milliseconds(), 2123.457)
self.assertEqual(td.milliseconds(1), 2123.5)
self.assertEqual(td.milliseconds(2), 2123.46)
self.assertEqual(td.milliseconds(3), 2123.457)
self.assertEqual(td.milliseconds(4), 2123.4567)
self.assertEqual(td.milliseconds(5), 2123.45670)
def test_seconds(self):
td = TimeDelta(2.1234567)
self.assertEqual(td.seconds(), 2.123)
self.assertEqual(td.seconds(1), 2.1)
self.assertEqual(td.seconds(2), 2.12)
self.assertEqual(td.seconds(3), 2.123)
self.assertEqual(td.seconds(4), 2.1235)
self.assertEqual(td.seconds(5), 2.12346)
if __name__ == '__main__':
unittest.main()
```
#### File: cisot_network_monitor/lib/topology.py
```python
from lib.util import delete_duplicates_from_list
class Topology:
def __init__(self):
self._dpid_and_opposite_port_to_opposite_dpid = {}
self._dpid_to_ports = {}
def get_ports(self, dpid):
return self._dpid_to_ports.setdefault(dpid, [])
def register_link(self, src_dpid, src_port_no, dst_dpid, dst_port_no):
self._dpid_and_opposite_port_to_opposite_dpid.setdefault(
src_dpid, {})[dst_port_no] = dst_dpid
self._dpid_and_opposite_port_to_opposite_dpid.setdefault(
dst_dpid, {})[src_port_no] = src_dpid
self._dpid_to_ports.setdefault(src_dpid, []).append(src_port_no)
self._dpid_to_ports[src_dpid] = delete_duplicates_from_list(
self._dpid_to_ports[src_dpid])
self._dpid_to_ports.setdefault(dst_dpid, []).append(dst_port_no)
self._dpid_to_ports[dst_dpid] = delete_duplicates_from_list(
self._dpid_to_ports[dst_dpid])
def get_opposite_dpid(self, dpid, opposite_port_no):
return self._dpid_and_opposite_port_to_opposite_dpid.setdefault(
dpid, {}).setdefault(opposite_port_no, 0)
class Link:
def __init__(self, src_dpid, src_port_no, dst_dpid, dst_port_no):
self.src_dpid = src_dpid
self.src_port_no = src_port_no
self.dst_dpid = dst_dpid
self.dst_port_no = dst_port_no
def __hash__(self):
return hash(
str(self.src_dpid) + str(self.src_port_no) + str(self.dst_dpid) +
str(self.dst_port_no))
def __eq__(self, other):
return self.src_dpid == other.src_dpid and \
self.src_port_no == other.src_port_no and \
self.dst_dpid == other.dst_dpid and \
self.dst_port_no == other.dst_port_no
def __str__(self):
return 'src_dpid=' + str(self.src_dpid) + ';' + \
'src_port_no=' + str(self.src_port_no) + ';' + \
'dst_dpid=' + str(self.dst_dpid) + ';' + \
'dst_port_no=' + str(self.dst_port_no)
class LinkRepository:
def __init__(self):
self._links = {}
def register_link(self, src_dpid, src_port_no, dst_dpid, dst_port_no):
link = Link(src_dpid, src_port_no, dst_dpid, dst_port_no)
self._links[link] = link
def find_directed_links(self):
return self._links.values()
def find_bidirectional_links(self):
s = BidirectionalLinkSet()
for link in self.find_directed_links():
s.add(link)
return s.get_all()
class BidirectionalLinkSet:
def __init__(self):
self._directed_links = {}
self._bidirectional_links = []
    def _get_opposite_link(self, link):
first_direction_link = self._directed_links.setdefault(
link.src_dpid, {}).setdefault(link.dst_dpid, None)
if first_direction_link:
return first_direction_link
second_direction_link = self._directed_links.setdefault(
link.dst_dpid, {}).setdefault(link.src_dpid, None)
return second_direction_link
def add(self, link):
        opposite_link = self._get_opposite_link(link)
if opposite_link:
self._bidirectional_links.append(link)
else:
self._directed_links[link.src_dpid][link.dst_dpid] = link
def get_all(self):
return self._bidirectional_links
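# Usage sketch (based only on the classes above): register both directions of a
# link, then collapse them into a single bidirectional pair.
#     repo = LinkRepository()
#     repo.register_link(1, 1, 2, 2)   # dpid 1 port 1 -> dpid 2 port 2
#     repo.register_link(2, 2, 1, 1)   # the reverse direction
#     len(repo.find_directed_links())       # -> 2
#     len(repo.find_bidirectional_links())  # -> 1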
``` |
{
"source": "724686158/MachineLearningTest",
"score": 3
} |
#### File: MachineLearningTest/RandomWalk/chess.py
```python
import random
import numpy as np
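# Random walk of a chess knight on an 8x8 board: starting from square (0, 0),
# repeatedly make a uniformly random legal knight move and count how often each
# square is visited. Dividing the counts by the number of steps estimates the
# long-run visit frequency of every square.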
x = 0
y = 0
def move(x,y):
flag = 0
while flag == 0:
c = random.randint(1,8)
if c == 1:
if x - 2 >= 0 and y - 1 >= 0:
x = x - 2
y = y - 1
flag = 1
elif c == 2:
if x - 1 >= 0 and y - 2 >= 0:
x = x - 1
y = y - 2
flag = 1
elif c == 3:
if x - 2 >= 0 and y + 1 <= 7:
x = x - 2
y = y + 1
flag = 1
elif c == 4:
if x - 1 >= 0 and y + 2 <= 7:
x = x - 1
y = y + 2
flag = 1
elif c == 5:
if x + 2 <= 7 and y + 1 <= 7:
x = x + 2
y = y + 1
flag = 1
elif c == 6:
if x + 1 <= 7 and y + 2 <= 7:
x = x + 1
y = y + 2
flag = 1
elif c == 7:
if x + 2 <= 7 and y - 1 >= 0:
x = x + 2
y = y - 1
flag = 1
elif c == 8:
if x + 1 <= 7 and y - 2 >= 0:
x = x + 1
y = y - 2
flag = 1
return x, y
a = np.zeros((8, 8))
n = 200000
for i in range(n):
    x, y = move(x, y)
    a[x][y] += 1
a /= n
print(a)
``` |
{
"source": "724686158/NosqlEXP3",
"score": 3
} |
#### File: docutils/transforms/writer_aux.py
```python
__docformat__ = 'reStructuredText'
from docutils import nodes, utils, languages
from docutils.transforms import Transform
class Compound(Transform):
"""
    Flatten all compound paragraphs. For example, transform ::
<compound>
<paragraph>
<literal_block>
<paragraph>
into ::
<paragraph>
<literal_block classes="continued">
<paragraph classes="continued">
"""
default_priority = 910
def apply(self):
for compound in self.document.traverse(nodes.compound):
first_child = True
for child in compound:
if first_child:
if not isinstance(child, nodes.Invisible):
first_child = False
else:
child['classes'].append('continued')
# Substitute children for compound.
compound.replace_self(compound[:])
class Admonitions(Transform):
"""
    Transform specific admonitions, like this::
<note>
<paragraph>
Note contents ...
into generic admonitions, like this::
<admonition classes="note">
<title>
Note
<paragraph>
Note contents ...
The admonition title is localized.
"""
default_priority = 920
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
for node in self.document.traverse(nodes.Admonition):
node_name = node.__class__.__name__
# Set class, so that we know what node this admonition came from.
node['classes'].append(node_name)
if not isinstance(node, nodes.admonition):
# Specific admonition. Transform into a generic admonition.
admonition = nodes.admonition(node.rawsource, *node.children,
**node.attributes)
title = nodes.title('', language.labels[node_name])
admonition.insert(0, title)
node.replace_self(admonition)
```
#### File: docutils/utils/punctuation_chars.py
```python
import sys, re
import unicodedata
"""Docutils character category patterns.
Patterns for the implementation of the `inline markup recognition rules`_
in the reStructuredText parser `docutils.parsers.rst.states.py` based
on Unicode character categories.
The patterns are used inside ``[ ]`` in regular expressions.
Rule (5) requires determination of matching open/close pairs. However, the
pairing of open/close quotes is ambiguous due to different typographic
conventions in different languages. The ``quote_pairs`` function tests
whether two characters form an open/close pair.
The patterns are generated by
``docutils/tools/dev/generate_punctuation_chars.py`` to prevent dependence
on the Python version and avoid the time-consuming generation with every
Docutils run. See there for motives and implementation details.
The category of some characters changed with the development of the
Unicode standard. The current lists are generated with the help of the
"unicodedata" module of Python 2.7.13 (based on Unicode version 5.2.0).
.. _inline markup recognition rules:
http://docutils.sf.net/docs/ref/rst/restructuredtext.html#inline-markup-recognition-rules
"""
openers = ('"\'(<\\[{\u0f3a\u0f3c\u169b\u2045\u207d\u208d\u2329\u2768'
'\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea'
'\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991'
'\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28'
'\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d'
'\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41'
'\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
'\xab\u2018\u201c\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
'\u201a\u201e\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d'
'\u2e1d\u2e21\u201b\u201f')
closers = ('"\')>\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a\u2769'
'\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb'
'\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992'
'\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29'
'\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e'
'\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42'
'\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
'\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
'\u201b\u201f\xab\u2018\u201c\u2039\u2e02\u2e04\u2e09\u2e0c'
'\u2e1c\u2e20\u201a\u201e')
delimiters = ('\\-/:\u058a\xa1\xb7\xbf\u037e\u0387\u055a-\u055f\u0589'
'\u05be\u05c0\u05c3\u05c6\u05f3\u05f4\u0609\u060a\u060c'
'\u060d\u061b\u061e\u061f\u066a-\u066d\u06d4\u0700-\u070d'
'\u07f7-\u07f9\u0830-\u083e\u0964\u0965\u0970\u0df4\u0e4f'
'\u0e5a\u0e5b\u0f04-\u0f12\u0f85\u0fd0-\u0fd4\u104a-\u104f'
'\u10fb\u1361-\u1368\u1400\u166d\u166e\u16eb-\u16ed\u1735'
'\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u180a\u1944\u1945'
'\u19de\u19df\u1a1e\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-'
'\u1b60\u1c3b-\u1c3f\u1c7e\u1c7f\u1cd3\u2010-\u2017\u2020-'
'\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-'
'\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe\u2cff\u2e00'
'\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e1b\u2e1e\u2e1f\u2e2a-'
'\u2e2e\u2e30\u2e31\u3001-\u3003\u301c\u3030\u303d\u30a0'
'\u30fb\ua4fe\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7'
'\ua874-\ua877\ua8ce\ua8cf\ua8f8-\ua8fa\ua92e\ua92f\ua95f'
'\ua9c1-\ua9cd\ua9de\ua9df\uaa5c-\uaa5f\uaade\uaadf\uabeb'
'\ufe10-\ufe16\ufe19\ufe30-\ufe32\ufe45\ufe46\ufe49-\ufe4c'
'\ufe50-\ufe52\ufe54-\ufe58\ufe5f-\ufe61\ufe63\ufe68\ufe6a'
'\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c-\uff0f\uff1a'
'\uff1b\uff1f\uff20\uff3c\uff61\uff64\uff65')
if sys.maxunicode >= 0x10FFFF: # "wide" build
delimiters += ('\U00010100\U00010101\U0001039f\U000103d0\U00010857'
'\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f'
'\U00010b39-\U00010b3f\U000110bb\U000110bc\U000110be-'
'\U000110c1\U00012470-\U00012473')
closing_delimiters = '\\\\.,;!?'
# Matching open/close quotes
# --------------------------
quote_pairs = {# open char: matching closing characters # usage example
'\xbb': '\xbb', # » » Swedish
'\u2018': '\u201a', # ‘ ‚ Albanian/Greek/Turkish
'\u2019': '\u2019', # ’ ’ Swedish
'\u201a': '\u2018\u2019', # ‚ ‘ German ‚ ’ Polish
'\u201c': '\u201e', # “ „ Albanian/Greek/Turkish
'\u201e': '\u201c\u201d', # „ “ German „ ” Polish
'\u201d': '\u201d', # ” ” Swedish
'\u203a': '\u203a', # › › Swedish
}
"""Additional open/close quote pairs."""
def match_chars(c1, c2):
"""Test whether `c1` and `c2` are a matching open/close character pair.
Matching open/close pairs are at the same position in
`punctuation_chars.openers` and `punctuation_chars.closers`.
The pairing of open/close quotes is ambiguous due to different
typographic conventions in different languages,
so we test for additional matches stored in `quote_pairs`.
"""
try:
i = openers.index(c1)
except ValueError: # c1 not in openers
return False
return c2 == closers[i] or c2 in quote_pairs.get(c1, '')
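# Usage sketch (the results follow directly from the tables above):
#     match_chars('(', ')')            # -> True, same index in openers/closers
#     match_chars('\u201e', '\u201d')  # -> True, matched via the quote_pairs table
#     match_chars('(', ']')            # -> False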
```
#### File: site-packages/jinja2/ext.py
```python
import re
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(with_metaclass(ExtensionRegistry, object)):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
    they choose for configuration are not too generic. ``prefix`` for example
    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as it includes the name of the extension (fragment cache).
"""
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
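# Illustrative sketch (not part of Jinja2 itself): a minimal custom extension can
# override only ``preprocess``, which receives and must return the raw template
# source before lexing, as described in the class docstring above.
#
#     class PassthroughExtension(Extension):
#         def preprocess(self, source, name, filename=None):
#             return source
#
#     env = Environment(extensions=[PassthroughExtension])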
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
    # situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
trimmed = None
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
trimmed = name.value == 'trimmed'
continue
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
if trimmed is None:
trimmed = self.environment.policies['ext.i18n.trimmed']
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
return _ws_re.sub(' ', string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
pass
class AutoEscapeExtension(Extension):
pass
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
    This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
This extraction function operates on the AST and is because of that unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in \
('1', 'on', 'yes', 'true')
silent = getbool(options, 'silent', True)
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'trimmed'):
environment.policies['ext.i18n.trimmed'] = True
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
```
#### File: site-packages/tests/test_embedded.py
```python
from collections import OrderedDict
import pytest
from django.test import TestCase
from mongoengine import Document, EmbeddedDocument, fields
from rest_framework import fields as drf_fields
from rest_framework.compat import unicode_repr
from rest_framework.serializers import Field, Serializer
from rest_framework_mongoengine.fields import DocumentField
from rest_framework_mongoengine.serializers import (
DocumentSerializer, EmbeddedDocumentSerializer
)
from .models import DumbEmbedded, OtherEmbedded
from .utils import dedent
class NestedEmbeddedDoc(EmbeddedDocument):
name = fields.StringField()
embedded = fields.EmbeddedDocumentField(DumbEmbedded)
class SelfEmbeddingDoc(EmbeddedDocument):
name = fields.StringField()
embedded = fields.EmbeddedDocumentField('self')
class EmbeddingDoc(Document):
embedded = fields.EmbeddedDocumentField(DumbEmbedded)
class NestedEmbeddingDoc(Document):
embedded = fields.EmbeddedDocumentField(NestedEmbeddedDoc)
class RequiredEmbeddingDoc(Document):
embedded = fields.EmbeddedDocumentField(DumbEmbedded, required=True)
class ListEmbeddingDoc(Document):
embedded_list = fields.EmbeddedDocumentListField(DumbEmbedded)
class RecursiveEmbeddingDoc(Document):
embedded = fields.EmbeddedDocumentField(SelfEmbeddingDoc)
class GenericEmbeddingDoc(Document):
embedded = fields.GenericEmbeddedDocumentField()
class TestEmbeddingMapping(TestCase):
    def test_embedded(self):
class TestSerializer(EmbeddedDocumentSerializer):
class Meta:
model = DumbEmbedded
expected = dedent("""
TestSerializer():
name = CharField(required=False)
foo = IntegerField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = NestedEmbeddingDoc
depth = 1
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
foo = IntegerField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_nodepth(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = NestedEmbeddingDoc
depth = 0
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
foo = IntegerField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_restricted(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = NestedEmbeddingDoc
depth_embedding = 1
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = HiddenField(default=None, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_recursive(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = RecursiveEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = HiddenField(default=None, required=False)
""")
serializer = TestSerializer()
assert unicode_repr(serializer) == expected
def test_embedding_recursive_restricted(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = RecursiveEmbeddingDoc
depth_embedding = 2
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = HiddenField(default=None, required=False)
""")
serializer = TestSerializer()
assert unicode_repr(serializer) == expected
def test_embedding_nested(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = NestedEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
embedded = EmbeddedSerializer(required=False):
name = CharField(required=False)
foo = IntegerField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_list(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = ListEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded_list = EmbeddedSerializer(many=True, required=False):
name = CharField(required=False)
foo = IntegerField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_required(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = RequiredEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=True):
name = CharField(required=False)
foo = IntegerField(required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_generic(self):
class TestSerializer(DocumentSerializer):
class Meta:
model = GenericEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = GenericEmbeddedDocumentField(model_field=<mongoengine.fields.GenericEmbeddedDocumentField: embedded>, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_custom_generic(self):
class CustomEmbedding(DocumentField):
pass
class TestSerializer(DocumentSerializer):
serializer_embedded_generic = CustomEmbedding
class Meta:
model = GenericEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = CustomEmbedding(model_field=<mongoengine.fields.GenericEmbeddedDocumentField: embedded>, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_custom_nested(self):
class CustomTestSerializer(Serializer):
bla = drf_fields.CharField()
class TestSerializer(DocumentSerializer):
serializer_embedded_nested = CustomTestSerializer
class Meta:
model = NestedEmbeddingDoc
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = EmbeddedSerializer(required=False):
bla = CharField()
""")
assert unicode_repr(TestSerializer()) == expected
def test_embedding_custom_bottom(self):
class CustomEmbedding(Field):
bla = drf_fields.CharField()
class TestSerializer(DocumentSerializer):
serializer_embedded_bottom = CustomEmbedding
class Meta:
model = NestedEmbeddingDoc
depth_embedding = 0
expected = dedent("""
TestSerializer():
id = ObjectIdField(read_only=True)
embedded = CustomEmbedding(default=None, required=False)
""")
assert unicode_repr(TestSerializer()) == expected
class EmbeddingSerializer(DocumentSerializer):
class Meta:
model = EmbeddingDoc
class NestedEmbeddingSerializer(DocumentSerializer):
class Meta:
model = NestedEmbeddingDoc
class TestEmbeddedIntegration(TestCase):
""" should work on isolated embedded docs """
def test_retrieve(self):
""" serializing standalone doc """
class TestSerializer(EmbeddedDocumentSerializer):
class Meta:
model = OtherEmbedded
instance = OtherEmbedded(name="qwe", bar=123)
serializer = TestSerializer(instance)
assert serializer.data == OrderedDict([('name', "qwe"), ('bar', 123)])
def test_create(self):
""" creating standalone instance """
class TestSerializer(EmbeddedDocumentSerializer):
class Meta:
model = OtherEmbedded
data = {'name': "qwe", 'bar': 123}
serializer = TestSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance, OtherEmbedded)
assert instance.name == "qwe"
assert instance.bar == 123
def test_update(self):
""" updating standalone instance with partial data """
class TestSerializer(EmbeddedDocumentSerializer):
class Meta:
model = OtherEmbedded
instance = OtherEmbedded(name="qwe", bar=123)
data = {'bar': 234}
serializer = TestSerializer(instance, data=data, partial=True)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance, OtherEmbedded)
assert instance.name == "qwe"
assert instance.bar == 234
class TestEmbeddingIntegration(TestCase):
def doCleanups(self):
EmbeddingDoc.drop_collection()
def test_retrieve(self):
instance = EmbeddingDoc.objects.create(
embedded=DumbEmbedded(name="Foo")
)
serializer = EmbeddingSerializer(instance)
expected = {
'id': str(instance.id),
'embedded': OrderedDict((('name', "Foo"), ('foo', None))),
}
assert serializer.data == expected
def test_create(self):
data = {
'embedded': {'name': "emb"}
}
serializer = EmbeddingSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance.embedded, DumbEmbedded)
assert instance.embedded.name == "emb"
expected = {
'id': str(instance.id),
'embedded': OrderedDict((('name', "emb"), ('foo', None))),
}
assert serializer.data == expected
def test_update(self):
instance = EmbeddingDoc.objects.create(
embedded=DumbEmbedded(name="emb", foo=123)
)
data = {
'embedded': {'foo': 321}
}
serializer = EmbeddingSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance.embedded, DumbEmbedded)
assert instance.embedded.name is None
assert instance.embedded.foo == 321
expected = {
'id': str(instance.id),
'embedded': OrderedDict((('name', None), ('foo', 321))),
}
assert serializer.data == expected
@pytest.mark.skipif(True, reason="TODO")
def test_update_partial(self):
pass
class TestNestedEmbeddingIntegration(TestCase):
def doCleanups(self):
NestedEmbeddingDoc.drop_collection()
def test_retrieve(self):
instance = NestedEmbeddingDoc.objects.create(
embedded=NestedEmbeddedDoc(
name='Foo',
embedded=DumbEmbedded(name="Bar")
)
)
serializer = NestedEmbeddingSerializer(instance)
expected = {
'id': str(instance.id),
'embedded': OrderedDict((
('name', "Foo"),
('embedded', OrderedDict((
('name', "Bar"),
('foo', None)
)))
)),
}
assert serializer.data == expected
def test_create(self):
data = {
'embedded': {
'name': 'Foo',
'embedded': {'name': "emb"}
}
}
serializer = NestedEmbeddingSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance.embedded, NestedEmbeddedDoc)
assert instance.embedded.name == "Foo"
assert isinstance(instance.embedded.embedded, DumbEmbedded)
assert instance.embedded.embedded.name == 'emb'
assert instance.embedded.embedded.foo is None
expected = {
'id': str(instance.id),
'embedded': OrderedDict((
('name', "Foo"),
('embedded', OrderedDict((('name', "emb"), ('foo', None))))
)),
}
assert serializer.data == expected
def test_update(self):
instance = NestedEmbeddingDoc.objects.create(
embedded=NestedEmbeddedDoc(
name='Foo',
embedded=DumbEmbedded(name="Bar")
)
)
data = {
'embedded': {
'name': 'Bar',
'embedded': {"foo": 321}
}
}
serializer = NestedEmbeddingSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance.embedded, NestedEmbeddedDoc)
assert instance.embedded.name == "Bar"
assert isinstance(instance.embedded.embedded, DumbEmbedded)
assert instance.embedded.embedded.name is None
assert instance.embedded.embedded.foo == 321
expected = {
'id': str(instance.id),
'embedded': OrderedDict((
('name', 'Bar'),
('embedded', OrderedDict((
('name', None),
('foo', 321)
)))
)),
}
assert serializer.data == expected
@pytest.mark.skipif(True, reason="TODO")
def test_update_partial(self):
pass
class ListEmbeddingSerializer(DocumentSerializer):
class Meta:
model = ListEmbeddingDoc
class TestListEmbeddingIntegration(TestCase):
def doCleanups(self):
ListEmbeddingDoc.drop_collection()
def test_retrieve(self):
instance = ListEmbeddingDoc.objects.create(
embedded_list=[DumbEmbedded(name="Foo"), DumbEmbedded(name="Bar")]
)
serializer = ListEmbeddingSerializer(instance)
expected = {
'id': str(instance.id),
'embedded_list': [
OrderedDict((('name', "Foo"), ('foo', None))),
OrderedDict((('name', "Bar"), ('foo', None)))
],
}
assert serializer.data == expected
def test_create(self):
data = {
'embedded_list': [
{'name': "Foo"},
{'foo': 123}
]
}
serializer = ListEmbeddingSerializer(data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance, ListEmbeddingDoc)
assert isinstance(instance.embedded_list[0], DumbEmbedded)
assert instance.embedded_list[0].name == "Foo"
assert instance.embedded_list[0].foo is None
assert instance.embedded_list[1].name is None
assert instance.embedded_list[1].foo == 123
expected = {
'id': str(instance.id),
'embedded_list': [
OrderedDict((('name', "Foo"), ('foo', None))),
OrderedDict((('name', None), ('foo', 123)))
]
}
assert serializer.data == expected
def test_update(self):
instance = ListEmbeddingDoc.objects.create(
embedded_list=[DumbEmbedded(name="Foo"), DumbEmbedded(name="Bar")]
)
data = {
'embedded_list': [
OrderedDict((('name', "Baz"), ('foo', 321)))
]
}
serializer = ListEmbeddingSerializer(instance, data=data)
assert serializer.is_valid(), serializer.errors
instance = serializer.save()
assert isinstance(instance, ListEmbeddingDoc)
assert isinstance(instance.embedded_list[0], DumbEmbedded)
assert len(instance.embedded_list) == 1
assert instance.embedded_list[0].name == "Baz"
assert instance.embedded_list[0].foo == 321
expected = {
'id': str(instance.id),
'embedded_list': [OrderedDict((('name', "Baz"), ('foo', 321)))],
}
assert serializer.data == expected
@pytest.mark.skipif(True, reason="TODO")
def test_update_partial(self):
pass
class ValidatingEmbeddedModel(EmbeddedDocument):
text = fields.StringField(min_length=3)
class ValidatingEmbeddingModel(Document):
embedded = fields.EmbeddedDocumentField(ValidatingEmbeddedModel)
class ValidatingSerializer(DocumentSerializer):
class Meta:
model = ValidatingEmbeddingModel
depth = 1
class ValidatingListEmbeddingModel(Document):
embedded_list = fields.EmbeddedDocumentListField(ValidatingEmbeddedModel)
class ValidatingListSerializer(DocumentSerializer):
class Meta:
model = ValidatingListEmbeddingModel
depth = 1
class TestEmbeddedValidation(TestCase):
def test_validation_failing(self):
serializer = ValidatingSerializer(data={'embedded': {'text': 'Fo'}})
assert not serializer.is_valid()
assert 'embedded' in serializer.errors
assert 'text' in serializer.errors['embedded']
def test_validation_passing(self):
serializer = ValidatingSerializer(data={'embedded': {'text': 'Text'}})
assert serializer.is_valid(), serializer.errors
def test_nested_validation_failing(self):
serializer = ValidatingListSerializer(data={'embedded_list': [{'text': 'Fo'}]})
assert not serializer.is_valid()
assert 'embedded_list' in serializer.errors
assert 'text' in serializer.errors['embedded_list']
def test_nested_validation_passing(self):
serializer = ValidatingListSerializer(data={'embedded_list': [{'text': 'Text'}]})
assert serializer.is_valid(), serializer.errors
```
#### File: site-packages/tests/test_generics.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework import permissions, status
from rest_framework.test import APIRequestFactory
from rest_framework_mongoengine import generics
from rest_framework_mongoengine.serializers import DocumentSerializer
from .models import DumbDocument
class DumbSerializer(DocumentSerializer):
class Meta:
model = DumbDocument
class ListView(generics.ListAPIView):
queryset = DumbDocument.objects
serializer_class = DumbSerializer
class BasicPermission(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.name != 'baz'
class RetrView(generics.RetrieveAPIView):
queryset = DumbDocument.objects
serializer_class = DumbSerializer
permission_classes = [BasicPermission]
class TestBasicViews(TestCase):
client_class = APIRequestFactory
def setUp(self):
"""
Create 3 DumbDocument instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
DumbDocument(name=item).save()
self.objects = DumbDocument.objects
self.data = [
{'id': str(obj.id), 'name': obj.name, 'foo': None}
for obj in self.objects.all()
]
    def doCleanups(self):
DumbDocument.drop_collection()
def test_list(self):
view = ListView.as_view()
request = self.client.get('/')
response = view(request).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data
def test_retr(self):
view = RetrView.as_view()
oid = self.objects[1].id
request = self.client.get('/' + str(oid))
response = view(request, id=oid).render()
assert response.status_code == status.HTTP_200_OK
assert response.data == self.data[1]
def test_retr_denied(self):
view = RetrView.as_view()
oid = self.objects[2].id
request = self.client.get('/' + str(oid))
response = view(request, id=oid).render()
assert response.status_code == status.HTTP_403_FORBIDDEN
```
#### File: site-packages/tests/test_patching.py
```python
from django.test import TestCase
from mongoengine import Document, fields
from rest_framework.test import APIRequestFactory, APITestCase
from rest_framework_mongoengine.contrib.patching import Patch, PatchModelMixin
from rest_framework_mongoengine.serializers import DocumentSerializer
from rest_framework_mongoengine.viewsets import GenericViewSet
from .models import DumbEmbedded
class PatchingDumbDocument(Document):
name = fields.StringField()
int_fld = fields.IntField()
lst_fld = fields.ListField()
dct_fld = fields.DictField()
intlst_fld = fields.ListField(fields.IntField())
intdct_fld = fields.MapField(fields.IntField())
emb = fields.EmbeddedDocumentField(DumbEmbedded)
emb_lst = fields.EmbeddedDocumentListField(DumbEmbedded)
class DumbSerializer(DocumentSerializer):
class Meta:
model = PatchingDumbDocument
class TestPatchParsing(TestCase):
def test_nonlist(self):
patch = Patch(data={'path': "/name", 'op': "set", 'value': "Foo"})
assert not patch.is_valid()
def test_nondict(self):
patch = Patch(data=["xxx"])
assert not patch.is_valid()
def test_incomplete(self):
patch = Patch(data=[{'path': "/name", 'value': "Foo"}])
assert not patch.is_valid()
def test_nonpath(self):
patch = Patch(data=[{'path': "name", 'value': "Foo"}])
assert not patch.is_valid()
def test_parsing_path(self):
patch = Patch(data=[
{'path': "/name", 'op': "set", 'value': None},
{'path': "/emb_fld/name", 'op': "set", 'value': None},
{'path': "/lst_fld/0", 'op': "set", 'value': None},
{'path': "/emb_fld/emb/emb/name", 'op': "set", 'value': None}
])
assert patch.is_valid(), patch.errors
expected = [
{'path': ("name", ), 'op': "set", 'value': None},
{'path': ("emb_fld", "name"), 'op': "set", 'value': None},
{'path': ("lst_fld", "0"), 'op': "set", 'value': None},
{'path': ("emb_fld", "emb", "emb", "name"), 'op': "set", 'value': None},
]
assert patch.validated_data == expected
def test_parsing_path_fail(self):
patch = Patch(DumbSerializer(), data=[
{'path': "/name", 'op': "set", 'value': None},
{'path': "/bla", 'op': "set", 'value': None},
{'path': "/name/bla", 'op': "set", 'value': None},
{'path': "/emb/name/bla", 'op': "set", 'value': None},
])
assert not patch.is_valid()
assert patch.errors == [[u'Not a valid string.'], {'path': "Missing elem: 'bla'"}, {'path': "Missing elem: 'bla'"}, {'path': "Missing elem: 'bla'"}]
def test_parsing_values(self):
patch = Patch(DumbSerializer(), data=[
{'path': "/name", 'op': "set", 'value': "123"},
{'path': "/int_fld", 'op': "set", 'value': "123"},
])
assert patch.is_valid(), patch.errors
expected = [
{'path': ("name",), 'op': "set", 'value': "123"},
{'path': ("int_fld",), 'op': "set", 'value': 123},
]
assert patch.validated_data == expected
def test_parsing_values_fail(self):
patch = Patch(DumbSerializer(), data=[
{'path': "/name", 'op': "set", 'value': "xxx"},
{'path': "/int_fld", 'op': "set", 'value': "xxx"},
])
assert not patch.is_valid()
assert patch.errors == [{}, ['A valid integer is required.']]
def test_parsing_nested_values(self):
patch = Patch(DumbSerializer(), data=[
{'path': "/emb/name", 'op': "set", 'value': "123"},
{'path': "/emb/foo", 'op': "set", 'value': "123"},
])
assert patch.is_valid(), patch.errors
expected = [
{'path': ("emb", "name"), 'op': "set", 'value': "123"},
{'path': ("emb", "foo"), 'op': "set", 'value': 123},
]
assert patch.validated_data == expected
def test_parsing_item_values(self):
patch = Patch(DumbSerializer(), data=[
{'path': "/lst_fld/1", 'op': "set", 'value': "123"},
{'path': "/intlst_fld/1", 'op': "set", 'value': "123"},
{'path': "/intlst_fld", 'op': "push", 'value': "123"},
])
assert patch.is_valid(), patch.errors
expected = [
{'path': ("lst_fld", "1"), 'op': "set", 'value': "123"},
{'path': ("intlst_fld", "1"), 'op': "set", 'value': 123},
{'path': ("intlst_fld",), 'op': "push", 'value': 123},
]
assert patch.validated_data == expected
def test_parsing_elem_values(self):
patch = Patch(DumbSerializer(), data=[
{'path': "/dct_fld/item", 'op': "set", 'value': "123"},
{'path': "/intdct_fld/item", 'op': "set", 'value': "123"}
])
assert patch.is_valid(), patch.errors
expected = [
{'path': ("dct_fld", "item"), 'op': "set", 'value': "123"},
{'path': ("intdct_fld", "item"), 'op': "set", 'value': 123}
]
assert patch.validated_data == expected
class TestPatchApplying(TestCase):
def doCleanups(self):
PatchingDumbDocument.drop_collection()
def test_patch_obj(self):
objects = [
PatchingDumbDocument.objects.create(name="dumb1", int_fld=1, lst_fld=['a', 'b', 'c'], emb=DumbEmbedded(name="emb1")),
PatchingDumbDocument.objects.create(name="dumb2", int_fld=2, lst_fld=['b', 'c', 'd'], emb=DumbEmbedded(name="emb2")),
PatchingDumbDocument.objects.create(name="dumb3", int_fld=3, lst_fld=['d', 'e', 'f'], emb=DumbEmbedded(name="emb3"))
]
patch = Patch(data=[{'path': '/int_fld', 'op': 'inc', 'value': 100},
{'path': '/lst_fld', 'op': 'push', 'value': 'z'},
{'path': '/dct_fld/foo', 'op': 'set', 'value': "f"},
{'path': '/dct_fld/bar', 'op': 'set', 'value': "b"},
{'path': '/emb/name', 'op': 'set', 'value': "Foo"}])
assert patch.is_valid(), patch.errors
obj = PatchingDumbDocument.objects.get(name="dumb2")
patch.update_queryset(obj)
for o in objects:
o.reload()
assert [o.int_fld for o in objects] == [1, 102, 3]
assert [o.lst_fld for o in objects] == [['a', 'b', 'c'], ['b', 'c', 'd', 'z'], ['d', 'e', 'f']]
assert [o.dct_fld for o in objects] == [{}, {'foo': 'f', 'bar': 'b'}, {}]
assert [o.emb.name for o in objects] == ["emb1", "Foo", "emb3"]
def test_patch_set(self):
objects = [
PatchingDumbDocument.objects.create(name="dumb1", int_fld=1, lst_fld=['a', 'b', 'c'], emb=DumbEmbedded(name="emb1")),
PatchingDumbDocument.objects.create(name="dumb2", int_fld=2, lst_fld=['b', 'c', 'd'], emb=DumbEmbedded(name="emb2")),
PatchingDumbDocument.objects.create(name="dumb3", int_fld=3, lst_fld=['d', 'e', 'f'], emb=DumbEmbedded(name="emb3"))
]
patch = Patch(data=[{'path': '/int_fld', 'op': 'inc', 'value': 100},
{'path': '/lst_fld', 'op': 'push', 'value': 'z'},
{'path': '/emb/name', 'op': 'set', 'value': "Foo"}])
assert patch.is_valid(), patch.errors
queryset = PatchingDumbDocument.objects.all()
patch.update_queryset(queryset)
for o in objects:
o.reload()
assert [o.int_fld for o in objects] == [101, 102, 103]
assert [o.lst_fld for o in objects] == [['a', 'b', 'c', 'z'], ['b', 'c', 'd', 'z'], ['d', 'e', 'f', 'z']]
assert [o.emb.name for o in objects] == ["Foo", "Foo", "Foo"]
def test_patch_matched(self):
objects = [
PatchingDumbDocument.objects.create(
name="dumb1",
emb_lst=[
DumbEmbedded(name="dumb1emb1", foo=11),
DumbEmbedded(name="dumb1emb2", foo=12),
DumbEmbedded(name="dumb1emb3", foo=13)
]
),
PatchingDumbDocument.objects.create(
name="dumb2",
emb_lst=[
DumbEmbedded(name="dumb2emb1", foo=21),
DumbEmbedded(name="dumb2emb2", foo=22),
DumbEmbedded(name="dumb2emb3", foo=23)
]
),
PatchingDumbDocument.objects.create(
name="dumb3",
emb_lst=[
DumbEmbedded(name="dumb3emb1", foo=31),
DumbEmbedded(name="dumb3emb2", foo=32),
DumbEmbedded(name="dumb3emb3", foo=33)
]
),
]
patch = Patch(data=[{'path': "/emb_lst/S/name", 'op': 'set', 'value': "winner"}])
assert patch.is_valid(), patch.errors
queryset = PatchingDumbDocument.objects.filter(emb_lst__foo=22)
patch.update_queryset(queryset)
for o in objects:
o.reload()
for o in objects:
for e in o.emb_lst:
assert e.foo != 22 or e.name == "winner"
class TestView(PatchModelMixin, GenericViewSet):
serializer_class = DumbSerializer
queryset = PatchingDumbDocument.objects
class TestPatchingView(APITestCase):
client_class = APIRequestFactory
def doCleanups(self):
PatchingDumbDocument.drop_collection()
def test_patch_obj(self):
objects = [
PatchingDumbDocument.objects.create(name="dumb1", lst_fld=['a', 'b', 'c']),
PatchingDumbDocument.objects.create(name="dumb2", lst_fld=['b', 'c', 'd']),
PatchingDumbDocument.objects.create(name="dumb3", lst_fld=['d', 'e', 'f'])
]
patch = [{'path': '/lst_fld', 'op': 'push', 'value': 'z'}]
view = TestView.as_view({'patch': 'modify_obj'})
req = self.client.patch("", patch, format='json')
res = view(req, id=objects[1].id)
assert res.status_code == 204
for o in objects:
o.reload()
assert [o.lst_fld for o in objects] == [['a', 'b', 'c'], ['b', 'c', 'd', 'z'], ['d', 'e', 'f']]
def test_patch_set(self):
objects = [
PatchingDumbDocument.objects.create(name="dumb1", lst_fld=['a', 'b', 'c']),
PatchingDumbDocument.objects.create(name="dumb2", lst_fld=['b', 'c', 'd']),
PatchingDumbDocument.objects.create(name="dumb3", lst_fld=['d', 'e', 'f'])
]
patch = [{'path': '/lst_fld', 'op': 'push', 'value': 'z'}]
view = TestView.as_view({'patch': 'modify_set'})
req = self.client.patch("", patch, format='json')
res = view(req)
assert res.status_code == 204
for o in objects:
o.reload()
assert [o.lst_fld for o in objects] == [['a', 'b', 'c', 'z'], ['b', 'c', 'd', 'z'], ['d', 'e', 'f', 'z']]
```
#### File: site-packages/tests/test_validation.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework import serializers
from rest_framework_mongoengine.serializers import DocumentSerializer
from .models import DumbDocument
class ValidationMethodSerializer(DocumentSerializer):
class Meta:
model = DumbDocument
def validate_name(self, value):
if len(value) < 3:
raise serializers.ValidationError('Minimum 3 characters.')
return value.title()
class RenamedValidationMethodSerializer(DocumentSerializer):
class Meta:
model = DumbDocument
renamed = serializers.CharField(source='name', required=False)
def validate_renamed(self, value):
if len(value) < 3:
raise serializers.ValidationError('Minimum 3 characters.')
return value.title()
def custom_field_validator(value):
if len(value) < 3:
raise serializers.ValidationError('Minimum 3 characters.')
# cannot transform value
class FieldValidatorSerializer(DocumentSerializer):
class Meta:
model = DumbDocument
name = serializers.CharField(validators=[custom_field_validator])
def custom_model_validator(data):
if len(data['name']) < 3:
raise serializers.ValidationError('Minimum 3 characters.')
class ModelValidatorSerializer(DocumentSerializer):
class Meta:
model = DumbDocument
validators = [custom_model_validator]
class TestValidating(TestCase):
def test_validation_method_is_executed(self):
serializer = ValidationMethodSerializer(data={'name': "fo"})
assert not serializer.is_valid()
assert 'name' in serializer.errors
def test_validation_method_passing(self):
serializer = ValidationMethodSerializer(data={'name': "foo"})
assert serializer.is_valid(), serializer.errors
assert serializer.validated_data['name'] == "Foo"
def test_renamed_validation_method_is_executed(self):
serializer = RenamedValidationMethodSerializer(data={'renamed': "fo"})
assert not serializer.is_valid()
assert 'renamed' in serializer.errors
def test_renamed_validation_method_passing(self):
serializer = RenamedValidationMethodSerializer(data={'renamed': "foo"})
assert serializer.is_valid(), serializer.errors
assert serializer.validated_data['name'] == "Foo"
def test_validator_is_executed(self):
serializer = FieldValidatorSerializer(data={'name': "fo"})
assert not serializer.is_valid()
assert 'name' in serializer.errors
def test_validator_passing(self):
serializer = FieldValidatorSerializer(data={'name': "foo"})
assert serializer.is_valid(), serializer.errors
def test_validators_is_executed(self):
serializer = ModelValidatorSerializer(data={'name': "fo"})
assert not serializer.is_valid()
assert 'non_field_errors' in serializer.errors
def test_validators_passing(self):
serializer = ModelValidatorSerializer(data={'name': "foo"})
assert serializer.is_valid(), serializer.errors
``` |
{
"source": "72616e646f6d/config_database",
"score": 3
} |
#### File: 72616e646f6d/config_database/config.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from schema import Config, Item, Base
class DatabaseConfig():
def __init__(self, config_name, database_name='config.db'):
self.engine = create_engine('sqlite:///{}'.format(database_name))
Config.metadata.create_all(self.engine)
self.sessionmaker = sessionmaker(bind=self.engine)
self.config_name = config_name
config = self.get_or_create_config(config_name)
self.config_id = config.id
def get(self, keyname):
session = self.sessionmaker()
result = session.query(Item.value).filter(Item.config_id == self.config_id).filter(Item.key == keyname).first()
session.close()
return result[0]
def set(self, keyname, value):
self.set_or_insert_item(keyname, value)
def set_or_insert_item(self, keyname, value):
session = self.sessionmaker()
        item = session.query(Item).filter(Item.config_id == self.config_id).filter(Item.key == keyname).first()
if item:
item.value = value
else:
item = Item(config_id=self.config_id, key=keyname, value=value)
session.add(item)
session.commit()
session.close()
def get_or_create_config(self, config_name):
session = self.sessionmaker()
instance = session.query(Config).filter(Config.name == self.config_name).first()
if instance:
return instance
else:
instance = Config(name=self.config_name)
session.add(instance)
session.commit()
return instance
def __del__(self):
self.sessionmaker.close_all()
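# Illustrative usage sketch (not part of the original module). It relies only on
# the DatabaseConfig API defined above; the Config/Item columns come from how
# they are queried in this file.
if __name__ == "__main__":
    cfg = DatabaseConfig("my_app", database_name="config.db")
    cfg.set("theme", "dark")    # inserts a new Item row
    cfg.set("theme", "light")   # updates the existing Item row
    print(cfg.get("theme"))     # -> "light"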
``` |
{
"source": "729557989/Email-Intent-Classifier",
"score": 3
} |
#### File: 729557989/Email-Intent-Classifier/text_preprocessing.py
```python
import pandas as pd
import re
from bs4 import BeautifulSoup
from string import ascii_letters
# Here is the dictionary that we will use for expanding the contractions:
contraction_mapping = {"ain't": "is not",
"aren't": "are not",
"can't": "cannot",
"'cause": "because",
"could've": "could have", "couldn't": "could not",
"didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
"he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
"I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would",
"i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
"it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam",
"mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have",
"mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock",
"oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
"she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
"should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as",
"this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have",
"they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
"wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are",
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
"what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have",
"would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all",
"y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
"you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
"you're": "you are", "you've": "you have"}
def drop_columns(df, columns):
'''
Purpose: drop useless columns in dataframe (df)
Parameters: the dataframe (df),
                the columns you want to drop within the dataframe.
    Note: columns should be a list containing the unwanted column names.
Returns: the cleaned dataframe (df)
'''
for column in columns:
df = df.drop([column], axis=1)
return df
# remove tabs ("\n")
def remove_tab(df, column_loc):
'''
Purpose: remove tabs ("\n") in the dataframe's sentences
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
df[column_loc] = [re.sub(r'\n', ' ', str(text)) for text in df[column_loc]]
return df
# map contractions
def map_contraction(df, column_loc):
'''
Purpose: map contraction for sentences in the dataframe (df)
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
df[column_loc] = [" ".join([contraction_mapping[t] if t in contraction_mapping else t for t in str(text).split(" ")]) for text in df[column_loc]]
return df
# remove html tags
def remove_html(df, column_loc):
'''
Purpose: remove html related tags for sentences in the dataframe (df)
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
df[column_loc] = [BeautifulSoup(str(text), "lxml").text for text in df[column_loc]]
return df
# remove urls and gmails
def remove_urls_and_gmails(df, column_loc):
'''
    Purpose: remove URLs and email addresses from sentences in the dataframe (df)
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
df[column_loc] = [re.sub('http://\S+|https://\S+', ' ', str(text)) for text in df[column_loc]]
# this method cleans the special links:
# ai-camp.org/ambassador/ashley_forrest
df[column_loc] = [re.sub(r"(https:/www.)|ai-camp.org/.+?/.[a-z]+|_.[a-z]+", " ", str(text)) for text in df[column_loc]]
df[column_loc] = [re.sub(r"www.+?.[a-z]+.com", " ", str(text)) for text in df[column_loc]]
df[column_loc] = [re.sub(r"<EMAIL>", " ", str(text)) for text in df[column_loc]]
return df
# lower casing
def lower_case(df, column_loc):
'''
Purpose: apply lower casing method for sentences in the dataframe (df)
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
# assumes the column has no null
df[column_loc] = [str(text).lower() for text in df[column_loc]]
return df
# remove punctuations
allowed = set(ascii_letters + ' ')
def remove_punctuation(df, column_loc):
'''
Purpose: remove punctuation for sentences in the dataframe (df)
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
df[column_loc] = ["".join([c if c in allowed else ' ' for c in str(text)]) for text in df[column_loc]]
return df
# remove extra spaces
def remove_extra_spaces(df, column_loc):
'''
Purpose: remove extra spaces (" ") for sentences in the dataframe (df)
Parameters: the dataframe (df),
the column location of where the sentences are stored (column_loc)
Returns: the cleaned dataframe (df)
'''
df[column_loc] = [re.sub(r'[" "]+', " ", str(text)) for text in df[column_loc]]
return df
# function to clean data
def clean_df(df, column_loc, drop_cols=None):
'''
Purpose: apply text preprocessing to the raw csv dataframe
Parameters: the raw dataframe (df),
                the column location of where the sentences are stored (column_loc),
                the columns to drop before cleaning (drop_cols, optional).
Returns: the cleaned dataframe (df).
'''
if drop_cols != None:
df = drop_columns(df, drop_cols)
df = df[df[column_loc].isna() == False]
df = remove_tab(df, column_loc)
df = map_contraction(df, column_loc)
df = remove_html(df, column_loc)
df = remove_urls_and_gmails(df, column_loc)
df = remove_punctuation(df, column_loc)
df = remove_extra_spaces(df, column_loc)
df = lower_case(df, column_loc)
return df
def clean_texts(sents):
"""
Purpose: take in a list of string sentences needed to be preprocessed before AI model prediction
Params: sents, a list of string sentences
Returns: a list of preprocessed sentences that is preprocessed, but NOT tokenized yet.
"""
temp_df = pd.DataFrame()
temp_df['sents'] = sents
    temp_df = clean_df(temp_df, 'sents')
return list(temp_df['sents'].values)
def max_length(texts):
'''
    Purpose: return the max length among the sentence arrays
Params: 2d array of string.
Returns: an integer representing the max_length of the sentence arrays
'''
return max(len(t) for t in texts)
def map_labels(label):
"""
Purpose: a function used for the pandas library's ".apply()" method
to convert all the specific labels in the dataframe into general labels
Params: label(string) -> the label from every single row of the dataframe column
Returns: a general label (string)
"""
others = ['ads', 'unique_questions', 'starting_clubs', 'contact_management']
program_info = ['course_schedule', 'content', 'reschedule']
registration = ['course_availability', 'application_deadlines', 'payment_confirmation', 'website_navigation', 'account_edits', 'progress_or_spots']
program_logistics = ['zoom_links', 'zoom_recordings', 'cancel', 'other_urls']
monetary_issues = ['taxes', 'payment', 'refund']
scholarship = ['apply_scholarship', 'info_about_scholarship']
if label in others:
label = "others"
elif label in program_info:
label = "program_info"
elif label in registration:
label = "registration"
elif label in program_logistics:
label = "program_logistics"
elif label in monetary_issues:
label = "monetary_issues"
elif label in scholarship:
label = "scholarship"
elif label == 'unactionable':
label = 'unactionable'
return label
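# Illustrative usage sketch (not part of the original module); the example
# sentences are made up and only exercise the helpers defined above.
if __name__ == "__main__":
    raw = ["I can't wait!\nVisit https://example.com", "Refund my payment ASAP"]
    print(clean_texts(raw))       # lower-cased, contractions expanded, URL and punctuation stripped
    print(map_labels("refund"))   # -> "monetary_issues"
    print(map_labels("content"))  # -> "program_info"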
```
#### File: 729557989/Email-Intent-Classifier/train.py
```python
from eval import flat_accuracy, eval
import torch
import tqdm
# Disable HuggingFace Warning
import logging
logging.disable(logging.WARNING)
logging.getLogger("pytorch_pretrained_bert.tokenization").setLevel(logging.ERROR)
import warnings
warnings.filterwarnings('ignore')
def training(model, epochs, batch_size, device, optimizer, scheduler,
train_dataloader, valid_dataloader, saving_path=None, save_best_weights=False,
model_class=None, comparison_metric='loss', data_compared='valid',
transfer_learning=True, get_training_stats=False,
new_learining_rate=False, patience=3):
"""
Purpose: Train the A.I. Model and save it
Params: 1. model (BertForSequenceClassification):
- The A.I. for training
2. epochs (integer):
- Number of training epochs
3. batch_size (integer):
- Batch_size, or number of training examples utilized in one iteration
4. device (torch.device):
- What device to use for A.I. training, generally 'cpu' or 'cuda'
5. optimizer (transformers.optimization.AdamW):
- The optimizer for the A.I. model to reduce loss
6. scheduler (torch.optim.lr_scheduler.LambdaLR):
- The scheduler that lowers learning rate throughout training
7. train_dataloader (torch.utils.data.dataloader.DataLoader):
- The dataloader containing the train data for training
8. valid_dataloader (torch.utils.data.dataloader.DataLoader):
- The dataloader containing the validation data for training
9. saving_path (string):
               - The path where the model's weights are stored, a so-called checkpoint
            10. save_best_weights (boolean):
               - Whether or not to save only the model with the best weights throughout training
               - NOTE: this functionality should be used in conjunction with:
12. comparison_metric, 13. data_compared
11. model_class (BertUncased):
- The class object for torch.save() to save the model's weights
12. comparison_metric (string):
               - What metric is utilized to determine whether a model has the best weights for 10. save_best_weights
- This could be either 'loss' (loss), or 'acc' (accuracy)
               - NOTE: this functionality should be used in conjunction with:
12. comparison_metric, 13. data_compared
13. data_compared (string):
- What data is utilized to determine if a model has the best weights for 10. save_best_weights
- This could be either 'train' (training data), or 'valid' (validation data)
               - NOTE: this functionality should be used in conjunction with:
12. comparison_metric
13. data_compared
14. transfer_learning (boolean):
               - Whether or not to continue training the saved model located at the saving_path
- NOTE: must provide the saving_path to use this functionality
15. get_training_stats (boolean):
- Whether to return a list of training stats, such as loss & accuracy for plotting
            16. new_learining_rate (boolean):
               - Whether to start with a fresh learning rate instead of restoring the saved optimizer and LrScheduler settings from the checkpoint at 9. saving_path
17. patience (integer):
               - The number of epochs to wait before early stopping if the model is considered to be making no progress,
                 as determined by 12. comparison_metric & 13. data_compared.
               - NOTE: this functionality should be used in conjunction with:
10. save_best_weights
12. comparison_metric
13. data_compared
Returns: a list of training stats (refer to 15.) if get_training_stats is set to True, or else return nothing
"""
assert comparison_metric == 'loss' or comparison_metric == 'acc'
    assert data_compared == 'valid' or data_compared == 'train'
if transfer_learning == True:
assert saving_path != None
# Cast the A.I. model to utilize device
model.to(device)
training_stats = []
patience_counter = 0
# Epoch training loops
for epoch in range(1, epochs+1):
if save_best_weights==True and saving_path != None:
# Clear unused GPU memories
torch.cuda.empty_cache()
# Load the checkpoint weights from saving_path
if epoch > 1 or transfer_learning==True:
checkpoint = torch.load(saving_path, map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
if new_learining_rate == False and epoch == 1:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint["LrScheduler_state_dict"])
prev_metric = checkpoint[data_compared+'_'+comparison_metric]
# Cast the A.I. model to utilize device
model.to(device)
# Clear unused GPU memories
torch.cuda.empty_cache()
# Reset computed accuracy and loss
total_train_acc = 0
total_train_loss = 0
# Cast the model to training mode
model.train()
print("<"+"-"*80+">")
print("Current Lr: {}, Initial_Lr: {}".format(
optimizer.state_dict()['param_groups'][-1]['lr'],
optimizer.state_dict()['param_groups'][-1]['initial_lr']
))
# Make forward and backward propagations
for idx, batch in tqdm.tqdm(enumerate(train_dataloader),
total=len(train_dataloader.dataset)/batch_size,
ascii=True,
desc="Model Training"):
input_seqs = batch[0].to(device)
attention_masks = batch[1].to(device)
labels = batch[2].to(device)
# Clear any previously calculated gradients before performing a backward pass.
model.zero_grad()
# Perform a forward propagation
output = model(input_seqs,
token_type_ids=None,
attention_mask=attention_masks,
labels=labels,
return_dict=True)
# Calculate the accuracy of our predictions vs labels
logits = output.logits
logits = logits.detach().cpu().numpy()
label_ids = labels.to('cpu').numpy()
total_train_acc += flat_accuracy(logits, label_ids)
# Accumulate training loss
loss = output.loss
total_train_loss += loss.item()
# Back propagate to get gradients
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient
optimizer.step()
# Update learning rates
scheduler.step()
# Compute average loss and accuracies
avg_train_loss = total_train_loss / len(train_dataloader)
avg_train_acc = total_train_acc / len(train_dataloader)
avg_valid_acc, avg_valid_loss = eval(model, device, valid_dataloader, batch_size)
# Save best weights happen here
if epoch > 1 or transfer_learning == True:
if data_compared == 'train' and comparison_metric == 'loss':
curr_metric = avg_train_loss
elif data_compared == 'train' and comparison_metric == 'acc':
curr_metric = avg_train_acc
elif data_compared == 'valid' and comparison_metric == 'loss':
curr_metric = avg_valid_loss
elif data_compared == 'valid' and comparison_metric == 'acc':
curr_metric = avg_valid_acc
if saving_path != None:
if save_best_weights==True:
                # Prerequisites: a saving path exists and save_best_weights is set to True
# If this isn't transfer learning: always save the model during first epoch
if epoch == 1 and transfer_learning==False:
save_checkpoint(
model, model_class,
avg_train_loss, avg_valid_loss, avg_train_acc, avg_valid_acc,
optimizer, scheduler, saving_path
)
print("Model Saved For First Epoch!")
# If transfer_learning is set to True or this isn't the first epoch,
# then save the weights if the current model is considered as 'better' than the previous one.
elif (comparison_metric=='loss' and (curr_metric < prev_metric)) or (comparison_metric=='acc' and (curr_metric > prev_metric)):
save_checkpoint(
model, model_class,
avg_train_loss, avg_valid_loss, avg_train_acc, avg_valid_acc,
optimizer, scheduler, saving_path
)
print("Weights Updated and Saved! {} {} improved from {} to {}".format(
data_compared, comparison_metric, prev_metric, curr_metric
))
patience_counter = 0
# If not, don't save the weights and move on to the next epoch.
else:
print("Weights weren't updated")
patience_counter += 1
            # Always save the model if a saving_path is given but save_best_weights is not set
else:
save_checkpoint(
model, model_class,
avg_train_loss, avg_valid_loss, avg_train_acc, avg_valid_acc,
optimizer, scheduler, saving_path
)
print("Model Saved")
# Report progress with accuracy, loss metrics
print(f"Epoch: {epoch}\nTrain Eval: Avg.Acc: {avg_train_acc}, Avg.Loss: {avg_train_loss}")
print(f"Valid Eval: Avg.Acc: {avg_valid_acc}, Avg.Loss: {avg_valid_loss}")
# If get_training_stats is set to True, append the stats at the end of every epoch
if get_training_stats == True:
training_stats.append(
{
'epoch': epoch,
'Training Loss': avg_train_loss,
'Training Accur.': avg_train_acc,
'Valid. Loss': avg_valid_loss,
'Valid. Accur.': avg_valid_acc
}
)
        # If the model hasn't improved for 'patience' epochs, end the training early
if patience != None and patience_counter == patience:
            print(f"Model hasn't improved for {patience_counter} epochs, early stopping now")
# Return the list of training stats if get_training_stats is set to True
if get_training_stats == True:
return training_stats
else:
return
# Return the list of training stats if get_training_stats is set to True
if get_training_stats == True:
return training_stats
def save_checkpoint(model, model_class,
train_loss, valid_loss, train_acc, valid_acc,
optimizer, scheduler, saving_path):
"""
Purpose: Save model weights
Params: 1. model (BertForSequenceClassification):
- The A.I. model
2. model_class (BertUncased):
- The class_object where the 1. model was stored at
3. train_loss (float):
- The A.I. model's training loss
4. valid_loss (float):
- The A.I. model's validation loss
5. train_acc (float):
- The A.I. model's training accuracy
6. valid_acc (float):
- The A.I. model's validation accuracy
7. optimizer (transformers.optimization.AdamW):
- The A.I. model's optimizer object
8. scheduler (torch.optim.lr_scheduler.LambdaLR):
- The A.I. model's scheduler object
9. saving_path (string):
- The path to save the weights, AKA the checkpoint
Returns: Nothing
"""
checkpoint = {'model_object': model_class,
'train_loss': train_loss,
'valid_loss': valid_loss,
'train_acc': train_acc,
'valid_acc': valid_acc,
'model_state_dict': model.state_dict(),
'optimizer_state_dict':optimizer.state_dict(),
'LrScheduler_state_dict':scheduler.state_dict()}
torch.save(checkpoint, saving_path)
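# Illustrative sketch of restoring a checkpoint written by save_checkpoint() above
# (not part of the original module). The dictionary keys are exactly the ones
# saved above; model, optimizer, scheduler and path are assumed to be the same
# kinds of objects passed to training().
def load_checkpoint(model, optimizer, scheduler, path, device):
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    scheduler.load_state_dict(checkpoint['LrScheduler_state_dict'])
    return checkpoint['valid_acc'], checkpoint['valid_loss']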
``` |
{
"source": "72nd/deck-cli",
"score": 3
} |
#### File: deck_cli/deck/fetch.py
```python
from deck_cli.deck.models import NCBoard, NCBaseBoard, NCDeckCard, NCDeckStack, NCCardPost, NCDeckAssignedUser, NCCardAssignUserRequest
from collections.abc import Callable
import xml.etree.ElementTree as ET
from typing import List
import requests
ALL_USER_IDS_URL = "/ocs/v1.php/cloud/users"
USER_DETAILS_URL = "ocs/v1.php/cloud/users/{user_uuid}"
DECK_APP_URL = "apps/deck/api/v1.0"
ALL_USER_BOARDS_URL = "boards"
SINGLE_BOARD_URL = "boards/{board_id}"
ALL_STACKS_URL = "boards/{board_id}/stacks"
SINGLE_CARD_URL = "boards/{board_id}/stacks/{stack_id}/cards/{card_id}"
SINGLE_CARD_POST_URL = "boards/{board_id}/stacks/{stack_id}/cards"
ASSIGN_USER_TO_CARD_URL = "boards/{board_id}/stacks/{stack_id}/cards/{card_id}/assignUser"
ProgressCallback = Callable[[int, int, str], None]
"""
Called by the Fetch class before doing a request. Can be used to inform the
user about the progress. The following parameters are provided:
1. The number of the current step.
2. The total number of steps needed to complete the task. 0 if unknown.
3. A short description of the task.
"""
class NextcloudException(Exception):
"""Catches Nextcloud API errors."""
def __init__(self, root: ET):
code = root.find("./meta/statuscode").text
message = root.find("./meta/message").text
Exception.__init__(self, "{} ({})".format(message, code))
class Fetch:
"""
Contains all calls to the Nextcloud and Deck API.
The progress_callback can be used to display a update to the user
when doing multiple API calls at once.
"""
base_url: str
user: str
password: str
progress_callback: ProgressCallback
def __init__(
self,
base_url: str,
user: str,
        password: str,
progress_callback: ProgressCallback = lambda *args: None
):
self.base_url = base_url
self.user = user
self.password = password
self.progress_callback = progress_callback
def all_boards(self) -> List[NCBoard]:
"""Returns all boards for the given user."""
self.progress_callback(1, 1, "requests overview over all boards")
data = self.__send_get_request(
self.__deck_api_url(ALL_USER_BOARDS_URL))
return NCBoard.from_json(data, True)
def all_boards_with_stacks(self) -> List[NCBoard]:
"""
Returns all boards for the given user, fetches for all Boards their
Stacks and inserts them into the resulting data structure.
"""
self.progress_callback(1, 0, "requests overview over all boards")
data = self.__send_get_request(
self.__deck_api_url(ALL_USER_BOARDS_URL))
boards = NCBoard.from_json(data, True)
i: int = 1
for board in boards:
self.progress_callback(
i, len(boards),
"request stacks for {} board".format(board.title))
board.stacks = self.stacks_by_board(board.board_id)
i += 1
return boards
def board_by_id(self, board_id: int) -> NCBaseBoard:
"""Returns a board by a given board id."""
data = self.__send_get_request(
self.__deck_api_url(SINGLE_BOARD_URL.format(board_id=board_id)))
return NCBaseBoard.from_json(data, False)
def stacks_by_board(self, board_id: int) -> List[NCDeckStack]:
"""Returns all stacks of a given board with the given id."""
data = self.__send_get_request(
self.__deck_api_url(ALL_STACKS_URL.format(board_id=board_id)))
return NCDeckStack.from_json(data, True)
def user_ids(self) -> List[str]:
"""
Returns a list of Nextcloud's user ids also known as user-names in the
web front-end.
"""
data = self.__send_get_request(
"{}/{}".format(self.base_url, ALL_USER_IDS_URL)
)
root = ET.fromstring(data)
if root.find("./meta/status").text == "failure":
raise NextcloudException(root)
return [x.text for x in root.find("./data/users")]
def user_mail(self, name: str) -> str:
"""
        Returns the e-mail address of the user with the given user name.
"""
api_url = USER_DETAILS_URL.format(user_uuid=name)
data = self.__send_get_request("{}/{}".format(self.base_url, api_url))
root = ET.fromstring(data)
return root.find("./data/email").text
def add_card(
self,
board_id: int,
stack_id: int,
card: NCCardPost,
) -> NCDeckCard:
"""Adds a given card to the Deck via the API."""
api_url = self.__deck_api_url(
SINGLE_CARD_POST_URL.format(
board_id=board_id,
stack_id=stack_id,
)
)
rsl = self.__send_post_request(api_url, card.dumps())
return NCDeckCard.from_json(rsl, False)
def assign_user_to_card(
self,
board_id: int,
stack_id: int,
card_id: int,
user_uid: str
) -> NCDeckAssignedUser:
"""Assign a User with a given uid (Nextlcoud user name) to a card."""
api_url = self.__deck_api_url(
ASSIGN_USER_TO_CARD_URL.format(
board_id=board_id,
stack_id=stack_id,
card_id=card_id,
)
)
body = NCCardAssignUserRequest(user_id=user_uid)
rsl = self.__send_put_request(api_url, body.dumps())
return NCDeckAssignedUser.from_json(rsl, False)
def __send_get_request(self, url: str) -> str:
"""
Calls a Nextcloud/Deck API with the given URL and returns
the answer as a string.
"""
rqs = requests.get(
url,
headers=self.__request_header(),
auth=(self.user, self.password)
)
return rqs.text
def __send_put_request(self, url: str, data) -> str:
"""Send a PUT Request to the API with a given data body."""
rqs = requests.put(
url,
data=data,
headers=self.__request_header(),
auth=(self.user, self.password)
)
return rqs.text
def __send_post_request(self, url: str, data) -> str:
"""Send a POST Request to the API with a given data body."""
rqs = requests.post(
url,
data=data,
headers=self.__request_header(),
auth=(self.user, self.password)
)
return rqs.text
def __deck_api_url(self, postfix: str) -> str:
"""Returns the Deck API URL with a given postfix."""
return "{}/{}/{}".format(self.base_url, DECK_APP_URL, postfix)
def __request_header(self) -> dict[str, str]:
"""Retruns the request header for all Deck API calls."""
return {
"OCS-APIRequest": "true",
"Content-Type": "application/json",
}
``` |
{
"source": "72nd/pydantic-xlsx",
"score": 3
} |
#### File: pydantic-xlsx/pydantic_xlsx/conversion.py
```python
from abc import ABCMeta, abstractmethod
from typing import Generic, Optional, TypeVar
from openpyxl.cell.cell import Cell
T = TypeVar('T')
"""A conversion object is always implemented for a specific type."""
class Conversion(Generic[T], metaclass=ABCMeta):
"""
Defines a type/class which a specific representation in the Xlsx file. Use
the `ConversionFactory` to obtain the type class for your type.
"""
@abstractmethod
def field_value(cell: Cell) -> T:
"""
Converts the content of the Excel cell into the type defined by the
class. This value can then be used to populate the model field.
"""
pass
@abstractmethod
def populate_cell(cell: Cell, value: T):
"""
        Populates a given Xlsx cell with the given value. The
representation of this value is defined by the Type subclass.
"""
pass
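# Illustrative sketch of a concrete Conversion (not part of the original module):
# a plain-string conversion that relies only on openpyxl's `cell.value` attribute.
# The conversions actually shipped by the library may look different.
class StrConversion(Conversion[str]):
    def field_value(self, cell: Cell) -> str:
        # Read whatever openpyxl parsed and coerce it to a string.
        return "" if cell.value is None else str(cell.value)
    def populate_cell(self, cell: Cell, value: str):
        # Write the value back verbatim.
        cell.value = value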
class ConversionFactory:
"""
Creates the correct Conversion implementation based on a given pydantic
model field.
"""
@classmethod
def from_field(
cls,
field: T,
) -> Optional[Conversion]:
"""
        Determines the matching Conversion implementation for a given pydantic
        field (type). If there is no implementation, None will be returned.
"""
```
#### File: pydantic-xlsx/pydantic_xlsx/fields.py
```python
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, Optional, Type, TypeVar
from openpyxl.styles import Font
from pydantic.fields import FieldInfo, Undefined
from .types import Money
class XlsxFieldInfo(FieldInfo):
"""
Extends pydantic's Field class for some extra functionality (e.g. cell
formatting).
"""
__slots__ = (
"font",
"number_format",
)
def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
super().__init__(default, **kwargs)
self.font = kwargs.pop('font', None)
self.number_format = kwargs.pop("number_format", None)
def XlsxField(
default: Any = Undefined,
*,
font: Optional[Font] = None,
number_format: Optional[str] = None,
**kwargs,
) -> Any:
"""
A field for extra formatting etc. The styles defined by a field will be
applied to the whole column.
"""
field_info = XlsxFieldInfo(
default,
font=font,
number_format=number_format,
**kwargs,
)
return field_info
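# Illustrative usage sketch (not part of the original module): how XlsxField
# might be declared on a pydantic model. "Report" and the "XlsxModel" base class
# are hypothetical names; only XlsxField and openpyxl's Font are taken from the
# code above.
#
#     from openpyxl.styles import Font
#
#     class Report(XlsxModel):  # hypothetical model base class
#         title: str = XlsxField("", font=Font(bold=True))
#         amount: float = XlsxField(0.0, number_format="#,##0.00")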
T = TypeVar('T')
class FieldTypeInfo(Generic[T], metaclass=ABCMeta):
"""
Some `XlsxField` settings can be derived from certain field types like
`types.Money`.
"""
field_type = T
def __init__(self, field_type: Type[T]) -> None:
self.field_type = field_type
@abstractmethod
def field_info(self) -> XlsxFieldInfo:
"""Returns `XlsxFieldInfo` based on the Field type."""
pass
class MoneyFieldInfo(FieldTypeInfo[Money]):
def field_info(self) -> XlsxFieldInfo:
return XlsxFieldInfo(number_format=self.field_type.number_format())
class FieldTypeInfoFactory:
"""
Creates the correct `FieldTypeInfo` for a given type.
"""
@classmethod
def from_field_type(cls, field_type: Type[T]) -> Optional[FieldTypeInfo]:
"""
Creates and returns the correct `FieldTypeInfo` for a given type.
"""
if issubclass(field_type, Money):
return MoneyFieldInfo(field_type)
return None
@classmethod
def field_info_from_type(
cls,
field_type: Type[T]
) -> Optional[XlsxFieldInfo]:
"""
Same as `from_field_type` but directly calls `FieldTypeInfo.field_info`
(if available) and returns the result.
"""
if (impl := cls.from_field_type(field_type)) is not None:
return impl.field_info()
return None
```
#### File: pydantic-xlsx/pydantic_xlsx/types.py
```python
from abc import ABCMeta, abstractmethod
from typing import Optional
class Money(float, metaclass=ABCMeta):
"""
Handles amounts of money by subclassing float. In general it's a very bad
idea to store amounts of money as floats as all kind of funny things can
happen. But as openpyxl parses currency cell values as int or float we have
to work with it anyway.
Depending on the user input in the Excel file. The input float can have any
number of decimal places. The value is rounded according to the number of
minor units of the given currency and then converted to an integer.
To define a money field in your model you first have to define the currency
you like to use. For this subclass the Money class and set some class
variables:
```
class Euro(Money):
minor_unit = 2
code = "€"
code_before_amount = False
delimiter = ","
thousands_separator = "."
```
"""
amount: int
"""The amount of money."""
@property
@classmethod
@abstractmethod
def minor_unit(cls) -> int:
"""
        Expresses the decimal relationship between the currency and its minor
        unit. 1 means a ratio of 10:1, 2 equals 100:1 and so on. For example,
        the European currency "Euro" has a minor unit of 2, as one Euro is made
        up of 100 cents.
"""
pass
@property
@classmethod
@abstractmethod
def code(cls) -> str:
"""Freely chosen code to represent your currency."""
pass
@property
@classmethod
@abstractmethod
def code_before_amount(cls) -> bool:
"""The position of the currency code."""
pass
@property
@classmethod
@abstractmethod
    def delimiter(cls) -> str:
        """
        Delimiter used to distinguish between the currency and its minor unit.
"""
pass
@property
@classmethod
@abstractmethod
def thousands_separator(cls) -> str:
"""
Separator used to group thousands.
"""
pass
def __new__(cls, value: float):
return float.__new__(cls, value)
def __init__(self, value: float) -> None:
normalized = "{1:.{0}f}".format(self.minor_unit, value)
self.amount = int(normalized.replace(".", ""))
@classmethod
def validate(cls, value: float):
return cls(value)
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def number_format(cls) -> str:
"""Returns the Excel number format code for the currency."""
# Defines how to display the thousands and minors of a number.
        # Ex.: `#,##0.00`
decimal_seperation = "#{}{}0.{}".format(
cls.delimiter,
"#" * cls.minor_unit,
"0" * cls.minor_unit,
)
if cls.code_before_amount:
amount = "{} {}".format(cls.code, decimal_seperation)
else:
amount = "{} {}".format(decimal_seperation, cls.code)
rsl = "{};[RED]-{}".format(amount, amount)
return rsl
def __str__(self) -> str:
minor_amount = self.amount % (10 ** self.minor_unit)
minor = "{1:0{0}d}".format(self.minor_unit, minor_amount)
        # self.amount is stored in minor units, so strip the minor part and
        # convert the remainder into whole currency units for display.
        integer_amount = (self.amount - minor_amount) // (10 ** self.minor_unit)
        integer_thousands = "{:,}".format(integer_amount)
integer = integer_thousands.replace(",", self.thousands_separator)
number = "{}{}{}".format(integer, self.delimiter, minor)
if self.code_before_amount:
return "{} {}".format(self.code, number)
return "{} {}".format(number, self.code)
def __repr__(self) -> str:
return self.__str__()
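# Illustrative check of the formatting above (not part of the original module),
# using the Euro subclass shown in the class docstring and assuming the
# integer-part handling in __str__:
#
#     class Euro(Money):
#         minor_unit = 2
#         code = "€"
#         code_before_amount = False
#         delimiter = ","
#         thousands_separator = "."
#
#     str(Euro(1234.5))  # -> "1.234,50 €"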
class Url:
"""
A URL to a website with an optional title. Will be converted into a Excel
Hyperlink.
"""
title: Optional[str]
url: str
def __init__(self, title: Optional[str], url: str):
self.title = title
self.url = url
``` |
{
"source": "733amir/DailyCodingProblem",
"score": 4
} |
#### File: DailyCodingProblem/288 - By Salesforce - Medium/main.py
```python
def descending_ascending_diff(number: int) -> int:
asc = ''.join(sorted(str(number).zfill(4)))
des = asc[::-1]
return int(des) - int(asc)
def solution_a(number: int) -> int:
for steps in range(1, 10000):
number = descending_ascending_diff(number)
if number == 0:
            raise ValueError("all digits of the number are the same")
if number == 6174:
return steps
    raise RuntimeError("10000 steps counted, need more steps")
def main():
for i in range(1000, 10000):
try:
steps = solution_a(i)
except:
continue
        print(f"{i} took {steps} steps.")
if __name__ == "__main__":
main()
``` |
{
"source": "733amir/fandogh-cli",
"score": 3
} |
#### File: fandogh-cli/fandogh_cli/domain_commands.py
```python
import click
from .fandogh_client.domain_client import *
from .base_commands import FandoghCommand
from .presenter import present
from .utils import format_text, TextStyle
@click.group('domain')
def domain():
"""
Domain management commands
"""
def _verify_ownership(name):
response = verify_domain(name)
if response['verified']:
click.echo('Domain {} ownership verified successfully.'.format(name))
else:
click.echo('It seems the key is not set correctly as value of a TXT record for domain {}.'.format(name))
click.echo(
'please add a TXT record with the following key to your name server in order to help us verify your ownership.')
click.echo('Key:' + format_text(response['verification_key'], TextStyle.OKGREEN))
return response
@click.command('add', cls=FandoghCommand)
@click.option('--name', prompt='domain name', help='your domain name')
def add(name):
"""
Upload project on the server
"""
response = add_domain(name)
if response['verified'] is True:
click.echo(format_text("Your domain has been added and doesn't need verification", TextStyle.OKGREEN))
return
click.echo('The domain has been added.')
    click.echo('Now you just need to help us verify that you have ownership of this domain.')
click.echo(
'please add a TXT record with the following key to your name server in order to help us verify your ownership.')
click.echo('Key:' + format_text(response['verification_key'], TextStyle.OKGREEN))
while not response['verified']:
confirmed = click.confirm('I added the record')
if confirmed:
response = _verify_ownership(name)
else:
click.echo('You can verify the ownership later on')
click.echo('Once you added the record please run the following command')
click.echo(format_text('fandogh domain verify --name={}'.format(name), TextStyle.BOLD))
return
@click.command('list', cls=FandoghCommand)
def list():
"""
    List domains
"""
table = present(lambda: list_domains(),
renderer='table',
headers=['Domain name', 'Verified', 'Certificate'],
columns=['name', 'verified', 'certificate'])
click.echo(table)
@click.command('verify', cls=FandoghCommand)
@click.option('--name', 'name', prompt='Domain name', help='The domain name')
def verify(name):
"""
Verify domain ownership
"""
_verify_ownership(name)
@click.command('request-certificate', cls=FandoghCommand)
@click.option('--name', 'name', prompt='Domain name', help='The domain name')
def request_certificate(name):
"""
Request a Let's Encrypt SSL/TLS Certificate for a domain
"""
create_certificate(name)
command = format_text("fandogh domain details --name {}".format(name), TextStyle.OKBLUE)
    click.echo("Your request has been submitted and we are trying to get a certificate from Let's Encrypt for your "
               "domain, it might take a few minutes to complete.\n"
               "You can follow up on your request using {}".format(command))
@click.command('details', cls=FandoghCommand)
@click.option('--name', 'name', prompt='Domain name', help='The domain name')
def details(name):
"""
Get details of a domain
"""
_display_domain_details(details_domain(name), clear=False)
@click.command('revoke-certificate', cls=FandoghCommand)
@click.option('--name', 'name', prompt='Domain name', help='The domain name')
def revoke_certificate(name):
"""
Revoke a certificate
"""
if click.confirm("You're about to revoke {} certificate and delete the secret, are you sure?".format(name)):
result = delete_certificate(name)
click.echo(result['message'])
else:
click.echo("Revoking certificate has been canceled")
@click.command('delete', cls=FandoghCommand)
@click.option('--name', '-n', 'name', prompt='Domain name', help='The domain name')
def delete(name):
click.echo(delete_domain(name))
def _display_domain_details(domain_details, clear=True):
if clear:
click.clear()
click.echo('Domain: {}'.format(format_text(domain_details['name'], TextStyle.HEADER)))
if domain_details['verified'] is True:
click.echo('\tVerified: {}'.format(format_text("Yes", TextStyle.OKGREEN)))
else:
        click.echo('\tVerified: {}'.format(format_text("No", TextStyle.FAIL)))
if domain_details.get('certificate', None) is None:
click.echo("\tCertificate: {}".format(format_text("Not requested", TextStyle.OKBLUE)))
else:
certificate_details = domain_details['certificate'].get('details')
status = certificate_details['status']
if status == 'PENDING':
click.echo("\tCertificate: {}".format(format_text('Trying to get a certificate', TextStyle.OKBLUE)))
elif status == 'ERROR':
click.echo("\tCertificate: {}".format(format_text('Getting certificate failed', TextStyle.FAIL)))
elif status == 'READY':
click.echo("\tCertificate: {}".format(format_text('Certificate is ready to use', TextStyle.OKGREEN)))
else:
click.echo('\tCertificate: {}'.format(format_text('Certificate status is unknown', TextStyle.WARNING)))
info = certificate_details.get("info", False)
if info:
click.echo('\tInfo: {}'.format(format_text(info, TextStyle.WARNING)))
if len(certificate_details.get('events', [])) > 0:
click.echo("\tEvents:")
for condition in certificate_details.get("events", []):
click.echo("\t + {}".format(condition))
domain.add_command(add)
domain.add_command(list)
domain.add_command(verify)
domain.add_command(details)
domain.add_command(request_certificate)
domain.add_command(revoke_certificate)
domain.add_command(delete)
```
#### File: fandogh_cli/fandogh_client/namespace_client.py
```python
import requests
from fandogh_cli.fandogh_client import base_url, get_exception
from fandogh_cli.fandogh_client import get_stored_token
base_namespace_url = '%sapi/users/namespaces' % base_url
def details_namespace():
token = get_stored_token()
response = requests.get("%s/%s" % (base_namespace_url, "NAMESPACE"), # use user's namespace
headers={'Authorization': 'JWT ' + token})
if response.status_code != 200:
raise get_exception(response)
else:
return response.json()
```
#### File: fandogh-cli/fandogh_cli/version_check.py
```python
import requests
from fandogh_cli import VERSION, NAME
class Version:
def __init__(self, version_str):
self.version_tuple = self._parse_version(version_str)
def _parse_version(self, version_str):
try:
version = list(map(int, version_str.split('.')))
if len(version) == 0:
return [0, ]
version.extend([0] * (3 - len(version)))
return version
except Exception:
return [0]
def __len__(self):
return len(self.version_tuple)
def __gt__(self, other):
this, that = self._make_same_length(self.version_tuple, other.version_tuple)
for index, version_part in enumerate(this):
if version_part > that[index]:
return True
elif version_part < that[index]:
return False
return False
def _make_same_length(self, first, second):
max_length = max(len(first), len(second))
the_first = first + [0] * (max_length - len(first))
the_second = second + [0] * (max_length - len(second))
return the_first, the_second
def compare(self, other):
assert isinstance(other, Version)
this, that = self._make_same_length(self.version_tuple, other.version_tuple)
for index, version in enumerate(this):
if version > that[index]:
return index + 1
elif version < that[index]:
return -1 * (index + 1)
return 0
def __ge__(self, other):
return self > other or self == other
def __le__(self, other):
return not self > other
def __lt__(self, other):
if self > other or self == other:
return False
return True
def __eq__(self, other):
assert isinstance(other, Version)
this, that = self._make_same_length(self.version_tuple, other.version_tuple)
return this == that
def __str__(self):
return ".".join(map(str, self.version_tuple))
def __repr__(self):
return str(self)
def get_package_info():
url = "https://pypi.org/pypi/{}/json".format(NAME)
try:
response = requests.get(url)
if response.status_code == 200:
return response.json()
raise RuntimeError("Unexpected response status while calling pypi")
except Exception as exp:
raise RuntimeError("Unable to connect to pypi.org: {}".format(exp))
def get_latest_version():
package_info = get_package_info()
try:
return Version(package_info['info']['version'])
except KeyError as missing_key:
raise RuntimeError("Unexpected response: {} is missing from response".format(missing_key))
def get_current_version():
return Version(VERSION)
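# Minimal usage sketch, not part of the original file: it illustrates how the
# zero-padding in _make_same_length makes "1.2" equal to "1.2.0", and how
# compare() reports which component differs (sign gives direction, absolute
# value gives the 1-based position).
if __name__ == '__main__':
    assert Version("1.2") == Version("1.2.0")
    assert Version("1.10.0") > Version("1.9.3")
    assert Version("1.2.3").compare(Version("1.3.0")) == -2  # minor part is lower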
``` |
{
"source": "733amir/telegram-cloner",
"score": 2
} |
#### File: 733amir/telegram-cloner/main.py
```python
import fire
from cmd_interface import CommandlineInterface
def main():
fire.Fire(CommandlineInterface)
if __name__ == '__main__':
main()
``` |
{
"source": "734990893/gzpt-unlock",
"score": 2
} |
#### File: gzpt-unlock/ui/app.py
```python
import json
import sys
import tkinter as tk
import requests
from constants import (CENTER_OUTLINE_RATIO, CIRCLE_ACTIVE_COLOR,
CIRCLE_BG_COLOR, CIRCLE_CENTER_COLOR, DEBUG_MODE,
DEFAULT_SCREEN_HEIGHT, DEFAULT_SCREEN_WIDTH,
GAP_FONT_RATIO, GAP_HEIGHT_RATIO, GAP_OUTLINE_RATIO,
GAP_RADIUS_RATIO, MIN_WINDOW_HEIGHT, MIN_WINDOW_WIDTH,
WINDOW_START_MODE)
from messages import (DEBUG_LAST_INPUT, DEBUG_SERVER_UNAVAILABLE,
DEBUG_VALIDATE_SUCCESS, INSERT_INSTRUCTION)
class GUI():
def __init__(self, width: int, height: int):
super().__init__()
self.root = tk.Tk()
self.root.minsize(MIN_WINDOW_WIDTH, MIN_WINDOW_HEIGHT)
self.root.title('Gaze Pattern Authentication')
# self.root.resizable(width=False,
# height=False)
self.root.wm_attributes(WINDOW_START_MODE, 1)
self.window = tk.Canvas(self.root,
width=width,
height=height,
bd=0,
highlightthickness=0)
self.all_items = set()
self.active_circle = None
self.input_zones = (None, None) # dicts of positions and radiuses
self.window.pack()
self.reload_page(width, height)
def start(self):
"""
Start Running GUI
"""
print("GUI started!")
self._update(check_reload=False)
self.root.mainloop()
def _update(self, check_reload=True):
"""Main update function
This function is executed every 500ms
Parameter
---------
check_reload: bool, optional
check should reload or not
"""
if check_reload:
self._check_update()
try:
response_data = json.loads(bytes.decode(
requests.get("http://127.0.0.1:5050/results").content))
except requests.ConnectionError:
self.window.itemconfig(self.debug_text,
text=DEBUG_SERVER_UNAVAILABLE)
if response_data:
active_circle = response_data["last_input"]
validate_success = response_data["validate_success"]
if DEBUG_MODE:
self.window.itemconfig(self.debug_text,
text=DEBUG_LAST_INPUT.format(active_circle) +
DEBUG_VALIDATE_SUCCESS.format(validate_success))
self._activate_circle(active_circle)
self.window.after(500, self._update)
def _check_update(self):
screen_width, screen_height = self.root.winfo_width(), self.root.winfo_height()
if screen_width != self.width or screen_height != self.height:
print(self.root.winfo_width(),
self.root.winfo_height(), self.width, self.height)
self.reload_page(screen_width, screen_height)
requests.put("http://127.0.0.1:5050/settings",
json={
"screen_width": screen_width,
"screen_height": screen_height,
"input_centers": self.input_zones[0],
"input_tolerances": self.input_zones[1]
})
def reload_page(self, width, height):
"""Redraw all items in the window
Parameters
----------
width: int
width of the window
height: int
height of the window
"""
print("reload")
self._remove_all_items()
self.width = width
self.height = height
self.gap_len = min(width, height) * GAP_HEIGHT_RATIO
self.debug_text = self._draw_text(self.width,
self.height,
None,
family="mamelon",
size=20,
anchor='se')
center_pt = (self.width/2, self.height/2 - self.gap_len/3)
self.nine_circles, self.input_zones = self._create_nine_points_grid(center_pt,
self.gap_len,
self.gap_len/GAP_RADIUS_RATIO,
width=self.gap_len/GAP_OUTLINE_RATIO,
fill=self.from_rgb(CIRCLE_BG_COLOR))
self.nine_centers, _ = self._create_nine_points_grid(center_pt,
self.gap_len,
CENTER_OUTLINE_RATIO*self.gap_len/GAP_OUTLINE_RATIO,
width=0,
fill=self.from_rgb(CIRCLE_CENTER_COLOR))
self.instruction_text = self._draw_text(self.width/2,
4/5*self.height,
INSERT_INSTRUCTION,
family="mamelon",
size=int(self.gap_len/GAP_FONT_RATIO))
self.all_items.add(self.debug_text)
for c_id in self.nine_circles:
self.all_items.add(c_id)
for p_id in self.nine_centers:
self.all_items.add(p_id)
self.all_items.add(self.instruction_text)
########################################
## Helper functions for Drawing ##
########################################
def _draw_circle(self, x, y, r, width, fill):
c = self.window.create_oval(x-r, y-r, x+r, y+r, width=width, fill=fill)
return c
def _draw_text(self, x, y, text, anchor='n', family='Arial', size=20):
debug_text = self.window.create_text(x,
y,
text=text,
anchor=anchor,
font=(family, size))
return debug_text
def _activate_circle(self, circle_id: int):
"""Set activated circle to background color and new active circle to active color
Parameter
---------
circle_id: int
number of the circle representing, one of 1-9
"""
if self.active_circle:
self.window.itemconfig(
self.nine_circles[self.active_circle], fill=self.from_rgb(CIRCLE_BG_COLOR))
if circle_id:
self.window.itemconfig(
self.nine_circles[circle_id], fill=self.from_rgb(CIRCLE_ACTIVE_COLOR))
self.active_circle = circle_id
##################################
## Other Helper functions ##
##################################
def _remove_all_items(self):
"""Remove all items in window"""
for item in self.all_items:
self.window.delete(item)
self.all_items = set()
def from_rgb(self, rgb: tuple) -> str:
"""Convert rgb to hex color
Parameter
---------
rgb: tuple
3 integers representing red, green, blue channels
Return
------
str
hex color in format of '#xxxxxx'
Example
-------
>>>from_rgb((255, 10, 5))
'#ff0a05'
"""
r, g, b = rgb
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def _create_nine_points_grid(self, center_pt, distance, radius, width=3, fill=None) -> list:
"""Create a Nine-point Grid\n
7 | 8 | 9\n
4 | 5 | 6\n
1 | 2 | 3\n
The items are stored as above order in list
Parameters
----------
center_pt: tuple
position of center point
distance: float
distance between two neighbor points
radius: float
radius of each point
Return
------
list
list of item ids
"""
ids = [None]
circles = {}
radiuses = {}
circle_class = 1
cx, cy = center_pt
for dy in (1, 0, -1):
for dx in (-1, 0, 1):
center_x = cx+dx*distance
center_y = cy+dy*distance
ids.append(self._draw_circle(center_x,
center_y,
radius,
width,
fill))
circles[str(circle_class)] = (center_x, center_y)
radiuses[str(circle_class)] = radius
circle_class += 1
return ids, (circles, radiuses)
if __name__ == "__main__":
try:
screen_width, screen_height = sys.argv[1], sys.argv[2]
    except IndexError:
screen_width, screen_height = DEFAULT_SCREEN_WIDTH, DEFAULT_SCREEN_HEIGHT
gui = GUI(int(screen_width), int(screen_height))
gui.start()
``` |
{
"source": "735tesla/python-pineapple",
"score": 3
} |
#### File: python-pineapple/pineapple/filters.py
```python
from module import Module
class Filters(Module):
def __init__(self, api):
super(Filters, self).__init__(api, 'Filters')
def getClientData(self):
return self.request('getClientData')
def getSSIDData(self):
return self.request('getSSIDData')
def toggleClientMode(self, allow):
        mode = 'Allow' if allow else 'Disallow'
return self.request('toggleClientMode', {'mode': mode})
def toggleSSIDMode(self, allow):
        mode = 'Allow' if allow else 'Disallow'
return self.request('toggleSSIDMode', {'mode': mode})
def addClient(self, mac):
return self.request('addClient', {'mac': mac})
def removeClient(self, mac):
return self.request('removeClient', {'mac': mac})
def addSSID(self, ssid):
return self.request('addSSID', {'ssid': ssid})
def removeSSID(self, ssid):
return self.request('removeSSID', {'ssid': ssid})
```
#### File: python-pineapple/pineapple/notifications.py
```python
from module import Module
class Notifications(Module):
def __init__(self, api):
super(Notifications, self).__init__(api, 'notifications', True)
def addNotification(self, message):
return self.request('addNotification', {'message': message})
def listNotifications(self):
return self.request('listNotifications')
def clearNotifications(self):
return self.request('clearNotifications')
```
#### File: python-pineapple/pineapple/pineapple.py
```python
from api import API
from . import modules
import httplib, requests
# from pineapple import *;p=pineapple.Pineapple('7a0685064eb99de1f89a73cd403f13fdaadc42cff8624d8b6d5389f1fc19b8bbe2f960b8f70e38b49b4c581029ad939c967e44ce52ededd82a67bc6ca188d21e')
class Pineapple(object):
def __init__(self, apiKey, apiUrl = None, debug = False):
super(Pineapple, self).__init__()
self.debug = debug
self.api = API(apiKey, apiUrl)
self.modules = {}
self._pineappleModule = __import__('pineapple')
for moduleName in modules:
moduleClass = self._pineappleModule.__dict__[moduleName].__dict__[moduleName.title()]
self.modules[moduleName] = moduleClass(self.api)
if self.debug:
httplib.HTTPConnection.debuglevel = 1
logger = __import__('logging')
logger.basicConfig()
logger.getLogger().setLevel(logger.DEBUG)
requests_log = logger.getLogger('requests.packages.urllib3')
requests_log.setLevel(logger.DEBUG)
requests_log.propagate = True
def loadModule(self, moduleClass):
self.modules[moduleClass.__name__] = moduleClass(self.api)
def getModule(self, module):
return self.modules[module]
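# Minimal usage sketch, not part of the original file. Assumptions: the API key
# below is a hypothetical placeholder, and 'filters' is one of the lowercase
# entries of the dynamically loaded `modules` list used in __init__ above.
if __name__ == '__main__':
    p = Pineapple('<your-api-key>', debug=False)
    clients = p.getModule('filters').getClientData()
    print(clients)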
``` |
{
"source": "73696e65/ppfuzz",
"score": 2
} |
#### File: ppfuzz/lib/grammar.py
```python
import copy
import re
import random
import sys
from lib.inject_const import *
from google.protobuf.descriptor import FieldDescriptor as fd
RE_PARENTHESIZED_EXPR = re.compile(r'\([^()]*\)[?+*]')
RE_EXTENDED_NONTERMINAL = re.compile(r'(<[^<> ]*>[?+*])')
RE_NONTERMINAL = re.compile(r'(<[^<> ]*>)')
symbol_name = "<symbol>"
def gpb_to_ebnf(msg):
"""
    Create an EBNF grammar from a protobuf message class
"""
g = dict()
def traverse_message(parent, name=None):
key = name if name else parent.name
value = str()
for o in parent.fields:
if o.label == fd.LABEL_OPTIONAL:
value += f'(<{o.name}>)?'
elif o.label == fd.LABEL_REQUIRED:
value += f'<{o.name}>'
elif o.label == fd.LABEL_REPEATED:
value += f'(<{o.name}>)+'
else:
raise NotImplementedError
g[f'<{key}>'] = [ value ]
for o in parent.fields:
if o.type == fd.TYPE_MESSAGE:
traverse_message(o.message_type, o.name)
elif o.type == fd.TYPE_INT64: # 3
g[f'<{o.name}>'] = [INJECT_INT64]
elif o.type == fd.TYPE_INT32: # 5
g[f'<{o.name}>'] = [INJECT_INT32]
elif o.type == fd.TYPE_BOOL:
g[f'<{o.name}>'] = [INJECT_BOOL]
elif o.type == fd.TYPE_STRING: # 9
g[f'<{o.name}>'] = [INJECT_STRING]
elif o.type == fd.TYPE_BYTES: # 12
g[f'<{o.name}>'] = [INJECT_BYTES]
elif o.type == fd.TYPE_ENUM: # 14
g[f'<{o.name}>'] = [e.name for e in o.enum_type.values]
else:
print(f"{parent.name} -> {o.name}, {o.type}")
raise NotImplementedError
traverse_message(msg.DESCRIPTOR)
return convert_ebnf_grammar(g)
def convert_ebnf_parentheses(ebnf_grammar):
"""
    Convert parenthesized expressions in extended BNF to BNF
"""
grammar = extend_grammar(ebnf_grammar)
for nonterminal in ebnf_grammar:
expansions = ebnf_grammar[nonterminal]
for i in range(len(expansions)):
expansion = expansions[i]
while True:
parenthesized_exprs = parenthesized_expressions(expansion)
if len(parenthesized_exprs) == 0:
break
for expr in parenthesized_exprs:
operator = expr[-1:]
contents = expr[1:-2]
new_sym = new_symbol(grammar)
expansion = grammar[nonterminal][i].replace(
expr, new_sym + operator, 1)
grammar[nonterminal][i] = expansion
grammar[new_sym] = [contents]
return grammar
def convert_ebnf_grammar(ebnf_grammar):
"""
Convert a grammar in extended BNF to BNF
"""
return convert_ebnf_operators(convert_ebnf_parentheses(ebnf_grammar))
def convert_ebnf_operators(ebnf_grammar):
"""
Convert the operators in extended BNF to BNF
"""
grammar = extend_grammar(ebnf_grammar)
for nonterminal in ebnf_grammar:
expansions = ebnf_grammar[nonterminal]
for i in range(len(expansions)):
expansion = expansions[i]
extended_symbols = extended_nonterminals(expansion)
for extended_symbol in extended_symbols:
operator = extended_symbol[-1:]
original_symbol = extended_symbol[:-1]
new_sym = new_symbol(grammar, original_symbol)
grammar[nonterminal][i] = grammar[nonterminal][i].replace(
extended_symbol, new_sym, 1)
if operator == '?':
grammar[new_sym] = ["", original_symbol]
elif operator == '*':
grammar[new_sym] = ["", original_symbol + new_sym]
elif operator == '+':
grammar[new_sym] = [
original_symbol, original_symbol + new_sym]
return grammar
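# Illustrative sketch, not part of the original file: tracing the two EBNF
# rewriting passes above on a toy grammar (hypothetical input, not taken from
# the project). "(<digit>)+" first becomes "<symbol>+" via the parentheses
# pass, then the '+' pass introduces a recursive helper rule:
#
#   convert_ebnf_grammar({"<start>": ["(<digit>)+"], "<digit>": ["0", "1"]})
#   # => {'<start>': ['<symbol-1>'],
#   #     '<digit>': ['0', '1'],
#   #     '<symbol>': ['<digit>'],
#   #     '<symbol-1>': ['<symbol>', '<symbol><symbol-1>']}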
def extend_grammar(grammar, extension={}):
new_grammar = copy.deepcopy(grammar)
new_grammar.update(extension)
return new_grammar
def parenthesized_expressions(expansion):
if isinstance(expansion, tuple):
expansion = expansion[0]
return re.findall(RE_PARENTHESIZED_EXPR, expansion)
def new_symbol(grammar, symbol_name=symbol_name):
"""
Return a new symbol for `grammar` based on `symbol_name`
"""
if symbol_name not in grammar:
return symbol_name
count = 1
while True:
tentative_symbol_name = symbol_name[:-1] + "-" + repr(count) + ">"
if tentative_symbol_name not in grammar:
return tentative_symbol_name
count += 1
def is_nonterminal(s):
return re.match(RE_NONTERMINAL, s)
def nonterminals(expansion):
if isinstance(expansion, tuple):
expansion = expansion[0]
return re.findall(RE_NONTERMINAL, expansion)
def extended_nonterminals(expansion):
if isinstance(expansion, tuple):
expansion = expansion[0]
return re.findall(RE_EXTENDED_NONTERMINAL, expansion)
def reachable_nonterminals(grammar, start_symbol):
reachable = set()
def _find_reachable_nonterminals(grammar, symbol):
nonlocal reachable
reachable.add(symbol)
for expansion in grammar.get(symbol, []):
for nonterminal in nonterminals(expansion):
if nonterminal not in reachable:
_find_reachable_nonterminals(grammar, nonterminal)
_find_reachable_nonterminals(grammar, start_symbol)
return reachable
def unreachable_nonterminals(grammar, start_symbol):
return grammar.keys() - reachable_nonterminals(grammar, start_symbol)
def def_used_nonterminals(grammar, start_symbol):
defined_nonterminals = set()
used_nonterminals = {start_symbol}
for defined_nonterminal in grammar:
defined_nonterminals.add(defined_nonterminal)
expansions = grammar[defined_nonterminal]
if not isinstance(expansions, list):
print(repr(defined_nonterminal) + ": expansion is not a list")
return None, None
if len(expansions) == 0:
print(repr(defined_nonterminal) + ": expansion list empty")
return None, None
for expansion in expansions:
if isinstance(expansion, tuple):
expansion = expansion[0]
if not isinstance(expansion, str):
print(repr(defined_nonterminal) + ": " + repr(expansion) + ": not a string")
return None, None
for used_nonterminal in nonterminals(expansion):
used_nonterminals.add(used_nonterminal)
return defined_nonterminals, used_nonterminals
def is_valid_grammar(grammar, start_symbol):
defined_nonterminals, used_nonterminals = def_used_nonterminals(grammar, start_symbol)
if defined_nonterminals is None or used_nonterminals is None:
return False
# Do not complain about '<start>' being not used,
# even if start_symbol is different
if start_symbol in grammar:
used_nonterminals.add(start_symbol)
for unused_nonterminal in defined_nonterminals - used_nonterminals:
print(repr(unused_nonterminal) + ": defined, but not used")
for undefined_nonterminal in used_nonterminals - defined_nonterminals:
print(repr(undefined_nonterminal) + ": used, but not defined")
    # Symbols must be reachable from the given start symbol
    unreachable = unreachable_nonterminals(grammar, start_symbol)
    msg_start_symbol = start_symbol
for unreachable_nonterminal in unreachable:
print(repr(unreachable_nonterminal) + ": unreachable from " + msg_start_symbol)
return used_nonterminals == defined_nonterminals and len(unreachable) == 0
def check_grammar(grammar, start_symbol):
assert start_symbol in grammar
assert is_valid_grammar(
grammar,
start_symbol=start_symbol)
def exp_string(expansion):
"""
Return the string to be expanded
"""
if isinstance(expansion, str):
return expansion
return expansion[0]
__all__ = ["create_grammar", "grammar_to_gpb", "check_grammar"]
```
#### File: ppfuzz/lib/helper.py
```python
import subprocess
import re
import os.path
import sys
from os import mkdir
from importlib import import_module
sys.path.append("..")
from config import *
def get_proto_files():
"""
Extract .proto files from the services
"""
    proto_files = {p for _, _, p in services}
return proto_files
def get_proto_libs(proto_files):
"""
Dynamically import the compiled protobuf files
"""
libs = dict()
for pf in proto_files:
        proto_lib = re.sub(r'\.proto$', '_pb2', os.path.basename(pf))
libs[pf] = import_module(f"{proto_out}.{proto_lib}")
return libs
def create_vectors(libs):
"""
Create attack vectors - list of dictionaries containing url, request
and protobuf message. Later each entry will be expanded with grammar.
"""
vectors = list()
for url, request, proto in services:
msg = getattr(libs[proto], request)
entry = dict()
entry['url'] = url
entry['request'] = request
entry['msg'] = msg
vectors.append(entry)
return vectors
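# Illustrative sketch, not part of the original file (assumption about the shape
# of config.services, inferred from how it is unpacked above): each entry is a
# (url, request message name, .proto file path) tuple, e.g.
#
#   services = [
#       ("https://api.example.test/v1/users", "CreateUserRequest", "protos/user.proto"),
#   ]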
def pb_compile(files, dest):
"""
Compile the protobuf files running the external 'protoc' compiler
"""
if not os.path.exists(dest):
mkdir(dest)
for file in files:
args = f'protoc -I={os.path.dirname(file)} --python_out={dest} {file}'
print(f"Running '{args}'")
ret = subprocess.call(args, shell=True)
if ret:
exit()
__all__ = ["create_vectors", "get_proto_files", "get_proto_libs", "pb_compile"]
``` |
{
"source": "737363395/DriveSingularity",
"score": 2
} |
#### File: DriveSingularity/pyds/handlers.py
```python
from ray.rllib.env import MultiAgentEnv
class MultiAgentAPI(MultiAgentEnv):
def __init__(self, env):
self._observation_space = env.observation_space
self._action_space = env.action_space
self._agents = env.agents() # ids
self._env = env
def reset(self):
state_n = self._env.reset()
return dict(zip(self._agents, state_n))
def step(self, action_dict):
next_state_n, reward_n, done_n, _ = self._env.step(action_dict)
        return dict(zip(self._agents, next_state_n)), dict(zip(self._agents, reward_n)), dict(zip(self._agents, done_n)), {}
``` |
{
"source": "73k05/cfp-app-xml-generator",
"score": 3
} |
#### File: cfp-app-xml-generator/core/csv_parser.py
```python
import os
import lxml.etree as ET
import chardet
import pandas as pd
from lxml.etree import CDATA
from utils.log import write_log
def celltostr(cell):
if not pd.isnull(cell):
return str(cell)
return ''
def parse_generate(file_csv_path):
if not os.path.exists(file_csv_path):
write_log("File does not exists")
return
# XML
# create the file structure
lheo = ET.Element('lheo')
lheo.attrib['xmlns'] = 'https://www.of.moncompteformation.gouv.fr'
offres = ET.SubElement(lheo, 'offres')
file_encoding = 'UTF-8'
sep = ','
with open(fr'{file_csv_path}', 'rb') as f:
result = chardet.detect(f.read()) # or readline if the file is large
file_encoding = result['encoding']
if file_encoding.lower() != "utf-8":
sep = ';'
write_log(f"Encoding is {file_encoding}")
engine = 'c' # c or python
# Read in the data
# ISO-8859-1
# unicode
# utf-16
# utf-8
# cp1252
# utf-8-sig
# latin1
data = pd.read_csv(file_csv_path, engine=engine, encoding=file_encoding, error_bad_lines=False, sep=sep,
quotechar="\"")
for index, row in data.iterrows():
write_log("File generation...")
write_log(index)
formation = ET.SubElement(offres, 'formation')
formation.attrib['numero'] = celltostr(row['numero'])
formation.attrib['datemaj'] = celltostr(row['datemaj'])
formation.attrib['datecrea'] = celltostr(row['datecrea'])
intitule_formation = ET.SubElement(formation, 'intitule-formation')
intitule_formation.text = celltostr(row['intitule-formation'])
objectif_formation = ET.SubElement(formation, 'objectif-formation')
objectif_formation.text = CDATA(celltostr(row['objectif-formation']))
resultats_attendus = ET.SubElement(formation, 'resultats-attendus')
resultats_attendus.text = CDATA(celltostr(row['resultats-attendus']))
contenu_formation = ET.SubElement(formation, 'contenu-formation')
contenu_formation.text = CDATA(celltostr(row['contenu-formation']))
parcours_de_formation = ET.SubElement(formation, 'parcours-de-formation')
parcours_de_formation.text = celltostr(row['parcours-de-formation'])
objectif_general_formation = ET.SubElement(formation, 'objectif-general-formation')
objectif_general_formation.text = celltostr(row['objectif-general-formation'])
certification = ET.SubElement(formation, 'certification')
code_CERTIFINFO = ET.SubElement(certification, 'code-CERTIFINFO')
code_CERTIFINFO.text = celltostr(row['code-CERTIFINFO'])
# Action
action = ET.SubElement(formation, 'action')
action.attrib['numero'] = celltostr(row['action-numero'])
action.attrib['datemaj'] = celltostr(row['action-datemaj'])
action.attrib['datecrea'] = celltostr(row['action-datecrea'])
# rythme_formation = ET.SubElement(action,'rythme-formation')
# rythme_formation.text = celltostr(row['rythme-formation'])
niveau_entree_obligatoire = ET.SubElement(action, 'niveau-entree-obligatoire')
niveau_entree_obligatoire.text = celltostr(row['niveau-entree-obligatoire'])
modalites_enseignement = ET.SubElement(action, 'modalites-enseignement')
modalites_enseignement.text = celltostr(row['modalites-enseignement'])
conditions_specifiques = ET.SubElement(action, 'conditions-specifiques')
conditions_specifiques.text = CDATA(celltostr(row['conditions-specifiques']))
lieu_de_formation = ET.SubElement(action, 'lieu-de-formation')
coordonnees = ET.SubElement(lieu_de_formation, 'coordonnees')
coordonnees.attrib['numero'] = celltostr(row['lieu-de-formation-coordonnees-numero'])
nom = ET.SubElement(coordonnees, 'nom')
nom.text = celltostr(row['lieu-de-formation-coordonnees-nom'])
prenom = ET.SubElement(coordonnees, 'prenom')
prenom.text = celltostr(row['lieu-de-formation-coordonnees-prenom'])
adresse = ET.SubElement(coordonnees, 'adresse')
adresse.attrib['numero'] = celltostr(row['lieu-de-formation-adresse-numero'])
ligne = ET.SubElement(adresse, 'ligne')
ligne.text = celltostr(row['lieu-de-formation-adresse-numero']) + ' ' + celltostr(
row['lieu-de-formation-adresse-ligne'])
codepostal = ET.SubElement(adresse, 'codepostal')
codepostal.text = celltostr(row['lieu-de-formation-adresse-codepostal'])
ville = ET.SubElement(adresse, 'ville')
ville.text = celltostr(row['lieu-de-formation-adresse-ville'])
# Geoloc
geoloc = ET.SubElement(adresse, 'geolocalisation')
lat = ET.SubElement(geoloc, 'latitude')
lat.text = celltostr(row['lieu-de-formation-latitude'])
lng = ET.SubElement(geoloc, 'longitude')
lng.text = celltostr(row['lieu-de-formation-longitude'])
# Extra adresse
extras = ET.SubElement(adresse, 'extras')
extras.attrib['info'] = 'adresse'
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'numero-voie'
extra.text = celltostr(row['lieu-de-formation-extra-numero'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'code-nature-voie'
extra.text = celltostr(row['lieu-de-formation-extra-codevoie'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'libelle-voie'
extra.text = celltostr(row['lieu-de-formation-extra-libvoie'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'accessibilite-batimentaire'
extra.text = celltostr(row['lieu-de-formation-extra-acces'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'conformite-reglementaire'
extra.text = celltostr(row['lieu-de-formation-extra-regle'])
telfixe = ET.SubElement(coordonnees, 'telfixe')
numtel = ET.SubElement(telfixe, 'numtel')
numtel.text = celltostr(row['lieu-de-formation-numtel'])
courriel = ET.SubElement(coordonnees, 'courriel')
courriel.text = celltostr(row['lieu-de-formation-courriel'])
modalites_entrees_sorties = ET.SubElement(action, 'modalites-entrees-sorties')
modalites_entrees_sorties.text = celltostr(row['modalites-entrees-sorties'])
url_action = ET.SubElement(action, 'url-action')
urlweb = ET.SubElement(url_action, 'urlweb')
urlweb.text = celltostr(row['urlweb'])
session = ET.SubElement(action, 'session')
session.attrib['numero'] = celltostr(row['session-numero'])
session.attrib['datemaj'] = celltostr(row['session-datemaj'])
session.attrib['datecrea'] = celltostr(row['session-datecrea'])
periode = ET.SubElement(session, 'periode')
debut = ET.SubElement(periode, 'debut')
fin = ET.SubElement(periode, 'fin')
debut.text = celltostr(row['debut'])
fin.text = celltostr(row['fin'])
adresse_inscription = ET.SubElement(session, 'adresse-inscription')
adresse = ET.SubElement(adresse_inscription, 'adresse')
adresse.attrib['numero'] = celltostr(row['session-adresse-numero'])
ligne = ET.SubElement(adresse, 'ligne')
        ligne.text = celltostr(row['session-adresse-numero']) + ' ' + celltostr(row['session-adresse-ligne'])
codepostal = ET.SubElement(adresse, 'codepostal')
codepostal.text = celltostr(row['session-adresse-codepostal'])
ville = ET.SubElement(adresse, 'ville')
ville.text = celltostr(row['session-adresse-ville'])
# Session Geoloc
geoloc = ET.SubElement(adresse, 'geolocalisation')
lat = ET.SubElement(geoloc, 'latitude')
lat.text = celltostr(row['session-latitude'])
lng = ET.SubElement(geoloc, 'longitude')
lng.text = celltostr(row['session-longitude'])
# Session Extra adresse
extras = ET.SubElement(adresse, 'extras')
extras.attrib['info'] = 'adresse'
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'numero-voie'
extra.text = celltostr(row['session-extra-numero'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'code-nature-voie'
extra.text = celltostr(row['session-extra-codevoie'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'libelle-voie'
extra.text = celltostr(row['session-extra-libvoie'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'accessibilite-batimentaire'
extra.text = celltostr(row['session-extra-acces'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'conformite-reglementaire'
extra.text = celltostr(row['session-extra-regle'])
etat_recrutement = ET.SubElement(session, 'etat-recrutement')
etat_recrutement.text = celltostr(row['etat-recrutement'])
# Extras
extras = ET.SubElement(session, 'extras')
extras.attrib['info'] = 'session'
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'contact-inscription'
coordonnees = ET.SubElement(extra, 'coordonnees')
coordonnees.attrib['numero'] = celltostr(row['session-extra-coordonnees-numero'])
nom = ET.SubElement(coordonnees, 'nom')
nom.text = celltostr(row['session-extra-coordonnees-nom'])
prenom = ET.SubElement(coordonnees, 'prenom')
prenom.text = celltostr(row['session-extra-coordonnees-prenom'])
telfixe = ET.SubElement(coordonnees, 'telfixe')
numtel = ET.SubElement(telfixe, 'numtel')
numtel.text = celltostr(row['session-extra-numtel'])
courriel = ET.SubElement(coordonnees, 'courriel')
courriel.text = celltostr(row['session-extra-courriel'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'garantie'
extra.text = celltostr(row['session-extra-garantie'])
# Adresse Info
adresse_information = ET.SubElement(action, 'adresse-information')
adresse = ET.SubElement(adresse_information, 'adresse')
adresse.attrib['numero'] = celltostr(row['adresse-information-numero'])
ligne = ET.SubElement(adresse, 'ligne')
ligne.text = celltostr(row['adresse-information-numero']) + ' ' + celltostr(row['adresse-information-ligne'])
codepostal = ET.SubElement(adresse, 'codepostal')
codepostal.text = celltostr(row['adresse-information-codepostal'])
ville = ET.SubElement(adresse, 'ville')
ville.text = celltostr(row['adresse-information-ville'])
# Adresse Info Geoloc
geoloc = ET.SubElement(adresse, 'geolocalisation')
lat = ET.SubElement(geoloc, 'latitude')
lat.text = celltostr(row['adresse-information-latitude'])
lng = ET.SubElement(geoloc, 'longitude')
lng.text = celltostr(row['adresse-information-longitude'])
# Adresse Info Extra adresse
extras = ET.SubElement(adresse, 'extras')
extras.attrib['info'] = 'adresse'
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'ligne5-adresse'
extra.text = celltostr(row['adresse-information-extra-ligne5-adresse'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'conformite-reglementaire'
extra.text = celltostr(row['adresse-information-extra-regle'])
restauration = ET.SubElement(action, 'restauration')
restauration.text = CDATA(celltostr(row['restauration']))
hebergement = ET.SubElement(action, 'hebergement')
hebergement.text = CDATA(celltostr(row['hebergement']))
transport = ET.SubElement(action, 'transport')
transport.text = CDATA(celltostr(row['transport']))
acces_handicapes = ET.SubElement(action, 'acces-handicapes')
acces_handicapes.text = celltostr(row['acces-handicapes'])
langue_formation = ET.SubElement(action, 'langue-formation')
langue_formation.text = celltostr(row['langue-formation'])
modalites_recrutement = ET.SubElement(action, 'modalites-recrutement')
modalites_recrutement.text = celltostr(row['modalites-recrutement'])
modalites_pedagogiques = ET.SubElement(action, 'modalites-pedagogiques')
modalites_pedagogiques.text = CDATA(celltostr(row['modalites-pedagogiques']))
code_perimetre_recrutement = ET.SubElement(action, 'code-perimetre-recrutement')
code_perimetre_recrutement.text = celltostr(row['code-perimetre-recrutement'])
nombre_heures_centre = ET.SubElement(action, 'nombre-heures-centre')
nombre_heures_centre.text = celltostr(row['nombre-heures-centre'])
nombre_heures_entreprise = ET.SubElement(action, 'nombre-heures-entreprise')
nombre_heures_entreprise.text = celltostr(row['nombre-heures-entreprise'])
# Extras
extras = ET.SubElement(action, 'extras')
extras.attrib['info'] = 'action'
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'contact-information'
coordonnees = ET.SubElement(extra, 'coordonnees')
coordonnees.attrib['numero'] = celltostr(row['action-extra-coordonnees-numero'])
nom = ET.SubElement(coordonnees, 'nom')
nom.text = celltostr(row['action-extra-coordonnees-nom'])
prenom = ET.SubElement(coordonnees, 'prenom')
prenom.text = celltostr(row['action-extra-coordonnees-prenom'])
telfixe = ET.SubElement(coordonnees, 'telfixe')
numtel = ET.SubElement(telfixe, 'numtel')
numtel.text = celltostr(row['action-extra-numtel'])
courriel = ET.SubElement(coordonnees, 'courriel')
courriel.text = celltostr(row['action-extra-courriel'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'modalites-handicap'
extra.text = celltostr(row['action-extra-modalites-handicap'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'info-admission'
extra.text = celltostr(row['action-extra-info-admission'])
extra = ET.SubElement(extras, 'extras')
extra.attrib['info'] = 'codes-modalites-admission'
extra2 = ET.SubElement(extra, 'extra')
extra2.attrib['info'] = 'code-modalites-admission'
extra2.text = celltostr(row['action-extra-modalites-admission'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'duree-apprentissage'
extra.text = celltostr(row['action-extra-duree-apprentissage'])
extra = ET.SubElement(extras, 'extras')
extra.attrib['info'] = 'codes-rythme-formation'
extra2 = ET.SubElement(extra, 'extra')
extra2.attrib['info'] = 'code-rythme-formation'
extra2.text = celltostr(row['action-extra-codes-rythme-formation'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'individuelle-collective'
extra.text = celltostr(row['action-extra-individuelle-collective'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'frais-anpec'
extra.text = celltostr(row['action-extra-frais-anpec'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'frais-certif-inclus-frais-anpec'
extra.text = celltostr(row['action-extra-frais-certif-anpec'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'detail-frais-anpec'
extra.text = celltostr(row['action-extra-detail-frais-anpec'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'code-modele-economique'
extra.text = celltostr(row['action-extra-code-modele-economique'])
extra = ET.SubElement(extras, 'extras')
extra.attrib['info'] = 'frais-pedagogiques'
extra2 = ET.SubElement(extra, 'extra')
extra2.attrib['info'] = 'taux-tva'
extra2.text = celltostr(row['action-extra-taux-tva'])
extra2 = ET.SubElement(extra, 'extra')
extra2.attrib['info'] = 'frais-ht'
extra2.text = celltostr(row['action-extra-frais-ht'])
extra2 = ET.SubElement(extra, 'extra')
extra2.attrib['info'] = 'frais-ttc'
extra2.text = celltostr(row['action-extra-frais-ttc'])
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'existence-prerequis'
extra.text = celltostr(row['action-extra-existence-prerequis'])
# Organisme de formation
organisme_formation_responsable = ET.SubElement(formation, 'organisme-formation-responsable')
SIRET_organisme_formation = ET.SubElement(organisme_formation_responsable, 'SIRET-organisme-formation')
siret = ET.SubElement(SIRET_organisme_formation, 'SIRET')
siret.text = celltostr(row['SIRET'])
# Extra
extras = ET.SubElement(formation, 'extras')
extras.attrib['info'] = 'formation'
extra = ET.SubElement(extras, 'extra')
extra.attrib['info'] = 'resume-contenu'
extra.text = CDATA(celltostr(row['resume-contenu']))
write_log("File generated.")
# create a new XML file with the results
# myoffers = ET.tostring(lheo, encoding='utf-8').decode() # python3
# Old way to save XML in UTF-8 encoding
# mycatalogue = io.open("catalogue.xml", "w", encoding='utf8')
# mycatalogue.write(myoffers)
tree = ET.ElementTree(lheo)
tree.write('upload/catalogue.xml', encoding='iso-8859-1', xml_declaration=True)
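# Minimal usage sketch, not part of the original file (the CSV path below is a
# hypothetical example; the file must contain the column names referenced above,
# and the result is written to upload/catalogue.xml).
if __name__ == '__main__':
    parse_generate('catalogue.csv')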
``` |
{
"source": "73VW/arduinozaure",
"score": 3
} |
#### File: arduinozore/handlers/device.py
```python
import os
from arduinozore.handlers.crudHandler import CrudHandler
from arduinozore.handlers.tools import get_arduino
from arduinozore.handlers.tools import get_config_name
from arduinozore.models.card import Card
from arduinozore.models.device import Device
from arduinozore.models.sensor import Sensor
from arduinozore.settings import DEVICE_CONFIG_FOLDER
from arduinozore.settings import SSL_PORT
from arduinozore.settings import path
class DevicePageHandler(CrudHandler):
"""Device page handler."""
default_args = {'enabled': '', 'name': '', 'type': ''}
def list(self):
"""List configuration."""
devices = Device.get_all()
self.render('device/list.html', devices=devices)
def show(self, slug):
"""Show device."""
device = Device.get(Device.get_identifier_from_serial(slug))
if device is None:
device = Device.get_config(slug)
if device is None:
self.redirect(self.redirect_url + '/create', permanent=False)
else:
settings = dict()
settings['device'] = device
settings['slug'] = slug
self.render('device/show.html', **settings)
else:
settings = dict()
settings['port'] = SSL_PORT
settings['slug'] = slug
settings['device'] = device
self.render('device/communicate.html', **settings)
def create(self, slug):
"""Show configuration form for device."""
cards = Card.get_all()
sensors = Sensor.get_all()
device = Device.get(slug)
if 'card' in self.request.arguments:
card = Card.get(self.get_argument('card'))
else:
card = None
settings = dict()
settings['method'] = 'post'
settings['cards'] = cards
settings['card'] = card
settings['sensors'] = sensors
settings['device'] = device
settings['slug'] = slug
settings['method'] = 'post'
self.render('device/config.html', **settings)
def edit(self, slug):
"""Show configuration form for device."""
device = Device.get(Device.get_identifier_from_serial(slug))
cards = Card.get_all()
sensors = Sensor.get_all()
if device is None:
device = Device.get_config(slug)
if device is None:
self.redirect(self.redirect_url + '/create', permanent=False)
settings = dict()
settings['method'] = 'put'
settings['cards'] = cards
settings['card'] = device.card
settings['sensors'] = sensors
settings['device'] = device
settings['method'] = 'put'
self.render('device/config.html', **settings)
def store(self, slug):
"""Store configuration."""
self.save(slug)
self.redirect(self.redirect_url, permanent=True)
def update(self, slug):
"""Update configuration."""
self.save(slug)
self.redirect(self.redirect_url, permanent=True)
def save(self, slug):
"""Save configuration."""
try:
self.request.arguments.pop("_method")
except Exception:
pass
device = Device.from_request_args(slug, self.request.arguments)
device.save()
def destroy(self, slug):
"""Destroy configuration."""
arduino = get_arduino(slug)
config_name = get_config_name(arduino)
config_file = path(DEVICE_CONFIG_FOLDER, config_name)
os.remove(config_file)
self.redirect(self.redirect_url, permanent=False)
```
#### File: arduinozore/handlers/error404handler.py
```python
from arduinozore.handlers.baseHandler import BaseHandler
class My404Handler(BaseHandler):
"""404 error handler."""
def prepare(self):
"""Override prepare to cover all possible HTTP methods."""
if self.request.protocol == 'http':
            self.redirect('https://' + self.request.host +
                          self.request.uri, permanent=False)
            return
self.set_status(404)
self.render("404.html")
```
#### File: arduinozore/handlers/index.py
```python
from arduinozore.handlers.baseHandler import BaseHandler
from arduinozore.models.device import Device
class IndexPageHandler(BaseHandler):
"""Index page handler."""
def get(self):
"""Handle get request."""
devices = Device.get_connected_devices()
self.render('index.html', devices=devices)
```
#### File: arduinozore/handlers/sensor.py
```python
from arduinozore.handlers.crudHandler import CrudHandler
from arduinozore.models.sensor import Sensor
from tornado.escape import url_escape
class SensorHandler(CrudHandler):
"""Sensor handler."""
def list(self):
"""List configurations."""
sensors = Sensor.get_all()
self.render('sensor/list.html', sensors=sensors)
def show(self, slug):
"""Show device."""
sensor = Sensor.get(slug)
if sensor is not None:
self.render('sensor/show.html', sensor=sensor, slug=slug)
else:
self.redirect('/sensor', permanent=False)
def create(self):
"""Show configuration form for device."""
settings = dict()
settings['sensor'] = None
settings['method'] = 'post'
self.render('sensor/config.html', **settings)
def edit(self, slug):
"""Show configuration form for device."""
sensor = Sensor.get(slug)
settings = dict()
settings['sensor'] = sensor
settings['method'] = 'put'
self.render('sensor/config.html', **settings)
def store(self, slug=""):
"""Create configuration."""
sensor_name = self.get_argument('name')
self.save(sensor_name)
def update(self, slug):
"""Update configuration."""
self.save(slug)
def save(self, slug):
"""Save configuration."""
sensor_name = slug
min_value = self.get_argument('min_value')
max_value = self.get_argument('max_value')
suffix = self.get_argument('suffix')
reverse = True if 'reverse' in self.request.arguments else False
sensor = Sensor(sensor_name, min_value, max_value, reverse, suffix)
sensor.save()
slug = url_escape(slug)
redirect_url = self.redirect_url
if slug not in redirect_url:
redirect_url += '/' + slug
self.redirect(redirect_url, permanent=True)
def destroy(self, slug):
"""Destroy configuration."""
sensor = Sensor.get(slug)
sensor.delete()
self.redirect(self.redirect_url, permanent=False)
```
#### File: arduinozore/handlers/serialManager.py
```python
import json
import sys
import time
from multiprocessing import Event
from multiprocessing import Manager
from multiprocessing import Process
from arduinozore.handlers.serialReader import SerialReader
class Singleton(type):
"""
Singleton metaclass.
From https://stackoverflow.com/q/6760685/9395299.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
"""Instantiate singleton or return."""
if cls not in cls._instances:
cls._instances[cls] = super(
Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class SerialManager(Process, metaclass=Singleton):
"""Process class."""
def __init__(self):
"""Init process."""
Process.__init__(self)
self.exit = Event()
self.serial_readers = {}
self.datas = Manager().dict()
self.out = Manager().dict()
def toggle_pin(self, port, pin):
"""Toggle pin on card connected to port."""
if port not in dict(self.out):
self.out[port] = pin
sys.stdout.flush()
def get_toggelable_pin(self, port):
"""Return toggelable pins for port."""
try:
toggle = self.out[port]
del self.out[port]
return toggle
except KeyError:
return None
def finish(self, port):
"""Finish serial reader process."""
if port in self.serial_readers:
self.serial_readers[port].shutdown()
del self.serial_readers[port]
def set_datas(self, port, datas):
"""Set data from child process."""
self.datas[port] = json.dumps(datas)
def get_serial_reader(self, port):
"""Get datas for specified serial port."""
if port not in self.serial_readers:
self.serial_readers[port] = SerialReader(port, self)
self.serial_readers[port].start()
def get_datas_for_port(self, port):
"""Get datas for serial port."""
self.get_serial_reader(port)
try:
return self.datas[port]
except (KeyError, Exception):
self.datas[port] = 'Initializing reader'
return self.datas[port]
def run(self):
"""Run process."""
while not self.exit.is_set():
try:
# print("Manager running")
# sys.stdout.flush()
time.sleep(1)
except (KeyboardInterrupt, RuntimeError) as e:
self.shutdown()
except Exception as e:
raise e
finally:
for s_r in self.serial_readers:
self.serial_readers[s_r].join()
def shutdown(self):
"""Shut down process."""
self.exit.set()
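# Illustrative note, not part of the original file: because SerialManager uses
# the Singleton metaclass defined above, constructing it twice yields the same
# object, e.g.
#
#   assert SerialManager() is SerialManager()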
```
#### File: arduinozore/handlers/tools.py
```python
import os
from arduinozore.settings import DEVICE_CONFIG_FOLDER
from arduinozore.settings import path
from serial.tools import list_ports
from yaml import safe_load
def get_arduinos():
"""Return connected arduinos devices."""
serials = list(list_ports.comports())
return [s for s in serials if 'Arduino' in s.description or (
s.manufacturer is not None and 'Arduino' in s.manufacturer)]
def get_arduino(name):
"""Return arduino by name."""
arduinos = get_arduinos()
arduino = next(
(arduino for arduino in arduinos if name in arduino.device), None)
return arduino
def load_config(config_file):
"""Load port list from specific config file."""
with open(config_file, 'r') as f:
datas = safe_load(f)
ports = {k: datas['ports'][k]
             for k in datas['ports'] if len(datas['ports'][k]) != 0}
return (datas['device_name'], ports)
def load_config_from_arduino(arduino):
"""Load port list corresponding to arduino if it exists."""
filenames = os.listdir(DEVICE_CONFIG_FOLDER)
config_name = get_config_name(arduino)
config_file = path(DEVICE_CONFIG_FOLDER, config_name)
if config_name in filenames:
return load_config(config_file)
else:
return (None, None)
def get_config_name(arduino):
"""Return config filename for arduino."""
config_name = str(arduino.vid)
config_name += str(arduino.pid)
config_name += str(arduino.serial_number)
config_name += ".yaml"
return config_name
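# Illustrative note, not part of the original file (hypothetical values): an
# Arduino reported with vid=9025, pid=67 and serial_number='A1B2C3' would be
# mapped by get_config_name() to the file name '902567A1B2C3.yaml'.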
```
#### File: arduinozore/models/card.py
```python
from arduinozore.models.model import Model
from arduinozore.settings import CARD_CONFIG_FOLDER
class Card(Model):
"""Card class."""
yaml_tag = u'!Card'
def __init__(self, name, nb_input_pins, nb_output_pins):
"""Init card."""
self.name = name
self.nb_input_pins = int(nb_input_pins)
self.nb_output_pins = int(nb_output_pins)
def save(self):
"""Save card."""
self.save_yaml(CARD_CONFIG_FOLDER)
def delete(self):
"""Delete card."""
self._delete(CARD_CONFIG_FOLDER)
def __repr__(self):
"""Represent card in order to save it."""
return "%s(name=%r, nb_input_pins=%r, nb_output_pins=%r)" % (
self.__class__.__name__, self.name, self.nb_input_pins, self.nb_output_pins)
@classmethod
def get_all(cls):
"""Get all card configurations."""
return cls._get_all(CARD_CONFIG_FOLDER)
@classmethod
def get(cls, name):
"""Get card by name."""
return cls._get(name, CARD_CONFIG_FOLDER)
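# Minimal usage sketch, not part of the original file (hypothetical pin counts):
# creating a card description and persisting it as YAML in CARD_CONFIG_FOLDER
# through the inherited save_yaml helper, then reading it back by name.
if __name__ == '__main__':
    card = Card(name='uno', nb_input_pins=6, nb_output_pins=14)
    card.save()
    print(Card.get('uno'))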
```
#### File: arduinozore/models/device.py
```python
from arduinozore.models.card import Card
from arduinozore.models.model import Model
from arduinozore.settings import DEVICE_CONFIG_FOLDER
from serial.tools import list_ports
class Port(Model):
"""Port class."""
yaml_tag = u'!Port'
def __init__(self, name, number, enabled, _type="output"):
"""Init port."""
self.name = name
self._type = _type
self.number = number
        self.enabled = enabled if isinstance(enabled, bool) else (
            True if enabled == 'on' else False)
def __repr__(self):
"""Represent port in order to save it."""
return "%s(number=%r, name=%r, _type=%r, enabled=%r)" % (
self.__class__.__name__, self.number, self.name, self._type, self.enabled)
class Device(Model):
"""Device class."""
yaml_tag = u'!Device'
def __init__(self, name, identifier, card_name):
"""Init device."""
self.name = name
self.identifier = identifier
self.card = Card.get(card_name)
self.init_ports()
def save(self):
"""Save device."""
self.save_yaml(DEVICE_CONFIG_FOLDER)
def delete(self):
"""Delete device."""
self._delete(DEVICE_CONFIG_FOLDER)
def __repr__(self):
"""Represent device in order to save it."""
return "%s(name=%r, identifier=%r, card=%r, ports=%r)" % (
self.__class__.__name__, self.name, self.identifier, self.card, self.ports)
def init_ports(self):
"""Init port list."""
self.ports = {'input': list(), 'output': list()}
for i in range(self.card.nb_input_pins):
self.ports['input'].append(Port(
number=i, name="", enabled="False", _type=""))
for i in range(self.card.nb_output_pins):
self.ports['output'].append(Port(
number=i, name="", enabled="False", _type=""))
def add_port_from_dict(self, port, dict):
"""Create port and add from dict."""
port = int(port.replace("port", ""))
if port >= self.card.nb_input_pins:
port = port - self.card.nb_input_pins
p = Port(number=port, **dict)
_type = p._type if p._type == "output" else "input"
port_to_replace = next(
(self.ports[_type].index(port) for port in self.ports[_type] if p.number == port.number), None)
self.ports[_type][port_to_replace] = p
def get_filename(self):
"""Get filename to save."""
return __class__.filenamify(self.identifier) + ".yaml"
@classmethod
def get_arduinos(cls):
"""Get list of connected arduinos."""
serials = list(list_ports.comports())
arduinos = [s for s in serials if 'Arduino' in s.description or (
s.manufacturer is not None and 'Arduino' in s.manufacturer)]
return arduinos
@classmethod
def get_all(cls):
"""Get all device configurations."""
return __class__._get_all(DEVICE_CONFIG_FOLDER)
@classmethod
def get(cls, name):
"""Get device by name."""
return __class__._get(name, DEVICE_CONFIG_FOLDER)
@classmethod
def get_connected_devices(cls):
"""Get devices connected."""
arduinos = cls.get_arduinos()
devices = {a.device: __class__.get(__class__.get_identifier(
a)) for a in arduinos}
return devices
@classmethod
def get_config(cls, name):
"""Get config by name."""
configs = cls.get_all()
if configs is not None:
return next(
(config for config in configs if config.name == name), None)
else:
return configs
@classmethod
def get_identifier_from_serial(cls, serial):
"""Get device identifier from serial name."""
arduinos = cls.get_arduinos()
arduino = next(
(arduino for arduino in arduinos if serial in arduino.device), None)
return __class__.get_identifier(arduino)
@classmethod
def get_identifier(cls, arduino):
"""Get identifier from arduino."""
if arduino is not None:
config_name = str(arduino.vid)
config_name += str(arduino.pid)
config_name += str(arduino.serial_number)
return config_name
else:
return None
@classmethod
def from_request_args(cls, slug, args):
"""Init device from request args."""
args = {arg: args[arg][0].decode() for arg in args}
res = dict()
for arg in args:
if '[' in arg and ']' in arg:
split_arg = arg.split('[')
var_name = split_arg[0]
index = split_arg[1].split(']')[0]
if var_name not in res:
res[var_name] = dict()
res[var_name][index] = args[arg]
else:
res[arg] = args[arg]
if 'identifier' not in res:
identifier = cls.get_identifier_from_serial(slug)
else:
identifier = res.pop('identifier')
dev = cls(res.pop('name'), identifier,
res.pop('card_name'))
for port in res:
dev.add_port_from_dict(port, res[port])
return dev
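# Illustrative note, not part of the original file (hypothetical field names,
# inferred from Port.__init__ and the parsing above): from_request_args expects
# Tornado-style request arguments where per-port fields are encoded as
# "portN[field]", e.g.
#
#   {'name': [b'greenhouse'], 'card_name': [b'uno'],
#    'port0[name]': [b'temperature'], 'port0[enabled]': [b'on'],
#    'port0[_type]': [b'input']}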
``` |
{
"source": "73VW/area4",
"score": 3
} |
#### File: area4/area4/__init__.py
```python
name = "area4"
author = "https://github.com/RDIL"
author_email = "<EMAIL>"
description = "Dividers in Python, the easy way! Multiple different divider looks."
# Divider variables:
divider1 = str("------------------------")
divider2 = str("________________________")
divider3 = str("........................")
divider4 = str("⬛⬛⬛⬛⬛⬛⬛⬛⬛⬛⬛⬛⬛")
divider5 = str("⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️⬆️")
divider6 = str("⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️⬇️")
divider7 = str("========================")
custom_div = str("")
# Functions:
def div1():
print(divider1)
def div2():
print(divider2)
def div3():
print(divider3)
def div4():
print(divider4)
def div5():
print(divider5)
def div6():
print(divider6)
def div7():
print(divider7)
def customdiv():
print(custom_div)
def area4info():
print("Name:", name)
print("Author:", author, "\nAuthor Email:", author_email)
print("Description:", description)
``` |