max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
lira/ui.py | stsewd/pylearn | 1 | 12792851 | from prompt_toolkit.application import Application
from prompt_toolkit.formatted_text import merge_formatted_text, to_formatted_text
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, VSplit, Window
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.widgets import Label, TextArea
from lira.app import LiraApp
from lira.book import Book
def get_key_bindings():
keys = KeyBindings()
@keys.add("c-c")
@keys.add("c-q")
def _(event):
"""Pressing Ctrl-Q or Ctrl-C will exit the user interface."""
event.app.exit()
return keys
themes = {
"default": {
"Text": "#fff",
"Strong": "#fff bold",
"Emphasis": "#fff italic",
"Literal": "#fff",
"Paragraph": "#fff",
"CodeBlock": "#fff",
"Prompt": "#fff",
"TestBlock": "#fff",
"Section": "#fff",
"Separator": "#00ff00",
}
}
styles = themes["default"]
sections = {
"menu": TextArea(
height=40, width=25, style=styles["Text"], text="Python-Tutorial\n"
),
"status": TextArea(
height=3,
prompt=">>> ",
style=styles["Text"],
multiline=False,
wrap_lines=False,
),
"text": TextArea(height=10, width=40, style=styles["Text"], text="text"),
"prompt": TextArea(height=10, width=40, style=styles["Text"], text="prompt"),
"vseparator": Window(height=0, width=1, char="|", style=styles["Separator"]),
"hseparator": Window(height=1, char="-", style=styles["Separator"]),
}
class TerminalUI:
def __init__(self, path):
self.theme = "default"
sections_list = []
for section in ["text", "prompt"]:
sections_list.append(sections[section])
book = Book(root=path)
book.parse()
chapters = book.chapters[1]
chapters.parse()
contents = chapters.contents[0]
render = self.get_label(contents)
label = Label(merge_formatted_text(render))
self.container = HSplit(
[
VSplit(
[
sections["menu"],
sections["vseparator"],
HSplit([label, sections["prompt"]]),
]
),
sections["hseparator"],
sections["status"],
]
)
def get_label(self, contents):
render = []
for node in contents.children:
if node.is_terminal:
text = node.text()
style = node.tagname
render.append(to_formatted_text(text, styles[style]))
else:
render.extend(self.get_label(node))
render.append(to_formatted_text("\n", ""))
return render
def run(self):
lira = LiraApp()
lira.setup()
self.app = Application(
layout=Layout(self.container),
key_bindings=get_key_bindings(),
mouse_support=True,
full_screen=True,
)
self.app.run()
| 2.828125 | 3 |
fred/conf.py | TUDelft-DataDrivenControl/FRED | 0 | 12792852 | import yaml
import numpy as np
import logging
logger = logging.getLogger("cm.conf")
class ControlModelParameters:
"""
Load parameters from .yaml file.
"""
def __init__(self):
self._config = None
self.wind_farm = None
self.turbine = None
self.simulation = None
self.flow = None
self.ssc = None
self.mode = None
def load(self, file):
logger.info("Loading configuration from: {}".format(file))
self._load_configuration_from_yaml(file)
try:
self._assign_configuration()
except KeyError as ke:
message = "Missing definition in config file, did not find {}".format(ke)
logger.error(message, exc_info=1)
raise KeyError("Missing definition in config file, did not find {}".format(ke))
logger.info("Loaded configuration.")
def _load_configuration_from_yaml(self, file):
stream = open(file, "r")
self._config = yaml.load(stream=stream, Loader=yaml.SafeLoader)
def print(self):
print(yaml.dump(self._config))
def _assign_configuration(self):
self.mode = self._config["mode"]
if self.mode == "simulation":
self.wind_farm = self.WindFarm(self._config["wind_farm"])
self.turbine = self.Turbine(self._config["turbine"])
self.simulation = self.Simulation(self._config["simulation"])
self.flow = self.Flow(self._config["flow"])
if self.mode == "supercontroller":
self.ssc = self.SSC(self._config["ssc"])
self.turbine = self.Turbine(self._config["turbine"])
# if self.ssc.type == "gradient_step":
self.wind_farm = self.WindFarm(self._config["wind_farm"])
self.simulation = self.Simulation(self._config["simulation"])
self.flow = self.Flow(self._config["flow"])
# else:
# self.simulation = self.Simulation(self._config["simulation"])
if "estimator" in self._config.keys():
self.estimator = self.Estimator(self._config["estimator"])
class WindFarm:
def __init__(self, config_dict):
self.size = config_dict["size"]
self.cells = config_dict["cells"]
self.positions = config_dict["positions"]
self.yaw_angles = np.deg2rad(config_dict["yaw_angles"])
# self.yaw_angles = [np.array(x) for x in self.yaw_angles]
self.do_refine_turbines = config_dict["do_refine_turbines"]
if self.do_refine_turbines:
self.refine_radius = config_dict["refine_radius"]
else:
self.refine_radius = None
self.controller = self.FarmController(config_dict["farm_controller"])
class FarmController:
def __init__(self, config_dict):
self.control_discretisation = config_dict["control_discretisation"]
self.controls = config_dict["controls"]
self.with_external_controller = False
for control in self.controls.values():
if control['type'] == 'external':
self.with_external_controller = True
self.external_controls = config_dict["external_controller"]["controls"]
self.port = config_dict["external_controller"]["port"]
break
# todo: refine control settings
class Turbine:
"""
Turbine configuration class
"""
def __init__(self,config_dict):
self.axial_induction = config_dict["axial_induction"]
self.diameter = config_dict["diameter"]
self.radius = self.diameter / 2
self.thickness = config_dict["thickness"]
self.hub_height = config_dict["hub_height"]
self.kernel = config_dict["kernel"]
self.force_scale_axial = config_dict.get("force_scale_axial",1.)
self.force_scale_transverse = config_dict.get("force_scale_transverse",1.)
self.power_scale = config_dict.get("power_scale",1.)
self.yaw_rate_limit = config_dict.get("yaw_rate_limit",-1)
self.coefficients = config_dict.get("coefficients", "induction")
self.pitch = config_dict.get("pitch", 0.)
self.torque = config_dict.get("torque", 0.)
class Simulation:
def __init__(self, config_dict):
self.is_dynamic = config_dict["is_dynamic"]
# if not self.is_dynamic:
# raise NotImplementedError("Steady flow currently not implemented")
if self.is_dynamic:
self.total_time = config_dict["total_time"]
self.time_step = config_dict["time_step"]
self.write_time_step = config_dict["write_time_step"]
self.name = config_dict["name"]
self.save_logs = config_dict["save_logs"]
self.dimensions = config_dict["dimensions"]
self.probes = config_dict.get("probes",[])
class Flow:
def __init__(self, config_dict):
self.kinematic_viscosity = config_dict["kinematic_viscosity"]
self.tuning_viscosity = config_dict["tuning_viscosity"]
self.density = config_dict["density"]
self.mixing_length = config_dict["mixing_length"]
self.wake_mixing_length = config_dict["wake_mixing_length"]
self.wake_mixing_width = config_dict["wake_mixing_width"]
self.wake_mixing_offset = config_dict["wake_mixing_offset"]
self.wake_mixing_ml_max = config_dict["wake_mixing_ml_max"]
self.continuity_correction = config_dict["continuity_correction"]
self.type = config_dict["type"]
if self.type == "steady":
self.inflow_velocity = config_dict["inflow_velocity"]
elif self.type == "series":
self.inflow_velocity_series = np.array(config_dict["inflow_velocity_series"])
self.inflow_velocity = self.inflow_velocity_series[0, 1:3]
self.finite_element = config_dict.get("finite_element","TH")
class SSC:
def __init__(self, config_dict):
self.port = config_dict["port"]
self.controls = config_dict["controls"]
self.external_controls = config_dict["external_controls"]
self.external_measurements = config_dict["external_measurements"]
self.control_discretisation = config_dict["control_discretisation"]
self.prediction_horizon = config_dict["prediction_horizon"]
self.control_horizon = config_dict["control_horizon"]
self.transient_time = config_dict.get("transient_time",-1)
# self.objective = config_dict["objective"]
# if self.objective == "tracking":
# self.power_reference = np.array(config_dict["power_reference"])
# self.power_reference[:, 1] *= 1e6
# # if self.mode == "pitch_torque":
# # raise NotImplementedError("gradient step pitch torque control not implemented.")
self.plant = config_dict.get("plant", "cm")
if self.plant == "sowfa":
self.sowfa_time_step = config_dict["sowfa_time_step"]
class Estimator:
def __init__(self, config_dict):
try:
self.source = config_dict["source"]
except KeyError as ke:
logger.error("Only SOWFA as data source implemented")
self.estimation_type = config_dict["type"]
self.assimilation_window = config_dict["assimilation_window"]
self.forward_step = config_dict.get("forward_step", 1)
self.transient_period = config_dict.get("transient_period", -1)
self.prediction_period = config_dict.get("prediction_period", 0)
self.cost_function_weights = config_dict["cost_function_weights"]
self.data = config_dict["data"]
par = ControlModelParameters()
wind_farm = par.wind_farm
turbine = par.turbine
flow = par.flow
simulation = par.simulation
with_adjoint = True
if __name__ == '__main__':
par = ControlModelParameters()
par.load("../config/test_config.yaml")
# par.print()
# par.turbine.print()
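    # Illustrative sketch (not in the original file): after a successful load()
    # in "simulation" mode, the sections assigned in _assign_configuration()
    # are available as attributes. The attributes referenced below exist per the
    # classes defined above; their values depend entirely on the YAML file.
    if par.mode == "simulation":
        print(par.simulation.name, par.flow.type, par.turbine.diameter)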
| 2.640625 | 3 |
knlp/information_extract/keywords_extraction/textrank_keyword.py | ERICMIAO0817/knlp | 19 | 12792853 | # !/usr/bin/python
# -*- coding:UTF-8 -*-
# -----------------------------------------------------------------------#
# File Name: textrank_keyword
# Author: <NAME>
# Mail: <EMAIL>
# Created Time: 2021-09-04
# Description:
# -----------------------------------------------------------------------#
import networkx as nx
import numpy as np
from knlp.common.constant import sentence_delimiters, allow_speech_tags
from knlp.information_extract.keywords_extraction.textrank import TextRank
from knlp.utils.util import get_default_stop_words_file, AttrDict
class TextRank4Keyword(TextRank):
"""
    This class implements keyword extraction based on the TextRank algorithm.
    The basic idea is to tokenize the text, compute a weight for each word, and sort by weight to obtain word importance.
    English text is not handled for now.
    For an introduction (in Chinese) see https://www.jiqizhixin.com/articles/2018-12-28-18
ref https://github.com/letiantian/TextRank4ZH/blob/master/textrank4zh/
"""
def __init__(self, stop_words_file=get_default_stop_words_file(), private_vocab=None,
allow_speech_tags=allow_speech_tags,
delimiters="|".join(sentence_delimiters)):
"""
Args:
            stop_words_file: path to the stop-words file
            private_vocab: optional custom vocabulary
            allow_speech_tags: part-of-speech tags to keep
            delimiters: defaults to `?!;?!。;…\n`; used to split the text into sentences.
"""
super().__init__(stop_words_file=stop_words_file, private_vocab=private_vocab,
allow_speech_tags=allow_speech_tags,
delimiters=delimiters)
def get_keywords(self, num=6, window=2, word_min_len=1, page_rank_config={'alpha': 0.85, }):
"""
        Get the num most important keywords whose length is at least word_min_len.
Args:
num:
window:
word_min_len:
page_rank_config:
        Returns: a list of keywords, each as an AttrDict.
"""
        # Get the keywords scored by TextRank, sorted by importance
keywords = self.sort_words(self._vertex_source, self._edge_source, window=window,
page_rank_config=page_rank_config)
result = []
count = 0
for item in keywords:
if count >= num:
break
if len(item.word) >= word_min_len:
result.append(item)
count += 1
return result
def get_keyphrases(self, keywords_num=12, min_occur_num=2):
"""
        Get key phrases.
        Build candidate phrases from the top keywords_num keywords; a phrase is kept only if it appears at least min_occur_num times in the original text.
        Only a limited set of keywords_num keywords is used to construct the phrases.
Args:
            keywords_num: number of keywords to use
            min_occur_num: minimum number of occurrences required
        Returns: a list of key phrases.
"""
keywords_set = set([item.word for item in self.get_keywords(num=keywords_num, word_min_len=1)])
keyphrases = set()
for sentence in self.words_no_filter:
one = []
for word in sentence:
if word in keywords_set:
one.append(word)
else:
if len(one) > 1:
                        keyphrases.add(''.join(one))  # join the consecutive keywords into one phrase
if len(one) == 0:
continue
else:
one = []
            # handle a trailing run of keywords at the end of the sentence
if len(one) > 1:
keyphrases.add(''.join(one))
return [phrase for phrase in keyphrases
if self.text.count(phrase) >= min_occur_num or phrase in self.label_set]
@staticmethod
def sort_words(vertex_source, edge_source, window=2, page_rank_config=None):
"""
        Sort words by importance, from most to least important.
Args:
            vertex_source: list of lists; each sub-list is a sentence whose elements are words, used to build the nodes of the PageRank graph
            edge_source: list of lists; each sub-list is a sentence whose elements are words, whose positional relations are used to build the edges of the PageRank graph
            window: within a sentence, any two words at most window positions apart are treated as connected by an edge
            page_rank_config: settings for PageRank
Returns:
"""
page_rank_config = {'alpha': 0.85, } if not page_rank_config else page_rank_config
sorted_words = []
word_index = {}
index_word = {}
_vertex_source = vertex_source
_edge_source = edge_source
words_number = 0
for word_list in _vertex_source:
for word in word_list:
if word not in word_index:
word_index[word] = words_number
index_word[words_number] = word
# MAP WORD TO AN INDEX
words_number += 1
graph = np.zeros((words_number, words_number)) # words_number X words_number MATRIX
def combine(word_list, window=2):
"""
            Generate word pairs within the given window, used to build the edges between words.
            Args:
                word_list: list of str, the words of one sentence.
                window: int, the window size.
Returns:
"""
if window < 2:
window = 2
for x in range(1, window):
if x >= len(word_list):
break
word_list2 = word_list[x:]
res = zip(word_list, word_list2)
for r in res:
yield r
for word_list in _edge_source:
for w1, w2 in combine(word_list, window):
if w1 in word_index and w2 in word_index:
index1 = word_index[w1]
index2 = word_index[w2]
                    # positions that are linked get weight 1.0
graph[index1][index2] = 1.0
graph[index2][index1] = 1.0
nx_graph = nx.from_numpy_matrix(graph)
scores = nx.pagerank(nx_graph, **page_rank_config) # this is a dict DIRECTLY GET THE SCORE FOR ALL THIS MATRIX
sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
for index, score in sorted_scores:
item = AttrDict(word=index_word[index], weight=score)
sorted_words.append(item)
return sorted_words
if __name__ == '__main__':
text = "测试分词的结果是否符合预期"
window = 5
num = 20
word_min_len = 2
need_key_phrase = True
tr4w = TextRank4Keyword()
tr4w.analyze(text=text, lower=True)
output = {"key_words": [], "key_phrase": []}
res_keywords = tr4w.get_keywords(num=num, word_min_len=word_min_len, window=window)
for item in res_keywords:
kw_count = tr4w.text.count(item.word)
output["key_words"].append([item.word, item.weight, kw_count])
if need_key_phrase:
for phrase in tr4w.get_keyphrases(keywords_num=10, min_occur_num=2):
output['key_phrase'].append(phrase)
print(output)
| 2.671875 | 3 |
comprehend_groundtruth_integration/src/comprehend_customer_scripts/GroundTruth/DocumentClassifier/groundtruth_format_conversion_handler.py | rpivo/amazon-comprehend-examples | 16 | 12792854 | <reponame>rpivo/amazon-comprehend-examples
import json
import argparse
from urllib.parse import urlparse
from groundtruth_to_comprehend_clr_format_converter import GroundTruthToComprehendCLRFormatConverter
class GroundTruthToCLRFormatConversionHandler:
def __init__(self):
self.convert_object = GroundTruthToComprehendCLRFormatConverter()
self.dataset_filename = ""
def validate_s3_input(self, args):
dataset_output_S3Uri = args.dataset_output_S3Uri
dataset_url = urlparse(dataset_output_S3Uri)
dataset_scheme = dataset_url.scheme
self.dataset_filename = dataset_url.path.split("/")[-1]
print(self.dataset_filename)
if dataset_scheme != "s3" or self.dataset_filename.split(".")[-1] != "csv":
raise Exception("Either of the output S3 lo cation provided is incorrect!")
def read_write_multiclass_dataset(self):
with open('output.manifest', 'r', encoding='utf-8') as groundtruth_output_file, \
open(self.dataset_filename, 'a', encoding='utf8') as multiclass_dataset:
for index, jsonLine in enumerate(groundtruth_output_file):
class_name, source = self.convert_object.convert_to_multiclass_dataset(index, jsonLine)
source = json.dumps(source).strip('"')
multiclass_dataset.write(class_name + ',"' + source + '"')
multiclass_dataset.write("\n")
def read_write_multilabel_dataset(self, label_delimiter):
with open('output.manifest', 'r', encoding='utf-8') as groundtruth_output_file, \
open(self.dataset_filename, 'a', encoding='utf8') as multilabel_dataset:
for index, jsonLine in enumerate(groundtruth_output_file):
labels, source = self.convert_object.convert_to_multilabel_dataset(index, jsonLine, label_delimiter)
source = json.dumps(source).strip('"')
multilabel_dataset.write(labels + ',"' + source + '"')
multilabel_dataset.write("\n")
def main():
parser = argparse.ArgumentParser(description="Parsing the output S3Uri")
parser.add_argument('mode')
parser.add_argument('dataset_output_S3Uri')
parser.add_argument('label_delimiter')
args = parser.parse_args()
handler = GroundTruthToCLRFormatConversionHandler()
handler.validate_s3_input(args)
if args.mode == "MULTI_CLASS":
handler.read_write_multiclass_dataset()
elif args.mode == "MULTI_LABEL":
handler.read_write_multilabel_dataset(args.label_delimiter)
else:
raise Exception("The value provided for mode is invalid. Valid values are MUTLI_CLASS|MULTI_LABEL")
if __name__ == "__main__":
main()
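    # Example invocation (illustrative only; the bucket, key and delimiter below
    # are hypothetical). The script expects mode, dataset_output_S3Uri and
    # label_delimiter as positional arguments, and the output URI must be an
    # s3:// path ending in .csv:
    #   python groundtruth_format_conversion_handler.py MULTI_LABEL \
    #       s3://my-bucket/output/dataset.csv "|"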
| 3.15625 | 3 |
0001.Two Sum/solution.py | zhlinh/leetcode | 0 | 12792855 | <filename>0001.Two Sum/solution.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-01-04
Last_modify: 2016-09-02
******************************************
'''
'''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
UPDATE (2016/2/13):
The return format had been changed to zero-based indices.
Please read the above updated description carefully.
'''
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d = {}
for i, v in enumerate(nums):
if target-v in d:
return [d[target-v], i]
d[v] = i
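# Illustrative usage sketch (added for clarity; not part of the original solution).
# It runs the example from the problem statement above.
if __name__ == '__main__':
    print(Solution().twoSum([2, 7, 11, 15], 9))  # expected: [0, 1]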
| 3.90625 | 4 |
core/management/commands/constants.py | daichi-yoshikawa/django-boilerplate | 4 | 12792856 | <reponame>daichi-yoshikawa/django-boilerplate<filename>core/management/commands/constants.py<gh_stars>1-10
from api.common import constants
N_USERS = 11
N_TENANTS = 5
ADMIN = constants.TENANT_USER_ROLE_TYPE.ADMIN.value
GENERAL = constants.TENANT_USER_ROLE_TYPE.GENERAL.value
TENANT_USERS = {
1: (1, 1, ADMIN), # tenant_user_id: user_id, tenant_id, role_type
2: (1, 2, GENERAL),
4: (3, 1, GENERAL),
3: (2, 2, ADMIN),
5: (3, 3, ADMIN),
6: (4, 1, ADMIN),
7: (5, 2, GENERAL),
8: (6, 3, GENERAL),
9: (7, 1, GENERAL),
10: (8, 2, GENERAL),
11: (9, 3, GENERAL),
12: (10, 1, GENERAL),
}
| 1.6875 | 2 |
pendulum_environment.py | Gjain234/AdaptiveQLearning | 1 | 12792857 | <reponame>Gjain234/AdaptiveQLearning
import gym
import numpy as np
from agents import pendulum_agent
from eNet_Agent import eNet
from eNet_Agent import eNet_Discount
from eNet_Agent import eNetPendulum
from src import environment
from src import experiment
from src import agent
import pickle
''' Defining parameters to be used in the experiment'''
epLen = 200
nEps = 2000
numIters = 50
env = environment.make_pendulumEnvironment(epLen, False)
##### PARAMETER TUNING FOR PENDULUM ENVIRONMENT
scaling_list = [0.04, 0.035, 0.045]
# scaling_list = [5, 0.6]
#scaling_list = [0.04, 0.5]
epsilon_list = [0.1]
# scaling_list = [0.5, .01] # alpha = 1
# scaling_list = [1, .4] # alpha = 0
# scaling_list = [0.5, 0.01] # alpha = 0.25
max_reward_adapt = 0
max_reward_e_net = 0
opt_adapt_scaling = 0.01
opt_e_net_scaling = 0.01
count = 0
# TRYING OUT EACH SCALING FACTOR FOR OPTIMAL ONE
for scaling in scaling_list:
for epsilon in epsilon_list:
experiment_dict = {'seed': 1, 'epFreq' : 1, 'targetPath': './tmp.csv', 'deBug' : False, 'nEps': nEps, 'recFreq' : 10, 'numIters' : numIters}
#
# RUNNING EXPERIMENT FOR ADAPTIVE ALGORITHM
agent_list_adap = []
for _ in range(numIters):
agent_list_adap.append(pendulum_agent.PendulumAgent(epLen, nEps, scaling, 0.995))
exp = experiment.Experiment(env, agent_list_adap, experiment_dict)
adap_fig = exp.run()
dt_adapt_data = exp.save_data()
if (dt_adapt_data.groupby(['episode']).mean().tail(1))['epReward'].iloc[0] > max_reward_adapt:
max_reward_adapt = (dt_adapt_data.groupby(['episode']).mean().tail(1))['epReward'].iloc[0]
opt_adapt_scaling = scaling
dt_adapt = dt_adapt_data
opt_adapt_agent_list = agent_list_adap
del agent_list_adap
del dt_adapt_data
# RUNNING EXPERIMENT FOR EPSILON NET ALGORITHM
# action_net = np.arange(start=0, stop=1, step=epsilon)
# state_net = np.arange(start=0, stop=1, step=epsilon)
#
# agent_list = []
# for _ in range(numIters):
# agent_list.append(eNet_Discount(action_net, state_net, 0.99, scaling, (3,1)))
#
# exp = experiment.Experiment(env, agent_list, experiment_dict, save=True)
# exp.run()
# dt_net_data = exp.save_data()
#
# curr_reward = (dt_net_data.groupby(['episode']).mean().tail(1))['epReward'].iloc[0]
# if curr_reward > max_reward_e_net:
# max_reward_e_net = curr_reward
# opt_e_net_scaling = scaling
# opt_epsilon_scaling = epsilon
# dt_net = dt_net_data
#
# del agent_list
# del dt_net_data
#print(opt_adapt_scaling)
#print(opt_epsilon_scaling)
#print(opt_e_net_scaling)
# SAVING DATA TO CSV
#dt_adapt.to_csv('pendulum_adapt.csv')
#dt_net.to_csv('pendulum_net_1.csv') | 2.53125 | 3 |
centinel/unit_test/test_http.py | mikiec84/centinel | 29 | 12792858 | import pytest
import os
from ..primitives import http
class TestHTTPMethods:
def test_url_not_exist(self):
"""
test if _get_http_request(args...) returns failure
for an invalid url.
"""
file_name = "data/invalid_hosts.txt"
fd = open(file_name, 'r')
for line in fd:
line = line.rstrip('\n')
res = http._get_http_request(line)
assert res is not None
assert 'failure' in res['response'].keys()
fd.close()
def test_url_exist(self):
"""
test if _get_http_request(args..) returns valid contents from a
valid url.
"""
file_name = "data/valid_hosts.txt"
fd = open(file_name, 'r')
for line in fd:
line = line.rstrip('\n')
res = http._get_http_request(line)
assert res is not None
assert 'failure' not in res['response'].keys()
fd.close()
def test_batch_url_invalid_hosts(self):
"""
test _get_http_request(arg...) primitive when a list of invaid domain
name is passed to get_requests_batch(args...).
"""
invalid_hosts_file_name = "data/invalid_hosts.txt"
fd = open(invalid_hosts_file_name, 'r')
lines = [line.rstrip('\n') for line in fd]
results = http.get_requests_batch(lines)
assert results is not None
# assert failure for inValid Hosts
for key, result in results.items():
assert result is not None
assert 'failure' in result['response'].keys()
fd.close()
def test_batch_url_valid_hosts(self):
"""
test _get_http_request(arg...) primitive when a list of valid domain
name is passed to get_requests_batch(args...).
"""
valid_hosts_file_name = "data/valid_hosts.txt"
fd = open(valid_hosts_file_name, 'r')
lines = [line.rstrip('\n') for line in fd]
results = http.get_requests_batch(lines)
assert results is not None
# assert no failure for valid hosts
for key,result in results.items():
assert result is not None
assert 'failure' not in result['response'].keys()
fd.close()
def test_batch_url_thread_error(self):
"""
test if thread takes long time to finish
TODO: choose url that gives thread error
"""
#file_name = "data/input_file.txt"
#fd = open(file_name, 'r')
#lines = [line.rstrip('\n') for line in fd]
#result = http.get_requests_batch(lines)
#assert result is not None
#assert 'error' in result
#assert result['error'] is "Threads took too long to finish."
#fd.close() | 2.9375 | 3 |
demo_video.py | lippman1125/pytorch_FAN | 58 | 12792859 | <reponame>lippman1125/pytorch_FAN
import torch
import torchvision.transforms as transforms
import numpy as np
import cv2
import copy
import sys
from utils.imutils import *
from utils.transforms import *
from datasets import W300LP, VW300, AFLW2000, LS3DW
import models
from models.fan_model import FAN
from utils.evaluation import get_preds, final_preds
from faceboxes import face_detector_init, detect
CHECKPOINT_PATH = "./checkpoint/fan3d_wo_norm_att/model_best.pth.tar"
# flag of saving pics to gen gif
SAVE = False
SAVE_DIR = "./save_pics"
if len(sys.argv) < 2:
print("please specify run model...")
exit(0)
model_names = sorted(
name for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name]))
print(model_names)
model = FAN(2)
if sys.argv[1] == "cpu":
model_dict = model.state_dict()
checkpoint = torch.load(CHECKPOINT_PATH, map_location=lambda storage, loc: storage)['state_dict']
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
else:
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(CHECKPOINT_PATH)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
proto = "faceboxes_deploy.prototxt"
mdl = "faceboxes_iter_120000.caffemodel"
face_detector = face_detector_init(proto, mdl)
if SAVE == True:
if not os.path.exists(SAVE_DIR):
os.mkdir(SAVE_DIR)
count = 0
reference_scale = 200
cap = cv2.VideoCapture(0)
while True:
_, img_ori = cap.read()
# rects = face_detector(img_ori, 1)
rects = detect(img_ori, face_detector)
if len(rects) == 0:
continue
print(rects)
for rect in rects:
d = [rect.left() - 10, rect.top() - 10, rect.right() + 10, rect.bottom() + 10]
# d = [rect.left() , rect.top() , rect.right() , rect.bottom()]
center = torch.FloatTensor([d[2] - (d[2] - d[0]) / 2.0, d[3] - (d[3] - d[1]) / 2.0])
# center[1] = center[1] + (d[3] - d[1]) * 0.12
hw = max(d[2] - d[0], d[3] - d[1])
scale = float(hw / reference_scale)
# print(scale)
img_chn = copy.deepcopy(img_ori[:,:,::-1])
img_trans = np.transpose(img_chn, (2,0,1))
inp = crop(img_trans, center, scale)
inp.unsqueeze_(0)
output = model(inp)
if sys.argv[1] == "cpu":
score_map = output[-1].data
else:
score_map = output[-1].data.cpu()
pts_img = final_preds(score_map, [center], [scale], [64, 64])
# print(pts_img)
pts_img = np.squeeze(pts_img.numpy())
# print(pts_img)
for i in range(pts_img.shape[0]):
pts = pts_img[i]
cv2.circle(img_ori, (pts[0], pts[1]), 2, (0, 255, 0), -1, 2)
cv2.rectangle(img_ori, (d[0], d[1]), (d[2], d[3]), (255, 255, 255))
cv2.imshow("landmark", img_ori)
if SAVE == True:
cv2.imwrite(os.path.join(SAVE_DIR, "image_{}.jpg".format(count)), img_ori)
cv2.waitKey(1)
count += 1 | 2 | 2 |
krobot/captcha.py | rbardenet/krobot | 1 | 12792860 | import os,sys
from PIL import Image
import numpy
LETTER_NB = 5
LETTER_SPACE = 1
LETTER_SIZE = 8
LETTER_LEFT = 10
LETTER_RIGHT = 16
class CaptchaReader(object):
"""docstring for CaptchaReader"""
def __init__(self, folderDico):
super(CaptchaReader, self).__init__()
self.folderDico = folderDico + "/"
def read(self, filename):
# Extract symbol from targetted captcha
symb_extractor = captchaSymbolExtractor()
listSymb = symb_extractor.extractSymbol(filename)
cap_string = ""
nb_unread = 0
for symb in listSymb:
succes = False
for f in os.listdir(self.folderDico):
if f.endswith(".png"):
pil_image = Image.open(self.folderDico + f)
dic_symb = numpy.array(pil_image)
if self.compare(symb, dic_symb):
succes = True
if f[0].isdigit():
cap_string += f[0]
else:
cap_string += f[3]
break
if not succes:
# If you go there, then the symbol has not been read
Image.fromarray(symb).save("error/symb" + str(nb_unread) + ".png")
nb_unread += 1
#return the string
return cap_string
def compare(self, symb_np, im_dic):
#print symb_np
return numpy.array_equal(symb_np, im_dic/255)
class captchaSymbolExtractor(object):
"""docstring for captchaSymbolExtractor"""
def __init__(self):
super(captchaSymbolExtractor, self).__init__()
def extractSymbol(self, filename):
# mat_pix is a numpy array
mat_pix = self.openImage(filename)
list_im = []
for i in range(5):
left = LETTER_LEFT + i * (LETTER_SIZE + LETTER_SPACE)
right = LETTER_LEFT + (i + 1) * (LETTER_SIZE + LETTER_SPACE) - 1
symb = mat_pix[6:19, left:right]
list_im.append(symb)
im = Image.fromarray(symb*255)
im = im.convert('1')
return list_im
def openImage(self, filename):
pil_image = Image.open(filename)
return numpy.array(pil_image) | 2.765625 | 3 |
Assignment_1/git/CSL622/rankingNodes.py | atlkdr/Social_Networks | 0 | 12792861 | <filename>Assignment_1/git/CSL622/rankingNodes.py
"""
@author: <NAME>, <NAME>, <NAME>
"""
import networkx as netx
import matplotlib.pyplot as plt
#find adjacent_nodes
def adj_nodes(G):
list_nodes_info = []
node_mapped = []
i = 0
for u in G.nodes():
list_nodes_info.append([])
list_adjNodes = []
count = 0
for v in G.nodes():
if G.has_edge(u,v)==True:
count = count+1
list_adjNodes.append(int(v))
list_nodes_info[i].append(int(u))
list_nodes_info[i].append(count)
list_nodes_info[i].append(list_adjNodes)
node_mapped.insert(i,int(u))
i = i+1
return(list_nodes_info,node_mapped)
#returns the page ranking factor
def page_rank(G,list_nodes_info,node_mapped):
initial_pageRank = []
processed_pageRank = []
for i in range(len(list_nodes_info)):
initial_pageRank.insert(i,1/len(list_nodes_info))
while(len(set(initial_pageRank))!=len(initial_pageRank)):
for i in range(len(list_nodes_info)):
cumPR = 0
for j in range(len(list_nodes_info[i][2])):
node_index = node_mapped.index(list_nodes_info[i][2][j])
cumPR = cumPR + (initial_pageRank[node_index]/list_nodes_info[node_index][1])
processed_pageRank.insert(i,cumPR)
initial_pageRank = processed_pageRank
return(initial_pageRank)
#sorts the page ranking factors and then assigns a rank to each node based on its importance
def ranking_assign(G,pageRank_factor,list_nodes_info):
nodes_with_ranks = []
for i in range(len(G.nodes())):
nodes_with_ranks.insert(i,[pageRank_factor[i],list_nodes_info[i][0]])
nodes_with_ranks.sort(reverse=True)
ranking = []
for i in (range(len(G.nodes()))):
        ranking.append([i+1, nodes_with_ranks[i][1]])
return(ranking)
G = netx.read_edgelist(r"pagerank.txt",create_using=netx.DiGraph())
netx.draw_spring(G,with_labels=1,node_size=200,font_size=12)
plt.show()
list_nodes_info,node_mapped = adj_nodes(G)
pageRank_factor = page_rank(G,list_nodes_info,node_mapped)
rank = ranking_assign(G,pageRank_factor,list_nodes_info)
#prints the nodes along with ranking
for i in range(len(G.nodes())):
print("Node: " + str(rank[i][0]) + " \tRank: " + str(rank[i][1])) | 3.109375 | 3 |
tests/test_wificontrol.py | TopperBG/pywificontrol | 115 | 12792862 | <filename>tests/test_wificontrol.py
# Written by <NAME> and <NAME> <<EMAIL>>
#
# Copyright (c) 2016, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import mock
from wificontrol import WiFiControl
@pytest.fixture
def ssid():
network = {
'ssid': 'Test'
}
return network
class FakeWiFiControl(WiFiControl):
def __init__(self):
self.wifi = mock.MagicMock()
self.wpasupplicant = mock.MagicMock()
self.hotspot = mock.MagicMock()
class TestWiFiControl:
def setup_method(self):
self.manager = FakeWiFiControl()
def test_host_mode(self):
self.manager.hotspot.started = mock.Mock(return_value=False)
self.manager.start_host_mode()
assert self.manager.wpasupplicant.stop.call_count == 1
assert self.manager.hotspot.started.call_count == 1
assert self.manager.hotspot.start.call_count == 1
def test_client_mode(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.start_client_mode()
assert self.manager.hotspot.stop.call_count == 1
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 1
def test_wifi_turn_on(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.hotspot.started = mock.Mock(return_value=False)
self.manager.turn_on_wifi()
assert self.manager.wifi.unblock.call_count == 1
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 1
self.manager.wpasupplicant.started.return_value = True
assert self.manager.get_wifi_turned_on() is True
def test_wifi_turn_off(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=True)
self.manager.hotspot.started = mock.Mock(return_value=False)
self.manager.turn_off_wifi()
assert self.manager.wifi.block.call_count == 1
assert self.manager.hotspot.stop.call_count == 1
assert self.manager.wpasupplicant.stop.call_count == 1
self.manager.wpasupplicant.started.return_value = False
assert self.manager.get_wifi_turned_on() is False
def test_wifi_turn_on_if_wifi_is_on(self):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.hotspot.started = mock.Mock(return_value=True)
self.manager.turn_on_wifi()
assert self.manager.wifi.unblock.call_count == 0
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.hotspot.started.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 0
assert self.manager.hotspot.start.call_count == 0
def test_network_add(self, ssid):
self.manager.add_network(ssid)
        self.manager.wpasupplicant.add_network.assert_called_once_with(ssid)
def test_network_remove(self, ssid):
self.manager.remove_network(ssid)
        self.manager.wpasupplicant.remove_network.assert_called_once_with(ssid)
def test_status_get(self, ssid):
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.hotspot.started = mock.Mock(return_value=True)
state, status = self.manager.get_status()
assert state == self.manager.HOST_STATE
assert status is None
self.manager.wpasupplicant.started.return_value = True
self.manager.hotspot.started.return_value = False
self.manager.wpasupplicant.get_status = mock.Mock(return_value=ssid)
state, status = self.manager.get_status()
assert state == self.manager.WPA_STATE
assert status == ssid
def test_start_connection(self, ssid):
def start_connecting(*args):
self.manager.hotspot.started.return_value = False
self.manager.revert_on_connect_failure(result=None)
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.wpasupplicant.start_connecting.side_effect = start_connecting
self.manager.hotspot.started = mock.Mock(return_value=True)
self.manager.start_connecting(ssid)
assert self.manager.wpasupplicant.started.call_count == 1
assert self.manager.hotspot.stop.call_count == 1
assert self.manager.wpasupplicant.start.call_count == 1
args = (ssid, self.manager.revert_on_connect_failure, None, 10)
        self.manager.wpasupplicant.start_connecting.assert_called_once_with(*args)
assert self.manager.hotspot.started.call_count == 1
assert self.manager.wpasupplicant.stop.call_count == 1
assert self.manager.hotspot.start.call_count == 1
def test_reconnection(self, ssid):
def start_connecting(result, callback, args, timeout):
self.manager.hotspot.started.return_value = False
if args:
callback({}, *args)
else:
callback(result)
self.manager.wpasupplicant.started = mock.Mock(return_value=False)
self.manager.wpasupplicant.start_connecting.side_effect = start_connecting
self.manager.hotspot.started = mock.Mock(return_value=True)
self.manager.start_connecting(ssid, callback=self.manager.reconnect,
args=(ssid,))
assert self.manager.wpasupplicant.start_connecting.call_count == 2
def test_supplicant_functions(self):
self.manager.scan()
assert self.manager.wpasupplicant.scan.call_count == 1
self.manager.get_scan_results()
assert self.manager.wpasupplicant.get_scan_results.call_count == 1
self.manager.get_added_networks()
assert self.manager.wpasupplicant.get_added_networks.call_count == 1
self.manager.get_ip()
assert self.manager.wifi.get_device_ip.call_count == 1
self.manager.stop_connecting()
assert self.manager.wpasupplicant.stop_connecting.call_count == 1
self.manager.disconnect()
assert self.manager.wpasupplicant.disconnect.call_count == 1
self.manager.get_device_name()
assert self.manager.hotspot.get_host_name.call_count == 1
self.manager.get_hostap_name()
assert self.manager.hotspot.get_hostap_name.call_count == 1
name = 'test'
self.manager.set_device_names(name)
assert self.manager.wpasupplicant.set_p2p_name.call_count == 1
        self.manager.wpasupplicant.set_p2p_name.assert_called_once_with(name)
        assert self.manager.hotspot.set_hostap_name.call_count == 1
        self.manager.hotspot.set_hostap_name.assert_called_once_with(name)
        assert self.manager.hotspot.set_host_name.call_count == 1
        self.manager.hotspot.set_host_name.assert_called_once_with(name)
assert self.manager.wifi.restart_dns.call_count == 1
self.manager.set_hostap_password(name)
        self.manager.hotspot.set_hostap_password.assert_called_once_with(name)
def test_verify_names(self):
name = 'test'
mac_addr = '11:22:33:44:55:66'
self.manager.hotspot.get_host_name.return_value = name
self.manager.wpasupplicant.get_p2p_name.return_value = name
self.manager.hotspot.get_hostap_name.return_value = "{}{}".format(name, mac_addr[-6:])
self.manager.hotspot.get_device_mac.return_value = mac_addr[-6:]
assert self.manager.verify_hostap_name(name)
assert self.manager.verify_device_names(name)
assert self.manager.hotspot.get_host_name.call_count == 1
assert self.manager.wpasupplicant.get_p2p_name.call_count == 1
| 1.84375 | 2 |
app/main/doc.py | hezmondo/lulu | 0 | 12792863 | <reponame>hezmondo/lulu
import imghdr
import os
import re
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from xhtml2pdf import pisa
from app import db
from flask import current_app, flash, request
from werkzeug.utils import secure_filename
from app.main.common import Attachment, mget_filestore_dir, mget_tempfile_dir, mupdate_filename_suffix
from app.models import DigFile, DigStore, DocFile, DocStore, FileStore, Rent
from app.dao.doc import dig_filename_already_exists, dig_store_filename_already_exists, filename_already_exists, \
get_docfile_combo_meta, get_docfiles_combo, get_docfile_row, get_docfiles, get_digfile_object, \
get_digfile_row, get_docfiles_text, get_file_store_row, post_docfile, upload_docfile, \
get_digfiles_for_rent, get_docfile_object, get_docfile_objects, get_file_store_files_for_rent
from app.dao.payrequest import get_pr_history_row, get_pr_history
from app.dao.rent import get_rentcode
from base64 import b64encode
def madd_digfile_properties(digfile, doc_form_data):
digfile.rent_id = doc_form_data.get('rent_id')
digfile.summary = doc_form_data.get('summary')
return digfile
def madd_docfile_properties(docfile, doc_form_data, is_draft=False):
docfile.rent_id = doc_form_data.get('rent_id')
docfile.summary = doc_form_data.get('summary') if not is_draft else 'draft-doc: ' + request.form.get(
'summary')[0:75]
docfile.doc_text = doc_form_data.get('doc_text')
docfile.combined = doc_form_data.get('combined')
return docfile
def allowed_filetypes():
return ['.pdf', '.doc', '.docx', '.ods', '.odt', '.jpg', '.png', '.gif']
def append_file_form_time_data(docfile, doc_form_data):
doc_time = docfile.time_date.time()
time_date = doc_form_data.get('doc_date') + \
timedelta(hours=doc_time.hour, minutes=doc_time.minute, seconds=doc_time.second)
docfile.time_date = time_date
return docfile
def append_digfile_form_data(digfile):
dig_form_data = collect_dig_form_data()
digfile = append_file_form_time_data(digfile, dig_form_data)
return madd_digfile_properties(digfile, dig_form_data)
def append_docfile_form_data(docfile, is_draft=False):
doc_form_data = collect_doc_form_data()
docfile = append_file_form_time_data(docfile, doc_form_data)
return madd_docfile_properties(docfile, doc_form_data, is_draft)
def append_filestore_form_data(filestore):
filestore_form_data = collect_dig_form_data()
filestore = append_file_form_time_data(filestore, filestore_form_data)
filestore.rent_id = filestore_form_data.get('rent_id')
return filestore
def create_attachment_as_temp(uploaded_file):
filename = secure_filename(uploaded_file.filename).lower()
uploaded_file.save(os.path.join(mget_tempfile_dir(), filename))
return Attachment(filename, os.path.join(mget_tempfile_dir(), filename), uploaded_file.content_type)
def create_attachment_save_to_dig(rent_id, file):
filename = secure_filename(file.filename).lower()
filepath = os.path.join(mget_tempfile_dir(), filename or 'attachment_new.pdf')
dig_id, notice = mpost_upload(rent_id, file)
dig_row = get_digfile_row(dig_id)
with open(filepath, 'wb') as file:
file.write(dig_row.dig_data)
return Attachment(dig_row.summary, filepath, 'application/pdf')
def create_attachment_save_to_store(rent_id, file):
file_id, notice = mupload_to_file_store(rent_id, file)
file_store_file = get_file_store_row(file_id)
filepath = os.path.join(mget_filestore_dir(), file_store_file.summary)
return Attachment(file_store_file.summary, filepath, 'application/pdf')
def collect_email_form():
attachment_1 = {'attach_id': request.form.get('attachment_1_id', 0, type=int),
'attach_name': request.form.get('attachment_1_name', '', type=str),
'attach_type': request.form.get('attachment_1_type', '', type=str)}
attachment_2 = {'attach_id': request.form.get('attachment_2_id', 0, type=int),
'attach_name': request.form.get('attachment_2_name', '', type=str),
'attach_type': request.form.get('attachment_2_type', '', type=str)}
attachment_3 = {'attach_id': request.form.get('attachment_3_id', 0, type=int),
'attach_name': request.form.get('attachment_3_name', '', type=str),
'attach_type': request.form.get('attachment_3_type', '', type=str)}
attachment_4 = {'attach_id': request.form.get('attachment_4_id', 0, type=int),
'attach_name': request.form.get('attachment_4_name', '', type=str),
'attach_type': request.form.get('attachment_4_type', '', type=str)}
attachment_5 = {'attach_id': request.form.get('attachment_5_id', 0, type=int),
'attach_name': request.form.get('attachment_5_name', '', type=str),
'attach_type': request.form.get('attachment_5_type', '', type=str)}
attachment_details = [attachment_1, attachment_2, attachment_3, attachment_4, attachment_5]
fdict = {'file_list': [request.files.get('uploadfile_1'),
request.files.get('uploadfile_2'),
request.files.get('uploadfile_3'),
request.files.get('uploadfile_4'),
request.files.get('uploadfile_5')],
'doc_id': request.form.get('doc_id', 0, int),
'recipients': request.form.get('email_to'),
'save_dig_tog': request.form.get('save_dig_tog'),
'subject': request.form.get('email_subject')}
return attachment_details, fdict
def collect_email_form_data(rent_id, is_draft=False):
attachment_details, fdict = collect_email_form()
attachments = []
file_list = fdict.get('file_list')
file_list = [file for file in file_list if file.filename]
if file_list:
if request.form.get('location_db'):
for file in file_list:
attachment = create_attachment_save_to_dig(rent_id, file)
attachments.append(attachment)
elif request.form.get('location_folder'):
for file in file_list:
if file.filename:
attachment = create_attachment_save_to_store(rent_id, file)
attachments.append(attachment)
else:
for file in file_list:
if file.filename:
attachment = create_attachment_as_temp(file)
attachments.append(attachment)
loop = 1
for attachment in attachment_details:
if attachment.get('attach_id') > 0:
attachments.append(mget_attachment(attachment.get('attach_id'), attachment.get('attach_name'),
attachment.get('attach_type'), 'attachment_' + str(loop)))
loop = loop + 1
doc_id = fdict.get('doc_id')
docfile = create_docfile(is_draft) if not doc_id else update_docfile(doc_id, is_draft)
appmail = current_app.extensions['mail']
recipients = fdict.get('recipients')
subject = fdict.get('subject')
return appmail, docfile, recipients, subject, attachments
def convert_html_to_pdf(source_html, output_filename):
# open output file for writing (truncated binary)
result_file = open(os.path.join(mget_tempfile_dir(), output_filename), "w+b")
# convert HTML to PDF
pisa_status = pisa.CreatePDF(
source_html, # the HTML to convert
dest=result_file) # file handle to receive result
# close output file
result_file.close() # close output file
# return False on success and True on errors
return pisa_status.err
def collect_dig_form_data():
rent_id = int(request.form.get('rent_id'))
doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
summary = request.form.get('summary')[0:89]
return {'rent_id': rent_id,
'doc_date': doc_date,
'summary': summary}
def collect_doc_form_data():
rent_id = int(request.form.get('rent_id'))
doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
doc_text = request.form.get('xinput').replace("£", "£")
summary = request.form.get('summary')[0:89]
combined = True if request.form.get('doc_combo_true') else False
return {'rent_id': rent_id,
'doc_date': doc_date,
'doc_text': doc_text,
'summary': summary,
'combined': combined}
def create_draft_email_docfile(doc_text, rent_id, summary):
time_date = datetime.now()
summary = 'draft-doc: ' + summary
docfile = post_docfile(time_date, doc_text, rent_id, summary)
return docfile.id
def create_docfile(is_draft=False):
doc_form_data = collect_doc_form_data()
docfile = DocFile()
time_now = datetime.now().time()
time_date = doc_form_data.get('doc_date') + \
timedelta(hours=time_now.hour, minutes=time_now.minute, seconds=time_now.second)
docfile.time_date = time_date
docfile = madd_docfile_properties(docfile, doc_form_data, is_draft)
return docfile
# def digfile_object_create(dig_id):
# # TODO: Refactor
# doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
# if dig_id == 0:
# digfile = DigStore()
# time_now = datetime.now().time()
# time_date = doc_date + timedelta(hours=time_now.hour, minutes=time_now.minute, seconds=time_now.second)
# else:
# digfile = get_digfile_object(dig_id)
# doc_time = digfile.time_date.time()
# time_date = doc_date + timedelta(hours=doc_time.hour, minutes=doc_time.minute, seconds=doc_time.second)
# digfile.object_id = request.form.get('object_id')
# digfile.object_type_id = request.form.get('object_type_id')
# digfile.time_date = time_date
# digfile.summary = request.form.get('summary')
# return digfile
# def mget_file_time_date(file_date, file=None):
# time = file.time_date.time() if file else datetime.now().time()
# time_date = file_date + timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)
# return time_date
def docfile_object_create(doc_id, doc_dig):
# TODO: Refactor
doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
if doc_id == 0:
        docfile = DocStore() if doc_dig == 'doc' else DigStore()
time_now = datetime.now().time()
time_date = doc_date + timedelta(hours=time_now.hour, minutes=time_now.minute, seconds=time_now.second)
else:
docfile = get_docfile_object(doc_id) if doc_dig == 'doc' else get_digfile_object(doc_id)
doc_time = docfile.time_date.time()
time_date = doc_date + timedelta(hours=doc_time.hour, minutes=doc_time.minute, seconds=doc_time.second)
docfile.object_id = request.form.get('object_id')
docfile.object_type_id = request.form.get('object_type_id')
docfile.time_date = time_date
if doc_dig == 'doc':
docfile.doc_text = request.form.get('xinput').replace("£", "£")
docfile.summary = request.form.get('summary')
return docfile
def mget_attachment(attach_id, attach_name, attach_type, filename, mime_type='application/pdf'):
filepath = os.path.join(mget_tempfile_dir(), filename)
if attach_type == 'doc':
attach_doc = get_docfile_row(attach_id)
convert_html_to_pdf(attach_doc.doc_text, filename)
elif attach_type == 'dig':
attach_dig = get_digfile_row(attach_id)
with open(filepath, 'wb') as file:
file.write(attach_dig.dig_data)
elif attach_type == 'pr':
attach_doc = get_pr_history_row(attach_id)
convert_html_to_pdf(attach_doc.block, filename)
else:
filepath = os.path.join(mget_filestore_dir(), attach_name)
return Attachment(attach_name, filepath, mime_type)
def mget_attachment_form():
attachments = []
attachment_1 = request.form.get('attachment_1', '', type=str)
if attachment_1:
attachment_1 = attachment_1.split('$')
attachment_1 = {'attach_id': attachment_1[0],
'attach_type': attachment_1[1],
'attach_name': attachment_1[2]}
attachments.append(attachment_1)
attachment_2 = request.form.get('attachment_2', '', type=str)
if attachment_2:
attachment_2 = attachment_2.split('$')
attachment_2 = {'attach_id': attachment_2[0],
'attach_type': attachment_2[1],
'attach_name': attachment_2[2]}
attachments.append(attachment_2)
attachment_3 = request.form.get('attachment_3', '', type=str)
if attachment_3:
attachment_3 = attachment_3.split('$')
attachment_3 = {'attach_id': attachment_3[0],
'attach_type': attachment_3[1],
'attach_name': attachment_3[2]}
attachments.append(attachment_3)
attachment_4 = request.form.get('attachment_4', '', type=str)
if attachment_4:
attachment_4 = attachment_4.split('$')
attachment_4 = {'attach_id': attachment_4[0],
'attach_type': attachment_4[1],
'attach_name': attachment_4[2]}
attachments.append(attachment_4)
attachment_5 = request.form.get('attachment_5', '', type=str)
if attachment_5:
attachment_5 = attachment_5.split('$')
attachment_5 = {'attach_id': attachment_5[0],
'attach_type': attachment_5[1],
'attach_name': attachment_5[2]}
attachments.append(attachment_5)
return attachments
def mget_docfile(doc_dig, doc_id):
if doc_dig == "doc":
docfile = get_docfile_row(doc_id)
if docfile.summary[-3:] == " in" or " in " in docfile.summary:
docfile.doc_type = "in"
elif docfile.summary[-4:] == " out" or " out " in docfile.summary:
docfile.doc_type = "out"
else:
docfile.doc_type = "info"
elif doc_dig == 'dig':
docfile = get_digfile_row(doc_id)
if any(x in docfile.summary for x in ['.png', '.jpg', '.jpeg', '.bmp']):
docfile.image = b64encode(docfile.dig_data).decode("utf-8")
elif '.pdf' in docfile.summary:
docfile.pdf = b64encode(docfile.dig_data).decode("utf-8")
else:
docfile = get_file_store_row(doc_id)
docfile.ext = os.path.splitext(docfile.summary)[1]
return docfile
def mget_new_docfile(rent_id):
docfile = DocFile()
docfile.id = 0
docfile.time_date = datetime.now()
docfile.rent_id = int(rent_id)
docfile.summary = "email in"
docfile.doc_type = "in"
docfile.doc_text = ""
docfile.combined = True
return docfile
def mget_docfile_combo_meta(rent_id):
docfile = get_docfile_combo_meta(rent_id)
if docfile:
docfile.doc_dig = 'thread'
docfile.summary = 'combined doc thread'
return docfile
def mget_docfiles_combo(rent_id):
docfiles = get_docfiles_combo(rent_id)
for docfile in docfiles:
if "in" in docfile.summary.split():
docfile.in_out = 'in'
else:
docfile.in_out = 'out'
return docfiles
def mget_docfiles(rent_id, action=''):
digfile_filter = []
# # filter out draft docs
# docfile_filter = [(DocFile.summary.notlike('%draft-doc: %'))]
docfile_filter = []
file_store_filter = []
fdict = {'dfountin': 'all'}
if request.method == "POST":
fdict = mget_fdict(action)
if fdict.get('rentcode'):
digfile_filter.append(Rent.rentcode.ilike('%{}%'.format(fdict.get('rentcode'))))
docfile_filter.append(Rent.rentcode.ilike('%{}%'.format(fdict.get('rentcode'))))
file_store_filter.append(Rent.rentcode.ilike('%{}%'.format(fdict.get('rentcode'))))
if fdict.get('summary'):
digfile_filter.append(DigFile.summary.ilike('%{}%'.format(fdict.get('summary'))))
docfile_filter.append(DocFile.summary.ilike('%{}%'.format(fdict.get('summary'))))
file_store_filter.append(FileStore.summary.ilike('%{}%'.format(fdict.get('summary'))))
if fdict.get('doc_text'):
docfile_filter.append(DocFile.doc_text.ilike('%{}%'.format(fdict.get('doc_text'))))
elif action == 'attach' or rent_id > 0:
digfile_filter.append(DigFile.rent_id == rent_id)
docfile_filter.append(DocFile.rent_id == rent_id)
file_store_filter.append(FileStore.rent_id == rent_id)
docfiles, digfiles, file_store_files = get_docfiles(docfile_filter, digfile_filter, file_store_filter)
for docfile in docfiles:
docfile.doc_dig = 'doc'
for digfile in digfiles:
digfile.doc_dig = 'dig'
for file in file_store_files:
file.doc_dig = 'file'
files = docfiles + digfiles + file_store_files
results = sorted(files, key=lambda r: r.time_date, reverse=True)
return results, fdict
def mget_doc_and_digfiles(rent_id):
docfiles = get_docfiles_text(rent_id)
digfiles = get_digfiles_for_rent(rent_id)
file_store_files = get_file_store_files_for_rent(rent_id)
digfiles, file_store_files = mget_dig_and_file_store_data(digfiles,file_store_files)
files = docfiles + digfiles + file_store_files
results = sorted(files, key=lambda r: r.time_date, reverse=True)
return results
def mget_dig_and_file_store_data(digfiles, file_store_files):
for digfile in digfiles:
if any(x in digfile.summary for x in ['.png', '.jpg', '.jpeg', '.bmp']):
digfile.image = b64encode(digfile.dig_data).decode("utf-8")
elif '.pdf' in digfile.summary:
digfile.pdf = b64encode(digfile.dig_data).decode("utf-8")
for file_store_file in file_store_files:
file_store_file.ext = os.path.splitext(file_store_file.summary)[1]
return digfiles, file_store_files
def mget_digfile_object(doc_object_id):
# TODO: refactor same code as rent digfile getter
digfile = get_digfile_object(doc_object_id)
if any(x in digfile.summary for x in ['.png', '.jpg', '.jpeg', '.bmp']):
digfile.image = b64encode(digfile.dig_data).decode("utf-8")
elif '.pdf' in digfile.summary:
digfile.pdf = b64encode(digfile.dig_data).decode("utf-8")
return digfile
def mget_docfile_object(doc_object_id):
return get_docfile_object(doc_object_id)
def mget_docfile_objects(object_id, object_type_id):
return get_docfile_objects(object_id, object_type_id)
def mget_fdict(action=''):
fdict = {'rentcode': request.form.get("rentcode") or "",
'summary': request.form.get("summary") or "",
'doc_text': request.form.get("doc_text") or ""}
if action == 'attach':
for i in range(1, 6):
name = 'attachment_' + str(i)
fdict[name] = request.form.get(name) or ''
return fdict
def mget_new_docfile_object(object_id, object_type_id):
docfile = DocStore()
docfile.time_date = datetime.now()
docfile.object_type_id = object_type_id
docfile.doc_text = 'Write or paste your document here...'
docfile.object_id = object_id
return docfile
def insert_prs_for_rent(docfiles, rent_id):
pay_requests = get_pr_history(rent_id)
for pay_request in pay_requests:
pay_request.time_date = pay_request.time_date
pay_request.typedoc = 2
pay_request.doc_dig = 'pr'
files = docfiles + pay_requests
return sorted(files, key=lambda r: r.time_date, reverse=True)
def mpost_docfile(doc_id, doc_dig):
if doc_id == 0:
docfile = create_docfile()
elif doc_dig == 'doc':
docfile = update_docfile(doc_id)
elif doc_dig == 'dig':
docfile = update_digfile(doc_id)
else:
docfile = update_filestore(doc_id)
upload_docfile(docfile)
return docfile.rent_id
# def mpost_digfile_object(dig_id):
# # TODO: Refactor
# digfile = digfile_object_create(dig_id)
# upload_docfile(digfile)
#
# return docfile.object_id, docfile.object_type_id
def mpost_docfile_object(doc_id, doc_dig):
# TODO: Refactor
docfile = docfile_object_create(doc_id, doc_dig)
upload_docfile(docfile)
return docfile.object_id, docfile.object_type_id
def mpost_upload(rent_id, uploaded_file):
# TODO: Refactor
# new digital file uses upload function
# rentcode = request.form.get("rentcode")
doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
time_now = datetime.now().time()
time_date = doc_date + timedelta(hours=time_now.hour, minutes=time_now.minute, seconds=time_now.second)
filename = secure_filename(uploaded_file.filename).lower()
if filename != '':
notice = ''
file_ext = os.path.splitext(filename)[1]
if file_ext not in allowed_filetypes():
return None, "Invalid file suffix"
elif file_ext in ['.bmp', '.jpeg', '.jpg', '.png', '.gif'] and file_ext != validate_image(uploaded_file.stream):
return None, "Invalid image"
while dig_filename_already_exists(filename, rent_id):
filename = mupdate_filename_suffix(filename)
notice = 'File already exists. The new file has been renamed to ' + filename + '. '
digfile = DigFile()
digfile.time_date = time_date
digfile.summary = filename
digfile.dig_data = uploaded_file.read()
digfile.rent_id = rent_id
db.session.add(digfile)
db.session.flush()
dig_id = digfile.id
db.session.commit()
notice = notice + 'File saved successfully!'
return dig_id, notice
# else:
# flash('No filename!')
# return redirect(request.url)
def mpost_object_upload(object_id, object_type_id, uploaded_file):
doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
time_now = datetime.now().time()
time_date = doc_date + timedelta(hours=time_now.hour, minutes=time_now.minute, seconds=time_now.second)
filename = secure_filename(uploaded_file.filename).lower()
if filename != '':
notice = ''
file_ext = os.path.splitext(filename)[1]
if file_ext not in allowed_filetypes():
return None, "Invalid file suffix"
elif file_ext in ['.bmp', '.jpeg', '.jpg', '.png', '.gif'] and file_ext != validate_image(uploaded_file.stream):
return None, "Invalid image"
while dig_store_filename_already_exists(filename, object_id, object_type_id):
filename = mupdate_filename_suffix(filename)
notice = 'File already exists. The new file has been renamed to ' + filename + '. '
digfile = DigStore()
digfile.time_date = time_date
digfile.summary = filename
digfile.dig_data = uploaded_file.read()
digfile.object_id = object_id
digfile.object_type_id = object_type_id
db.session.add(digfile)
db.session.flush()
dig_id = digfile.id
db.session.commit()
notice = notice + 'File saved successfully!'
        return dig_id, notice
    # No filename was supplied (empty file input), so report it to the caller.
    return None, 'No filename!'
def prepare_draft_for_edit(docfile):
docfile.time_date = datetime.now()
docfile.summary = reset_draft_email_summary(docfile.summary)
soup = BeautifulSoup(docfile.doc_text, 'html.parser')
subject = str(soup.find(id='email_subject_span').text)
email_to = soup.find(id='email_to_span')
email_to = email_to.string
docfile.doc_text = str(soup)
return email_to, re.sub('\n', '', subject), docfile.summary
def rename_filestore_file(filestore, old_filename, new_filename):
src = os.path.join(mget_filestore_dir(), old_filename)
dst = os.path.join(mget_filestore_dir(), new_filename)
try:
os.rename(src, dst)
filestore.summary = new_filename
except Exception as ex:
flash(f'Cannot rename file. Error: {str(ex)}', 'error')
filestore.summary = old_filename
return filestore
def reset_draft_email_summary(summary):
summary = summary.replace('draft-doc: ', '')
summary = ' '.join([item for item in summary.split() if '@' not in item])
if summary[-3:] == ' to':
summary = summary[0:-3]
return summary
def update_docfile(doc_id, is_draft=False):
docfile = get_docfile_row(doc_id)
docfile = append_docfile_form_data(docfile, is_draft)
return docfile
def update_digfile(doc_id):
digfile = get_digfile_row(doc_id)
digfile = append_digfile_form_data(digfile)
return digfile
def update_filestore(doc_id):
notice = ''
filestore = get_file_store_row(doc_id)
old_filename = filestore.summary
new_filename = request.form.get('summary')[0:89]
filestore = append_filestore_form_data(filestore)
if new_filename != old_filename:
while filename_already_exists(new_filename):
new_filename = mupdate_filename_suffix(new_filename)
notice = 'File already exists. The new file has been renamed to ' + new_filename + '. '
if notice:
flash(notice, 'message')
filestore = rename_filestore_file(filestore, old_filename, new_filename)
return filestore
def mupload_docfile_with_attach_info(attachments, docfile):
attachment_div = "<div id='attachments' style='font-size:10.5pt;'>Attachment(s):"
for attachment in attachments:
attachment_div = attachment_div + ' ' + attachment.file_name
attachment_div = attachment_div + "</div><br>"
html = attachment_div + docfile.doc_text
docfile.doc_text = html
upload_docfile(docfile)
def mupload_to_file_store(rent_id, uploaded_file):
notice = ''
doc_date = datetime.strptime(request.form.get('time_date'), '%Y-%m-%d')
time_now = datetime.now().time()
time_date = doc_date + timedelta(hours=time_now.hour, minutes=time_now.minute, seconds=time_now.second)
filename = get_rentcode(rent_id) + '-' + secure_filename(uploaded_file.filename).lower()
while filename_already_exists(filename):
filename = mupdate_filename_suffix(filename)
notice = 'File already exists. The new file has been renamed to ' + filename + '. '
uploaded_file.save(os.path.join(mget_filestore_dir(), filename))
file_store = FileStore()
file_store.time_date = time_date
file_store.summary = filename
file_store.rent_id = rent_id
db.session.add(file_store)
db.session.flush()
file_id = file_store.id
db.session.commit()
notice = notice + 'File saved successfully!'
return file_id, notice
def validate_image(stream):
header = stream.read(512)
stream.seek(0)
    fmt = imghdr.what(None, header)
    if not fmt:
        return None
    return '.' + (fmt if fmt != 'jpeg' else 'jpg')
| 1.851563 | 2 |
test/test_io.py | BioroboticsLab/deeppipeline | 4 | 12792864 | from pipeline.io import unique_id, video_generator
def test_video_generator_2015(bees_video, filelists_path):
gen = video_generator(bees_video, ts_format="2015", path_filelists=filelists_path)
results = list(gen)
assert len(results) == 3
prev_ts = 0.0
for _, _, ts in results:
assert ts > prev_ts
prev_ts = ts
def test_video_generator_2016(bees_video_2016):
gen = video_generator(bees_video_2016, ts_format="2016", path_filelists=None)
results = list(gen)
assert len(results) == 4
prev_ts = 0.0
for _, _, ts in results:
assert ts > prev_ts
prev_ts = ts
def test_unique_id():
first_id = unique_id()
second_id = unique_id()
assert first_id.bit_length() == 64
assert second_id.bit_length() == 64
assert first_id != second_id
| 2.1875 | 2 |
tests/test_encoding_validators/test_are_sources_in_utf.py | SerejkaSJ/fiasko_bro | 25 | 12792865 | <gh_stars>10-100
from fiasko_bro import defaults
from fiasko_bro.pre_validation_checks import file_not_in_utf8
def test_file_not_in_utf8_fail(encoding_repo_path):
directories_to_skip = defaults.VALIDATION_PARAMETERS['directories_to_skip']
output = file_not_in_utf8(encoding_repo_path, directories_to_skip)
assert isinstance(output, str)
def test_file_not_in_utf8_ok(general_repo_path):
directories_to_skip = defaults.VALIDATION_PARAMETERS['directories_to_skip']
output = file_not_in_utf8(general_repo_path, directories_to_skip)
assert output is None
def test_file_not_in_utf8_uses_whitelist(encoding_repo_path):
directories_to_skip = ['win1251']
output = file_not_in_utf8(encoding_repo_path, directories_to_skip)
assert output is None
| 2.234375 | 2 |
tests/test_ds_simple_db/test_serializers/test_table_serializer.py | dmitryshurov/simple_data_storage_library | 0 | 12792866 | <filename>tests/test_ds_simple_db/test_serializers/test_table_serializer.py
from unittest import TestCase
from ds_simple_db.core.entry import Entry
from ds_simple_db.serializers.table_serializer import TableSerializer
class TestTableSerializer(TestCase):
def test_entries_to_string_empty_entries_returns_empty_string(self):
entries = []
self.assertEqual(
'',
TableSerializer().entries_to_string(entries)
)
def test_row_separator(self):
self.assertEqual(
'+-----+-----+\n',
TableSerializer(row_width=5)._get_row_separator(num_cols=2)
)
self.assertEqual(
'+---+\n',
TableSerializer(row_width=3)._get_row_separator(num_cols=1)
)
def test_get_row_content(self):
values = ['col', 'value', 'col3']
self.assertEqual(
'| col |value|col3 |\n',
TableSerializer(row_width=5)._get_row_content(values)
)
self.assertEqual(
'|col|val|col|\n',
TableSerializer(row_width=3)._get_row_content(values)
)
def test_get_header(self):
cols = ['col', 'value', 'col3']
self.assertEqual(
'+-----+-----+-----+\n| col |value|col3 |\n+-----+-----+-----+\n',
TableSerializer(row_width=5)._get_header(cols)
)
def test_get_row(self):
cols = ['col', 'value', 'col3']
self.assertEqual(
'| col |value|col3 |\n+-----+-----+-----+\n',
TableSerializer(row_width=5)._get_row(cols)
)
def test_entries_to_string(self):
entries = [
Entry(data=dict(col='val', value='field', col3='val3')),
]
self.assertEqual(
'+-----+-----+-----+\n| col |value|col3 |\n+-----+-----+-----+\n| val |field|val3 |\n+-----+-----+-----+\n',
TableSerializer(row_width=5).entries_to_string(entries)
)
| 2.75 | 3 |
src/gui/combobox.py | Epihaius/panda3dstudio | 63 | 12792867 | <filename>src/gui/combobox.py<gh_stars>10-100
from .base import *
from .button import Button
from .menu import Menu
class ComboBox(Button):
_ref_node = NodePath("combobox_ref_node")
def __init__(self, parent, field_width, gfx_ids, text="", icon_id="", tooltip_text=""):
Button.__init__(self, parent, gfx_ids, tooltip_text=tooltip_text)
self.widget_type = "combobox"
self.command = self.__show_menu
self._field_width = field_width
self._field_text = text
self._field_text_in_tooltip = True
self._items = {}
self._item_ids = []
self._item_texts = {}
self._persistent_items = []
self._selected_item_id = None
self._selection_handlers = {}
self._is_field_active = False
self._field_back_img = None
self._field_tint = Skin.colors["combobox_field_tint_default"]
self._input_field = None
if text:
skin_text = Skin.text["combobox"]
font = skin_text["font"]
color = skin_text["color"]
self._field_label = font.create_image(text, color)
else:
self._field_label = None
if icon_id:
x, y, w, h = Skin.atlas.regions[icon_id]
img = PNMImage(w, h, 4)
img.copy_sub_image(Skin.atlas.image, 0, 0, x, y, w, h)
self._combo_icon = img
self._combo_icon_disabled = icon_disabled = PNMImage(img)
icon_disabled.make_grayscale()
icon_disabled -= LColorf(0., 0., 0., .25)
icon_disabled.make_rgb()
else:
self._combo_icon = self._combo_icon_disabled = None
self._popup_menu = Menu(on_hide=self.__on_hide)
l, r, b, t = self.inner_borders
w, h = self.min_size
w = field_width + l + r
size = (w, h)
self.set_size(size, is_min=True)
def destroy(self):
Button.destroy(self)
sel_item_id = self._selected_item_id
if sel_item_id is not None and sel_item_id not in self._persistent_items:
self._items[self._selected_item_id].destroy()
self._input_field = None
self._items.clear()
self._selection_handlers.clear()
self._popup_menu.destroy()
self._popup_menu = None
def __on_hide(self):
if self.active:
self.active = False
self.on_leave(force=True)
def __show_menu(self):
if not self._popup_menu.items:
return
self.active = True
x, y = self.get_pos(ref_node=self._ref_node)
offset_x, offset_y = self.get_menu_offset("bottom")
pos = (x + offset_x, y + offset_y)
offset_x, offset_y = self.get_menu_offset("top")
w, h = self._popup_menu.get_size()
alt_pos = (x + offset_x, y + offset_y - h)
self._popup_menu.show(pos, alt_pos)
def __on_select(self, item_id):
if self._selected_item_id == item_id:
return
update = False
if self._selected_item_id is not None and self._selected_item_id not in self._persistent_items:
index = self._item_ids.index(self._selected_item_id)
selected_item = self._items[self._selected_item_id]
self._popup_menu.add_item(selected_item, index)
update = True
self._selected_item_id = item_id
self.set_text(self._item_texts[item_id])
if self._selected_item_id not in self._persistent_items:
self._popup_menu.remove(self._selected_item_id)
update = True
if update:
self._popup_menu.update()
def set_size(self, size, includes_borders=True, is_min=False):
width, height = Button.set_size(self, size, includes_borders, is_min)
if self._input_field:
l, r, b, t = self.inner_borders
w = width - l - r
h = height - b - t
size = (w, h)
self._input_field.set_size(size, includes_borders, is_min)
return width, height
def has_icon(self):
return self._combo_icon is not None
def set_field_back_image(self, image):
self._field_back_img = image
def __get_image(self, state=None, draw_field=True):
width, height = self.get_size()
image = PNMImage(width, height, 4)
if draw_field:
field_back_img = self._field_back_img * self._field_tint
if self._field_label:
x, y = self.get_field_label_offset()
field_back_img.blend_sub_image(self._field_label, x, y, 0, 0)
x, y = self.get_field_offset()
image.blend_sub_image(field_back_img, x, y, 0, 0)
img = Button.get_image(self, state, composed=False)
image.blend_sub_image(img, 0, 0, 0, 0)
if self._combo_icon:
x, y = self.get_icon_offset()
image.blend_sub_image(self._combo_icon, x, y, 0, 0)
return image
def get_image(self, state=None, composed=False):
field = self._input_field
if not field or field.is_hidden(check_ancestors=False):
return self.__get_image(state)
width, height = self.get_size()
image = PNMImage(width, height, 4)
field_img = field.get_image()
if field_img:
x, y = self.get_field_offset()
image.copy_sub_image(field_img, x, y, 0, 0)
img = self.__get_image(state, draw_field=False)
image.blend_sub_image(img, 0, 0, 0, 0)
return image
def add_item(self, item_id, item_text, item_command=None, index=None,
persistent=False, update=False, select_initial=True):
item = self._popup_menu.add(item_id, item_text, item_command, index=index)
self._items[item_id] = item
self._selection_handlers[item_id] = lambda: self.__on_select(item_id)
if index is None:
self._item_ids.append(item_id)
else:
self._item_ids.insert(index, item_id)
self._item_texts[item_id] = item_text
if persistent:
self._persistent_items.append(item_id)
if select_initial and len(self._items) == 1:
if not persistent:
self._popup_menu.remove(item_id)
self._selected_item_id = item_id
self.set_text(item_text)
if update:
self._popup_menu.update()
def remove_item(self, item_id):
if item_id not in self._item_ids:
return
item = self._items[item_id]
del self._items[item_id]
del self._item_texts[item_id]
del self._selection_handlers[item_id]
index = self._item_ids.index(item_id)
size = len(self._item_ids)
self._item_ids.remove(item_id)
if item_id in self._persistent_items or self._selected_item_id != item_id:
self._popup_menu.remove(item_id, update=True, destroy=True)
else:
item.destroy()
if self._selected_item_id == item_id:
self._selected_item_id = None
if index == size - 1:
index -= 1
if index >= 0:
self.select_item(self._item_ids[index])
else:
self.set_text("")
if item_id in self._persistent_items:
self._persistent_items.remove(item_id)
def update_popup_menu(self):
self._popup_menu.update()
def create_popup_menu(self):
return Menu(on_hide=self.__on_hide)
def set_popup_menu(self, menu):
self._popup_menu = menu
def get_popup_menu(self):
return self._popup_menu
def clear(self):
self._popup_menu.destroy()
self._popup_menu = Menu(on_hide=self.__on_hide)
self._items = {}
self._item_ids = []
self._item_texts = {}
self._persistent_items = []
self._selected_item_id = None
self._selection_handlers = {}
self.set_text("")
def __card_update_task(self):
if self.is_hidden():
return
image = self.get_image(composed=False)
if not image:
return
parent = self.parent
if not parent:
return
x, y = self.get_pos()
w, h = self.get_size()
img = PNMImage(w, h, 4)
parent_img = parent.get_image(composed=False)
if parent_img:
img.copy_sub_image(parent_img, 0, 0, x, y, w, h)
img.blend_sub_image(image, 0, 0, 0, 0)
self.card.copy_sub_image(self, img, w, h)
def __update_card_image(self):
task = self.__card_update_task
if self.is_card_update_delayed():
task_id = "update_card_image"
PendingTasks.add(task, task_id, sort=1, id_prefix=self.widget_id,
batch_id="widget_card_update")
else:
task()
def set_field_tint(self, tint=None):
new_tint = tint if tint else Skin.colors["combobox_field_tint_default"]
if self._field_tint == new_tint:
return False
self._field_tint = new_tint
self.__update_card_image()
return True
def select_none(self):
if self._selected_item_id is not None and self._selected_item_id not in self._persistent_items:
index = self._item_ids.index(self._selected_item_id)
selected_item = self._items[self._selected_item_id]
self._popup_menu.add_item(selected_item, index, update=True)
self._selected_item_id = None
self.set_text("")
def select_item(self, item_id):
if item_id not in self._item_ids:
return
self._selection_handlers[item_id]()
def get_selected_item(self):
return self._selected_item_id
def get_item_ids(self):
return self._item_ids
def allow_field_text_in_tooltip(self, allow=True):
self._field_text_in_tooltip = allow
def set_text(self, text):
if self._field_text == text:
return False
self._field_text = text
if self._field_text_in_tooltip and self.tooltip_text:
self.override_tooltip_text(self.tooltip_text + (": " + text if text else ""))
if text:
skin_text = Skin.text["combobox"]
font = skin_text["font"]
color = skin_text["color"]
self._field_label = font.create_image(text, color)
else:
self._field_label = None
self.__update_card_image()
return True
def get_text(self):
return self._field_text
def set_item_text(self, item_id, text):
if item_id not in self._item_ids:
return
self._item_texts[item_id] = text
if item_id in self._persistent_items or self._selected_item_id != item_id:
self._popup_menu.set_item_text(item_id, text, update=True)
else:
item = self._items[self._selected_item_id]
item.set_text(text)
if self._selected_item_id == item_id:
self.set_text(text)
def get_item_text(self, item_id):
if item_id not in self._item_ids:
return
return self._item_texts[item_id]
def set_item_index(self, item_id, index):
if item_id not in self._item_ids:
return
self._item_ids.remove(item_id)
self._item_ids.insert(index, item_id)
item = self._items[item_id]
if item_id in self._persistent_items or self._selected_item_id != item_id:
self._popup_menu.remove(item_id)
self._popup_menu.add_item(item, index, update=True)
@property
def input_field(self):
return self._input_field
@input_field.setter
def input_field(self, input_field):
self._input_field = input_field
if not self.sizer:
sizer = Sizer("horizontal")
sizer.default_size = self.min_size
self.sizer = sizer
self.sizer.add(input_field)
def show_input_field(self, show=True):
field = self._input_field
if not field or field.is_hidden() != show:
return False
r = field.show() if show else field.hide()
self.__update_card_image()
return r
def is_input_field_hidden(self):
field = self._input_field
if not field or field.is_hidden():
return True
return False
| 2.546875 | 3 |
model/contact.py | OlegM8/python_training | 0 | 12792868 | from sys import maxsize
class Contact:
def __init__(self, f_name=None, l_name=None, company=None, id=None, info=None, phone=None):
self.f_name = f_name
self.l_name = l_name
self.company = company
self.id = id
self.info = info
self.phone = phone
def __repr__(self):
return "%s:%s;%s;%s" % (self.id, self.info, self.f_name, self.company)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.info == other.info
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize | 3.390625 | 3 |
assistive_gym/__main__.py | RCHI-Lab/bodies-uncovered | 3 | 12792869 | import argparse
from .env_viewer import viewer
from .envs.bm_config import BM_Config
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Assistive Gym Environment Viewer')
parser.add_argument('--env', default='ScratchItchJaco-v1',
help='Environment to test (default: ScratchItchJaco-v1)')
bm_config = BM_Config()
parser = bm_config.add_bm_args(parser)
args = parser.parse_args()
bm_config.change_bm_config(args)
viewer(args.env)
| 2 | 2 |
src_python/user/user.py | vasisouv/twitter-api-tutorial | 2 | 12792870 | import tweepy
import json
import pymongo
#from src_python import utilities
# Initialize the API consumer keys and access tokens
consumer_key = "LukFsjKDofVcCdiKsCnxiLx2V"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
# Authenticate tweepy using the keys
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
def get_user(user_id):
print("Searching full information for user with id " + str(user_id))
try:
user_json = api.get_user(user_id)
except tweepy.TweepError as tweep_error:
print("Error with code : " + str(tweep_error.response.text))
return 0
return user_json
def get_user_tweets(user_id):
    timeline = []
    progress = 0
    for status in tweepy.Cursor(api.user_timeline, id=user_id).items():
        timeline.append(status)
        progress += 1
        print("Fetched " + str(progress) + " timeline items so far")
    return timeline
def get_user_network(user_id):
print("Searching network for user with id " + str(user_id))
followers = []
friends = []
max_followers = 100000
max_friends = 100000
try:
for page in tweepy.Cursor(api.followers_ids, id=user_id).pages():
followers.extend(page)
if len(followers) >= max_followers:
break
print("Followers so far : " + str(len(followers)))
print("finished followers")
for page in tweepy.Cursor(api.friends_ids, id=user_id).pages():
friends.extend(page)
if len(friends) >= max_friends:
break
print("Friends so far : " + str(len(friends)))
print("finished friends")
except tweepy.TweepError as tweep_error:
print("Error with code : " + str(tweep_error.response.text))
return 0
print("User with ID: " + user_id + " has " + str(len(followers)) + " followers and " + str(len(friends)) + " friends")
custom_object = {
"id": user_id,
"followers": followers,
"friends": friends
}
return custom_object
if __name__ == '__main__':
# Aristotle University's Twitter user ID
user_id = "234343780"
# <NAME>'s user_id
#user_id = "50374439"
### Get the entire timeline of tweets and retweets of a user ###
statuses = get_user_tweets(user_id)
for status in statuses:
print (status._json["text"])
#### Get full information about the user ###
# user_json = get_user(user_id)
# Access all the information using .*field*
# https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/user-object
# screen_name = str(user_json.screen_name)
# followers_count = str(user_json.followers_count)
# friends_count = str(user_json.friends_count)
#
# print ("This user has the screen name: "+screen_name)
# print ("This user has "+followers_count+" followers")
# print ("This user has "+friends_count+" friends")
#### Get the network (friends, followers) of the user ###
# network = get_user_network(user_id)
# print(network["friends"])
# print(network["followers"])
| 2.984375 | 3 |
ixl_learning_3.py | EternalTitan/ICPC-Practise | 1 | 12792871 | <reponame>EternalTitan/ICPC-Practise
def countX(steps):
RC_MAXIMUM = 1000000
edge_r = edge_c = RC_MAXIMUM
for each_step in steps:
r, c = map(int, each_step.split())
if r < edge_r:
edge_r = r
if c < edge_c:
edge_c = c
return edge_r * edge_c
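

# Illustrative check (not part of the original submission): countX treats each
# step "r c" as capping the affected rectangle at (min r) x (min c), so for the
# hypothetical inputs below the answers are min(2, 4) * min(3, 1) = 2 and 10**12.
if __name__ == "__main__":
    assert countX(["2 3", "4 1"]) == 2
    assert countX(["1000000 1000000"]) == 1000000 * 1000000
    print("countX sanity checks passed")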
| 2.375 | 2 |
Experiment1/phy1021/thermology.py | wzk1015/PhysicsExperiment | 2 | 12792872 | import xlrd
# from xlutils.copy import copy as xlscopy
import shutil
import os
from numpy import sqrt, abs
import sys
sys.path.append('../..')  # Remove this line if this module is ultimately invoked from main.py
from GeneralMethod.PyCalcLib import Fitting
from GeneralMethod.PyCalcLib import Method
from reportwriter.ReportWriter import ReportWriter
class thermology:
report_data_keys = [
'1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20',
'21','22','23','24','25','26','27','28','29',
'101','102','103','104','105','106','107','108','109','110','111','112','113','114','115','116','117',
'118','119','120','121','122','123','124','125','126','127','128','129',
'L','K','J',
'Ua','UJ'
]
PREVIEW_FILENAME = "Preview.pdf"
DATA_SHEET_FILENAME = "data.xlsx"
REPORT_TEMPLATE_FILENAME = "thermology_empty.docx"
REPORT_OUTPUT_FILENAME = "thermology_out.docx"
def __init__(self):
        self.data = {}  # physical quantities measured in the experiment
        self.uncertainty = {}  # uncertainties of those quantities
        self.report_data = {}  # values to be filled into the lab report
print("1021 测量水的溶解热+焦耳热功当量\n1. 实验预习\n2. 数据处理")
while True:
try:
                oper = input("Please choose: ").strip()
except EOFError:
sys.exit(0)
if oper != '1' and oper != '2':
print("输入内容非法!请输入一个数字1或2")
else:
break
if oper == '1':
print("现在开始实验预习")
print("正在打开预习报告......")
os.startfile(self.PREVIEW_FILENAME)
elif oper == '2':
print("现在开始数据处理")
print("即将打开数据输入文件......")
# 打开数据输入文件
os.startfile(self.DATA_SHEET_FILENAME)
input("输入数据完成后请保存并关闭excel文件,然后按回车键继续")
# 从excel中读取数据
self.input_data("./"+self.DATA_SHEET_FILENAME) # './' is necessary when running this file, but should be removed if run main.py
print("数据读入完毕,处理中......")
            # Compute the physical quantities
            self.calc_data1()
            self.calc_data2()
            # Compute the uncertainties
            self.calc_uncertainty()
            print("Generating the lab report...")
            # Generate the lab report
            self.fill_report()
            print("Lab report generated, opening...")
os.startfile(self.REPORT_OUTPUT_FILENAME)
print("Done!")
'''
    Read the data from the Excel spreadsheet
    @param filename: name of the input Excel file
    @return none
'''
def input_data(self, filename):
ws = xlrd.open_workbook(filename).sheet_by_name('thermology1')
        # Read the data from Excel
list_time = []
list_resistance = []
list_temperature = []
list_weight = []
for row in [1, 4, 7]:
for col in range(1, 8):
                list_time.append(float(ws.cell_value(row, col)))  # time
self.data['list_time'] = list_time
for row in [2, 5, 8]:
for col in range(1, 8):
                list_resistance.append(float(ws.cell_value(row, col)))  # resistance
self.data['list_resistance'] = list_resistance
for row in [3, 6, 9]:
for col in range(1, 8):
                list_temperature.append(float(ws.cell_value(row, col)))  # temperature
self.data['list_temperature'] = list_temperature
col = 1
for row in range(10, 14):
            list_weight.append(float(ws.cell_value(row,col)))  # the various masses
self.data['list_weight'] = list_weight
row = 14
        temp_ice = float(ws.cell_value(row, col))  # ice temperature
self.data['temp_ice'] = temp_ice
row = 15
        temp_env = float(ws.cell_value(row, col))  # ambient temperature
self.data['temp_env'] = temp_env
ws = xlrd.open_workbook(filename).sheet_by_name('thermology2')
list_time2 = []
list_resistance2 = []
list_temperature2 = []
for row in [1, 4, 7, 10]:
for col in range(1, 9):
if isinstance(ws.cell_value(row, col), str):
continue
else:
list_time2.append(float(ws.cell_value(row, col)))
self.data['list_time2'] = list_time2
for row in [2, 5, 8, 11]:
for col in range(1, 9):
if isinstance(ws.cell_value(row, col), str):
continue
else:
list_resistance2.append(float(ws.cell_value(row, col)))
self.data['list_resistance2'] = list_resistance2
for row in [3, 6, 9, 12]:
for col in range(1, 9):
if isinstance(ws.cell_value(row, col), str):
continue
else:
list_temperature2.append(float(ws.cell_value(row, col)))
self.data['list_temperature2'] = list_temperature2
col = 1
row = 13
temp_env2 = float(ws.cell_value(row, col))
self.data['temp_env2'] = temp_env2
row = 14
voltage_begin = float(ws.cell_value(row, col))
self.data['voltage_begin'] = voltage_begin
row = 15
voltage_end = float(ws.cell_value(row, col))
self.data['voltage_end'] = voltage_end
row = 16
resitence = float(ws.cell_value(row, col))
self.data['resitence'] = resitence
self.data['c1'] = 0.389e3
self.data['c2'] = 0.389e3
self.data['c0'] = 4.18e3
self.data['ci'] = 1.80e3
def calc_data1(self):
list_weight = self.data['list_weight']
list_time1 = self.data['list_time']
list_temperature1 = self.data['list_temperature']
temp_ice = self.data['temp_ice']
temp_env = self.data['temp_env']
c1 = self.data['c1']
c2 = self.data['c2']
c0 = self.data['c0']
ci = self.data['ci']
m_water = list_weight[1] - list_weight[0]
m_ice = list_weight[2] - list_weight[1]
list_graph = Fitting.linear(list_time1, list_temperature1, show_plot=False)
self.data['list_graph'] = list_graph
temp_begin = list_graph[0] * list_time1[0] + list_graph[1]
temp_end = list_graph[0] * list_time1[(len(list_time1)-1)] + list_graph[1]
self.data['temp_begin'] = temp_begin
self.data['temp_end'] = temp_end
self.data['m_water'] = m_water
self.data['m_ice'] = m_ice
'''
print(temp_begin)
print('\n',temp_end)
print('\n',m_water)
print('\n',m_ice)
print('!1!\n',c0*m_water*0.001+c1*list_weight[3]*0.001+c2*(list_weight[0]-list_weight[3])*0.001)
print('\n!2!\n',temp_begin-temp_end)
print('\n!3!\n',c0*temp_end)
print('\n!4!\n',ci*temp_ice)
'''
L = 1/(m_ice*0.001) * (c0*m_water*0.001+c1*list_weight[3]*0.001+c2*(list_weight[0]-list_weight[3])*0.001) * (temp_begin-temp_end)- c0*temp_end + ci*temp_ice
K = c0 * m_water*0.001 * (list_temperature1[15]-list_temperature1[8]) / ((list_time1[15]-list_time1[8])*(list_temperature1[15]-temp_env))
self.data['L'] = L
self.data['K'] = K
def calc_data2(self):
c1 = self.data['c1']
c0 = self.data['c0']
list_temperature2 = self.data['list_temperature2']
list_weight = self.data['list_weight']
temp_env2 = self.data['temp_env2']
list_time2 = self.data['list_time2']
voltage_begin = self.data['voltage_begin']
voltage_end = self.data['voltage_end']
resitence = self.data['resitence']
m_water = list_weight[1] - list_weight[0]
list_x = []
list_y = []
for i in range(len(list_temperature2)):
if i==len(list_temperature2)-1:
break
list_x.append((list_temperature2[i]+list_temperature2[i+1])/2-temp_env2)
for i in range(len(list_temperature2)):
if i == len(list_temperature2)-1:
break
list_y.append((list_temperature2[i+1]-list_temperature2[i])/((list_time2[i+1]-list_time2[i])*60))
self.data['list_x'] = list_x
self.data['list_y'] = list_y
list_graph2 = Fitting.linear(list_x, list_y, show_plot=False)
self.data['list_graph2'] = list_graph2
J = ((voltage_begin+voltage_end)/2)**2/(list_graph2[1]*resitence*(c0*m_water*0.001+c1*list_weight[3]*0.001+64.38))
self.data['J'] = J
'''
print('b',list_graph2[0])
print('\n a',list_graph2[1])
print('\n r',list_graph2[2])
'''
def calc_uncertainty(self):
list_a = []
list_x = self.data['list_x']
list_y = self.data['list_y']
list_graph2 = self.data['list_graph2']
voltage_begin = self.data['voltage_begin']
voltage_end = self.data['voltage_end']
resitence = self.data['resitence']
c1 = self.data['c1']
c0 = self.data['c0']
list_weight = self.data['list_weight']
m_water = list_weight[1] - list_weight[0]
for i in range(len(list_x)):
list_a.append(list_y[i]-list_graph2[1]*list_x[i])
self.data['list_a'] = list_a
Ua = Method.a_uncertainty(self.data['list_a'])
self.data['Ua'] = Ua
UJ = abs(((voltage_begin+voltage_end)/2)**2/(Ua*resitence*(c0*m_water*0.001+c1*list_weight[3]*0.001 + 64.38)))
self.data['UJ'] = UJ
def fill_report(self):
# 表格:xy
for i, x_i in enumerate(self.data['list_x']):
self.report_data[str(i + 1)] = "%.5f" % (x_i)
for i, y_i in enumerate(self.data['list_y']):
self.report_data[str(i + 101)] = "%.5f" % (y_i)
# 最终结果
self.report_data['L'] = self.data['L']
self.report_data['K'] = self.data['K']
self.report_data['J'] = self.data['J']
self.report_data['Ua'] = self.data['Ua']
self.report_data['UJ'] = self.data['UJ']
RW = ReportWriter()
RW.load_replace_kw(self.report_data)
RW.fill_report(self.REPORT_TEMPLATE_FILENAME, self.REPORT_OUTPUT_FILENAME)
if __name__ == '__main__':
mc = thermology()
| 2.421875 | 2 |
wagtaildocs_previews/tests/urls.py | mikiec84/wagtail-filepreviews | 22 | 12792873 | <gh_stars>10-100
from __future__ import absolute_import, unicode_literals
from django.conf.urls import include, url
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtaildocs_previews import urls as wagtaildocs_urls
urlpatterns = [
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'', include(wagtail_urls)),
]
| 1.320313 | 1 |
chat/filters.py | jbbqqf/okapi | 0 | 12792874 | # -*- coding: utf-8 -*-
from django_filters import (FilterSet, CharFilter, DateTimeFilter,
NumberFilter, BooleanFilter)
from guardian.shortcuts import get_objects_for_user
from rest_framework import filters
from chat.models import Post, Channel
def get_readable_channel_ids(user):
"""
Return a list of channel ids on which user given in parameter has at least
read_channel permission.
It also includes public channels, where anyone can read/write on.
Channel ids are unique.
"""
readable_channels = get_objects_for_user(user, 'chat.read_channel',
use_groups=True)
readable_ids = [c.id for c in readable_channels]
public_channels = Channel.objects.filter(public=True)
for public_channel in public_channels:
readable_ids.append(public_channel.id)
unique_readable_ids = set(readable_ids)
return unique_readable_ids
class ReadableChannelFilter(filters.BaseFilterBackend):
"""
All users cannot see what they want. They are restricted to see only
channels on which they have at least read_channel permission.
"""
def filter_queryset(self, request, queryset, view):
readable_channel_ids = get_readable_channel_ids(request.user)
return queryset.filter(id__in=readable_channel_ids)
class ChannelFilter(FilterSet):
name = CharFilter(name='name', lookup_type='icontains',
label='name contain filter')
public = BooleanFilter(name='public', label='is public ?')
ca_label = 'filter channels created after or on provided date / time'
created_after = DateTimeFilter(name='date', lookup_type='gte',
label=ca_label)
cb_label = 'filter channels created before or on provided date / time'
created_before = DateTimeFilter(name='date', lookup_type='lte',
                                    label=cb_label)
class Meta:
model = Channel
fields = ('name', 'public', 'created_after', 'created_before',)
class ReadablePostFilter(filters.BaseFilterBackend):
"""
Since channels have permissions, posts posted in a channel are not visible
for anyone. This filter makes sure only posts a user can read will be
returned.
"""
def filter_queryset(self, request, queryset, view):
readable_channel_ids = get_readable_channel_ids(request.user)
return queryset.filter(channel__in=readable_channel_ids)
class PostFilter(FilterSet):
author = CharFilter(name='author', lookup_type='icontains',
label='author contain filter')
type = CharFilter(name='type', label='filter on letter value')
channel = NumberFilter(name='channel',
label='filters posts sent on provided channel')
afterid = NumberFilter(name='id', lookup_type='gt',
label='filter posts posted after given post id')
dflabel = 'filter posts posted after or on provided date / time'
datefrom = DateTimeFilter(name='date', lookup_type='gte', label=dflabel)
dtlabel = 'filter posts posted before or on provided date / time'
dateto = DateTimeFilter(name='date', lookup_type='lte', label=dtlabel)
content = CharFilter(name='content', lookup_type='icontains',
label='content contain filter')
class Meta:
model = Post
fields = ('author', 'type', 'content', 'datefrom', 'dateto',)
| 2.25 | 2 |
onetrack/TrackingData.py | murnanedaniel/OneTrack | 1 | 12792875 | <filename>onetrack/TrackingData.py
# import all
import os
import sys
import logging
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy import sparse as sps
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
import networkx as nx
from functools import partial
from .tracking_utils import *
from .plotting_utils import *
def load_single_pytorch_file(file):
"""
Loads a single Pytorch Geometric file
"""
return torch.load(file, map_location="cpu")
def build_single_candidates(instance, building_method, sanity_check, **kwargs):
instance.build_candidates(building_method, sanity_check, **kwargs)
return instance
def evaluate_single_candidates(instance, evaluation_method, **kwargs):
    instance.evaluate_candidates(evaluation_method, **kwargs)
return instance
class TrackingData():
"""
A class that holds a list of Events, specifically for the tracking pipeline.
An Event contains a Graph and an EventTruth object.
"""
def __init__(self, files):
self.files = files
self.event_data = None
self.events = None
self.evaluation = None
logging.info("Loading files")
self.__load_files()
assert self.event_data is not None # Test if files are loaded
logging.info("Building events")
self.__build_events()
assert self.events is not None # Test if events are built
def __len__(self):
return len(self.events)
def __getitem__(self, idx):
event = self.events[idx]
return event
def __load_files(self):
"""
Loads files based on type
"""
file_type = self.__get_file_type()
if file_type == "pytorch_geometric":
self.event_data = self.__load_pytorch_files()
else:
raise ValueError("Unknown file type")
def __build_events(self):
"""
Builds Event objects from event data
"""
# self.events = []
# for data in tqdm(self.event_data):
# self.events.append(Event(data))
self.events = process_map(Event, self.event_data)#, max_workers=1)
def __get_file_type(self):
"""
Determine type of file
"""
try:
sample = torch.load(self.files[0], map_location="cpu")
if str(type(sample)) == "<class 'torch_geometric.data.data.Data'>":
return "pytorch_geometric"
else:
raise ValueError("Unknown file type, this is not a Pytorch Geometric file")
except:
raise ValueError("Unknown file type, there are still more file types to be added!")
def __load_pytorch_files(self):
"""
Loads all Pytorch geometric files in file list
"""
# data = []
# for file in tqdm(self.files):
# data.append(torch.load(file, map_location="cpu"))
data = process_map(load_single_pytorch_file, self.files)#, max_workers=1)
return data
def build_candidates(self, building_method="CC", sanity_check=False, **kwargs):
"""
Builds track candidates from events
"""
logging.info(f"Building candidates with sanity check: {sanity_check}")
build_single_candidates_partial = partial(build_single_candidates, building_method=building_method, sanity_check=sanity_check, **kwargs)
self.events = process_map(build_single_candidates_partial, self.events, max_workers=8)
# for event in tqdm(self.events):
# event.build_candidates(building_method, sanity_check, **kwargs)
def evaluate_candidates(self, evaluation_method="matching", **kwargs):
"""
Evaluates track candidates from events
"""
logging.info("Evaluating candidates")
evaluate_single_candidates_partial = partial(evaluate_single_candidates, evaluation_method=evaluation_method, **kwargs)
self.events = process_map(evaluate_single_candidates_partial, self.events, max_workers=8)
# for event in tqdm(self.events):
# event.evaluate_candidates(evaluation_method, **kwargs)
# TODO: Tidy this up!
n_true_tracks, n_reco_tracks, n_matched_particles, n_matched_tracks, n_duplicated_tracks, n_single_matched_particles = 0, 0, 0, 0, 0, 0
for event in self.events:
n_true_tracks += event.candidates.evaluation["n_true_tracks"]
n_reco_tracks += event.candidates.evaluation["n_reco_tracks"]
n_matched_particles += event.candidates.evaluation["n_matched_particles"]
n_single_matched_particles += event.candidates.evaluation["n_single_matched_particles"]
n_matched_tracks += event.candidates.evaluation["n_matched_tracks"]
n_duplicated_tracks += event.candidates.evaluation["n_duplicated_tracks"]
building_method = event.candidates.building_method
self.evaluation = {
"building_method": building_method,
"evaluation_method": evaluation_method,
"eff": n_matched_particles / n_true_tracks,
"single_eff": n_single_matched_particles / n_true_tracks,
"fr": 1 - (n_matched_tracks / n_reco_tracks),
"dup": n_duplicated_tracks / n_reco_tracks,
}
print(self.evaluation)
print(f"n_true_tracks: {n_true_tracks}, n_reco_tracks: {n_reco_tracks}, n_matched_particles: {n_matched_particles}, n_matched_tracks: {n_matched_tracks}, n_duplicated_tracks: {n_duplicated_tracks}")
def plot_evaluation(self, metric="eff", observable="eta", **kwargs):
"""
Plots evaluation of candidates
"""
if self.evaluation is None:
raise ValueError("No evaluation available")
if self.evaluation["evaluation_method"] == "matching":
self.__plot_matching_evaluation(metric, observable, **kwargs)
else:
raise NotImplementedError("Plotting not implemented yet for that method")
def __plot_matching_evaluation(self, metric="eff", observable="eta", **kwargs):
"""
Plots matching evaluation of candidates
"""
all_particles = pd.concat([event.candidates.evaluation["particles"].merge(event.event_truth.particles, on="particle_id", how="inner") for event in self.events])
plot_observable_performance(all_particles)
class Event():
"""
An Event contains a Graph and an EventTruth object. It represents a unit of particle physics data.
"""
def __init__(self, data):
self.graph = None
self.event_truth = None
self.candidates = None
self.data = self.__process_data(data)
def __process_data(self, data):
"""
Processes data to be used in the pipeline
"""
if str(type(data)) == "<class 'torch_geometric.data.data.Data'>":
self.graph = Graph(data_dict = data.to_dict())
self.event_truth = EventTruth(event_file = data.event_file)
else:
raise ValueError("Unknown data type")
# Define representation
def __repr__(self):
return f"Event(graph=({len(self.graph.hits['x'])} hits, {self.graph.edges['edge_index'].shape[1]} edges), event_truth=({len(self.event_truth)} particles), candidates=({len(self.candidates)} candidates))"
def build_candidates(self, building_method="CC", sanity_check=False, **kwargs):
"""
Builds track candidates from event
"""
self.candidates = self.graph.build_candidates(building_method, sanity_check, **kwargs)
def evaluate_candidates(self, method="matching", **kwargs):
"""
Evaluates track candidates from event
"""
self.candidates.evaluate(method, self.event_truth, **kwargs)
class Graph():
def __init__(self, data_dict):
self.hits = None
self.edges = None
self.graph_data = None
assert type(data_dict) == dict, "Data must be a dictionary"
self.__process_data(data_dict)
# Test if data is loaded
assert self.hits is not None
assert self.edges is not None
assert self.graph_data is not None
# Define representation
def __repr__(self):
return f"Graph(hits={self.hits}, edges={self.edges}, graph_data={self.graph_data})"
def __len__(self):
return len(self.hits["x"])
def __process_data(self, data):
"""
Processes data to be used in the pipeline
"""
if type(data) == dict:
self.__get_hit_data(data)
self.__get_edge_data(data)
self.__get_graph_data(data)
else:
raise ValueError("Unknown data type")
def __get_hit_data(self, data):
"""
Returns hit data
"""
self.hits = {}
assert "x" in data.keys(), "At least need a feature called x, otherwise define default node feature in config" # Check if x is in data
for key in data.keys():
if len(data[key]) == len(data["x"]):
self.hits[key] = data[key]
def __get_edge_data(self, data):
"""
Returns edge data
"""
self.edges = {}
assert "edge_index" in data.keys(), "At least need a feature called edge_index, otherwise define default edge feature in config" # Check if edge_index is in data
for key in data.keys():
if (
len(data[key].shape) > 1 and data[key].shape[1] == data["edge_index"].shape[1] or
len(data[key].shape) == 1 and data[key].shape[0] == data["edge_index"].shape[1]
):
self.edges[key] = data[key]
def __get_graph_data(self, data):
"""
Returns graph data
"""
        self.graph_data = {k: data[k] for k in data.keys() - (self.hits.keys() | self.edges.keys())}
def build_candidates(self, building_method="CC", sanity_check=False, **kwargs):
"""
Builds track candidates from graph
"""
if building_method == "CC":
candidates = self.__get_connected_components(sanity_check, **kwargs)
elif building_method == "AP":
candidates = self.__get_all_paths(sanity_check, **kwargs)
elif building_method == "KF":
candidates = self.__get_kf_candidates(**kwargs)
else:
raise ValueError("Unknown building method")
return candidates
def __get_connected_components(self, sanity_check=False, score_cut=0.5, **kwargs):
"""
Builds connected components from graph
"""
if sanity_check:
edge_mask = self.edges["y"].bool()
else:
edge_mask = self.edges["scores"] > score_cut
row, col = self.edges["edge_index"][:, edge_mask]
edge_attr = np.ones(row.size(0))
N = self.hits["x"].size(0)
sparse_edges = sps.coo_matrix((edge_attr, (row.numpy(), col.numpy())), (N, N))
num_candidates, candidate_labels = sps.csgraph.connected_components(sparse_edges, directed=False, return_labels=True)
candidates = Candidates(self.hits["hid"], candidate_labels, building_method="CC")
return candidates
def __get_kf_candidates(self, **kwargs):
"""
Builds KF candidates from graph
"""
raise NotImplementedError("KF candidates not implemented yet")
def __get_all_paths(self, sanity_check=False, score_cut=0.5, **kwargs):
"""
Returns all paths from graph
"""
if sanity_check:
edge_mask = self.edges["y"].bool()
else:
edge_mask = self.edges["scores"] > score_cut
# Order edges by increasing R
r, phi, z = self.hits["x"].T
R = np.sqrt(r**2 + z**2)
# in_edges are the nodes towards the inner of the detector, out_edges are the nodes towards the outer
in_edges, out_edges = self.edges["edge_index"][:, edge_mask]
# Ensure edges are numpy arrays
if (type(in_edges) != np.ndarray) or (type(out_edges) != np.ndarray):
in_edges = in_edges.numpy()
out_edges = out_edges.numpy()
# Sort edges by increasing R
wrong_direction_mask = R[in_edges] > R[out_edges]
in_edges[wrong_direction_mask], out_edges[wrong_direction_mask] = out_edges[wrong_direction_mask], in_edges[wrong_direction_mask]
starting_nodes = np.unique(in_edges[~np.isin(in_edges, out_edges)])
ending_nodes = np.unique(out_edges[~np.isin(out_edges, in_edges)])
# Build graph
G = nx.DiGraph()
G.add_edges_from(np.stack([in_edges, out_edges]).T)
all_paths = nx.shortest_path(G)
all_paths = {path: all_paths[path] for path in all_paths.keys() if path in starting_nodes}
valid_paths = [all_paths[start_key][end_key]
for start_key in all_paths.keys()
for end_key in all_paths[start_key].keys()
if (start_key != end_key and end_key in ending_nodes)]
hit_list = np.array(list(itertools.chain.from_iterable(valid_paths)))
track_label_list = np.repeat(np.arange(len(valid_paths)), [len(path) for path in valid_paths])
candidates = Candidates(hit_list, track_label_list, building_method="AP")
# TODO: CHECK THAT HIT ID IS USED CORRECTLY!!
return candidates
class EventTruth():
def __init__(self, event_file):
self.particles = None
self.hit_truth = None
assert type(event_file) == str or type(event_file) == np.str_, "Event file must be a string"
self.__process_data(event_file)
# Test data loaded properly
assert self.particles is not None
assert self.hit_truth is not None
# Define representation
def __repr__(self):
return f"EventTruth(particles={self.particles}, hit_truth={self.hit_truth})"
def __len__(self):
return len(self.particles)
def __process_data(self, event_file):
"""
Processes data to be used in the pipeline
"""
self.__get_particle_data(event_file)
self.__get_hit_truth_data(event_file)
def __get_particle_data(self, event_file):
"""
Returns particle data
"""
try:
particle_filename = event_file + "-particles.csv"
self.particles = pd.read_csv(particle_filename)
except:
raise ValueError("Could not find particles file")
def __get_hit_truth_data(self, event_file):
"""
Returns hit truth data
"""
try:
hit_truth_filename = event_file + "-truth.csv"
self.hit_truth = pd.read_csv(hit_truth_filename)
self.hit_truth = self.__process_hit_truth(self.hit_truth)
except:
raise ValueError("Could not find hit truth file")
def __process_hit_truth(self, hit_truth):
"""
Processes hit truth data
"""
hit_truth.drop_duplicates(subset=["hit_id"], inplace=True)
return hit_truth
class Candidates():
def __init__(self, hit_ids, track_ids, building_method, **kwargs):
self.hit_ids = hit_ids
self.track_ids = track_ids
self.building_method = building_method
self.evaluation = None
def __repr__(self):
return f"{self.__len__()} Candidates(hit_ids={self.hit_ids}, track_ids={self.track_ids})"
def __len__(self):
return len(np.unique(self.track_ids))
def get_df(self):
"""
Returns dataframe of candidates
"""
df = pd.DataFrame({"hit_id": self.hit_ids, "track_id": self.track_ids})
return df
def evaluate(self, method, event_truth, **kwargs):
"""
Returns evaluation of candidates
"""
if method == "matching":
self.evaluation = self.__matching_reconstruction(event_truth.particles, event_truth.hit_truth, **kwargs)
elif method == "iou":
self.evaluation = self.__iou_reconstruction(**kwargs)
else:
raise ValueError("Unknown method")
def __matching_reconstruction(self, particles, hit_truth, **kwargs):
"""
Evaluates track candidates from event with matching criteria. Criteria given by ratios of common hits in candidates ("reconstructed") and particles ("truth")
"""
particles, candidates = match_reco_tracks(self.get_df(), hit_truth, particles, build_method = self.building_method, **kwargs)
(n_true_tracks, n_reco_tracks,
n_matched_particles, n_single_matched_particles, n_matched_tracks,
n_duplicated_tracks, n_matched_tracks_poi) = get_statistics(particles, candidates)
evaluation = {
"evaluation_method": "matching",
"particles": particles,
"candidates": candidates,
"eff": n_matched_particles / n_true_tracks,
"fr": 1 - (n_matched_tracks / n_reco_tracks),
"dup": n_duplicated_tracks / n_reco_tracks,
"n_true_tracks": n_true_tracks,
"n_reco_tracks": n_reco_tracks,
"n_matched_particles": n_matched_particles,
"n_single_matched_particles": n_single_matched_particles,
"n_matched_tracks": n_matched_tracks,
"n_duplicated_tracks": n_duplicated_tracks,
"n_matched_tracks_poi": n_matched_tracks_poi
}
return evaluation
def __iou_reconstruction(self, **kwargs):
"""
Evaluates track candidates from event with Intersection over Union (IoU)
"""
raise NotImplementedError("IOU reconstruction not implemented yet")
| 2.4375 | 2 |
todo_api/services/todo_service.py | acuencadev/distributed-todo-api | 0 | 12792876 | import uuid
from typing import List, Optional
from todo_api.extensions import db
from todo_api.models import Todo
def create_todo(text: str, user_id: int) -> Todo:
todo = Todo(public_id=uuid.uuid4(), text=text, user_id=user_id)
db.session.add(todo)
db.session.commit()
return todo
def get_all_todos() -> List[Todo]:
return Todo.query.all()
def get_todo_by_public_id(public_id: str) -> Optional[Todo]:
todo = Todo.query.filter_by(public_id=public_id).first()
return todo
def update_todo(public_id: str, text: str, completed: bool) -> Optional[Todo]:
todo = Todo.query.filter_by(public_id=public_id).first()
if not todo:
return None
todo.text = text
todo.completed = completed
db.session.commit()
return todo
def complete_todo(public_id: str) -> Optional[Todo]:
todo = Todo.query.filter_by(public_id=public_id).first()
if not todo:
return None
todo.completed = True
db.session.commit()
return todo
def delete_todo(public_id: str) -> bool:
todo = Todo.query.filter_by(public_id=public_id).first()
if not todo:
return False
db.session.delete(todo)
db.session.commit()
return True
def delete_all_todos() -> int:
todos_deleted = Todo.query.delete()
db.session.commit()
return todos_deleted
| 2.390625 | 2 |
simpleml/pipelines/__init__.py | ptoman/SimpleML | 15 | 12792877 | <reponame>ptoman/SimpleML
'''
Import modules to register class names in global registry
Define convenience classes composed of different mixins
'''
__author__ = '<NAME>'
from .base_pipeline import Pipeline, AbstractPipeline, DatasetSequence, TransformedSequence
from .validation_split_mixins import Split, SplitContainer, NoSplitMixin, RandomSplitMixin,\
ChronologicalSplitMixin, ExplicitSplitMixin
# Mixin implementations for convenience
class NoSplitPipeline(Pipeline, NoSplitMixin):
pass
class ExplicitSplitPipeline(Pipeline, ExplicitSplitMixin):
pass
class RandomSplitPipeline(RandomSplitMixin, Pipeline):
# Needs to be used as base class because of MRO initialization
pass
class ChronologicalSplitPipeline(ChronologicalSplitMixin, Pipeline):
# Needs to be used as base class because of MRO initialization
pass
| 1.976563 | 2 |
serie3/matrix.py | Koopakiller/Edu-NLA | 0 | 12792878 | # Authors: <NAME> (lambertt) and <NAME> (odafaluy)
import numpy
import scipy
import scipy.linalg
import plot
class Matrix:
"""
Provides Methods for operations with an hilbert- or a special triangular matrix.
"""
def __init__(self, mtype, dim, dtype):
"""
Initializes the class instance.
:param mtype: The matrix type ("hilbert" or "saite" for triangular)
:param dim: The dimension. Must be > 0.
:param dtype: The type to use. Can be "float16", "float32" or "flaot64"
"""
if mtype not in ["saite", "hilbert"]:
raise Exception("Unknown mtype. Allowed are 'hilbert' and 'saite'.")
self.mtype = mtype
if dim <= 0:
raise Exception("dim must be > 0")
self.dim = dim
if dtype not in ["float16", "float32", "float64"]:
raise Exception("Unknown dtype. Allowed are 'float16', 'float32' and 'float64'.")
self.dtype = dtype
self.dtype_constructor = None
self.matrix = None
self.inv = None
self.l = None
self.u = None
self.create_matrix_and_inv()
def create_matrix_and_inv(self):
"""
Calculates the matrix from the values given to the constructor and its inverse.
:return: Nothing.
"""
arr = []
if self.mtype == "saite":
for row in xrange(0, self.dim):
arr.append([])
for col in xrange(0, self.dim):
if row == col:
arr[row].append(2)
elif row - 1 == col or col - 1 == row:
arr[row].append(-1)
else:
arr[row].append(0)
if self.mtype == "hilbert":
arr = scipy.linalg.hilbert(self.dim).tolist()
self.matrix = numpy.array(arr, dtype=self.dtype)
self.inv = scipy.linalg.inv(self.matrix)
def condition(self):
"""
Calculates the condition of the matrix.
:return: The condition of the matrix.
"""
return numpy.linalg.norm(self.matrix, ord=numpy.inf) * numpy.linalg.norm(self.inv, ord=numpy.inf)
def lu(self):
"""
Splits the matrix into l (left lower) and u (right upper) matrices. (Matrix A = LU)
:return: A Tuple l,u of matrices
"""
if self.l is None or self.u is None:
self.l, self.u = scipy.linalg.lu(self.matrix, permute_l=True)
return self.l, self.u
def solve(self, b):
"""
Solves the equation Ax=b for x and the matrix A.
:param b: The vector b to solve the Matrix for.
:return: The vector x from Ax=b.
"""
l, u = self.lu()
x = scipy.linalg.solve_triangular(l, b, lower=True)
x = scipy.linalg.solve_triangular(u, x, lower=False)
return x
def main_31b(mtypes, dims, dtypes):
"""
Executes experiments as described in 3.1B.
:param mtypes: The mtype-values to use.
:param dims: The dimensions to use.
:param dtypes: The dtype-values to use.
:return: Nothing.
"""
for mtype in mtypes:
for dim in dims:
for dtype in dtypes:
print("")
print("Experiment for mtype={0}, dim={1}, dtype={2}".format(mtype, dim, dtype))
identity = numpy.identity(dim, dtype)
matrix = Matrix(mtype, dim, dtype)
m = identity - (numpy.dot(matrix.matrix, matrix.inv))
try:
m_inv = scipy.linalg.inv(m)
except (numpy.linalg.linalg.LinAlgError, ValueError) as ex:
print("Cannot calculate inverse of M: " + ex.message)
continue
condition = numpy.linalg.norm(m, ord=numpy.inf) * numpy.linalg.norm(m_inv, ord=numpy.inf)
print("cond(M) = {1} || I - M M^(-1) || = {0}".format(condition, matrix.condition()))
def main_32b_saite(n):
plot.plot(n)
def main_32b_hilbert(i_max, dtype, n):
"""
Executes experiments as described in 3.2B B. (Hilbert)
:param i_max: The maximum i to use
:param dtype: the data-type to use (float16, float32 or float64)
:param n: The dimension to use.
:return: Nothing.
"""
matrix = Matrix("hilbert", n, dtype)
print("Hilbert Matrix with n={0} and type {1}".format(n, dtype))
result = numpy.identity(n, dtype=dtype)
for i in xrange(1, i_max + 1):
result = numpy.dot(result, matrix.matrix)
print("i = {0}, x^{0} = ".format(i))
print(result)
def main_32b(dtypes, n_iterable, i_iterable):
"""
Executes experiments as described in 3.2B
:param dtypes: the data-type to use (float16, float32 or float64)
:param n_iterable: The n-values to use.
:param i_iterable: The i-values to use. (if i>n it will be ignored).
:return: Nothing.
"""
for dtype in dtypes:
for n in n_iterable:
for i_max in i_iterable:
if i_max > n:
continue
main_32b_hilbert(i_max, dtype, n)
def main(experiment, mtypes=None, dims=None, dtypes=None, n_iterable=None, i_iterable=None):
"""
Executes experiments as described.
See start.py for more information.
:return: Nothing.
"""
if experiment == "3.1B":
main_31b(mtypes, dims, dtypes)
elif experiment == "3.2B - A":
for n in n_iterable:
main_32b_saite(n)
elif experiment == "3.2B - B":
main_32b(dtypes, n_iterable, i_iterable)
else:
print("Unknown experiment")
| 3.265625 | 3 |
examples/quadtree/quadtree_demo_insert.py | joshuaskelly/Toast | 0 | 12792879 | import pygame
import random
from toast.quadtree import QuadTree
from toast.scene_graph import GameObject, Scene
from toast.camera import Camera
from toast.event_manager import EventManager
from examples.demo_game import DemoGame
class QuadTreeVisualizer(GameObject):
def __init__(self, quadtree):
super(QuadTreeVisualizer, self).__init__()
self.quadtree = quadtree
def render(self, surface, offset=(0,0)):
self.render_quadtree(surface, self.quadtree)
def render_quadtree(self, surface, quadtree):
pygame.draw.rect(surface, (255,0,0), quadtree.quadrant, 1)
if quadtree.northwest_tree is not None:
self.render_quadtree(surface, quadtree.northwest_tree)
if quadtree.northeast_tree is not None:
self.render_quadtree(surface, quadtree.northeast_tree)
if quadtree.southwest_tree is not None:
self.render_quadtree(surface, quadtree.southwest_tree)
if quadtree.southeast_tree is not None:
self.render_quadtree(surface, quadtree.southeast_tree)
        if quadtree.bucket:
for item in quadtree.bucket:
item.render(surface)
class RectComponent(GameObject):
def __init__(self, left, top, width, height):
super(RectComponent, self).__init__()
self.left = left
self.top = top
self.width = width
self.height = height
def __getitem__(self, index):
if index == 0:
return self.left
if index == 1:
return self.top
if index == 2:
return self.width
if index == 3:
return self.height
def render(self, surface, offset=(0,0)):
rect = self.left, self.top, self.width, self.height
pygame.draw.rect(surface, (255,255,255), rect, 1)
class NewScene(Scene):
def __init__(self):
super(NewScene, self).__init__()
EventManager.subscribe(self, 'onMouseDown')
Camera.current_camera.viewport = 512, 512
Camera.current_camera.position = 256, 256
w = h = 2**9
region = (0,0,w,h)
self.quadtree = QuadTree([], region)
self.add(QuadTreeVisualizer(self.quadtree))
def onMouseDown(self, event):
        if event.button == 1:
p = DemoGame.camera_to_world(event.pos)
d = 2 ** random.randint(1,5)
self.quadtree.insert(RectComponent(p[0], p[1], d, d))
game = DemoGame((512, 512), NewScene)
game.run()
| 2.578125 | 3 |
migrations/versions/c1ca0249cb60_update_tiles_v0.1.0.py | mzaglia/bdc-db | 0 | 12792880 | """empty message
Revision ID: c1ca0249cb60
Revises: 0b986a10b559
Create Date: 2020-01-07 08:36:09.067866
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c1ca0249cb60'
down_revision = '0b986a10b559'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tiles', sa.Column('max_y', sa.Float(), nullable=True))
op.add_column('tiles', sa.Column('min_x', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tiles', 'min_x')
op.drop_column('tiles', 'max_y')
# ### end Alembic commands ###
| 1.34375 | 1 |
test/language/expressions/python/FullConstTypeTest.py | dkBrazz/zserio | 86 | 12792881 | import unittest
from testutils import getZserioApi
class FullConstTypeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "expressions.zs").full_const_type
def testBitSizeOfWithOptional(self):
fullConstTypeExpression = self.api.FullConstTypeExpression(self.FULL_VALID_VALUE,
self.FULL_ADDITIONAL_VALUE)
self.assertEqual(self.FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITH_OPTIONAL,
fullConstTypeExpression.bitsizeof())
def testBitSizeOfWithoutOptional(self):
fullConstTypeExpression = self.api.FullConstTypeExpression()
fullConstTypeExpression.value = self.FULL_INVALID_VALUE
self.assertEqual(self.FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITHOUT_OPTIONAL,
fullConstTypeExpression.bitsizeof())
FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITH_OPTIONAL = 10
FULL_CONST_TYPE_EXPRESSION_BIT_SIZE_WITHOUT_OPTIONAL = 7
FULL_VALID_VALUE = 0x01
FULL_INVALID_VALUE = 0x00
FULL_ADDITIONAL_VALUE = 0x03
| 2.546875 | 3 |
api/src/opentrons/config/gripper_config.py | Opentrons/protocol_framework | 0 | 12792882 | from __future__ import annotations
from dataclasses import dataclass
import logging
from typing import Tuple, Optional
from typing_extensions import Literal
log = logging.getLogger(__name__)
GripperName = Literal["gripper"]
GripperModel = Literal["gripper_v1"]
DEFAULT_GRIPPER_CALIBRATION_OFFSET = [0.0, 0.0, 0.0]
@dataclass(frozen=True)
class GripperConfig:
gripper_offset: Tuple[float, float, float]
gripper_current: float
display_name: str
name: GripperName
max_travel: float
home_position: float
steps_per_mm: float
idle_current: float
model: GripperModel
DUMMY_GRIPPER_CONFIG = GripperConfig(
gripper_offset=(0.0, 0.0, 0.0),
gripper_current=1.0,
display_name="dummy_gripper",
name="gripper",
max_travel=50.0,
home_position=0.0,
steps_per_mm=480.0,
idle_current=0.2,
model="gripper_v1",
)
def load(
gripper_model: Optional[int] = None, gripper_id: Optional[int] = None
) -> GripperConfig:
return DUMMY_GRIPPER_CONFIG # TODO: load actual gripper config
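# Quick illustrative check of the placeholder behaviour: load() currently
# ignores its arguments and returns DUMMY_GRIPPER_CONFIG, so this only shows
# the shape of the returned dataclass.
if __name__ == "__main__":
    config = load()
    print(config.name, config.model, config.gripper_offset)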
| 2.484375 | 2 |
importer/management/commands/delete_categories.py | dragon-dxw/nhs-ei.website | 0 | 12792883 | <gh_stars>0
import sys
from django.core.management.base import BaseCommand
from cms.categories.models import Category
from cms.posts.models import Post
from cms.blogs.models import Blog
class Command(BaseCommand):
help = "Deletes categories (bulk action)"
def handle(self, *args, **options):
"""remove categories first"""
posts = Post.objects.all()
blogs = Blog.objects.all()
if posts or blogs:
sys.stdout.write(
"⚠️ Please delete posts and blogs before running this commend\n"
)
sys.exit()
categories = Category.objects.all()
if not categories.count():
sys.stdout.write("✅ Categories is empty\n")
else:
categories_length = len(categories)
sys.stdout.write("Categories to delete: {}\n".format(categories_length))
for category in categories:
sys.stdout.write("-")
category.delete()
categories_length -= 1
sys.stdout.write("\n✅ Complete\n")
| 2.1875 | 2 |
application/pages/dialog_template/__init__.py | slamer59/awesome-panel | 0 | 12792884 | <filename>application/pages/dialog_template/__init__.py
"""Provides a servable view of a Panel application with a dialog"""
from .app import view
| 1.382813 | 1 |
app/visualization.py | mateuszbaranczyk/portfolio | 0 | 12792885 | import matplotlib.pyplot as plt
import pandas as pd
from app.requester import ExchangeRateRequester
class Grapher:
def __init__(self) -> None:
self.exchange_rate_requester = ExchangeRateRequester()
def _create_historical_rates_df(self, transactions) -> pd.DataFrame:
assert transactions, "There are no transactions"
first_transaction = transactions[0].date
historical_rates_date = self.exchange_rate_requester.get_historical_bids(first_transaction)
historical_rates_df = pd.DataFrame(list(historical_rates_date.items()), columns=["date", "rate"])
return historical_rates_df
@staticmethod
def run_calculations(operations_df: pd.DataFrame) -> pd.DataFrame:
operations_df["transaction[+/-]"] = operations_df["transaction[+/-]"].fillna(0)
operations_df["portfolio_value"] = operations_df["transaction[+/-]"].cumsum()
operations_df["transaction_rate"] = operations_df["transaction_rate"].fillna(method="backfill")
operations_df["value_pln_temp"] = operations_df["portfolio_value"] * operations_df["rate"]
operations_df["value_pln_after_transaction"] = (
operations_df["portfolio_value"] * operations_df["transaction_rate"]
)
operations_df["profit"] = (
operations_df["value_pln_temp"] / operations_df["value_pln_after_transaction"] - 1
) * 100
return operations_df
def _create_operations_df(self, transactions: list) -> pd.DataFrame:
assert transactions, "There are no transactions"
operations_df = self._create_historical_rates_df(transactions)
transactions_df = pd.DataFrame(transactions, columns=["date", "transaction[+/-]", "transaction_rate"])
operations_df = pd.merge(operations_df, transactions_df, on="date", how="outer")
calculated_operations_df = self.run_calculations(operations_df)
return calculated_operations_df
def plot_historical_rates(self, historical_rates: pd.DataFrame) -> None:
historical_rates.plot(x="date", y="rate")
plt.grid()
plt.xlabel("date")
plt.xticks(rotation=45)
plt.ylabel("rate")
plt.title("Historical rates [PLN]")
plt.tight_layout()
plt.show()
def plot_portfolio_value_pln(self, operations: pd.DataFrame) -> None:
operations.plot(x="date", y="value_pln_temp")
plt.grid()
plt.xlabel("date")
plt.xticks(rotation=55)
plt.ylabel("value")
plt.title("Historical portfolio value [PLN]")
plt.tight_layout()
plt.show()
def plot_profit(self, operations: pd.DataFrame) -> None:
operations.plot(x="date", y="profit")
plt.grid()
plt.xlabel("date")
plt.xticks(rotation=45)
plt.ylabel("profit")
plt.title("Historical portfolio profit [%]")
plt.tight_layout()
plt.show()
| 2.828125 | 3 |
biorxiv/biorxiv_extractor.py | danich1/annorxiver | 4 | 12792886 | <reponame>danich1/annorxiver
import os
from pathlib import Path
import re
import subprocess
import tqdm
import pandas as pd
files = (
list(Path("Back_Content").rglob("*.meca"))
+
list(Path("Current_Content").rglob("*.meca"))
)
doc_file_hash_mapper = []
already_seen = set()
for file_name in tqdm.tqdm(files):
doc_hash = file_name.name
result = (
subprocess.Popen(
f"unzip -l {file_name}",
shell=True, stdout=subprocess.PIPE
)
.communicate()
)
    match = re.search(r'content/([\d]+)\.xml', str(result[0]))
    if match is None:
        print(f"{file_name} did not match the file pattern [\d]+")
        continue
    content_file_name = match.group(1)
    # bump the version suffix until this article name is unique across archives
    version = 1
    updated_file_name = f"{content_file_name}_v{version}"
    while updated_file_name in already_seen:
        version += 1
        updated_file_name = f"{content_file_name}_v{version}"
    already_seen.add(updated_file_name)
doc_file_hash_mapper.append(
{
"hash": str(file_name),
"doc_number": f"{updated_file_name}.xml"
}
)
result = (
subprocess
.Popen(
f"unzip -jo {file_name} content/{content_file_name}.xml -d biorxiv_articles/.",
shell=True, stdout=subprocess.PIPE
)
.communicate()
)
rename_result = (
subprocess
.Popen(
f"mv biorxiv_articles/{content_file_name}.xml biorxiv_articles/{updated_file_name}.xml",
shell=True, stdout=subprocess.PIPE
)
.communicate()
)
(
pd.DataFrame
.from_records(doc_file_hash_mapper)
.to_csv("biorxiv_doc_hash_mapper.tsv", sep="\t", index=False)
)
| 2.28125 | 2 |
tantrum/workflows/__init__.py | lifehackjim/tantrum | 3 | 12792887 | # -*- coding: utf-8 -*-
"""Workflow encapsulation package for performing actions using the Tanium API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import time
from collections import OrderedDict
from . import exceptions
from .. import utils
from .. import results
class Workflow(object):
def __init__(self, adapter, obj, lvl="info", result=None):
"""Constructor.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
obj (:obj:`tantrum.api_models.ApiModel`):
API Object to use for this workflow.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
result (:obj:`tantrum.results.Result`, optional):
Result object that ``obj`` was generated from.
Defaults to: None.
"""
self._lvl = lvl
self.log = utils.logs.get_obj_log(obj=self, lvl=lvl)
self.obj = obj
self.adapter = adapter
self._result = result
self._last_result = result
def __repr__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
return self.__str__()
@property
def api_objects(self):
return self.adapter.api_objects
class Clients(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
bits = ["count={}".format(len(self.obj))]
bits = "({})".format(", ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@staticmethod
def build_last_reg_filter(
adapter, last_reg=300, operator="greaterequal", not_flag=False, filters=None
):
"""Build a set of filters to be used in :meth:`Clients.get_all.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
last_reg (:obj:`int`, optional):
Only return clients that have registered in N number of seconds.
Defaults to: 300.
operator (:obj:`str`, optional):
Defines how the last_registered attribute of each client status is
compared against the value in last_reg.
Must be one of :data:`OPERATOR_MAP`.
Defaults to: "greaterequal".
            not_flag (:obj:`bool`, optional):
                If True, have the API return all rows that do not match the operator.
                Defaults to: False.
filters (:obj:`object`, optional):
If a CacheFilterList object is supplied, the last_registration filter
generated by this method will be appended to it. If this is None,
a new CacheFilterList will be created with the last_registration filter
being the only item in it.
Defaults to: None.
Returns:
            :obj:`object`: CacheFilterList API object with the last_registration filter appended
"""
op_dict = get_operator_map(operator)
now_dt = datetime.datetime.utcnow()
ago_td = datetime.timedelta(seconds=-(int(last_reg)))
ago_dt = now_dt + ago_td
ago_str = ago_dt.strftime(adapter.api_objects.module_dt)
cfilter = adapter.api_objects.CacheFilter(
field="last_registration",
type="Date",
operator=op_dict["op"],
not_flag=not_flag,
value=ago_str,
)
filters = filters or adapter.api_objects.CacheFilterList()
filters.append(cfilter)
return filters
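    # Illustrative use of the helper above (sketch only; assumes "adapter" is a
    # tantrum adapter built elsewhere, and 600 seconds is an example window):
    #
    #   filters = Clients.build_last_reg_filter(adapter=adapter, last_reg=600)
    #   clients = Clients.get_all(adapter=adapter, filters=filters)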
@classmethod
def get_all_iter(
cls,
adapter,
filters=None,
sort_fields="last_registration",
page_size=1000,
max_page_count=0,
cache_expiration=600,
sleep=2,
lvl="info",
**kwargs
):
"""Get all Clients as an iterator.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
filters (:obj:`object`, optional):
Tantrum CacheFilterList returned from
:meth:`Clients.build_last_reg_filter`.
Defaults to: None.
sort_fields (:obj:`str`, optional):
Attribute of a ClientStatus object to have API sort the return on.
Defaults to: "last_registration".
page_size (:obj:`int`, optional):
Get N number of clients at a time from the API.
If 0, disables paging and gets all clients in one call.
Defaults to: 1000.
max_page_count (:obj:`int`, optional):
Only fetch up to this many pages. If 0, get all pages.
Defaults to: 0.
cache_expiration (:obj:`int`, optional):
When page_size is not 0, have the API keep the cache of clients
for this many seconds before expiring the cache.
Defaults to: 600.
sleep (:obj:`int`, optional):
Wait N seconds between fetching each page.
Defaults to: 2.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get`.
Yields:
:obj:`tantrum.api_objects.ApiObjects`: ClientStatus API object
"""
log = utils.logs.get_obj_log(obj=cls, lvl=lvl)
get_args = {}
get_args.update(kwargs)
get_args["cache_sort_fields"] = sort_fields
get_args["obj"] = adapter.api_objects.ClientStatus()
row_start = 0
row_count = page_size
if filters is not None:
get_args["cache_filters"] = filters
if page_size:
get_args["row_start"] = row_start
get_args["row_count"] = row_count
get_args["cache_expiration"] = cache_expiration
result = adapter.cmd_get(**get_args)
result_obj = result()
received_rows = len(result_obj)
result_cache = getattr(result_obj, "cache_info", None)
total_rows = getattr(result_cache, "filtered_row_count", 0)
cache_id = getattr(result_cache, "cache_id", None)
get_args["cache_id"] = cache_id
page_count = 1
m = "Received initial page length={len}, cache_info={cache!r}"
m = m.format(len=received_rows, cache=result_cache)
log.info(m)
for obj in result_obj:
yield obj
if page_size:
paging_get_args = {k: v for k, v in get_args.items()}
while True:
if max_page_count and page_count >= max_page_count:
m = "Reached max page count {c}, considering all clients fetched"
m = m.format(c=max_page_count)
log.info(m)
break
if received_rows >= total_rows:
m = "Reached total rows count {c}, considering all clients fetched"
m = m.format(c=total_rows)
log.info(m)
break
page_count += 1
row_start += row_count
paging_get_args["row_start"] = row_start
paging_result = adapter.cmd_get(**paging_get_args)
log.debug(result.pretty_bodies())
paging_result_obj = paging_result()
page_rows = len(paging_result_obj)
received_rows += page_rows
m = [
"Received page_rows={page_rows}",
"received_rows={received_rows}",
"total_rows={total_rows}",
]
m = ", ".join(m)
m = m.format(
page_rows=page_rows,
received_rows=received_rows,
total_rows=total_rows,
)
log.info(m)
for obj in paging_result_obj:
yield obj
time.sleep(sleep)
@classmethod
def get_all(cls, adapter, **kwargs):
"""Get all Clients.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
**kwargs:
rest of kwargs:
Passed to :meth:`Clients.get_all_iter`.
Returns:
:obj:`tantrum.api_objects.ApiObjects`: SystemStatusList API object
"""
obj = adapter.api_objects.SystemStatusList()
for client_obj in cls.get_all_iter(adapter=adapter, **kwargs):
obj.append(client_obj)
return obj
class Sensor(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
bits = [
"name={!r}".format(self.obj.name),
"filter={}".format(", ".join(self.filter_vals)),
]
if self.params_defined or self.param_values:
bits += [
"params_defined={}".format(list(self.params_defined.keys())),
"param_values={}".format(list(self.param_values.items())),
]
bits = "({})".format(", ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@classmethod
def get_by_name(cls, adapter, name, lvl="info"):
"""Get a sensor object by name.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
name (:obj:`str`):
Name of sensor to fetch.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Sensor`
"""
result = adapter.cmd_get(obj=adapter.api_objects.Sensor(name=name))
return cls(adapter=adapter, obj=result(), lvl=lvl, result=result)
@classmethod
def get_by_id(cls, adapter, id, lvl="info"):
"""Get a sensor object by id.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
id (:obj:`int`):
id of sensor to fetch.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Sensor`
"""
result = adapter.cmd_get(obj=adapter.api_objects.Sensor(id=id))
return cls(adapter=adapter, obj=result(), lvl=lvl, result=result)
@property
def params_defined(self):
"""Get the parameter definitions for this sensor.
Notes:
Will try to resolve a default value and store it in "derived_default" key
for each parameter definition returned.
Returns:
:obj:`collections.OrderedDict`
"""
param_defs = json.loads(self.obj.parameter_definition or "{}")
params = param_defs.get("parameters", [])
for p in params:
pdef = p.get("defaultValue", "")
pval = p.get("value", "")
pvals = p.get("values", [])
if pdef not in ["", None]:
derived_default = pdef
elif pval not in ["", None]:
derived_default = pval
elif pvals:
derived_default = pvals[0]
else:
derived_default = ""
p["derived_default"] = derived_default
return OrderedDict((p["key"], p) for p in params)
@property
def param_values(self):
"""Get all of the parameter key and values.
Returns:
:obj:`OrderedDict`
"""
ret = OrderedDict()
for k in self.params_defined:
ret[k] = ""
for p in self.params:
ret[p.key] = p.value
return ret
@property
def params(self):
"""Get the parameters that are set for this sensor.
Returns:
:obj:`tantrum.api_objects.ApiObjects`: ParameterList API object
"""
if not hasattr(self, "_params"):
self._params = self.api_objects.ParameterList()
return self._params
def set_parameter(
self, key, value="", derive_default=True, delim="||", allow_undefined=True
):
"""Set a parameters value for this sensor.
Args:
key (:obj:`str`):
Key name of parameter to set.
value (:obj:`str`, optional):
Value of parameter to set.
Defaults to: "".
derive_default (:obj:`bool`, optional):
Get default value from parameter definition if value is "".
Defaults to: True.
delim (:obj:`str`, optional):
String to put before and after parameter key name when sending to API.
Defaults to: "||".
allow_undefined (:obj:`bool`, optional):
Allow parameter keys that are not in the parameters definition
for this sensor to be set.
                Throws exception if False and key not in :attr:`Sensor.params_defined`.
Defaults to: True.
"""
param_def = self.params_defined.get(key, None)
if param_def is None:
m = "Parameter key {o!r} is not one of the defined parameters {ov}"
m = m.format(o=key, ov=list(self.params_defined.keys()))
if allow_undefined:
self.log.info(m)
else:
raise exceptions.ModuleError(m)
elif derive_default and value == "":
value = param_def.get("derived_default", "")
key_delim = "{d}{key}{d}".format(d=delim, key=key)
param = self.api_objects.Parameter(key=key_delim, value=value)
self.params.append(param)
@property
def filter(self):
"""Get the filter for this sensor.
Returns:
:obj:`tantrum.api_objects.ApiObjects`: Filter API object
"""
if not hasattr(self, "_filter"):
self._filter = self.api_objects.Filter()
self._filter.sensor = self.api_objects.Sensor()
self._filter.sensor.hash = self.obj.hash
return self._filter
@property
def filter_vals(self):
"""Get the key value pairs of the filter for this sensor.
Returns:
:obj:`list` of :obj:`str`
"""
if any([self.filter.value, self.filter.operator]):
keys = [
"operator",
"value",
"ignore_case_flag",
"not_flag",
"all_values_flag",
"max_age_seconds",
"value_type",
]
vals = ["{}: {!r}".format(k, getattr(self.filter, k)) for k in keys]
else:
vals = []
return vals
def set_filter(
self,
value,
operator="regex",
ignore_case_flag=True,
not_flag=False,
all_values_flag=False,
max_age_seconds=0,
type=None,
):
"""Set a filter for this sensor to be used in a question.
Args:
value (:obj:`str`):
Filter sensor rows returned on this value.
operator (:obj:`str`, optional):
Operator to use for filter_value.
Must be one of :data:`OPERATOR_MAP`.
Defaults to: "regex".
ignore_case_flag (:obj:`bool`, optional):
Ignore case when filtering on value.
Defaults to: True.
not_flag (:obj:`bool`, optional):
If set, negate the match.
Defaults to: False.
max_age_seconds (:obj:`int`, optional):
How old a sensor result can be before we consider it invalid.
0 means to use the max age property of the sensor.
Defaults to: 0.
all_values_flag (:obj:`bool`, optional):
Have filter match all values instead of any value.
Defaults to: False.
type (:obj:`str`, optional):
Have filter consider the value type as this.
Must be one of :data:`TYPE_MAP`
Defaults to: None.
"""
op_dict = get_operator_map(operator)
if type:
get_type_map(type)
self.filter.value = op_dict["tmpl"].format(value=value)
self.filter.operator = op_dict["op"]
self.filter.ignore_case_flag = ignore_case_flag
self.filter.not_flag = not_flag
self.filter.all_values_flag = all_values_flag
self.filter.max_age_seconds = max_age_seconds
self.filter.value_type = type
def build_select(self, set_param_defaults=True, allow_empty_params=False):
select = self.api_objects.Select()
select.filter = self.filter
select.sensor = self.api_objects.Sensor()
for key in self.params_defined:
if key not in self.param_values and set_param_defaults:
self.set_parameter(key=key, derive_default=True)
for param in self.params:
if param.value in ["", None] and not allow_empty_params:
m = "Parameter {p.key!r} value {p.value!r} is empty, definition: {d}"
m = m.format(p=param, d=self.params_defined.get(key, None))
raise exceptions.ModuleError(m)
if self.params:
select.sensor.parameters = self.params
select.sensor.source_id = self.obj.id
select.filter.sensor.id = self.obj.id
else:
select.sensor.hash = self.obj.hash
select.WORKFLOW = self
return select
class Question(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
atmpl = "{k}='{v}'".format
attrs = ["id", "query_text"]
bits = [atmpl(k=attr, v=getattr(self.obj, attr, None)) for attr in attrs]
bits += [atmpl(k=k, v=v) for k, v in self.expiration.items()]
bits = "(\n {},\n)".format(",\n ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@classmethod
def new(cls, adapter, lvl="info"):
"""Create a new Question workflow.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Question`
"""
return cls(obj=adapter.api_objects.Question(), adapter=adapter, lvl=lvl)
@classmethod
def get_by_id(cls, adapter, id, lvl="info"):
"""Get a question object by id.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
id (:obj:`int`):
id of question to fetch.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
Returns:
:obj:`Question`
"""
result = adapter.cmd_get(obj=adapter.api_objects.Question(id=id))
return cls(adapter=adapter, obj=result(), lvl=lvl, result=result)
def _check_id(self):
"""Check that question has been asked by seeing if self.obj.id is set."""
if not self.obj.id:
m = "No id issued yet, ask the question!"
raise exceptions.ModuleError(m)
@property
def expiration(self):
"""Get expiration details for this question.
Returns:
:obj:`dict`
"""
now_dt = datetime.datetime.utcnow()
now_td = datetime.timedelta()
ret = {
"expiration": now_dt,
"expire_in": now_td,
"expire_ago": now_td,
"expired": True,
}
if self.obj.expiration:
ex_dt = self.api_objects.module_dt_format(self.obj.expiration)
is_ex = now_dt >= ex_dt
ret["expiration"] = ex_dt
ret["expired"] = is_ex
if is_ex:
ret["expire_ago"] = now_dt - ex_dt
else:
ret["expire_in"] = ex_dt - now_dt
return ret
def refetch(self):
"""Re-fetch this question."""
self._check_id()
result = self.adapter.cmd_get(obj=self.obj)
self._last_result = result
self.obj = result()
def ask(self, **kwargs):
"""Ask the question.
Args:
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_add`.
Notes:
If question has already been asked (id is set), we wipe out attrs:
["id", "context_group", "management_rights_group"], then add it.
"""
if self.obj.id:
wipe_attrs = ["id", "context_group", "management_rights_group"]
for attr in wipe_attrs:
setattr(self.obj, attr, None)
result = self.adapter.cmd_add(obj=self.obj, **kwargs)
self._last_result = result
self.obj = result()
self.refetch()
def add_left_sensor(
self, sensor, set_param_defaults=True, allow_empty_params=False
):
"""Add a sensor to the left hand side of the question.
Args:
sensor (:obj:`Sensor`):
Sensor workflow object.
set_param_defaults (:obj:`bool`, optional):
If sensor has parameters defined, and no value is set,
try to derive the default value from each parameters definition.
Defaults to: True.
            allow_empty_params (:obj:`bool`, optional):
                If False, throw an exception when a sensor parameter value
                is not set, "", or None.
                Defaults to: False.
"""
select = sensor.build_select(
set_param_defaults=set_param_defaults, allow_empty_params=allow_empty_params
)
if not getattr(self.obj, "selects", None):
self.obj.selects = self.api_objects.SelectList()
self.obj.selects.append(select)
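    # Typical flow with the methods in this class (sketch only; assumes an
    # existing tantrum adapter and a sensor name, e.g. "Computer Name", that is
    # defined on the server):
    #
    #   sensor = Sensor.get_by_name(adapter=adapter, name="Computer Name")
    #   question = Question.new(adapter=adapter)
    #   question.add_left_sensor(sensor=sensor)
    #   question.ask()
    #   question.answers_poll()
    #   answers = question.answers_get_data()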
def answers_get_info(self, **kwargs):
"""Return the ResultInfo for this question.
Args:
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_info`.
Returns:
:obj:`tantrum.api_models.ApiModel`: ResultInfoList API Object
"""
self._check_id()
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
result = self.adapter.cmd_get_result_info(**cmd_args)
self._last_result = result
infos = result()
self._last_infos = infos
m = "Received answers info: {infos}"
m = m.format(infos=infos.serialize())
self.log.debug(m)
self.log.debug(format(self))
return infos
def answers_poll(
self,
poll_pct=99,
poll_secs=0,
poll_total=0,
poll_sleep=5,
max_poll_count=0,
**kwargs
):
"""Poll for answers from clients for this question.
Args:
poll_sleep (:obj:`int`, optional):
Check for answers every N seconds.
Defaults to: 5.
poll_pct (:obj:`int`, optional):
Wait until the percentage of clients total is N percent.
Defaults to: 99.
poll_secs (:obj:`int`, optional):
If not 0, wait until N seconds for pct of clients total instead of
until question expiration.
Defaults to: 0.
poll_total (:obj:`int`, optional):
If not 0, wait until N clients have total instead of
``estimated_total`` of clients from API.
Defaults to: 0.
max_poll_count (:obj:`int`, optional):
If not 0, only poll N times.
Defaults to: 0.
**kwargs:
rest of kwargs:
Passed to :meth:`answers_get_info`.
Returns:
:obj:`object`: ResultInfoList API object
"""
# TODO: Add wait till error_count / no_results_count == 0
self._check_id()
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
start = datetime.datetime.utcnow()
if poll_secs:
stop_dt = start + datetime.timedelta(seconds=poll_secs)
else:
stop_dt = self.expiration["expiration"]
m = "Start polling loop for answers until for {o} until {stop_dt}"
m = m.format(o=self, stop_dt=stop_dt)
self.log.debug(m)
infos = self.answers_get_info(**kwargs)
info = infos[0]
est_total = info.estimated_total
        this_total = est_total
if poll_total and poll_total <= est_total:
this_total = poll_total
now_pct = utils.tools.calc_percent(part=info.mr_passed, whole=this_total)
poll_count = 0
while True:
poll_count += 1
m = "New polling loop #{c} for {o}"
m = m.format(c=poll_count, o=self)
self.log.debug(m)
if now_pct >= poll_pct:
m = "Reached {now_pct} out of {pct}, considering all answers in"
m = m.format(now_pct=PCT_FMT(now_pct), pct=PCT_FMT(poll_pct))
self.log.info(m)
break
if datetime.datetime.utcnow() >= stop_dt:
m = "Reached stop_dt {stop_dt}, considering all answers in"
m = m.format(stop_dt=stop_dt)
self.log.info(m)
break
if self.expiration["expired"]:
m = "Reached expiration {expiration}, considering all answers in"
m = m.format(expiration=self.expiration)
self.log.info(m)
break
if max_poll_count and poll_count >= max_poll_count:
m = "Reached max poll count {c}, considering all answers in"
m = m.format(c=max_poll_count)
self.log.info(m)
break
infos = self.answers_get_info(**kwargs)
info = infos[0]
now_pct = utils.tools.calc_percent(part=info.mr_passed, whole=this_total)
m = [
"Answers in {now_pct} out of {pct}",
"{info.mr_passed} out of {this_total}",
"estimated_total: {info.estimated_total}",
"poll count: {c}",
]
m = ", ".join(m)
m = m.format(
now_pct=PCT_FMT(now_pct),
pct=PCT_FMT(poll_pct),
info=info,
this_total=this_total,
c=poll_count,
)
self.log.info(m)
time.sleep(poll_sleep)
end = datetime.datetime.utcnow()
elapsed = end - start
m = [
"Finished polling in: {dt}",
"clients answered: {info.mr_passed}",
"estimated clients: {info.estimated_total}",
"rows in answers: {info.row_count}",
"poll count: {c}",
]
m = ", ".join(m)
m = m.format(dt=elapsed, info=info, c=poll_count)
self.log.info(m)
return infos
def answers_get_data(self, hashes=False, **kwargs):
"""Get the answers for this question.
Args:
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values.
Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Notes:
This will not use any paging, which means ALL answers will be returned
in one API response. For large data sets of answers, this is unwise.
Returns:
:obj:`tantrum.api_models.ApiModel`: ResultDataList API Object
"""
self._check_id()
start = datetime.datetime.utcnow()
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
end = datetime.datetime.utcnow()
elapsed = end - start
m = "Finished getting answers in {dt}"
m = m.format(dt=elapsed)
self.log.info(m)
datas = result()
self._last_datas = datas
return datas
def answers_get_data_paged(
self,
page_size=1000,
max_page_count=0,
max_row_count=0,
cache_expiration=900,
hashes=False,
sleep=5,
**kwargs
):
"""Get the answers for this question one page at a time.
Args:
page_size (:obj:`int`, optional):
Size of each page to fetch at a time.
Defaults to: 1000.
max_page_count (:obj:`int`, optional):
Only fetch up to this many pages. If 0, get all pages.
Defaults to: 0.
max_row_count (:obj:`int`, optional):
Only fetch up to this many rows.
Defaults to: 0.
cache_expiration (:obj:`int`, optional):
Have the API keep the cache_id that is created on initial get
answers page alive for N seconds.
Defaults to: 900.
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values
Defaults to: False.
sleep (:obj:`int`, optional):
Wait N seconds between fetching each page.
Defaults to: 5.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Notes:
If max_page_count and max_row_count are 0, fetch pages until a page
returns no answers or the expected row count is hit.
Returns:
:obj:`tantrum.api_models.ApiModel`: ResultDataList API Object
"""
self._check_id()
start = datetime.datetime.utcnow()
row_start = 0
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["row_start"] = row_start
cmd_args["row_count"] = page_size
cmd_args["cache_expiration"] = cache_expiration
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
datas = result()
self._last_datas = datas
data = datas[0]
cmd_args["cache_id"] = data.cache_id
cmd_args["row_start"] += page_size
m = [
"Received initial answers: {d.rows}",
"expected row_count: {d.row_count}",
"estimated total clients: {d.estimated_total}",
]
m = ", ".join(m)
m = m.format(d=data)
self.log.info(m)
all_rows = data.rows
page_count = 1
page_rows = all_rows
while True:
if len(all_rows or []) >= data.row_count:
m = "Received expected row_count {c}, considering all answers received"
m = m.format(c=data.row_count)
self.log.info(m)
break
if not page_rows:
m = "Received a page with no answers, considering all answers received"
self.log.info(m)
break
if max_page_count and page_count >= max_page_count:
m = "Reached max page count {c}, considering all answers in"
m = m.format(c=max_page_count)
self.log.info(m)
break
if max_row_count and len(all_rows or []) >= max_row_count:
m = "Hit max pages of {max_row_count}, considering all answers received"
m = m.format(max_row_count=max_row_count)
self.log.info(m)
page_count += 1
page_result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = page_result
# this should catch errors where API returns result data as None sometimes
# need to refetch data for N retries if that happens
page_datas = page_result()
self._last_datas = page_datas
page_data = page_datas[0]
page_rows = page_data.rows
m = "Received page #{c} answers: {rows}"
m = m.format(c=page_count, rows=len(page_rows or []))
self.log.info(m)
all_rows += page_rows
cmd_args["row_start"] += page_size
time.sleep(sleep)
end = datetime.datetime.utcnow()
elapsed = end - start
m = "Finished getting {rows} answers in {dt}"
m = m.format(rows=len(all_rows or []), dt=elapsed)
self.log.info(m)
return datas
def answers_sse_start_xml(self, hashes=False, **kwargs):
"""Start up a server side export for XML format and get an export_id.
Args:
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values
Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Returns:
:obj:`str`:
"""
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["export_flag"] = True
cmd_args["export_format"] = 1
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
m = ["Received Server Side Export start response for XML format", "code={c}"]
m = ", ".join(m)
m = m.format(c=result.status_code)
self.log.debug(m)
export_id = result.object_obj["export_id"]
m = ["Started Server Side for XML format", "export_id={e!r}"]
m = ", ".join(m)
m = m.format(e=export_id)
self.log.info(m)
return export_id
def answers_sse_start_csv(
self, flatten=False, headers=True, hashes=False, **kwargs
):
"""Start up a server side export for CSV format and get an export_id.
Args:
flatten (:obj:`bool`, optional):
Flatten CSV rows if possible (single line in each cell)
Defaults to: False.
headers (:obj:`bool`, optional):
Include column headers.
Defaults to: True.
hashes (:obj:`bool`, optional):
Have the API include the hashes of rows values
Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Returns:
:obj:`str`:
"""
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["export_flag"] = True
cmd_args["export_format"] = 3 if flatten else 0
cmd_args["export_hide_csv_header_flag"] = False if headers else True
cmd_args["include_hashes_flag"] = hashes
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
m = ["Received Server Side Export start response for CSV format", "code={c}"]
m = ", ".join(m)
m = m.format(c=result.status_code)
self.log.debug(m)
export_id = result.object_obj["export_id"]
m = ["Started Server Side for CSV format", "export_id={e!r}"]
m = ", ".join(m)
m = m.format(e=export_id)
self.log.info(m)
return export_id
def answers_sse_start_cef(self, leading="", trailing="", **kwargs):
"""Start up a server side export for CEF format and get an export_id.
Args:
leading (:obj:`str`, optional):
Prepend this text to each line.
Defaults to: "".
trailing (:obj:`str`, optional):
Append this text to each line.
Defaults to: "".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_get_result_data`.
Returns:
:obj:`str`:
"""
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = self.obj
cmd_args["export_flag"] = True
cmd_args["export_format"] = 2
if leading:
cmd_args["export_leading_text"] = leading
if trailing:
cmd_args["export_trailing_text"] = trailing
result = self.adapter.cmd_get_result_data(**cmd_args)
self._last_result = result
m = ["Received Server Side Export start response for CEF format", "code={c}"]
m = ", ".join(m)
m = m.format(c=result.status_code)
self.log.debug(m)
export_id = result.object_obj["export_id"]
m = ["Started Server Side for CEF format", "export_id={e!r}"]
m = ", ".join(m)
m = m.format(e=export_id)
self.log.info(m)
return export_id
def answers_sse_get_status(self, export_id, **kwargs):
"""Get the status for this questions server side export.
Args:
export_id (:obj:`str`):
An export id returned from :meth:`sse_start`.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapters.ApiClient`.
Returns:
:obj:`dict`:
"""
client_args = {}
client_args.update(kwargs)
client_args["method"] = "get"
client_args["path"] = "export/{export_id}.status".format(export_id=export_id)
client_args["data"] = ""
r = self.adapter.api_client(**client_args)
status_split = [x.strip().lower() for x in r.text.split(".") if x.strip()]
status = dict(zip(["status", "progress"], status_split))
status["export_id"] = export_id
m = [
"Received SSE status response: path={r.request.url!r}",
"code={r.status_code}",
"status={status}",
]
m = ", ".join(m)
m = m.format(r=r, status=status)
self.log.debug(m)
return status
def answers_sse_poll(self, export_id, poll_sleep=5, max_poll_count=0, **kwargs):
"""Poll a server side export for completion.
Args:
export_id (:obj:`str`):
An export id returned from :meth:`answers_sse_start_xml` or
:meth:`answers_sse_start_csv` or :meth:`answers_sse_start_cef`.
poll_sleep (:obj:`int`, optional):
Check for answers every N seconds.
Defaults to: 5.
max_poll_count (:obj:`int`, optional):
If not 0, only poll N times.
Defaults to: 0.
**kwargs:
rest of kwargs:
Passed to :meth:`answers_sse_get_status`.
Returns:
:obj:`str`:
"""
self._check_id()
start = datetime.datetime.utcnow()
poll_count = 0
sse_args = {}
sse_args.update(kwargs)
sse_args["export_id"] = export_id
status = self.answers_sse_get_status(**sse_args)
while True:
poll_count += 1
if max_poll_count and poll_count >= max_poll_count:
m = [
"Server Side Export completed",
"reached max poll count {c}",
"status {status}",
]
m = ", ".join(m)
m = m.format(c=max_poll_count, status=status)
self.log.info(m)
break
if status["status"] == "completed":
m = "Server Side Export completed: {status}"
m = m.format(status=status)
self.log.info(m)
break
if status["status"] == "failed":
m = "Server Side Export failed: {status}"
m = m.format(status=status)
raise exceptions.ModuleError(m)
time.sleep(poll_sleep)
status = self.answers_sse_get_status(**sse_args)
end = datetime.datetime.utcnow()
elapsed = end - start
m = "Finished polling for Server Side Export in {dt}, {status}"
m = m.format(dt=elapsed, status=status)
self.log.info(m)
return status
def answers_sse_get_data(
self, export_id, return_dict=False, return_obj=True, **kwargs
):
"""Get the answers for this question in XML format using server side export.
Args:
export_id (:obj:`str`):
An export id returned from :meth:`sse_start`.
return_dict (:obj:`bool`, optional):
If export_id is an XML format, return a dictionary object.
Defaults to: False.
return_obj (:obj:`bool`, optional):
If export_id is XML format, return a ResultSet object.
Defaults to: True.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapters.ApiClient`.
Notes:
If export_id is not XML format or return_dict and return_obj False,
return the raw text as is.
Returns:
:obj:`tantrum.api_models.ApiModel` or :obj:`dict` or :obj:`str`:
If return_obj = True returns ResultSetList ApiModel object.
If return_dict = True returns dict.
Otherwise, return str.
"""
self._check_id()
client_args = {}
client_args.update(kwargs)
client_args["method"] = "get"
client_args["path"] = "export/{export_id}.gz".format(export_id=export_id)
client_args["data"] = ""
r = self.adapter.api_client(**client_args)
m = ["Received SSE data response", "code: {r.status_code}", "export_id: {e!r}"]
m = ", ".join(m)
m = m.format(r=r, e=export_id)
self.log.info(m)
data = r.text
if "xml" in export_id and (return_dict or return_obj):
result = results.Soap(
api_objects=self.api_objects,
response_body=r.text,
request_body=r.request.body,
method=r.request.method,
url=r.request.url,
status_code=r.status_code,
origin=r,
lvl=self.log.level,
)
data = "<{r}>{data}</{r}>".format(data=data, r="result_set")
src = "SSE get data response"
data = result.str_to_obj(text=data, src=src, try_int=False)
if return_dict:
return data
data = self.api_objects.ResultSet(**data["result_set"])
data = self.api_objects.ResultSetList(*[data])
return data
return data
class ParsedQuestion(Workflow):
def __str__(self):
"""Show object info.
Returns:
(:obj:`str`)
"""
ctmpl = "{c.__module__}.{c.__name__}".format
bits = [
"parse matches: {c}".format(c=len(self.obj)),
"has exact match: {em}".format(em=True if self.get_canonical else False),
]
bits = "({})".format(", ".join(bits))
cls = ctmpl(c=self.__class__)
return "{cls}{bits}".format(cls=cls, bits=bits)
@property
def get_canonical(self):
"""Return any parse result that is an exact match."""
for x in self.obj:
if x.question.from_canonical_text:
return x
return None
def map_select_params(self, pq):
"""Map parameters to sensors on the left hand side of the question."""
param_cls = self.api_objects.Parameter
param_values = pq.parameter_values
selects = pq.question.selects or []
for select in selects:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
sensor = select.sensor
if not sensor.parameter_definition:
m = "No parameters defined on sensor {s}, going to next"
m = m.format(s=sensor)
self.log.debug(m)
continue
sensor.source_id = sensor.id
sensor.id = None
sensor.parameters = self.api_objects.ParameterList()
params = json.loads(sensor.parameter_definition)["parameters"]
for param in params:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
key = "||{}||".format(param["key"])
value = param_values.pop(0)
sensor.parameters.append(param_cls(key=key, value=value))
m = "Mapped parameter {k!r}='{v}' for {s}"
m = m.format(k=key, v=value, s=sensor)
self.log.debug(m)
def map_group_params(self, pq, group):
"""Map parameters to filters on the right hand side of the question."""
param_cls = self.api_objects.Parameter
group_sensors = pq.question_group_sensors
param_values = pq.parameter_values
if not group:
m = "Empty group, not mapping group params"
self.log.debug(m)
return
if not group_sensors:
m = "No question group sensors defined, not mapping group params"
self.log.debug(m)
return
for group_filter in group.filters or []:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
m = "Now mapping parameters for group filter: {gf}"
m = m.format(gf=group_filter)
self.log.debug(m)
sensor_id = group_filter.sensor.id
sensor = [x for x in group_sensors if x.id == sensor_id][0]
if not sensor.parameter_definition:
m = "No parameters defined on sensor {s}, going to next"
m = m.format(s=sensor)
self.log.debug(m)
continue
sensor.source_id = sensor.id
sensor.id = None
sensor.parameters = self.api_objects.ParameterList()
params = json.loads(sensor.parameter_definition)["parameters"]
for param in params:
if not param_values:
m = "No more parameter values left to map"
self.log.debug(m)
return
key = "||{}||".format(param["key"])
value = param_values.pop(0)
sensor.parameters.append(param_cls(key=key, value=value))
m = "Mapped parameter {k!r}='{v}' for {s}"
m = m.format(k=key, v=value, s=sensor)
self.log.debug(m)
group_filter.sensor = sensor
for sub_group in group.sub_groups or []:
self.map_group_params(pq, sub_group)
@property
def result_indexes(self):
"""Get the parse result indices in str form."""
pq_tmpl = " index: {idx}, result: {text!r}, params: {params}, exact: {exact}"
pq_tmpl = pq_tmpl.format
pq_list = []
for idx, pq in enumerate(self.obj):
pq_txt = pq_tmpl(
idx=idx,
text=pq.question_text,
params=list(pq.parameter_values or []),
exact=bool(pq.question.from_canonical_text),
)
pq_list.append(pq_txt)
return "\n".join(pq_list)
def pick(self, index=None, use_exact_match=True, use_first=False, **kwargs):
"""Pick a parse result and ask it.
Args:
index (:obj:`int`, optional):
Index of parse result to ask.
Defaults to: None.
use_exact_match (:obj:`bool`, optional):
If index is None and one of the parse results is an exact match,
pick and ask it.
Defaults to: True.
use_first (:obj:`bool`, optional):
If index is None and there is no exact match,
                pick the first parse result and ask it.
                Defaults to: False.
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_add_parsed_question`.
Returns:
:obj:`Question`
"""
        if index is not None:
pq = self.obj[index]
m = "Picking parsed question based on index {index}: {pq.question}"
m = m.format(index=index, pq=pq)
self.log.info(m)
elif use_exact_match and self.get_canonical:
pq = self.get_canonical
m = "Picking parsed question based on exact match: {pq.question}"
m = m.format(pq=pq)
self.log.info(m)
elif use_first:
pq = self.obj[0]
m = "Picking first matching parsed question: {pq.question}"
m = m.format(pq=pq)
self.log.info(m)
else:
err = [
"No index supplied",
"no exact matching parsed result",
"and use_first is False!",
]
err = ", ".join(err)
err = [err, "Supply an index of a parsed result:", self.result_indexes]
err = "\n".join(err)
raise exceptions.ModuleError(err)
self.map_select_params(pq=pq)
m = "Finished mapping parameters for selects, parameter values left: {pv!r}"
m = m.format(pv=pq.parameter_values)
self.log.debug(m)
self.map_group_params(pq=pq, group=pq.question.group)
m = "Finished mapping parameters for groups, parameter values left: {pv!r}"
m = m.format(pv=pq.parameter_values)
self.log.debug(m)
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["obj"] = pq
result = self.adapter.cmd_add_parsed_question(**cmd_args)
result_obj = result()
workflow = Question(
adapter=self.adapter, obj=result_obj, lvl=self.log.level, result=result
)
m = "Added parsed question: {w}"
m = m.format(w=workflow)
self.log.info(m)
workflow.refetch()
return workflow
@classmethod
def parse(cls, adapter, text, lvl="info", **kwargs):
"""Get parse results of text from API.
Args:
adapter (:obj:`tantrum.adapters.Adapter`):
Adapter to use for this workflow.
text (:obj:`str`):
Question text to parse.
lvl (:obj:`str`, optional):
Logging level.
Defaults to: "info".
**kwargs:
rest of kwargs:
Passed to :meth:`tantrum.adapter.Adapter.cmd_parse_question`.
Returns:
:obj:`ParsedQuestion`
"""
log = utils.logs.get_obj_log(obj=cls, lvl=lvl)
cmd_args = {}
cmd_args.update(kwargs)
cmd_args["text"] = text
result = adapter.cmd_parse_question(**cmd_args)
result_obj = result()
if result_obj is None:
m = "No parse results returned for text: {t!r}"
m = m.format(t=text)
raise exceptions.ModuleError(m)
any_canonical = any([x.question.from_canonical_text for x in result_obj])
m = "Received {n} parse results (any exact match: {ac})"
m = m.format(n=len(result_obj), ac=any_canonical)
log.info(m)
return cls(adapter=adapter, obj=result_obj, lvl=lvl, result=result)
OPERATOR_MAP = {
"less": {"op": "Less", "tmpl": "{value}"},
"lessequal": {"op": "LessEqual", "tmpl": "{value}"},
"greater": {"op": "Greater", "tmpl": "{value}"},
"greaterequal": {"op": "GreaterEqual", "tmpl": "{value}"},
"equal": {"op": "Equal", "tmpl": "{value}"},
"regex": {"op": "RegexMatch", "tmpl": "{value}"},
"startswith": {"op": "RegexMatch", "tmpl": ".*{value}"},
"endswith": {"op": "RegexMatch", "tmpl": "{value}.*"},
"contains": {"op": "RegexMatch", "tmpl": ".*{value}.*"},
"hash": {"op": "HashMatch", "tmpl": "{value}"},
}
TYPE_MAP = {
"Hash": 0,
# SENSOR_RESULT_TYPE_STRING
"String": 1,
# SENSOR_RESULT_TYPE_VERSION
"Version": 2,
# SENSOR_RESULT_TYPE_NUMERIC
"NumericDecimal": 3,
# SENSOR_RESULT_TYPE_DATE_BES
"BESDate": 4,
# SENSOR_RESULT_TYPE_IPADDRESS
"IPAddress": 5,
# SENSOR_RESULT_TYPE_DATE_WMI
"WMIDate": 6,
# e.g. "2 years, 3 months, 18 days, 4 hours, 22 minutes:
# 'TimeDiff', and 3.67 seconds" or "4.2 hours"
# (numeric + "Y|MO|W|D|H|M|S" units)
"TimeDiff": 7,
# e.g. 125MB or 23K or 34.2Gig (numeric + B|K|M|G|T units)
"DataSize": 8,
"NumericInteger": 9,
"VariousDate": 10,
"RegexMatch": 11,
"LastOperatorType": 12,
}
PCT_FMT = "{0:.0f}%".format
def get_operator_map(operator):
"""Validate operator against :data:`OPERATOR_MAP`."""
if operator in OPERATOR_MAP:
return OPERATOR_MAP[operator]
m = "Operator {o!r} is invalid, must be one of {vo}"
m = m.format(o=operator, vo=list(OPERATOR_MAP.keys()))
raise exceptions.ModuleError(m)
def get_type_map(type):
"""Validate type against :data:`TYPE_MAP`."""
if type in TYPE_MAP:
return TYPE_MAP[type]
m = "Type {o!r} is invalid, must be one of {vo}"
m = m.format(o=type, vo=list(TYPE_MAP.keys()))
raise exceptions.ModuleError(m)
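# Small self-contained check of the lookup helpers above; "chrome" is just an
# example value. (Because of the relative imports, this only runs when the
# module is executed as part of the tantrum package.)
if __name__ == "__main__":
    op = get_operator_map("contains")
    print(op["op"], op["tmpl"].format(value="chrome"))  # RegexMatch .*chrome.*
    print(get_type_map("String"))  # 1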
| 2.359375 | 2 |
vision/visualization.py | yihui-he2020/epipolar-transformers | 360 | 12792888 | import os.path, sys, re, cv2, glob, numpy as np
import os.path as osp
from tqdm import tqdm
from IPython import embed
import scipy
import matplotlib.pyplot as plt
from skimage.transform import resize
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import auc
from matplotlib.patches import Circle
import torch
# from .ipv_vis import *
from vision.triangulation import triangulate
from vision.multiview import pix2coord, coord2pix
from core import cfg
from vision.multiview import de_normalize
from vision.visualizer_human import draw_2d_pose
from vision.visualizer_hand import plot_hand_3d
class Cursor(object):
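    # Interactive matplotlib helper: a click on the sample axes moves the
    # crosshair and redraws per-weight heatmaps on the companion axes. The
    # handler relies on module-level names (ref_img, weights, heatmapat, cmap,
    # axs) that are expected to be defined by the calling script.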
def __init__(self, sample_ax, draw_ax):
self.sample_ax = sample_ax
self.draw_ax = draw_ax
self.lx = sample_ax.axhline(color='k') # the horiz line
self.ly = sample_ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
def mouse_down(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
# update the line positions
self.lx.set_ydata(y)
self.ly.set_xdata(x)
self.txt.set_text('x=%1.1f, y=%1.1f' % (x, y))
self.sample_ax.figure.canvas.draw()
for i in self.draw_ax:
i.clear()
i.figure.canvas.draw()
self.sample_ax.imshow(ref_img)
a, b, heatmap = heatmapat(x, y, weights[0])
im1= self.draw_ax[1].imshow(heatmap, cmap=cmap.hot)
self.draw_ax[1].set_title("%f~%f" % (a, b))
a, b, heatmap = heatmapat(x, y, weights[1])
im2= self.draw_ax[2].imshow(heatmap, cmap=cmap.hot)
self.draw_ax[2].set_title("%f~%f" % (a, b))
a, b, heatmap = heatmapat(x, y, weights[2])
im3= self.draw_ax[3].imshow(heatmap, cmap=cmap.hot)
self.draw_ax[3].set_title("%f~%f" % (a, b))
# fig.colorbar(im2, ax=axs[0, 1])
circ = Circle((x, y),2,color='r')
axs[0, 0].add_patch(circ)
plt.show()
class Cursor_for_epipolar_line(object):
def __init__(self, sample_ax, draw_ax, sample_locs, H, W, axs, img2, outs):
self.sample_ax = sample_ax
self.draw_ax = draw_ax
self.lx = sample_ax.axhline(color='k') # the horiz line
self.ly = sample_ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
self.sample_locs = sample_locs
self.H = H
self.W = W
self.axs = axs
self.img2 = img2
self.outs = outs
def mouse_down(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
self.lx.set_ydata(y)
self.ly.set_xdata(x)
# pr_cost_volume = self.depth[:, int(y), int(x)]
# cost_volume_xs = np.arange(0, pr_cost_volume.shape[0])
# xx, yy = self.corr_pos_pred[int(y)][int(x)]
self.txt.set_text('x=%1.1f, y=%1.1f' % (x, y))
self.sample_ax.figure.canvas.draw()
for i in self.draw_ax:
i.clear()
i.figure.canvas.draw()
self.axs[1, 0].clear()
self.axs[1, 0].imshow(self.img2)
inty, intx = int(y+0.5), int(x+0.5)
print(self.sample_locs[:, inty, intx])
_, _, _, debugsample_locs, intersections, mask, valid_intersections, start, vec = self.outs
print(intx, inty)
print('debugsample_locs', debugsample_locs[:, 0, inty, intx])
print('intersections', intersections.view(-1, 64, 64, 4, 2)[0, inty, intx])
print('mask', mask.view(-1, 64, 64, 4)[0, inty, intx])
print('valid_intersections', valid_intersections.view(-1, 64, 64, 2, 2)[0, inty, intx])
print('start', start.view(-1, 64, 64, 2)[0, inty, intx])
print('vec', vec.view(-1, 64, 64, 2)[0, inty, intx])
for i in range(64):
# pos = self.sample_locs[i][int(y+0.5)][int(x+0.5)]
pos = debugsample_locs[i, 0, inty, intx].cpu().numpy().copy()
depos = de_normalize(pos, self.H, self.W)
# circ = Circle((int(depos[0]), int(depos[1])),1,color='b', alpha=0.5)
circ = Circle((depos[0], depos[1]), 1 , color='b', alpha=0.5)
self.axs[1, 0].add_patch(circ)
# circ = Circle((xx, yy),2,color='r')
self.axs[1, 0].add_patch(circ)
plt.show()
class Cursor_for_corrspondence(object):
def __init__(self, sample_ax, draw_ax, depth, corr_pos_pred, sample_locs, H, W):
self.sample_ax = sample_ax
self.draw_ax = draw_ax
self.lx = sample_ax.axhline(color='k') # the horiz line
self.ly = sample_ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = sample_ax.text(0, 0, '', va="bottom", ha="left")
self.depth = depth
self.corr_pos_pred = corr_pos_pred
self.sample_locs = sample_locs
self.H = H
self.W = W
def mouse_down(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
self.lx.set_ydata(y)
self.ly.set_xdata(x)
pr_cost_volume = self.depth[:, int(y), int(x)]
cost_volume_xs = np.arange(0, pr_cost_volume.shape[0])
xx, yy = self.corr_pos_pred[int(y)][int(x)]
self.txt.set_text('x=%1.1f, y=%1.1f depth=%.5f\nCorr xx=%d, yy=%d' % (x, y, np.max(pr_cost_volume), xx, yy))
self.sample_ax.figure.canvas.draw()
for i in self.draw_ax:
i.clear()
i.figure.canvas.draw()
axs[1, 0].clear()
axs[1, 0].imshow(img2)
for i in range(64):
pos = sample_locs[i][int(y)][int(x)]
depos = de_normalize(pos, H, W)
circ = Circle((int(depos[0]), int(depos[1])),1,color='b', alpha=0.5)
axs[1, 0].add_patch(circ)
circ = Circle((xx, yy),2,color='r')
axs[1, 0].add_patch(circ)
plt.show()
def toimg(x):
return x.squeeze().numpy().transpose([1,2,0])
def de_transform(img):
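    # Undo the standard ImageNet mean/std normalization channel-wise, in place
    # (mean 0.485/0.456/0.406, std 0.229/0.224/0.225), and return the image.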
img[..., 0, :, :] = img[..., 0, :, :] * 0.229 + 0.485
img[..., 1, :, :] = img[..., 1, :, :] * 0.224 + 0.456
img[..., 2, :, :] = img[..., 2, :, :] * 0.225 + 0.406
return img
def draw_auc(predictions, pck, auc_path):
max_threshold = 20
thresholds = np.linspace(0, max_threshold, num=20)
pck = np.sum(pck, axis=0)
auc_value = auc(thresholds, pck) / max_threshold
print('AUC: ', auc_value)
plt.plot(thresholds, pck, 'r')
plt.axis([0, 20, 0, 1])
plt.savefig(auc_path)
plt.show()
def get_point_cloud(img1, img2, KRT1, KRT2, RT1, RT2, corr_pos, score):
"""
KRT:
corr_pos: feat_h x feat_w x 2
score: sample_size x feat_h x feat_w
"""
y = np.arange(0, img1.shape[0]) # 128
x = np.arange(0, img1.shape[1]) # 84
grid_x, grid_y = np.meshgrid(x, y)
grid_y = pix2coord(grid_y, cfg.BACKBONE.DOWNSAMPLE)
grid_y = grid_y * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
grid_x = pix2coord(grid_x, cfg.BACKBONE.DOWNSAMPLE)
grid_x = grid_x * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
# 2668 * 4076
grid_corr = pix2coord(corr_pos, cfg.BACKBONE.DOWNSAMPLE)
grid_corr = grid_corr * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
grid = np.stack((grid_x, grid_y))
grid = grid.reshape(2, -1)
grid_corr = grid_corr.reshape(-1, 2).transpose()
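    # NOTE: scipy.misc.imresize was removed in SciPy 1.3, so this import needs
    # an older SciPy (skimage.transform.resize, imported at the top of this
    # file, is a possible alternative).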
from scipy.misc import imresize
sample_size, fh, fw = score.shape
resized_img2 = imresize(img2, (fh, fw))
max_score = np.max(score.reshape(sample_size, -1), axis=0).reshape(fh, fw)
select_pos1 = max_score > 0.02
print('->', np.sum(select_pos1))
select_pos2 = np.sum(resized_img2, axis=2) > 20
print('->',np.sum(select_pos2))
select_pos3 = np.sum(corr_pos, axis=2) > -50
print('->',np.sum(select_pos2))
select_pos = np.logical_and(select_pos3, select_pos2).reshape(-1)
# select_pos = select_pos3
print('-->',np.sum(select_pos))
select_pos = select_pos.reshape(-1)
select_img_point = resized_img2.reshape(fh*fw, 3)[select_pos, :]
print(select_pos.shape)
print('total pos', sum(select_pos))
p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr[:,select_pos], grid[:,select_pos])
# p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr, grid)
# depth = np.ones((fh, fw)) * np.min((KRT1@p3D)[2, :])
depth = np.ones((fh, fw)) * np.max((KRT1@p3D)[2, :])
cnt = 0
for i in range(fh):
for j in range(fw):
if not select_pos[i*fw+j]:
continue
p_homo = (KRT1 @ p3D[:, cnt])
p = p_homo / p_homo[2]
depth[int(coord2pix(p[1], 32)), int(coord2pix(p[0], 32))] = p_homo[2]
cnt += 1
p3D /= p3D[3]
p3D = p3D[:3].squeeze()
depth = (depth - depth.min()) / (depth.max() - depth.min()) + 1
depth = np.log(depth)
depth = (depth - depth.min()) / (depth.max() - depth.min())
#######vis
fig = plt.figure(1)
ax1_1 = fig.add_subplot(331)
ax1_1.imshow(img1)
ax1_2 = fig.add_subplot(332)
ax1_2.imshow(img2)
w = corr_pos[:, :, 0]
w = (w - w.min()) / (w.max() - w.min())
ax1_1 = fig.add_subplot(334)
ax1_1.imshow(w)
w = corr_pos[:, :, 1]
w = (w - w.min()) / (w.max() - w.min())
ax1_1 = fig.add_subplot(335)
ax1_1.imshow(w)
# w1 = corr_pos[:, :, 0]
# w1 = (w1 - w1.min()) / (w1.max() - w1.min())
# w2 = corr_pos[:, :, 1]
# w2 = (w2 - w2.min()) / (w2.max() - w2.min())
# W = np.stack([w1, w2, np.ones(w2.shape)], axis=0)
# ax2_1 = fig.add_subplot(336)
# ax2_1.imshow(W.transpose(1,2,0))
ax1_1 = fig.add_subplot(336)
ax1_1.imshow(depth)
w = select_pos1.reshape(fh,fw)
# w = (w - w.min()) / (w.max() - w.min())
ax2_1 = fig.add_subplot(337)
ax2_1.imshow(w)
w = select_pos2.reshape(fh,fw)
# w = (w - w.min()) / (w.max() - w.min())
ax2_1 = fig.add_subplot(338)
ax2_1.imshow(w)
w = select_pos.reshape(fh,fw)
# w = (w - w.min()) / (w.max() - w.min())
ax2_1 = fig.add_subplot(339)
ax2_1.imshow(w)
####### end vis
# w = select_img_point[:, :10000].reshape(-1, 100, 100).transpose(1,2,0)
# w = (w - w.min()) / (w.max() - w.min())
# ax2_1 = fig.add_subplot(326)
# ax2_1.imshow(w)
plt.show()
return p3D, select_img_point
def visualization(cfg):
if cfg.VIS.POINTCLOUD and 'h36m' not in cfg.OUTPUT_DIR:
output_dir = cfg.OUTPUT_DIR
dataset_names = cfg.DATASETS.TEST
predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
print(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
cnt = 0
# for inputs, pred in predictions:
while True:
inputs, pred = predictions[cnt]
heatmap = inputs.get('heatmap')
points2d = inputs.get('points-2d')
KRT = inputs.get('KRT')[0]
RT = inputs.get('RT')[0]
image_path = inputs.get('img-path')
print('image path:', image_path)
img = resize(plt.imread(image_path), (128, 84, 3))
other_KRT = inputs.get('other_KRT')[0]
other_RT = inputs.get('other_RT')[0]
other_image_path = inputs.get('other_img_path')[0]
print('other image path', other_image_path)
other_img = resize(plt.imread(other_image_path), (128, 84, 3))
heatmap_pred = pred.get('heatmap_pred')
score_pred = pred.get('score_pred')
corr_pos_pred = pred.get('corr_pos')
sim = pred.get('depth')
import pdb; pdb.set_trace()
# p3D, img_pt = get_point_cloud(img, other_img, KRT, other_KRT, RT, other_RT, corr_pos_pred, sim)
output = {
# 'p3D': p3D,
# 'img_pt': img_pt,
'img1': img,
'img2' : other_img,
'img1_path': image_path,
'img2_path': other_image_path,
'RT' : RT,
'other_RT': other_RT,
'corr_pos_pred': corr_pos_pred,
'depth': sim,
}
if 'sample_locs' in pred:
sample_locs = pred.get('sample_locs')
output['sample_locs'] = sample_locs
else:
print('No sample_locs!!!!!')
import pickle
with open('baseline_' + "output_{:d}.pkl".format(cnt),"wb") as f:
pickle.dump(output, f)
print('saved! to ', 'baseline_' + "output_{:d}.pkl".format(cnt))
cnt += 1
# break
# ipv_prepare(ipv)
# ipv_draw_point_cloud(ipv, p3D, colors=img_pt, pt_size=1)
# ipv.xyzlim(500)
# ipv.show()
if cfg.VIS.POINTCLOUD and 'h36m' in cfg.OUTPUT_DIR:
output_dir = cfg.OUTPUT_DIR
dataset_names = cfg.DATASETS.TEST
baseline = "baseline" in cfg.VIS.SAVE_PRED_NAME
name = "_baseline" if baseline else ""
predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions"+name+".pth"))
print(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions"+name+".pth"))
cnt = 0
# for inputs, pred in predictions:
while True:
inputs, pred = predictions[cnt]
print('input keys:')
print(inputs.keys())
print('pred keys:')
print(pred.keys())
heatmap = inputs.get('heatmap')
other_heatmap = inputs.get('other_heatmap')
points2d = inputs.get('points-2d')
KRT = inputs.get('KRT')[0]
camera = inputs.get('camera')
other_camera = inputs.get('other_camera')
image_path = inputs.get('img-path')[0]
print(image_path)
# image_path = 'images.zip@'
image_file = osp.join("datasets", 'h36m', 'images.zip@', 'images',
image_path)
# from utils import zipreader
# data_numpy = zipreader.imread(
# image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# img = data_numpy[:1000]
# assert img.shape == (1000, 1000, 3), img.shape
img = inputs.get('img')
other_KRT = inputs.get('other_KRT')[0]
# other_RT = inputs.get('other_RT')[0]
other_image_path = inputs.get('other_img-path')[0]
print('other image path', other_image_path)
other_image_file = osp.join("datasets", 'h36m', 'images.zip@', 'images',
other_image_path)
other_img = inputs.get('other_img')
heatmap_pred = pred.get('heatmap_pred')
score_pred = pred.get('score_pred')
corr_pos_pred = pred.get('corr_pos')
sim = pred.get('depth')
batch_locs = pred.get('batch_locs')
# p3D, img_pt = get_point_cloud(img, other_img, KRT, other_KRT, RT, other_RT, corr_pos_pred, sim)
output = {
# 'p3D': p3D,
# 'img_pt': img_pt,
'img1': img,
'img2' : other_img,
'img1_path': image_file,
'img2_path': other_image_file,
# 'RT' : RT,
# 'other_RT': other_RT,
'heatmap': heatmap,
'other_heatmap': other_heatmap,
'points-2d': points2d,
'corr_pos_pred': corr_pos_pred,
'depth': sim,
'heatmap_pred': heatmap_pred,
'batch_locs': batch_locs,
'camera': camera,
'other_camera': other_camera,
}
if 'sample_locs' in pred:
sample_locs = pred.get('sample_locs')
output['sample_locs'] = sample_locs
else:
print('No sample_locs!!!!!')
import pickle
with open(cfg.OUTPUT_DIR + "/visualizations/h36m/output{}_{:d}.pkl".format(name, cnt),"wb") as f:
pickle.dump(output,f)
print('saved!')
cnt += 1
# depth = output['depth']
# corr_pos_pred = output['corr_pos_pred']
# sample_locs = output['sample_locs']
if cfg.EPIPOLAR.VIS:
if 'h36m' in cfg.OUTPUT_DIR:
from data.build import make_data_loader
if cfg.VIS.MULTIVIEWH36M:
data_loader = make_data_loader(cfg, is_train=True, force_shuffle=True)
elif cfg.VIS.H36M:
from data.datasets.joints_dataset import JointsDataset
from data.datasets.multiview_h36m import MultiViewH36M
data_loader = MultiViewH36M('datasets', 'validation', True)
print(len(data_loader))
for i in tqdm(range(len(data_loader))):
data_loader.__getitem__(i)
data_loader = make_data_loader(cfg, is_train=False)[0]
# data_loader = make_data_loader(cfg, is_train=True, force_shuffle=True)
# data_loader = make_data_loader(cfg, is_train=False, force_shuffle=True)[0]
# for idx, batchdata in enumerate(tqdm(data_loader)):
if not cfg.VIS.MULTIVIEWH36M and not cfg.VIS.H36M:
cpu = lambda x: x.cpu().numpy() if isinstance(x, torch.Tensor) else x
from modeling.layers.epipolar import Epipolar
imgmodel = Epipolar()
debugmodel = Epipolar(debug=True)
KRT0 = batchdata['KRT'].squeeze()[None, 0]
KRT1 = batchdata['other_KRT'].squeeze()[None, 0]
# batchdata['img']: 1 x 4 x 3 x 256 x 256
            input_img = batchdata['img'].squeeze()[None, 0, :, ::4, ::4]
            input_other_img = batchdata['other_img'].squeeze()[None, 0, :, ::4, ::4]
outs = debugmodel(input_img, input_other_img, KRT0, KRT1)
H, W = input_img.shape[-2:]
print(H, W)
orig_img = de_transform(cpu(batchdata['img'].squeeze()[None, ...])[0][0])
orig_other_img = de_transform(cpu(batchdata['other_img'].squeeze()[None, ...])[0][0])
# outs = imgmodel(batchdata['heatmap'][:, 0], batchdata['heatmap'][:, 1], batchdata['KRT'][:, 0], batchdata['other_KRT'][:, 1])
out, sample_locs = imgmodel.imgforward_withdepth(input_img, input_other_img, KRT0, KRT1, outs[2][0])
if not cfg.VIS.CURSOR:
                # show_img = de_transform(cpu(batchdata['img'][:, 0, :, ::4, ::4])[0][0])
                # show_other_img = de_transform(cpu(batchdata['other_img'][:, 0, :, ::4, ::4])[0][0])
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax1.imshow(orig_img[::-1].transpose((1,2,0)))
ax2.imshow(orig_other_img[::-1].transpose((1,2,0)))
ax3.imshow(cpu(batchdata['heatmap'])[0][0].sum(0))
ax4.imshow(cpu(batchdata['other_heatmap'])[0][0].sum(0))
# ax5.imshow(cpu(outs[0])[0].sum(0))
print(out.shape)
out_img = de_transform(cpu(out)[0, ::-1].transpose((1,2,0)))
ax5.imshow(out_img)
plt.show()
else:
print(sample_locs.shape) # 64 x 1 x H x W x 2
sample_locs = sample_locs[:, 0, :, :, :]
# import pdb; pdb.set_trace()
fig, axs = plt.subplots(2, 2)
cus = Cursor_for_epipolar_line(axs[0,0], [axs[0,1], axs[1,0], axs[1,1]], sample_locs, H, W, axs, \
cpu(input_other_img)[0, :, :, :][::-1].transpose((1,2,0)), outs)
axs[0, 0].imshow(cpu(input_img)[0, :, :, :][::-1].transpose((1,2,0)))
# prob_im = axs[1, 1].imshow(max_score)
fig.canvas.mpl_connect('button_press_event', cus.mouse_down)
plt.show()
return
output_dir = cfg.OUTPUT_DIR
dataset_names = cfg.DATASETS.TEST
predictions = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "predictions.pth"))
pck = torch.load(os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "pck.pth"))
if cfg.VIS.AUC:
auc_path = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_names[0], "auc.png")
draw_auc(predictions, pck, auc_path)
total = 0
for inputs, pred in predictions:
heatmap = inputs.get('heatmap')
points2d = inputs.get('points-2d')
hand_side = inputs.get('hand-side')
img = inputs.get('img')
can_3dpoints = inputs.get('can-points-3d')
normed_3d = inputs.get('normed-points-3d')
target_global = inputs.get('points-3d')
rot_mat = inputs.get('rotation')
R_global = inputs.get('R')
keypoint_scale = inputs.get('scale')
visibility = inputs.get('visibility')
unit = inputs.get('unit')
image_path = inputs.get('img-path')
can_pred = pred.get('can_pred')
normed_pred = pred.get('normed_pred')
heatmap_pred = pred.get('heatmap_pred')
im = plt.imread(image_path)
image = np.array(im, dtype=np.int)
if cfg.DATASETS.TASK == 'keypoint':
fig = plt.figure(1)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
#ax1.imshow(image)
print(heatmap.min(), heatmap.max())
print(heatmap_pred.min(), heatmap_pred.max())
ax2.imshow(heatmap.sum(0).T)
ax3.imshow(heatmap_pred.sum(0).T)
else:
total += 1
visibility = visibility.squeeze()[..., None]
can_3dpoints = can_3dpoints * visibility
can_pred = can_pred * visibility
normed_3d = normed_3d * visibility
normed_pred = normed_pred * visibility
delta = normed_pred - normed_3d
print(delta)
print('L1 err = ', np.abs(delta).sum())
print('L2 err = ', ((delta**2).sum(-1)**0.5).mean())
fig = plt.figure(1)
ax1_1 = fig.add_subplot(331)
ax1_2 = fig.add_subplot(332)
#ax1_3 = fig.add_subplot(333)
#ax2 = fig.add_subplot(222)
ax2_1 = fig.add_subplot(334, projection='3d')
ax2_2 = fig.add_subplot(335, projection='3d')
ax2_3 = fig.add_subplot(336, projection='3d')
ax3_1 = fig.add_subplot(337, projection='3d')
ax3_2 = fig.add_subplot(338, projection='3d')
ax3_3 = fig.add_subplot(333, projection='3d')
ax1_1.imshow(image)
ax1_2.imshow(image)
#ax1_3.imshow(image)
#ax2.imshow(image)
plot_hand_3d(can_3dpoints, visibility, ax2_1)
ax2_1.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(can_pred, visibility, ax2_2)
ax2_2.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(can_3dpoints, visibility, ax2_3)
plot_hand_3d(can_pred, visibility, ax2_3)
ax2_3.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
# ax3.set_xlim([-3, 3])
# ax3.set_ylim([-3, 3])
# ax3.set_zlim([-3, 3])
plot_hand_3d(normed_3d, visibility, ax3_1)
ax3_1.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(normed_pred, visibility, ax3_2)
ax3_2.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
plot_hand_3d(normed_3d, visibility, ax3_3)
plot_hand_3d(normed_pred, visibility, ax3_3)
ax3_3.view_init(azim=-90.0, elev=-90.0) # aligns the 3d coord with the camera view
# ax3.set_xlim([-3, 3])
# ax3.set_ylim([-3, 3])
# ax3.set_zlim([-3, 3])
plt.show()
print("show")
| 2.125 | 2 |
lib/python/treadmill_aws/cli/admin/cell/zk.py | Morgan-Stanley/treadmill-aws | 6 | 12792889 | <filename>lib/python/treadmill_aws/cli/admin/cell/zk.py<gh_stars>1-10
"""Admin module to manage cell ZooKeeper servers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from treadmill import admin
from treadmill import context
from treadmill import cli
from treadmill import exc
import treadmill_aws
from treadmill_aws import awscontext
from treadmill_aws import ec2client
from treadmill_aws import hostmanager
_LOGGER = logging.getLogger(__name__)
def init():
"""Admin Cell CLI module"""
@click.group(name='zk')
@click.option('--aws-region', required=False,
envvar='AWS_REGION',
callback=treadmill_aws.cli.handle_context_opt,
is_eager=True,
expose_value=False)
@click.option('--aws-profile', required=False,
envvar='AWS_PROFILE',
callback=treadmill_aws.cli.handle_context_opt,
is_eager=True,
expose_value=False)
@click.option('--ipa-certs', required=False,
default='/etc/ipa/ca.crt',
callback=treadmill_aws.cli.handle_context_opt,
is_eager=True,
expose_value=False)
@click.option('--ipa-domain', required=False,
envvar='IPA_DOMAIN',
callback=treadmill_aws.cli.handle_context_opt,
is_eager=True,
expose_value=False)
def zk_grp():
"""Manage cell ZooKeeper servers."""
@click.option('--cell', required=True, envvar='TREADMILL_CELL')
@click.option('--hostname', help='Hostname to create')
@click.option('--instance-profile', help='EC2 instance profile')
@click.option('--instance-type', help='EC2 instance type')
@click.option('--subnet', help='Subnet')
@click.option('--image', help='Image')
@click.option('--disk', help='Disk size (G)')
@zk_grp.command(name='create')
def create_cmd(cell, hostname, instance_profile, instance_type, subnet,
image, disk):
"""Create cell ZooKeeper server(s)."""
ec2_conn = awscontext.GLOBAL.ec2
ipa_client = awscontext.GLOBAL.ipaclient
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
masters = admin_cell.get(cell, dirty=True)['masters']
if hostname:
masters = [
master for master in masters if master['hostname'] == hostname
]
if not masters:
cli.bad_exit('%s not found in the cell config', hostname)
for master in masters:
try:
ec2_instance = ec2client.get_instance(
ec2_conn, hostnames=[master['hostname']]
)
cli.out('%s EC2 instance already exists', master['hostname'])
_LOGGER.debug(ec2_instance)
except exc.NotFoundError:
hostmanager.create_zk(
ec2_conn=ec2_conn,
ipa_client=ipa_client,
master=master,
subnet_id=subnet,
instance_type=instance_type,
instance_profile=instance_profile,
image_id=image,
disk=disk
)
cli.out('Created: %s', master['hostname'])
@click.option('--cell', required=True, envvar='TREADMILL_CELL')
@click.option('--hostname', help='Hostname to rotate', required=True)
@click.option('--instance-profile', help='EC2 instance profile')
@click.option('--instance-type', help='EC2 instance type')
@click.option('--subnet', help='Subnet')
@click.option('--image', help='Image')
@click.option('--disk', help='Disk size (G)')
@zk_grp.command(name='rotate')
def rotate_cmd(cell, hostname, instance_profile, instance_type, subnet,
image, disk):
"""Rotate cell ZooKeeper server."""
ec2_conn = awscontext.GLOBAL.ec2
ipa_client = awscontext.GLOBAL.ipaclient
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
masters = admin_cell.get(cell, dirty=True)['masters']
try:
master = next(
master for master in masters if master['hostname'] == hostname
)
except StopIteration:
cli.bad_exit('%s not found in the cell config', hostname)
try:
ec2_instance = ec2client.get_instance(
ec2_conn, hostnames=[hostname]
)
_LOGGER.debug(ec2_instance)
except exc.NotFoundError:
cli.bad_exit('%s EC2 instance does not exist', hostname)
hostmanager.delete_hosts(ec2_conn, ipa_client, [hostname])
cli.out('Deleted: %s', hostname)
# Copy subnet, type and image from the old instance unless we override.
hostmanager.create_zk(
ec2_conn=ec2_conn,
ipa_client=ipa_client,
master=master,
subnet_id=subnet or ec2_instance['SubnetId'],
instance_type=instance_type or ec2_instance['InstanceType'],
instance_profile=instance_profile,
image_id=image or ec2_instance['ImageId'],
disk=disk
)
cli.out('Created: %s', hostname)
del create_cmd
del rotate_cmd
return zk_grp
| 1.773438 | 2 |
vendors/pipelines.py | nl-hugo/grapy | 2 | 12792890 | <reponame>nl-hugo/grapy
# -*- coding: utf-8 -*-
import logging
from vendors.exporters import RestApiExporter
logger = logging.getLogger(__name__)
class WineVendorsPipeline(object):
def __init__(self, api_url, api_key, forbidden_names, accepted_volumes):
# set api properties
self.api_url = api_url
self.api_key = api_key
# set item validation properties
self.forbidden_names = forbidden_names
self.accepted_volumes = accepted_volumes
@classmethod
def from_crawler(cls, crawler):
# get api settings from settings.py
api_url = crawler.settings.get("DYNAMODB_ENDPOINT")
api_key = crawler.settings.get("DYNAMODB_API_KEY")
# get item validation settings from settings.py
forbidden_names = crawler.settings.getlist("FORBIDDEN_NAMES")
accepted_volumes = crawler.settings.getlist("ACCEPTED_VOLUMES")
return cls(api_url, api_key, forbidden_names, accepted_volumes)
def open_spider(self, spider):
logger.info("Spider opened, open exporter")
self.exporter = RestApiExporter(self.api_url, self.api_key)
self.exporter.start_exporting()
def close_spider(self, spider):
logger.info("Spider closed, close exporter")
self.exporter.finish_exporting()
def process_item(self, item, spider):
logger.info(f"Processing item {item}")
item.validate(self.forbidden_names, self.accepted_volumes)
self.exporter.export_item(item)
return item
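# Illustrative sketch, not part of the original module: the settings read in
# from_crawler above would live in the Scrapy project's settings.py. The names
# match the getters used above, but the values are assumptions for demonstration
# only.
#
#   DYNAMODB_ENDPOINT = "https://example.com/api/wines"   # hypothetical endpoint
#   DYNAMODB_API_KEY = "replace-me"                        # hypothetical key
#   FORBIDDEN_NAMES = ["gift", "voucher"]
#   ACCEPTED_VOLUMES = [0.375, 0.75, 1.5]
#   ITEM_PIPELINES = {"vendors.pipelines.WineVendorsPipeline": 300}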
| 1.984375 | 2 |
PythonCode/src/MinDivLP.py | KoslickiLab/DiversityOptimization | 0 | 12792891 | <reponame>KoslickiLab/DiversityOptimization<filename>PythonCode/src/MinDivLP.py
import numpy as np
from .sparse_nnls import sparse_nnls
from scipy.sparse import vstack
def MinDivLP(A_k_small, A_k_large, y_small, y_large, const, q, thresh=0.01):
""" MinDivLP
A basic, regularized version of the MinDivLP algorithm.
Call via:
x_star = MinDivLP(A_k_small, A_k_large, y_small, y_large, lambda, q)
Parameters are:
A_k_small is the[m_small, N] - sized sensing matrix
A_k_large is the[m_large, N] - sized sensing matrix
y_small is the data vector of size[m_small, 1]
y_large is the data vector of size[m_large, 1]
lambda is the regularization paramater (larger values indicated better
fit to constraints, at the cost potentially higher execution time and
may lead to over - fitting if set too large. Typical value is 10000 or
1000
q is the parameter used in the MinDivLP algorithm. Must have 0 < q < 1,
typically, q is set to something like q = 0.1
Returns:
x_star: an [N, 1] vector
"""
B = A_k_large > 0
epsilon = 0.0001
denom = np.power(B.T @ y_large, 1 - q) + epsilon
f = 1/denom
x_star = sparse_nnls(vstack((f.T, const * A_k_small)), np.append(0, const * y_small))
x_star = x_star / sum(x_star)
x_star[np.where(x_star < thresh)] = 0 # Set threshold
return x_star
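# Minimal usage sketch, not part of the original module: it builds small random
# sensing matrices and data vectors purely to show the expected call signature.
# The sizes and the const/q values below are illustrative assumptions only.
def _demo_min_div_lp():
    from scipy.sparse import csr_matrix
    rng = np.random.default_rng(0)
    N, m_small, m_large = 50, 10, 20
    A_k_small = csr_matrix(rng.random((m_small, N)))
    A_k_large = csr_matrix((rng.random((m_large, N)) > 0.7).astype(float))
    x_true = np.zeros((N, 1))
    x_true[:3] = 1.0 / 3  # sparse, normalised ground-truth vector
    y_small = A_k_small @ x_true
    y_large = A_k_large @ x_true
    return MinDivLP(A_k_small, A_k_large, y_small, y_large, const=1000, q=0.1)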
| 3.140625 | 3 |
pyblast/blast.py | tjomasc/pyblast | 1 | 12792892 | import subprocess
import base64
import json
import re
import hashlib
import tempfile
import os
from lxml import etree
import pprint
from math_tools import percentile
def get_blast_databases(exe_loc, db_loc):
"""
Look for BLAST databases using in given path and return a list
Args:
exe_loc: Location (directory) of the BLAST executables.
db_loc: Directory containing the BLAST DB.
Returns:
A dict containing lists of databases available.
# Test it!
>>> get_blast_databases('/Users/work/Projects/pyBlast/bin/', '/Users/work/Projects/pyBlast/db/')
{'protein': [{'location': '/Users/work/Projects/pyBlast/db/yeast.aa', 'title': 'yeast.aa'}], 'nucleotide': [{'location': '/Users/work/Projects/pyBlast/db/yeast.nt', 'title': 'yeast.nt'}]}
"""
    try:
        found = subprocess.check_output([exe_loc+'blastdbcmd', '-list', db_loc, '-list_outfmt', "'%f %p %t'"])
    except:
        found = ''
found = [entry.split(' ',2) for entry in re.split(r'\n', re.sub(r'\'', '', found)) if len(entry) > 1]
databases = {}
for f in found:
if f[1].lower() not in databases:
databases[f[1].lower()] = []
databases[f[1].lower()].append({'location': f[0], 'title': f[2]})
return databases
def get_blast_database_from_title(exe_loc, db_loc, title):
"""
For a give title get the actual name of the database (it may differ from title)
Args:
exe_loc: Location (directory) of the BLAST executables.
db_loc: Directory containing the BLAST DB.
title: The title of the BLAST database to search for.
Returns:
The location of the BLAST database.
"""
database_list = get_blast_databases(exe_loc, db_loc)
flat = []
for k,l in database_list.iteritems():
flat.extend(l)
for d in flat:
if title == d['title']:
return d['location']
return False
def get_sequence_from_database(exe_loc, db, seq_id):
"""
Extract a sequence from the given BLAST database and return it
Args:
exe_loc: Directory containing BLAST executables.
db: The database to get sequence from.
seq_id: The sequence ID of the sequence to get.
Returns:
The sequence if found else an empty string
# Test:
>>> get_sequence_from_database('/Users/work/Projects/pyBlast/bin/', '/Users/work/Projects/pyBlast/db/yeast.nt', 'gi|6226515|ref|NC_001224.1|')
"""
try:
found = subprocess.check_output([exe_loc+'blastdbcmd', '-db', db, '-entry', seq_id])
except:
found = ''
return found
def parse_extra_options(option_string, exclude=[]):
"""
Create an list of options filtering out excluded options
Args:
option_string: A string containing extra blast options.
exclude: Options to exclude from the generated list.
Returns:
A list of options except those in exclude
"""
options = re.findall(r'((-\w+) ([\w\d\.]+)?)\s?', option_string)
extras = []
for o in options:
if o[1] not in exclude:
extras.extend(o[1:])
return extras
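# Illustrative example, not in the original source:
#
#   parse_extra_options("-evalue 0.001 -out results.xml", exclude=['-out'])
#   # -> ['-evalue', '0.001']
#
# The excluded flag and its value are dropped; everything else is returned as a
# flat list, ready to extend a subprocess argument list.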
def run_blast(database, program, filestore, file_uuid, sequence, options):
"""
Perform a BLAST search on the given database using the given query
Args:
database: The database to search (full path).
program: The program to use (e.g. BLASTN, TBLASTN, BLASTX).
filestore: The directory to store the XML output.
file_uuid: A unique identifier for the filename.
sequence: The sequence to BLAST.
options: Any extra options to pass to the BLAST executable.
Returns:
A tuple containing the stdout and stderr of the program.
# Test:
>>> seq = ">test\\nTTCATAATTAATTTTTTATATATATATTATATTATAATATTAATTTATATTATAAAAATAATATTTATTATTAAAATATT\\nTATTCTCCTTTCGGGGTTCCGGCTCCCGTGGCCGGGCCCCGGAATTATTAATTAATAATAAATTATTATTAATAATTATT\\n>test 2\\nAATGGTATTAGATTCAGTGAATTTGGTACAAGACGTCGTAGATCTCTGAAGGCTCAAGATCTAATTATGCAAGGAATCATGAAAGCTGTGAACGGTAACCCAGACAGAAACAAATCGCTATTATTAGGCACATCAAATATTTTATTTGCCAAGAAATATGGAGTCAAGCCAATCGGTACTGTGGCTCACGAGTGGGTTATGGGAGTCGCTTCTATTAGTGAAGATTATTTGCATGCCAATAAAAATGCAATGGATTGTTGGATCAATACTTTTGGTGCAAAAAATGCTGGTTTAGCATTAACGGATACTTTTGGAACTGATGACTTTTTAAAATCATTCCGTCCACCATATTCTGATGCTTACGTCGGTGTTAGACAAGATTCTGGAGACCCAGTTGAGTATACCAAAAAGATTTCCCACCATTACCATGACGTGTTGAAATTGCCTAAATTCTCGAAGATTATCTGTTATTCCGATTCTTTGAACGTCGAAAAGGCAATAACTTACTCCCATGCAGCTAAAGAGAATG"
>>> blast('/Users/work/Projects/pyBlast/db/yeast.nt', '/Users/work/Projects/pyBlast/bin/blastn', '/Users/work/Projects/pyBlast/store/', seq, {u'-evalue': 10.0, u'-strand': u'both'})
>>> seq = ">test\\nTTC"
>>> blast('/Users/work/Projects/pyBlast/db/yeast.nt', '/Users/work/Projects/pyBlast/bin/blastn', '/Users/work/Projects/pyBlast/store/', seq, {u'-evalue': 10.0, u'-strand': u'both'})
"""
query = [program, '-db', database, '-outfmt', '5', '-query', '-', '-out', "{0}{1}.xml".format(filestore, file_uuid), '-max_target_seqs', '50']
exclude = [
'-db',
'-query',
'-out',
'-subject',
'-html',
'-gilist',
'-negative_gilist',
'-entrez_query',
'-remote',
'-outfmt',
'-num_threads',
'-import_search_strategy',
'-export_search_strategy',
'-window_masker_db',
'-index_name',
'-use_index',
]
extra = parse_extra_options(options, exclude)
query.extend(extra)
p = subprocess.Popen(query, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
stdout, stderr = p.communicate(sequence)
return (stdout, stderr)
def poll(name):
"""
Check if the file <name> has been created, indicating BLAST has finished, and return results
Args:
name: The filename of the file that was created in a BLAST search.
Returns:
The file or False if it has not yet been created.
"""
try:
with open(name) as results:
if os.path.getsize(name) > 0:
return results.read()
raise IOError
except IOError:
return False
def chunk_string(s, l=10):
"""
Split a string into chunks of a set length.
Args:
s: The string to chunk.
l: The length of the chunks.
Returns:
A list containing the string chunks.
"""
return [s[i:i+l] for i in range(0,len(s),l)]
def format_bases(bases):
"""
Generate HTML that colours the bases in a string.
Args:
bases: A string containing a genetic sequence.
Returns:
An HTML string.
"""
formatted = ''
for b in bases:
formatted += '<span class="base-{}">{}</span>'.format(b,b)
return formatted
def create_formatted_sequences(hsp):
"""
Take a sequence and format it for display.
Args:
hsp: A dict containing the sequence information.
Returns:
An HTML string of the formatted sequence.
"""
cl = 60
query = chunk_string(hsp['query_seq'], cl)
match = chunk_string(hsp['midline'], cl)
subject = chunk_string(hsp['hit_seq'], cl)
output = ""
for ln, line in enumerate(query):
query_from = int(hsp['query_from']) if ln == 0 else int(hsp['query_from'])+(ln*cl)
query_to = query_from+(cl-1)
subject_from = int(hsp['hit_from']) if ln == 0 else int(hsp['hit_from'])+(ln*cl)
subject_to = subject_from+(cl-1)
qseq = format_bases(line)
sseq = format_bases(subject[ln])
output += '''
<div class="row">
<pre class="col-xs-1 seq-col-sm">Query
Subject
</pre>
<pre class="col-xs-1 seq-col-sm">{qsnum}
{ssnum}
</pre>
<pre class="col-xs-7 seq-col-lg">{qseq}
{match}
{sseq}
</pre>
<pre class="col-xs-1 seq-col-sm">{qenum}
{senum}
</pre>
</div>
'''.format(qseq=qseq,
match=match[ln],
sseq=sseq,
qsnum=str(query_from),
qenum=query_to,
ssnum=str(subject_from),
senum=subject_to
)
return output.rstrip()
def process_blast_result(filecontents, cutoff=0.0001):
"""
Take a BLAST XML results file and process into a usable dict.
Args:
filecontents: The contents of a BLAST XML file.
cutoff: The cutoff for which a sequence is considered relevant.
Returns:
A dict of the results.
"""
results = {'results':[], 'messages':[]}
messages = []
b = etree.fromstring(filecontents)
# Get BLAST details
db_loc = b.xpath('string(BlastOutput_db/text())').split('/')
results['details'] = {
'program': b.xpath('string(BlastOutput_program/text())'),
'version': b.xpath('string(BlastOutput_version/text())'),
'reference': b.xpath('string(BlastOutput_reference/text())'),
'db': db_loc[-1],
'query_id': b.xpath('string(BlastOutput_query-ID/text())'),
'query_def': b.xpath('string(BlastOutput_query-def/text())'),
'query_length': b.xpath('string(BlastOutput_query-len/text())'),
'params': {},
}
for t in b.findall('BlastOutput_param/Parameters/*'):
name = t.tag.split('_', 1)
results['details']['params'][name[-1]] = t.text
for it in b.findall('BlastOutput_iterations/Iteration'):
        # The file may contain a message, store that for later use
if it.find('.//Iteration_message') is not None:
results['messages'].append(it.find('.//Iteration_message').text)
else:
r = {
'details': {
'id': it.xpath('string(Iteration_query-ID/text())'),
'def': it.xpath('string(Iteration_query-def/text())'),
'length': it.xpath('string(Iteration_query-len/text())'),
},
'statistics': {
                'db_num': it.xpath('string(Iteration_stat/Statistics/Statistics_db-num/text())'),
                'db_length': it.xpath('string(Iteration_stat/Statistics/Statistics_db-len/text())'),
                'hsp_length': it.xpath('string(Iteration_stat/Statistics/Statistics_hsp-len/text())'),
                'eff_space': it.xpath('string(Iteration_stat/Statistics/Statistics_eff-space/text())'),
                'kappa': it.xpath('string(Iteration_stat/Statistics/Statistics_kappa/text())'),
                'lambda': it.xpath('string(Iteration_stat/Statistics/Statistics_lambda/text())'),
                'entropy': it.xpath('string(Iteration_stat/Statistics/Statistics_entropy/text())'),
},
'hits': []
}
for ht in it.findall('Iteration_hits/Hit'):
h = {
'num': ht.xpath('string(Hit_num/text())'),
'id': ht.xpath('string(Hit_id/text())'),
'def': ht.xpath('string(Hit_def/text())'),
'accession': ht.xpath('string(Hit_accession/text())'),
'length': ht.xpath('string(Hit_len/text())'),
'hsps': [],
}
query_from = []
query_to = []
for hs in ht.findall('.//Hsp'):
hsp = {
'num': hs.xpath('string(Hsp_num/text())'),
'bit_score': hs.xpath('string(Hsp_bit-score/text())'),
'score': hs.xpath('string(Hsp_score/text())'),
'evalue': hs.xpath('string(Hsp_evalue/text())'),
'query_from': hs.xpath('string(Hsp_query-from/text())'),
'query_to': hs.xpath('string(Hsp_query-to/text())'),
'hit_from': hs.xpath('string(Hsp_hit-from/text())'),
'hit_to': hs.xpath('string(Hsp_hit-to/text())'),
'query_frame': hs.xpath('string(Hsp_query-frame/text())'),
'hit_frame': hs.xpath('string(Hsp_hit-frame/text())'),
'identity': hs.xpath('string(Hsp_identity/text())'),
'positive': hs.xpath('string(Hsp_positive/text())'),
'gaps': hs.xpath('string(Hsp_gaps/text())'),
'align_length': hs.xpath('string(Hsp_align-len/text())'),
'query_seq': hs.xpath('string(Hsp_qseq/text())'),
'hit_seq': hs.xpath('string(Hsp_hseq/text())'),
'midline': hs.xpath('string(Hsp_midline/text())'),
}
hsp['identity_percent'] = int(hsp['identity'])/float(hsp['align_length'])*100
hsp['gaps_percent'] = int(hsp['gaps'])/float(hsp['align_length'])*100
if float(hsp['evalue']) < cutoff: #float(hsp['bit_score']) > bit_score_filter:
query_from.append(int(hsp['query_from']))
query_to.append(int(hsp['query_to']))
hsp['formatted'] = create_formatted_sequences(hsp)
hsp['query_chunk'] = chunk_string(hsp['query_seq'], 60)
hsp['match_chunk'] = chunk_string(hsp['midline'], 60)
hsp['subject_chunk'] = chunk_string(hsp['hit_seq'], 60)
h['hsps'].append(hsp)
if len(h['hsps']) > 0:
if sum(query_from) > sum(query_to):
h['query_from'] = max(query_from)
h['query_to'] = min(query_to)
else:
h['query_from'] = min(query_from)
h['query_to'] = max(query_to)
r['hits'].append(h)
results['results'].append(r)
return results
| 2.765625 | 3 |
Scrapers/setup.py | TLTFinancialConsulting/Stock-Analysis | 0 | 12792893 | from distutils.core import setup
import py2exe
setup(console=['Single Stock Scraper.py'])
| 1.109375 | 1 |
src/model/UrlMap.py | joyghosh/tiny | 1 | 12792894 | '''
Created on 02-Jul-2016
@author: <NAME>
@version: 1.0
@since: 1.0
'''
from flask_sqlalchemy import SQLAlchemy
from restful.tiny_routes import app
db = SQLAlchemy(app)
class UrlMap(db.Model):
'''
A model responsible for storing shortened to long url mapping.
'''
id = db.Column('id', db.Integer, primary_key = True)
uuid = db.Column('uuid', db.Integer, unique = True)
short_url = db.Column('short_url', db.String(255), unique = True)
url = db.Column('url', db.String(255), unique = True)
def __init__(self, uuid, short_url, url):
'''
Constructor
'''
self.uuid = uuid
self.short_url = short_url
self.url = url
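# Illustrative usage sketch, not part of the original model file: creates the
# table and stores one mapping. The uuid, short code and URL are assumptions, and
# newer Flask-SQLAlchemy versions may additionally require an application context.
def _demo_create_mapping():
    db.create_all()
    mapping = UrlMap(uuid=1, short_url="abc123", url="https://example.com/some/long/path")
    db.session.add(mapping)
    db.session.commit()
    return UrlMap.query.filter_by(short_url="abc123").first()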
| 2.96875 | 3 |
method2/utils.py | Kenneth111/BlindWatermark | 2 | 12792895 | <gh_stars>1-10
import numpy as np
from scipy.fftpack import dct, idct
def dct2(a):
return dct( dct( a, axis=0, norm='ortho' ), axis=1, norm='ortho' )
def idct2(a):
return idct( idct( a, axis=0 , norm='ortho'), axis=1 , norm='ortho')
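# Quick self-check sketch, not part of the original file: dct2 followed by idct2
# should reconstruct the input block up to floating-point error, the property a
# blind-watermark embedding in the DCT domain typically relies on.
def _demo_dct_roundtrip(size=8):
    block = np.random.rand(size, size)
    return np.allclose(block, idct2(dct2(block)))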
def binarizeImg(img):
threshold = 200
table = []
for i in range( 256 ):
if i < threshold:
table.append(0)
else:
table.append(1)
tmp_img = img.point(table)
return np.array(tmp_img) | 2.546875 | 3 |
Python/Programming Fundamentals/Lists Basics/15. Search.py | teodoramilcheva/softuni-software-engineering | 0 | 12792896 | n = int(input())
word = input()
list_of_strings = [input() for _ in range(n)]
filtered_list = []
for i in range(n):
if word in list_of_strings[i]:
filtered_list.append(list_of_strings[i])
print(list_of_strings)
print(filtered_list)
| 3.9375 | 4 |
examples/poll_card.py | smaeda-ks/twitter-python-ads-sdk | 162 | 12792897 | from twitter_ads.campaign import Tweet
from twitter_ads.client import Client
from twitter_ads.creative import MediaLibrary, PollCard
from twitter_ads.enum import MEDIA_TYPE
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
ACCOUNT_ID = ''
# initialize the client
client = Client(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# load the advertiser account instance
account = client.accounts(ACCOUNT_ID)
# most recent Media Library video
ml = MediaLibrary(account).all(account, media_type=MEDIA_TYPE.VIDEO)
media_key = ml.first.media_key
# create Poll Card with video
pc = PollCard(account)
pc.duration_in_minutes = 10080 # one week
pc.first_choice = 'Northern'
pc.second_choice = 'Southern'
pc.name = ml.first.name + ' poll card from SDK'
pc.media_key = media_key
pc.save()
# create Tweet
Tweet.create(account, text='Which hemisphere do you prefer?', card_uri=pc.card_uri)
# https://twitter.com/apimctestface/status/973002610033610753
| 2.53125 | 3 |
HAFTA-2/DERS-5/1.py | aydan08/Python-Kursu-15.02.21 | 1 | 12792898 | <reponame>aydan08/Python-Kursu-15.02.21
a=input("Sayı Girin:")
a=int(a)
b=input("İkinci Sayı Girin:")
b=int(b)
c=a+b
print(c)
| 3.65625 | 4 |
tagannotator/base/migrations/0004_auto_20200113_0232.py | kixlab/suggestbot-instagram-context-annotator | 0 | 12792899 | # Generated by Django 2.2.7 on 2020-01-13 02:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0003_auto_20200113_0225'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='user',
),
migrations.AddField(
model_name='photo',
name='title',
field=models.CharField(blank=True, max_length=255),
),
]
| 1.46875 | 1 |
kibitzr/cli.py | paulmassen/kibitzr | 478 | 12792900 | import sys
import logging
import click
import entrypoints
LOG_LEVEL_CODES = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
}
def merge_extensions(click_group):
"""
Each extension is called with click group for
ultimate agility while preserving cli context.
"""
for extension in load_extensions():
extension(click_group)
return click_group
def load_extensions():
"""Return list of Kibitzr CLI extensions"""
return [
point.load()
for point in entrypoints.get_group_all("kibitzr.cli")
]
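# Illustrative sketch, not from the kibitzr codebase: an extension registered
# under the "kibitzr.cli" entry-point group is simply a callable that receives
# the click group and may attach commands to it. The names below are made up.
#
#   def my_extension(cli_group):
#       @cli_group.command()
#       def hello():
#           """Example command contributed by a third-party extension"""
#           print("hello from an extension")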
@click.group()
@click.option("-l", "--log-level", default="info",
type=click.Choice(LOG_LEVEL_CODES.keys()),
help="Logging level")
@click.pass_context
def cli(ctx, log_level):
"""Run kibitzr COMMAND --help for detailed descriptions"""
ctx.obj = {'log_level': LOG_LEVEL_CODES[log_level.lower()]}
@cli.command()
def version():
"""Print version"""
from kibitzr import __version__ as kibitzr_version
print(kibitzr_version)
@cli.command()
def firefox():
"""Launch Firefox with persistent profile"""
from kibitzr.app import Application
Application().run_firefox()
@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def once(ctx, name):
"""Run kibitzr checks once and exit"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))
@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def run(ctx, name):
"""Run kibitzr in the foreground mode"""
from kibitzr.app import Application
app = Application()
sys.exit(app.run(once=False, log_level=ctx.obj['log_level'], names=name))
@cli.command()
def init():
"""Create boilerplate configuration files"""
from kibitzr.app import Application
Application.bootstrap()
@cli.command()
def telegram_chat():
"""Return chat id for the last message sent to Telegram Bot"""
# rename import to escape name clashing:
from kibitzr.app import Application
app = Application()
app.telegram_chat()
@cli.command()
def clean():
"""Clean change history"""
from kibitzr.storage import PageHistory
PageHistory.clean()
@cli.command()
def stash():
"""Print stash contents"""
from kibitzr.stash import Stash
Stash.print_content()
extended_cli = merge_extensions(cli)
if __name__ == "__main__":
extended_cli()
| 2.140625 | 2 |
tests/test_board.py | meyer1994/connect5 | 0 | 12792901 | <reponame>meyer1994/connect5
from unittest import TestCase
from ai.board import Board
class TestBoard(TestCase):
def setUp(self):
self.board = Board(5, 5)
self.board.board = ''.join([
'----X',
'XXXX-',
'-----',
'XXXXO',
'X-X-X'
])
# X - X - X
# X X X X O
# - - - - -
# X X X X -
# - - - - X
def test_constructor(self):
self.assertEqual(self.board.width, 5)
self.assertEqual(self.board.height, 5)
def test_row(self):
res = self.board.row(0)
print(type(self.board.board))
row = '----X'
self.assertEqual(res, row)
res = self.board.row(3)
row = 'XXXXO'
self.assertEqual(res, row)
def test_rows(self):
for i, row in enumerate(self.board.rows):
exp = self.board.row(i)
self.assertEqual(exp, row)
def test_cols(self):
for i, col in enumerate(self.board.cols):
exp = self.board.col(i)
self.assertEqual(exp, col)
def test_col(self):
res = self.board.col(0)
col = '-X-XX'
self.assertEqual(res, col)
res = self.board.col(3)
col = '-X-X-'
self.assertEqual(res, col)
def test_rdiag(self):
expected = [
'X',
'X-',
'-XX',
'X-X-',
'-X-XX',
'-X-O',
'-X-',
'--',
'X'
]
for diag, exp in enumerate(expected):
res = self.board.rdiag(diag)
print(res)
self.assertEqual(res, exp)
def test_ldiag(self):
expected = [
'-',
'X-',
'-X-',
'X-X-',
'XX-XX',
'-X--',
'XX-',
'-O',
'X'
]
for i, exp in enumerate(expected):
res = self.board.ldiag(i)
self.assertEqual(res, exp)
def test_diags(self):
expected = [
'X',
'X-',
'-XX',
'X-X-',
'-X-XX',
'-X-O',
'-X-',
'--',
'X',
'-',
'X-',
'-X-',
'X-X-',
'XX-XX',
'-X--',
'XX-',
'-O',
'X'
]
results = list(self.board.diags)
self.assertListEqual(results, expected)
def test_get(self):
coords = [ (0, 0), (0, 3), (0, 1), (3, 3) ]
expected = [ '-', 'X', 'X', 'X' ]
results = [ self.board.get(x, y) for x, y in coords ]
self.assertListEqual(expected, results)
def test_set(self):
self.board.set(1, 1, 'O')
res = self.board.get(1, 1)
self.assertEqual(res, 'O')
def test_str(self):
string = ('X - X - X\n'
'X X X X O\n'
'- - - - -\n'
'X X X X -\n'
'- - - - X')
res = str(self.board)
self.assertEqual(string, res)
res = repr(self.board)
self.assertEqual(string, res)
def test_len(self):
res = len(self.board)
self.assertEqual(res, 25)
def test_eq(self):
b1 = Board(20, 20)
b2 = Board(20, 20)
self.assertEqual(b1, b2)
b1.set(0, 0, 'x')
self.assertNotEqual(b1, b2)
| 3.5625 | 4 |
stubs/esp32_1_10_0/btree.py | jmannau/micropython-stubber | 0 | 12792902 | "Module 'btree' on firmware 'v1.10-247-g0fb15fc3f on 2019-03-29'"
DESC = 2
INCL = 1
def open():
pass
| 0.984375 | 1 |
src/elm_fluent/html_compiler.py | elm-fluent/elm-fluent | 17 | 12792903 | <reponame>elm-fluent/elm-fluent
"""
HTML specific compilation functions
"""
import re
import bs4
from fluent.syntax import ast
from elm_fluent import codegen
from elm_fluent.stubs import defaults as dtypes, html, html_attributes
html_output_type = dtypes.List.specialize(a=html.Html)
def compile_pattern(pattern, local_scope, compiler_env):
skeleton, expr_replacements = replace_non_text_expressions(pattern.elements)
# TODO - handle parse failures gracefully, and check the parser is ensuring
# well-formedness
dom = bs4.BeautifulSoup("<root>{0}</root>".format(skeleton), "lxml").find("root")
return dom_nodes_to_elm(
list(dom.children), expr_replacements, local_scope, compiler_env
)
def dom_nodes_to_elm(nodes, expr_replacements, local_scope, compiler_env):
# We have to structure this as a list of lists, then do a List.concat
# at the end. In many cases the List.concat will disappear after
# simplify.
from elm_fluent import compiler
items = []
for node in nodes:
if isinstance(node, bs4.element.NavigableString):
parts = interpolate_replacements(str(node), expr_replacements)
for part in parts:
if isinstance(part, str):
items.append(
HtmlList(
[
local_scope.variables["Html.text"].apply(
codegen.String(str(part))
)
]
)
)
else:
val = compiler.compile_expr(part, local_scope, compiler_env)
if val.type == html_output_type:
# This is a list type, so simply append to our list of lists
items.append(val)
else:
val = local_scope.variables["Html.text"].apply(
compiler.render_to_string(val, local_scope, compiler_env)
)
items.append(HtmlList([val]))
else:
assert isinstance(node, bs4.element.Tag)
tag_name = node.name.lower()
static_attributes = []
for attr_name, attr_value in sorted(node.attrs.items()):
if isinstance(attr_value, list):
# Bs4 treats class attribute differently, returns a list, which we convert
# back to a string here:
attr_value = " ".join(attr_value)
attr_value_parts = interpolate_replacements(
attr_value, expr_replacements
)
attr_output_parts = []
for part in attr_value_parts:
if isinstance(part, str):
attr_output_parts.append(codegen.String(str(part)))
else:
with compiler_env.modified(html_context=False):
attr_output_parts.append(
compiler.render_to_string(
compiler.compile_expr(
part, local_scope, compiler_env
),
local_scope,
compiler_env,
)
)
attr_final_value = codegen.StringConcat(attr_output_parts)
if attr_name in html_attributes.ATTRIBUTES:
attr_constructor = local_scope.variables[
"Attributes.{0}".format(attr_name)
]
else:
attr_constructor = local_scope.variables[
"Attributes.attribute"
].apply(codegen.String(attr_name))
static_attributes.append(attr_constructor.apply(attr_final_value))
if compiler_env.dynamic_html_attributes:
selectors_for_node = codegen.List(
list(
map(
codegen.String,
get_selectors_for_node(node, expr_replacements),
)
)
)
dynamic_attributes = local_scope.variables[
"Fluent.selectAttributes"
].apply(
local_scope.variables[compiler.ATTRS_ARG_NAME], selectors_for_node
)
else:
dynamic_attributes = codegen.List([])
attributes = codegen.ListConcat(
[codegen.List(static_attributes), dynamic_attributes],
dtypes.List.specialize(a=html.Attribute),
)
sub_items = dom_nodes_to_elm(
list(node.children), expr_replacements, local_scope, compiler_env
)
if tag_name in html.ELEMENTS:
node_constructor = local_scope.variables["Html.{0}".format(tag_name)]
else:
node_constructor = local_scope.variables["Html.node"].apply(
codegen.String(tag_name)
)
item = node_constructor.apply(attributes, sub_items)
items.append(HtmlList([item]))
return HtmlListConcat(items)
class HtmlList(codegen.List):
def simplify(self, changes):
retval = super(HtmlList, self).simplify(changes)
if retval is not self:
return retval
def is_html_text_call(item):
return (
isinstance(item, codegen.FunctionCall)
and isinstance(item.expr, codegen.VariableReference)
and (
"{0}.{1}".format(item.expr.module_name, item.expr.name)
== "Html.text"
)
)
new_items = []
for item in self.items:
if (
len(new_items) > 0
and is_html_text_call(new_items[-1])
and is_html_text_call(item)
):
last_item = new_items[-1]
if not isinstance(last_item.args[0], codegen.StringConcat):
last_item.args = [codegen.StringConcat([last_item.args[0]])]
last_item.args[0].parts.append(item.args[0])
changes.append(True)
else:
new_items.append(item)
self.items = new_items
return self
class HtmlListConcat(codegen.ListConcat):
literal = HtmlList
def __init__(self, parts):
super(HtmlListConcat, self).__init__(parts, html_output_type)
def replace_non_text_expressions(elements):
"""
Given a list of ast.Expression objects, returns a string
with replacement markers and a dictionary of replacement info
"""
parts = []
expr_replacements = {}
for element in elements:
if isinstance(element, ast.TextElement):
parts.append(element.value)
else:
# Need a replacement that doesn't have any special HTML chars in it
# that would cause the HTML parser to do anything funny with it.
# TODO - some mechanism that would guarantee this generated string
# does not appear by chance in the actual message.
replacement_name = "SSS{0}EEE".format(str(id(element)))
expr_replacements[replacement_name] = element
parts.append(replacement_name)
return "".join(parts), expr_replacements
def interpolate_replacements(text, expr_replacements):
"""
Given a text with replacement markers, and a dictionary
of replacement markers to expression objects, returns
a list containing text/expression objects.
"""
if not expr_replacements:
return [text]
replacement_strings = list(expr_replacements.keys())
splitter = re.compile(
"({0})".format("|".join(re.escape(r) for r in replacement_strings))
)
split_text = [p for p in splitter.split(text) if p]
return [expr_replacements.get(t, t) for t in split_text]
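# Illustrative note, not in the original source: replace_non_text_expressions
# turns [TextElement("Hello "), <placeable>] into the skeleton "Hello SSS<id>EEE"
# plus {"SSS<id>EEE": <placeable>}; after HTML parsing, interpolate_replacements
# reverses the substitution, e.g. "Hello SSS<id>EEE!" -> ["Hello ", <placeable>, "!"].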
def get_selectors_for_node(node, expr_replacements):
tag_name = node.name.lower()
yield tag_name
def is_static_only(attr_value):
parts = interpolate_replacements(attr_value, expr_replacements)
return all(isinstance(p, str) for p in parts)
classes = node.attrs.get("class", [])
if is_static_only(" ".join(classes)):
for class_ in classes:
class_selector = ".{0}".format(class_)
yield class_selector
yield tag_name + class_selector
id = node.attrs.get("id", None)
if id is not None and is_static_only(id):
id_selector = "#{0}".format(id)
yield id_selector
yield tag_name + id_selector
for attr_name, attr_value in sorted(node.attrs.items()):
if attr_name in ["id", "class"]:
continue
attr_present_selector = "[{0}]".format(attr_name)
yield attr_present_selector
yield tag_name + attr_present_selector
if is_static_only(attr_value):
attr_value_selector = '[{0}="{1}"]'.format(attr_name, attr_value)
yield attr_value_selector
yield tag_name + attr_value_selector
| 2.71875 | 3 |
api/tests/opentrons/file_runner/test_create_file_runner.py | mrakitin/opentrons | 0 | 12792904 | """Tests for the create_protocol_runner factory."""
import pytest
from pathlib import Path
from opentrons.hardware_control import API as HardwareAPI
from opentrons.protocol_engine import ProtocolEngine, create_protocol_engine
from opentrons.file_runner import (
ProtocolFileType,
ProtocolFile,
JsonFileRunner,
PythonFileRunner,
create_file_runner,
)
@pytest.fixture
async def protocol_engine(hardware: HardwareAPI) -> ProtocolEngine:
"""Get an actual ProtocolEngine for smoke-test purposes."""
return await create_protocol_engine(hardware=hardware)
async def test_create_json_runner(
protocol_engine: ProtocolEngine,
json_protocol_file: Path,
) -> None:
"""It should be able to create a JSON file runner."""
protocol_file = ProtocolFile(
file_type=ProtocolFileType.JSON,
file_path=json_protocol_file,
)
result = create_file_runner(
protocol_file=protocol_file,
engine=protocol_engine,
)
assert isinstance(result, JsonFileRunner)
async def test_create_python_runner(
protocol_engine: ProtocolEngine,
python_protocol_file: Path,
) -> None:
"""It should be able to create a Python file runner."""
protocol_file = ProtocolFile(
file_type=ProtocolFileType.PYTHON,
file_path=python_protocol_file,
)
result = create_file_runner(
protocol_file=protocol_file,
engine=protocol_engine,
)
assert isinstance(result, PythonFileRunner)
| 2.46875 | 2 |
KGQA/LSTM/test_api.py | johnson7788/EmbedKGQA | 0 | 12792905 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2022/4/18 11:45 AM
# @File : test_api.py
# @Author:
# @Desc  : Tests
import unittest
import requests
import time, os
import json
import base64
import random
import string
import pickle
import sys
class LSTMKQGATestCase(unittest.TestCase):
host_server = f'http://l8:9966'
def test_lstmkgqa_file(self):
"""
        Test the file-based prediction endpoint
:return:
:rtype:
"""
url = f"{self.host_server}/api/predict_file"
params = {'data_apth': "./../data/QA_data/MetaQA/qa_test_1hop.txt"}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(params), timeout=360)
result = r.json()
print(result)
assert r.status_code == 200
assert result is not None, "返回结果为None"
#检查结果,里面肯定是字典格式
print("对文件接口测试完成")
def test_lstmkgqa(self):
"""
        Expected answers for the test data:
what does [Grégoire Colin] appear in Before the Rain
[Joe Thomas] appears in which movies The Inbetweeners Movie|The Inbetweeners 2
what films did [Michelle Trachtenberg] star in Inspector Gadget|Black Christmas|Ice Princess|Harriet the Spy|The Scribbler
what does [Helen Mack] star in The Son of Kong|Kiss and Make-Up|Divorce
        Test the prediction endpoint
:return:
['问题是:what does Grégoire Colin appear in, 答案是: Before the Rain', '问题是:NE appears in which movies, 答案是: The Inbetweeners Movie', '问题是:what films did Michelle Trachtenberg star in, 答案是: Harriet the Spy', '问题是:what does <NAME> star in, 答案是: The Son of Kong', '问题是:what films did Shahid Kapoor act in, 答案是: Haider']
:rtype:
"""
url = f"{self.host_server}/api/predict"
data = [
['<NAME>', 'what does NE appear in'],
['<NAME>', 'NE appears in which movies'],
['<NAME>', 'what films did NE star in'],
['<NAME>', 'what does NE star in'],
['<NAME>', 'what films did NE act in'],
]
params = {'data':data}
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=json.dumps(params), timeout=360)
result = r.json()
print(result)
assert r.status_code == 200
assert result is not None, "返回结果为None"
#检查结果,里面肯定是字典格式
print("对文件接口测试完成")
| 2.8125 | 3 |
Python-Automate-Email/main.py | abhijeetpandit7/Flight-Deals | 0 | 12792906 | <gh_stars>0
#This file will need to use the DataManager,FlightSearch, FlightData, NotificationManager classes to achieve the program requirements.
from notification_manager import NotificationManager
from flight_search import FlightSearch
from flight_data import FlightData
from data_manager import DataManager
notification_manager = NotificationManager()
flight_search = FlightSearch()
flight_data = FlightData()
data_manger = DataManager()
user_list = data_manger.get_users()
destination_list = data_manger.get_data()
for city in destination_list:
city_id = city['id']
lowest_price = city['lowestPrice']
fly_to = city['iataCode']
price_data = flight_search.get_data(fly_to)
# Check if no flights available
if not price_data:
continue
is_cheap_deal = flight_data.compare(price_data, lowest_price)
if is_cheap_deal:
flight_details = flight_data.get_data()
data_manger.update_data(city_id, flight_data.min_price)
notification_manager.send_alert(flight_details, user_list) | 3.125 | 3 |
test/import_it.py | cltl/FrameNetNLTK | 1 | 12792907 | <reponame>cltl/FrameNetNLTK<gh_stars>1-10
import sys
sys.path.insert(0, '../..')
from FrameNetNLTK import load
my_fn = load(folder='test_lexicon',
verbose=2) | 1.539063 | 2 |
examples/pybullet/gym/pybullet_envs/minitaur/agents/trajectory_generator/tg_inplace.py | felipeek/bullet3 | 9,136 | 12792908 | """Trajectory Generator for in-place stepping motion for quadruped robot."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
TWO_PI = 2 * math.pi
def _get_actions_asymmetric_sine(phase, tg_params):
"""Returns the leg extension given current phase of TG and parameters.
Args:
phase: a number in [0, 2pi) representing current leg phase
tg_params: a dictionary of tg parameters:
stance_lift_cutoff -- switches the TG between stance (phase < cutoff) and
lift (phase > cutoff) phase
      amplitude_stance -- amplitude in stance phase
amplitude_lift -- amplitude in lift phase
center_extension -- center of leg extension
"""
stance_lift_cutoff = tg_params['stance_lift_cutoff']
a_prime = np.where(phase < stance_lift_cutoff, tg_params['amplitude_stance'],
tg_params['amplitude_lift'])
scaled_phase = np.where(
phase > stance_lift_cutoff, np.pi + (phase - stance_lift_cutoff) /
(TWO_PI - stance_lift_cutoff) * np.pi, phase / stance_lift_cutoff * np.pi)
return tg_params['center_extension'] + a_prime * np.sin(scaled_phase)
def step(current_phases, leg_frequencies, dt, tg_params):
"""Steps forward the in-place trajectory generator.
Args:
current_phases: phases of each leg.
leg_frequencies: the frequency to proceed the phase of each leg.
dt: amount of time (sec) between consecutive time steps.
tg_params: a set of parameters for trajectory generator, see the docstring
of "_get_actions_asymmetric_sine" for details.
Returns:
actions: leg swing/extensions as output by the trajectory generator.
new_state: new swing/extension.
"""
new_phases = np.fmod(current_phases + TWO_PI * leg_frequencies * dt, TWO_PI)
extensions = []
for leg_id in range(4):
extensions.append(
_get_actions_asymmetric_sine(new_phases[..., leg_id], tg_params))
return new_phases, extensions
def reset():
return np.array([0, np.pi * 0.5, np.pi, np.pi * 1.5])
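# Minimal usage sketch, not part of the original module; the parameter values
# below are illustrative assumptions rather than tuned defaults.
def _demo_step():
  tg_params = {
      'stance_lift_cutoff': np.pi,  # half the cycle in stance, half in lift
      'amplitude_stance': 0.1,
      'amplitude_lift': 0.3,
      'center_extension': 0.0,
  }
  phases = reset()
  leg_frequencies = np.full(4, 1.5)  # one frequency (Hz) per leg
  phases, extensions = step(phases, leg_frequencies, dt=0.01, tg_params=tg_params)
  return phases, extensions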
| 3.0625 | 3 |
1_Agent_based_modeling.py | gy19sp/GEO5003-Practicals | 0 | 12792909 | <reponame>gy19sp/GEO5003-Practicals<gh_stars>0
import random # imports the random module
""" randomised position of the y coordinate of the agent in a 99*99 grid; this function differs
from the random.random function, as the randint function offers the possibility to create
a range of integers, whereas random.random offers a range from 0-1"""
y0= random.randint (0,99)
x0= random.randint (0,99)
#Make Random movement by 1 step in the y coordinate
if random.random() < 0.5:# generates a random number with a value of 0-0.99
y0 += 1 #increases a step in the y direction
else:
    y0 -= 1 #decreases a step in the y direction
#This is the same movement for the x coordinate
if random.random() < 0.5:
x0 += 1 #increases a step in the x direction
else:
x0 -= 1 #decreases a step in the x direction
""" same action for a second agent"""
y1= random.randint (0,99) #generates a number within the specified parameters in this case 0-99
x1= random.randint (0,99)
#same method as for previous agent
if random.random() < 0.5:
y1 += 1
else:
y1 -= 1
if random.random() < 0.5:
x1 += 1
else:
x1 -= 1
"""
#y0 = 0
#x0 = 0
#y1 = 4
#x1 = 3  values used to test Pythagoras' theorem, which gives an answer of 5;
the randomised sequence gives varying answers"""
answer = (((y0 - y1)**2) + ((x0 - x1)**2))**0.5
"""a suared+ b squared= c squared, **0.5 being the square root of c, which is
the distance between 1 and 2"""
print (answer)# prints the result from the triangulation
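# For comparison (not part of the original exercise): the same distance can be
# computed with the standard library, avoiding the hand-written formula.
import math
print(math.hypot(y0 - y1, x0 - x1))  # should match the value printed above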
| 3.859375 | 4 |
082 - LISTA, dividindo valores entre LISTAS.py | Rprjunior/PraticandoPython | 0 | 12792910 | '''082 - LIST, SPLITTING VALUES BETWEEN LISTS.
PROGRAM THAT READS SEVERAL VALUES AND STORES THEM IN A LIST.
SPLIT THE ODD AND EVEN VALUES INTO TWO OTHER LISTS AND SHOW ALL 3 AT THE END.'''
numeros = list()
pares = list()
impares = list()
while True:
    numeros.append(int(input('Enter a value: ')))
    resposta = str(input('Do you want to continue? [Y/N]: '))
if resposta in 'Nn':
break
for indice, valor in enumerate(numeros):
if valor % 2 == 0:
pares.append(valor)
elif valor % 2 == 1:
impares.append(valor)
print(f'The complete LIST is: {numeros}')
print(f'The LIST of EVEN values is: {pares}')
print(f'The LIST of ODD values is: {impares}')
| 4 | 4 |
htic/data.py | jenskutilek/HumbleTypeInstructionCompiler | 2 | 12792911 | from __future__ import absolute_import
from .error import HumbleError
class Data(object):
"""Manage parsed data"""
def __init__(self):
self.gasp = []
"""[(int size, bool doGridfit, bool doGray, bool symSmoothing, bool symGridfit)]"""
self.maxp = {}
"""{string name : int value}"""
self.cvt = []
"""[int]"""
self.fpgm = None
"""Block"""
self.prep = None
"""Block"""
self.glyphs = {}
"""{string name : Block block}"""
self.__cvtLookup = {}
self.__functionLookup = {}
self.__functionRecipeLookup = {}
self.__voidFunctionList = []
self.__storageLookup = {}
self.__flagLookup = {}
def addGasp(self, size, doGridFit, doGray, symSmoothing, symGridfit):
self.gasp.append((size, doGridFit, doGray, symSmoothing, symGridfit))
def addMaxp(self, name, value):
self.maxp[name] = value
def setFpgm(self, block):
self.fpgm = block
def setPrep(self, block):
self.prep = block
def addGlyph(self, name, block):
self.glyphs[name] = block
def addCvt(self, name, value):
if name in self.__cvtLookup:
raise HumbleError("Duplicate CVT identifier: {}".format(name))
index = len(self.cvt)
self.cvt.append(value)
self.__cvtLookup[name] = index
def addFunction(self, index, name, recipe, isVoid):
if name in self.__functionLookup:
raise HumbleError("Duplicate function identifier: {}".format(name))
if index in self.__functionLookup.values():
raise HumbleError("Duplicate function index: {} {}".format(index, name))
self.__functionLookup[name] = index
self.__functionRecipeLookup[name] = recipe
if isVoid:
self.__voidFunctionList.append(name)
def addStorage(self, name):
if name not in self.__storageLookup:
index = len(self.__storageLookup)
self.__storageLookup[name] = index
def addFlag(self, name, value):
self.__flagLookup[name] = value
def getCvtIndex(self, name):
try:
return self.__cvtLookup[name]
except KeyError:
raise HumbleError("Undeclared CVT identifier: {}".format(name))
def getFunctionIndex(self, name):
try:
return self.__functionLookup[name]
except KeyError:
raise HumbleError("Undeclared function identifier: {}".format(name))
def getFunctionRecipe(self, name):
try:
return self.__functionRecipeLookup[name]
except KeyError:
return ()
def isVoidFunction(self, name):
return name in self.__voidFunctionList
def getStorageIndex(self, name):
try:
return self.__storageLookup[name]
except KeyError:
raise HumbleError("Undeclared storage identifier: {}".format(name))
def getFlagValue(self, name):
try:
return self.__flagLookup[name]
except KeyError:
raise HumbleError("Undeclared flag alias: {}".format(name))
| 2.4375 | 2 |
Task1B.py | AndrewKeYanzhe/part-ia-flood-warning-system | 0 | 12792912 | <filename>Task1B.py
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_by_distance
def run():
stations = build_station_list()
p = (52.2053, 0.1218)
distances = stations_by_distance(stations, p)
print("The closest stations are: ", distances[:10])
print("The furthest stations are: ", distances[-10:])
if __name__ == "__main__":
print("*** Task 1B: CUED Part IA Flood Warning System ***")
run()
| 3.15625 | 3 |
Models/FCNNs.py | alexchartrand/IoT | 0 | 12792913 | <gh_stars>0
# FCNNs
#<NAME>, <NAME>, <NAME> (2017b) Time series classification from scratch with deep neural networks:
#A strong baseline. In: International Joint Conference on Neural Networks, pp 1578–1585
import torch.nn as nn
import torch.optim as optim
from . import Utility
class FCNNs(nn.Module):
def __init__(self, in_feature, out_feature):
super(FCNNs, self).__init__()
        #self.activation = nn.Softmax(dim=1) # This is included in the loss
self.conv1 = Utility.ConvBlock(in_feature, 128, 8)
self.conv2 = Utility.ConvBlock(128, 256, 5)
self.conv3 = Utility.ConvBlock(256, 128, 3)
self.gap = Utility.GAP()
self.lin = nn.Linear(128, out_feature)
def forward(self, x):
h=self.conv1(x)
h = self.conv2(h)
h = self.conv3(h)
h = self.gap(h)
h = self.lin(h)
return h
def getOptimizer(self):
return optim.Adam(self.parameters(), lr=0.001, betas=(0.9,0.999),eps=1e-7)
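# Illustrative smoke test (run as a module, e.g. `python -m Models.FCNNs`, because of the
# relative import above). Assumes Utility.ConvBlock wraps 1-D convolutions and Utility.GAP
# returns a (batch, channels) tensor; the sizes below are arbitrary placeholders.
if __name__ == "__main__":
    import torch
    model = FCNNs(in_feature=3, out_feature=10)
    dummy = torch.randn(8, 3, 128)  # (batch, channels, time steps)
    print(model(dummy).shape)       # expected: torch.Size([8, 10])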
| 2.625 | 3 |
client/paddleflow/run/run_info.py | Mo-Xianyuan/PaddleFlow | 0 | 12792914 | """
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/env python3
# -*- coding:utf8 -*-
class RunInfo(object):
"""the class of RunInfo info"""
def __init__(self, runId, fsname, username, status, name, description, entry, parameters,
run_yaml, runtime, dockerEnv, updateTime, source, runMsg, createTime,
activateTime):
"""init """
self.runId = runId
self.fsname = fsname
self.username = username
self.status = status
self.name = name
self.description = description
self.entry = entry
self.parameters = parameters
self.run_yaml = run_yaml
self.runtime = runtime
self.dockerEnv = dockerEnv
self.updateTime = updateTime
self.source = source
self.runMsg = runMsg
self.createTime = createTime
self.activateTime = activateTime
class JobInfo(object):
""" the class of job info"""
def __init__(self, name, deps, parameters, command, env, status, start_time, end_time, image, jobid):
self.name = name
self.deps = deps
self.parameters = parameters
self.command = command
self.env = env
self.status = status
self.start_time = start_time
self.end_time = end_time
self.image = image
self.jobId = jobid
class RunCacheInfo(object):
""" the class of runcache info"""
def __init__(self, cacheid, firstfp, secondfp, runid, source, step, fsname, username, expiredtime, strategy, custom,
createtime, updatetime):
self.cacheid = cacheid
self.firstfp = firstfp
self.secondfp = secondfp
self.runid = runid
self.source = source
self.step = step
self.fsname = fsname
self.username = username
self.expiredtime = expiredtime
self.strategy = strategy
self.custom = custom
self.createtime = createtime
self.updatetime = updatetime
class ArtifaceInfo(object):
""" the class of artiface info"""
def __init__(self, runid, fsname, username, artifactpath, atype, step, artifactname, meta,
createtime, updatetime):
self.runid = runid
self.fsname = fsname
self.username = username
self.artifactpath = artifactpath
self.type = atype
self.step = step
self.artifactname = artifactname
self.meta = meta
self.createtime = createtime
        self.updatetime = updatetime
| 1.859375 | 2 |
src/RFDTypeDefinition.py | RFDaemoniac/ReadableFormattedData | 1 | 12792915 | <reponame>RFDaemoniac/ReadableFormattedData
import re
import pprint
import pdb
from RFDUtilityFunctions import LogValidationCheck, LogError, GetInteger, GetBoolean, GetFloat, GetString, ParseValue
class RootTypes():
Unspecified = 'Unspecified'
Bool = 'Bool'
Int = 'Int'
Float = 'Float'
String = 'String'
Array = 'Array'
Object = 'Object'
Any = 'Any'
AllowedDefinitionMembers = {
'type',
'min',
'max',
'regex',
'extends', # rmf todo: this is important, but requires partial validation
'default_value',
'length',
'one_of', # rmf todo: @incomplete will need more work to make this able to be nested
'members',
'name',
'required',
'delete_members' # from parent
}
RootTypeNameToPythonType = {
RootTypes.Bool : (bool),
RootTypes.Int : (int),
RootTypes.Float : (float),
RootTypes.String : (basestring),
RootTypes.Array : (list),
RootTypes.Object : (dict),
RootTypes.Any : ()
}
BasicTypes = {
RootTypes.Bool,
RootTypes.Int,
RootTypes.Float,
RootTypes.String
}
BasicTypeParsers = {
RootTypes.Bool : GetBoolean,
RootTypes.Int : GetInteger,
RootTypes.Float : GetFloat,
RootTypes.String : GetString
}
class DefinitionNode():
def __init__(self, definition):
self.definition = definition
class DataNode():
def __init__(self, data_type, value, definition):
self.data_type = data_type
self.value = value
self.definition = definition
def ValidateTypeMatch(context, data, type_name_or_definition):
	validation_type = GetBasicType(type_name_or_definition)
	type_name = None
	definition = None
	root_type = None
	if (validation_type == RootTypes.String):
		type_name = type_name_or_definition
		if (type_name in RootTypeNameToPythonType):
			success = ValidateBuiltinTypeMatch(data, type_name)
			LogValidationCheck(data, type_name, success)
			return success
		elif (type_name in context.loaded_definitions):
			#rmf todo: @incomplete it's not just about whether it's a dict, but whether it is a definition, values should probably happen in a different loaded_ place
			if (not isinstance(context.loaded_definitions[type_name], dict)):
				LogError("Tried to validate against type that is a data value")
				LogValidationCheck(data, type_name, False)
				return False
			definition = context.loaded_definitions[type_name]
			root_type = GetRootType(context, type_name)
		else:
			LogValidationCheck(data, type_name, False)
			return False
	elif (validation_type == RootTypes.Object):
		definition = type_name_or_definition
		root_type = GetRootType(context, definition)
	#rmf todo: this doesn't support extending either array or string, I think.
	success = False
	if (root_type in BasicTypes):
		success = ValidateDefinitionOfBasicType(context, data, definition)
	elif (root_type == RootTypes.Array):
		success = ValidateDefinitionOfArrayType(context, data, definition)
	else:
		# rmf todo: @incomplete validate objects
		pass
	LogValidationCheck(data, type_name, success)
	return success
def Validate(context, data, type_name_or_definition):
	return ValidateTypeMatch(context, data, type_name_or_definition)
def ValidateBuiltinTypeMatch(data, type_name):
if (type_name == RootTypes.Any):
return True
else:
return isinstance(data, RootTypeNameToPythonType[type_name])
def GetBasicType(data):
for type_name in BasicTypes:
if (ValidateBuiltinTypeMatch(data, type_name)):
return type_name
return None
def IsBasicType(data):
return (GetBasicType(data) != None)
def ParseTypedBasicValue(string_buffer, type_name):
parsed_value = BasicTypeParsers[type_name](string_buffer)
if (parsed_value == None):
LogError("Expected value " + string_buffer + " to be of type " + type_name)
parsed_value = ParseValue(string_buffer)
return parsed_value
def GetRootType(context, type_name_or_definition):
	# rmf todo: @Incomplete this shouldn't use definition['type'], it should be based on explicit extension
	validation_type = GetBasicType(type_name_or_definition)
	type_name = None
	if (validation_type == RootTypes.String):
		type_name = type_name_or_definition
	elif (validation_type == RootTypes.Object):
		type_definition = type_name_or_definition
		if ('type' in type_definition):
			return type_definition['type']
		elif ('extends' in type_definition):
			type_name = type_definition['extends']
		else:
			return RootTypes.Unspecified
	checked_types = set()
	while (type_name != None):
		if (type_name in BasicTypes):
			return type_name
		if (type_name in checked_types):
			return None # prevent circular references
		checked_types.add(type_name)
		if (type_name not in context.loaded_definitions):
			return None
		type_definition = context.loaded_definitions[type_name]
		if ('type' in type_definition):
			return type_definition['type']
		if ('extends' in type_definition):
			type_name = type_definition['extends']
	return RootTypes.Unspecified
def ValidateDefinitionOfBasicType(context, data, type_name_or_definition):
	if isinstance(type_name_or_definition, dict):
definition = type_name_or_definition
else:
if type_name_or_definition in RootTypeNameToPythonType:
return ValidateBuiltinTypeMatch(data, type_name_or_definition)
if type_name_or_definition not in context.loaded_definitions:
LogError("Unknown type name " + str(type_name_or_definition))
return False
definition = context.loaded_definitions[type_name_or_definition]
if 'extends' in definition:
#rmf todo: @incomplete allow extending multiple types
if not definition['extends'] in context.loaded_definitions:
return False
if not ValidateTypeMatch(context, data, definition['extends']):
return False
if 'type' in definition:
if definition['type'] in RootTypeNameToPythonType:
if not ValidateBuiltinTypeMatch(data, definition['type']):
return False
else:
return False
if 'min' in definition:
if data < definition['min']:
return False
if 'max' in definition:
if data > definition['max']:
return False
if 'regex' in definition:
if not re.match(definition['regex']):
return False
if 'one_of' in definition:
found = False
for potential_node in definition['one_of']:
# @RMF TODO: @Incomplete if the one of prevents extra members that are defined outside of one_of then this validation might fail when it should actually be valid
if ValidateDefinitionOfBasicType(context, data, potential_node):
found = True
break
if not found:
return False
return True
def ValidateDefinitionOfArrayType(context, data, definition):
if 'length' in definition:
length_value = definition['length']
length_type = GetBasicType(length_value)
if (length_type == RootTypes.Int):
if (len(data) != length_value):
return False
		elif (length_type == RootTypes.Object):
if not ValidateDefinitionOfBasicType(context, len(data), definition['length']):
return False
if 'elements' in definition:
elements_value = definition['elements']
elements_type = GetBasicType(elements_value)
if (elements_type == RootTypes.String):
for element in data:
if not Validate(context, element, elements_type):
return False
		elif (elements_type == RootTypes.Object):
| 2.34375 | 2 |
lib/modes/mode_twitch.py | okonomichiyaki/parrot.py | 80 | 12792916 | <reponame>okonomichiyaki/parrot.py<gh_stars>10-100
from lib.detection_strategies import single_tap_detection, loud_detection, medium_detection, percentage_detection
import threading
import numpy as np
import pyautogui
from pyautogui import press, hotkey, click, scroll, typewrite, moveRel, moveTo, position
from time import sleep
from subprocess import call
from lib.system_toggles import toggle_eyetracker, turn_on_sound, mute_sound, toggle_speechrec
import os
import math
class TwitchMode:
def __init__(self, modeSwitcher):
self.mode = "regular"
self.modeSwitcher = modeSwitcher
def start( self ):
turn_on_sound()
moveTo( 500, 500 )
click()
moveTo(2000, 2000)
hotkey('ctrl', 'f')
def handle_input( self, dataDicts ):
if( percentage_detection(dataDicts, "whistle", 90 ) or percentage_detection(dataDicts, "bell", 90 ) ):
self.modeSwitcher.switchMode('browse')
def exit( self ):
self.mode = "regular"
mute_sound()
        press('esc')
| 2.375 | 2 |
main.py | scsole/rpi-gpio-video | 0 | 12792917 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
import RPi.GPIO as GPIO
import time
import subprocess
import os
PIR_PIN = 11 # GPIO11
GPIO.setmode(GPIO.BOARD) # Use header pin numbers
GPIO.setup(PIR_PIN, GPIO.IN)
running = False # Is a video currently playing?
player = "omxplayer" # The video player being used
video_path = "/home/pi/video.mp4" # Path to video file
child = 0
if player == "vlc":
opt = '--play-and-exit'
else:
opt = ''
try:
print("Waiting for motion")
while True:
if not GPIO.input(PIR_PIN):
if running == False:
print("Motion detected")
child = subprocess.Popen([player, video_path, opt])
running = True
print("Playing video")
if running == True:
child.poll()
if child.returncode == 0:
running = False
print("Video complete, waiting for motion")
time.sleep(1)
except KeyboardInterrupt:
print("Quit")
GPIO.cleanup()
| 3.0625 | 3 |
scripts/feature_def_gen/feature_def_gen.py | isb-cgc/ISB-CGC-Webapp | 13 | 12792918 | ###
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from csv import DictWriter
from json import load as load_json
import logging
from io import StringIO
from time import sleep
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "isb_cgc.settings")
import django
django.setup()
import click
from bq_data_access.v2.feature_id_utils import FeatureDataTypeHelper
logging.basicConfig(level=logging.INFO)
def run_query(project_id, provider, config):
job_reference = provider.submit_query_and_get_job_ref(project_id)
poll_retry_limit = provider.BQ_JOB_POLL_MAX_RETRIES
poll_sleep_time = provider.BQ_JOB_POLL_SLEEP_TIME
all_done = False
total_retries = 0
poll_count = 0
# Poll for completion
while all_done is False and total_retries < poll_retry_limit:
poll_count += 1
total_retries += 1
is_finished = provider.is_bigquery_job_finished(project_id)
all_done = is_finished
sleep(poll_sleep_time)
logging.debug("Done: {done} retry: {retry}".format(done=str(all_done), retry=total_retries))
query_result = provider.download_and_unpack_query_result()
return query_result
def load_config_from_path(config_class, config_json_path):
config_dict = load_json(open(config_json_path, 'r'))
return config_class.from_dict(config_dict)
def get_csv_object(data_rows, schema, include_header=False):
fieldnames = [x['name'] for x in schema]
file_obj = StringIO()
writer = DictWriter(file_obj, fieldnames=fieldnames)
if include_header:
writer.writeheader()
writer.writerows(data_rows)
return file_obj
def save_csv(data_rows, schema, csv_path, include_header=False):
file_obj = get_csv_object(data_rows, schema, include_header=include_header)
with open(csv_path, 'w') as file_handle:
file_handle.write(file_obj.getvalue())
@click.command()
@click.argument('data_type', type=str)
@click.option('--config_json', type=str)
@click.option('-chr', "chromosome_array", type=str, multiple=True, help="Chromosome (required for methylation)")
def print_query(data_type, config_json, chromosome_array):
feature_type = FeatureDataTypeHelper.get_type(data_type)
logging.info("Feature type: {}".format(str(feature_type)))
config_class = FeatureDataTypeHelper.get_feature_def_config_from_data_type(feature_type)
provider_class = FeatureDataTypeHelper.get_feature_def_provider_from_data_type(feature_type)
if config_json is not None:
config_instance = load_config_from_path(config_class, config_json)
else:
config_dict = FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type(feature_type)
config_instance = config_class.from_dict(config_dict)
if not chromosome_array:
chromosome_array = [str(c) for c in range(1, 23)]
chromosome_array.extend(['X', 'Y'])
provider = provider_class(config_instance, chromosome_array=chromosome_array)
query = provider.build_query(config_instance)
print(query)
# project_id: project number of the BQ data project (typically isb-cgc's project number)
# data_type: 4-letter data type code, eg. GNAB
@click.command()
@click.argument('project_id', type=click.INT)
@click.argument('data_type', type=str)
@click.argument('csv_path', type=str)
@click.option('--config_json', type=str)
@click.option('-chr', "chromosome_array", type=str, multiple=True, help="Chromosome (required for methylation)")
def run(project_id, data_type, csv_path, config_json, chromosome_array):
feature_type = FeatureDataTypeHelper.get_type(data_type)
logging.info("Feature type: {}".format(str(feature_type)))
config_class = FeatureDataTypeHelper.get_feature_def_config_from_data_type(feature_type)
provider_class = FeatureDataTypeHelper.get_feature_def_provider_from_data_type(feature_type)
if config_json is not None:
config_instance = load_config_from_path(config_class, config_json)
else:
config_dict = FeatureDataTypeHelper.get_feature_def_default_config_dict_from_data_type(feature_type)
config_instance = config_class.from_dict(config_dict)
if not chromosome_array:
chromosome_array = [str(c) for c in range(1, 23)]
chromosome_array.extend(['X', 'Y'])
else:
chromosome_array = chromosome_array[0].split(",")
provider = provider_class(config_instance, chromosome_array=chromosome_array)
logging.info("Output CSV: {}".format(csv_path))
logging.info("Config: {}".format(str(config_instance)))
result = run_query(project_id, provider, config_instance)
save_csv(result, provider.get_mysql_schema(), csv_path, include_header=True)
@click.group()
def main():
pass
main.add_command(print_query)
main.add_command(run)
if __name__ == '__main__':
main()
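# Illustrative invocations (the project number, data type code and output path are
# placeholders, and command names may be hyphenated depending on the click version):
#     python feature_def_gen.py print_query GNAB
#     python feature_def_gen.py run 123456789 GNAB /tmp/gnab_defs.csv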
| 1.9375 | 2 |
pysot/models/backbone/resnet.py | eldercrow/tracking-pytorch | 0 | 12792919 | <gh_stars>0
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18 as _resnet18
from torchvision.models import resnet34 as _resnet34
from torchvision.models import resnet50 as _resnet50
__all__ = ['ResnetCGD', 'resnet18', 'resnet34', 'resnet50']
class NormLayer(nn.Module):
def __init__(self, kernel_size, padding=(0, 0), eps=1e-06):
'''
'''
super().__init__()
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if kernel_size[0] > 1 or kernel_size[1] > 1:
self.pool = nn.AvgPool2d(kernel_size, 1, padding)
else:
self.pool = None
self.eps = eps
def forward(self, x):
u_x = torch.mean(x, dim=1, keepdim=True)
u_x2 = torch.mean(x*x, dim=1, keepdim=True)
if self.pool is not None:
u_x = self.pool(u_x)
u_x2 = self.pool(u_x2)
v_x = F.relu(u_x2 - (u_x * u_x), inplace=True)
out = x / torch.sqrt(v_x + self.eps)
return out
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)
x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)
return x
class ResnetCGD(nn.Module):
'''
'''
def __init__(self, backbone='resnet18', pretrained=False):
'''
'''
super().__init__()
net = self._get_backbone(backbone)(pretrained=pretrained)
self.base_layers = nn.ModuleList(list(net.children())[:-2])
self.base_names = [l[0] for l in net.named_children()]
self.color_layer = nn.Sequential(
nn.AvgPool2d(4, stride=4),
nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
NormLayer(3, 1, 1),
nn.AvgPool2d(2, 2, padding=0),
)
self.grad_norm_layer = nn.Sequential(
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2),
NormLayer(3, 1, 1),
nn.AvgPool2d(2, 2, padding=0),
)
# self.downsample_layer = nn.Sequential(
# nn.ReLU(inplace=True),
# nn.MaxPool2d(2, stride=2),
# NormLayer(3, 1, 1),
# )
self.upsample_layer = nn.Sequential(
nn.ReLU(inplace=True),
DepthToSpace(2),
NormLayer(3, 1, 1),
)
self.mid_layer = nn.Sequential(
nn.ReLU(inplace=True),
NormLayer(3, 1, 1),
)
def _get_backbone(self, name):
if name == 'resnet18':
return _resnet18
elif name == 'resnet34':
return _resnet34
elif name == 'resnet50':
return _resnet50
else:
raise ValueError('Not supported backbone')
def forward(self, x):
'''
'''
# color layer
color_feat = self.color_layer(x)
# base layers
base_layers = {}
for n, layer in zip(self.base_names, self.base_layers):
x = layer(x)
base_layers[n] = x
# grad layer
grad_feat = self.grad_norm_layer(base_layers['layer1'])
# deep feature layer
# fd = self.downsample_layer(base_layers['layer2'])
fm = self.mid_layer(base_layers['layer3'])
fu = self.upsample_layer(base_layers['layer4'])
# deep_feat = torch.cat([fd, fm, fu], dim=1)
feat = torch.cat([color_feat, grad_feat, fm, fu], dim=1)
return feat
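# Illustrative shape check: every branch above is pooled or rearranged to 1/16 of the input
# resolution before the concatenation, so, assuming torchvision is available and
# pretrained=False, something like:
#     net = ResnetCGD(backbone='resnet18')
#     feat = net(torch.randn(1, 3, 256, 256))
# should yield a (1, C, 16, 16) tensor, where C depends on the backbone's channel widths.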
def resnet18(**kwargs):
return ResnetCGD(backbone='resnet18', **kwargs)
def resnet34(**kwargs):
return ResnetCGD(backbone='resnet34', **kwargs)
def resnet50(**kwargs):
    return ResnetCGD(backbone='resnet50', **kwargs)
| 2.28125 | 2 |
clients/abn_client.py | xahgmah/abnamro2ynab | 1 | 12792920 | import abna
import json
import settings
class ABNClient:
mutations = None
new_last_transaction = None
FILENAME = "last_transactions.json"
def __init__(self):
self.sess = abna.Session(settings.ABNA_ACCOUNT)
self.sess.login(settings.ABNA_PASSNUMBER, settings.ABNA_PASSWORD)
self.last_transactions = self.get_last_transaction_timestamp()
def get_mutations(self, iban):
mutations = self.sess.mutations(iban)
return self.get_only_new_mutations(iban, mutations)
def get_only_new_mutations(self, iban, mutations):
result = []
last_transaction_timestamp = int(self.last_transactions.get(iban, 0))
new_last_transaction = 0
for mutation in mutations['mutationsList']['mutations']:
transaction_timestamp = int(mutation['mutation']['transactionTimestamp'])
if transaction_timestamp > new_last_transaction:
new_last_transaction = transaction_timestamp
if transaction_timestamp > last_transaction_timestamp:
result.append(mutation['mutation'])
self.last_transactions[iban] = new_last_transaction
return result
def save_last_transaction_timestamp(self):
with open(self.FILENAME, 'w') as f:
json.dump(self.last_transactions, f)
def get_last_transaction_timestamp(self):
try:
with open(self.FILENAME, 'r') as f:
data = json.load(f)
return data
except FileNotFoundError:
return {}
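# Illustrative usage (assumes `settings` provides the ABNA_* credentials imported above;
# the IBAN is a placeholder):
#     client = ABNClient()
#     for mutation in client.get_mutations('NL00ABNA0123456789'):
#         print(mutation)
#     client.save_last_transaction_timestamp()  # persist the newest timestamps seen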
| 2.34375 | 2 |
Leetcode/0105. Construct Binary Tree from Preorder and Inorder Traversal.py | luckyrabbit85/Python | 1 | 12792921 | class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    def buildTree(self, preorder: list[int], inorder: list[int]) -> TreeNode:
if not preorder or not inorder:
return None
root = TreeNode(preorder[0])
mid = inorder.index(preorder[0])
root.left = self.buildTree(preorder[1 : mid + 1], inorder[:mid])
root.right = self.buildTree(preorder[mid + 1 :], inorder[mid + 1 :])
return root
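# Illustrative check using the classic example tree: preorder [3,9,20,15,7] with
# inorder [9,3,15,20,7] reconstructs root 3 with children 9 and 20.
if __name__ == "__main__":
    root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    print(root.val, root.left.val, root.right.val)  # 3 9 20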
| 3.640625 | 4 |
oo/pessoa.py | DouglasPortela0403/pythonbirds | 0 | 12792922 | <gh_stars>0
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=None):
self.nome = nome
self.idade = idade
self.filhos =list(filhos)
def cumprimentar(self):
return 'Olá'
@staticmethod
def metodo_estatico():
return 42
@classmethod
def nome_e_atributos_de_classe(cls):
        return f'{cls}, {cls.olhos}'
if __name__ == '__main__':
p = Pessoa(nome='Mariane')
print(Pessoa.cumprimentar(p))
print(p.cumprimentar())
print(p.nome)
print(p.idade)
p.nome = 'Douglas'
p.idade = 35
print(p.nome)
print(p.idade)
print(Pessoa.olhos)
print(p.__dict__)
print(Pessoa.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classe())
| 3.6875 | 4 |
autos/googleapi/__init__.py | hans-t/autos | 1 | 12792923 | from .drive import Drive
from .sheets import Sheets
| 0.980469 | 1 |
bin/nrcSpreadsheetScraper.py | SkyTruth/scraper | 2 | 12792924 | #!/usr/bin/env python
# This document is part of scraper
# https://github.com/SkyTruth/scraper
# =================================================================================== #
#
# The MIT License (MIT)
#
# Copyright (c) 2014 SkyTruth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =================================================================================== #
"""
Scraper for the "temporary" NRC incident spreadsheet
Sample command:
./bin/nrcSpreadsheetScraper.py --db-name test_skytruth --db-user `whoami` --db-host localhost
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import getpass
import os
from os.path import *
import sys
import urllib2
import psycopg2
import psycopg2.extras
import xlrd
#/* ======================================================================= */#
#/* Python setup
#/* ======================================================================= */#
if sys.version_info[0] == 2:
range = xrange
#/* ======================================================================= */#
#/* Build information
#/* ======================================================================= */#
__version__ = '0.1-dev'
__release__ = 'August 8, 2014'
__author__ = '<NAME>'
__source__ = 'https://github.com/SkyTruth/scraper'
__docname__ = basename(__file__)
__license__ = '''
The MIT License (MIT)
Copyright (c) 2014 SkyTruth
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#/* ======================================================================= */#
#/* Define print_usage() function
#/* ======================================================================= */#
def print_usage():
"""
Command line usage information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Usage:
{0} [--help-info] [options] [--no-download] [--download-url URL]
{1} [--db-connection-string] [--db-host hostname] [--db-user username]
{1} [--db-pass password] [--no-print-progress] [--print-queries]
{1} [--no-execute-queries] [--overwrite]
Options:
--db-connection-string Explicitly define a Postgres supported connection
string. All other --db-* options are ignored.
--db-host Hostname for the target database
[default: localhost]
--db-user Username used for database connection
[default: current user]
--db-name Name of target database
[default: skytruth]
--db-pass Password for database user
[default: '']
--download-url URL from which to download the input file
--no-download Don't download the input file
--overwrite-download If the --file-to-process already exists and --no-download
has not been specified, blindly overwrite the file.
Unless the user is specifying a specific target for
the download, this flag is not needed due the default
file name containing datetime down to the second.
--file-to-process Specify where the input file will be downloaded to
If used in conjunction with --no-download it is
assumed that the specified file already exists and
should be used for processing
[default: Current_<CURRENT_DATETIME>.xlsx]
--no-print-progress Don't print the progress indicator
--print-queries Print queries immediately before execution
Automatically turns off the progress indicator
--no-execute-queries Don't execute queries
""".format(__docname__, " " * len(__docname__)))
return 1
#/* ======================================================================= */#
#/* Define print_license() function
#/* ======================================================================= */#
def print_license():
"""
Print out license information
:return: 1 for exit code purposes
:rtype: int
"""
print(__license__)
return 1
#/* ======================================================================= */#
#/* Define print_help() function
#/* ======================================================================= */#
def print_help():
"""
Detailed help information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help: {0}
------{1}
{2}
""".format(__docname__, '-' * len(__docname__), main.__doc__))
return 1
#/* ======================================================================= */#
#/* Define print_help_info() function
#/* ======================================================================= */#
def print_help_info():
"""
Print a list of help related flags
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help flags:
--help More detailed description of this utility
--usage Arguments, parameters, flags, options, etc.
--version Version and ownership information
--license License information
""")
return 1
#/* ======================================================================= */#
#/* Define print_version() function
#/* ======================================================================= */#
def print_version():
"""
Print script version information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
%s version %s - released %s
""" % (__docname__, __version__, __release__))
return 1
#/* ======================================================================= */#
#/* Define dms2dd() function
#/* ======================================================================= */#
def dms2dd(degrees, minutes, seconds, quadrant):
"""
Convert degrees, minutes, seconds, quadrant to decimal degrees
:param degrees: coordinate degrees
:type degrees: int
:param minutes: coordinate minutes
:type minutes: int
:param seconds: coordinate seconds
:type seconds: int
:param quadrant: coordinate quadrant (N, E, S, W)
:type quadrant: str|unicode
:return: decimal degrees
:rtype: float
"""
illegal_vals = (None, '', u'')
for iv in illegal_vals:
if iv in (degrees, minutes, seconds, quadrant):
raise ValueError("ERROR: Illegal value: %s" % iv)
if quadrant.lower() not in ('n', 'e', 's', 'w'):
raise ValueError("ERROR: Invalid quadrant: %s" % quadrant)
output = int(degrees) + int(minutes) / 60 + int(seconds) / 3600
if quadrant.lower() in ('s', 'w'):
output *= -1
return output
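# Worked examples (values are arbitrary):
#     dms2dd(29, 56, 30, 'N')  ->  29 + 56/60 + 30/3600  =  29.941666...
#     dms2dd(90, 3, 15, 'W')   ->  -(90 + 3/60 + 15/3600) = -90.054166...
# Empty/None components or a quadrant outside N/E/S/W raise ValueError.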
#/* ======================================================================= */#
#/* Define column_names() function
#/* ======================================================================= */#
def column_names(sheet, formatter=str):
"""
Get the ordered column names from an XLRD sheet object
:param sheet: XLRD sheet object
:type sheet: xlrd.Sheet
:param formatter:
:type formatter: type|function
:return: list of column names
:rtype: list
"""
return [formatter(cell.value) for cell in sheet.row(0)]
#/* ======================================================================= */#
#/* Define sheet2dict() function
#/* ======================================================================= */#
def sheet2dict(sheet):
"""
Convert an XLRD sheet object into a list of rows, each structured as a dictionary
Example Input:
"Column1","Column2","Column3"
"Row 1 Val","Another Row 1 Val","Even More Row 1 Values"
"Row 2 Val","Another Row 2 Val","Even More Row 2 Values"
"Row 3 Val","Another Row 3 Val","Even More Row 3 Values"
Example Output:
[
{
'Column1': 'Row 1 Val',
'Column2': 'Another Row 1 Val',
'Column3': 'Even More Row 1 Values'
},
{
'Column1': 'Row 2 Val',
'Column2': 'Another Row 2 Val',
'Column3': 'Even more Row 2 Values'
}
{
'Column1': 'Row 3 Val',
'Column2': 'Another Row 3 Val',
'Column3': 'Even more Row 3 Values'
}
]
:param sheet: XLRD sheet object from xlrd.open_workbook('workbook').sheet_by_name('name')
:type sheet: xlrd.Sheet
:return: list of elements, each containing one row of the sheet as a dictionary
:rtype: dict
"""
output = []
columns = column_names(sheet)
for r in range(1, sheet.nrows): # Skip first row since it contains the header
output.append(dict((columns[c], sheet.cell_value(r, c)) for c in range(sheet.ncols)))
return output
#/* ======================================================================= */#
#/* Define report_exists() function
#/* ======================================================================= */#
def report_exists(**kwargs):
"""
Check to see if a report has already been submitted to a table
    :param reportnum: report number (SEQNOS) to check for
    :type reportnum: int|float
    :param db_cursor: cursor used to run the existence query
    :type db_cursor: psycopg2.cursor
    :param table: name of the target table (quoted if mixed case)
    :type table: str|unicode
    :param field: column containing the report number
    :type field: str|unicode
    :param schema: name of the target schema
    :type schema: str|unicode
    :return: True if the report has already been inserted
    :rtype: bool
"""
reportnum = kwargs['reportnum']
cursor = kwargs['db_cursor']
table = kwargs['table']
field = kwargs.get('field', 'reportnum')
schema = kwargs['schema']
# TODO: replace this hack with something better.
    # Perhaps have a report_exists method on each of the field map classes so we don't have to
# have the same existance test for all tables
if table=='"BotTaskStatus"':
cursor.execute("""SELECT * FROM %s.%s WHERE bot='NrcExtractor' AND task_id = %s""" % (schema, table, reportnum))
else:
cursor.execute("""SELECT * FROM %s.%s WHERE %s = %s""" % (schema, table, field, reportnum))
return len(cursor.fetchall()) > 0
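# Illustrative call (hypothetical report number; `cursor` is an open psycopg2 cursor):
#     if not report_exists(reportnum=1094214, db_cursor=cursor, schema='public',
#                          table='"NrcScrapedReport"'):
#         ...  # report not yet inserted, safe to process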
#/* ======================================================================= */#
#/* Define timestamp2datetime() function
#/* ======================================================================= */#
def timestamp2datetime(stamp, workbook_datemode, formatter='%Y-%m-%d %I:%M:%S'):
"""
Convert a float formatted date a Postgres supported timestamp
:param stamp: timestamp from XLRD reading a date encoded field
:type stamp: float
:param workbook_datemode: from xlrd.Workbook.datemode
:type workbook_datemode: int
:return: date capable of being inserted into Postgres timestamp field
:rtype: str|unicode
"""
dt = datetime(*xlrd.xldate_as_tuple(stamp, workbook_datemode))
return dt.strftime(formatter)
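# Illustrative conversion (made-up serial value): with the 1900-based datemode (0), the
# Excel serial 41821.5 is noon on 2014-07-01, so
#     timestamp2datetime(41821.5, 0)  ->  '2014-07-01 12:00:00'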
#/* ======================================================================= */#
#/* Define get_current_spreadsheet() function
#/* ======================================================================= */#
def download(url, destination, overwrite=False):
"""
Download a file
:param url: URL to download from
:type url: str|unicode
:param destination: target path and filename for downloaded file
:type destination: str|unicode
:param overwrite: specify whether or not an existing destination should be overwritten
:type overwrite: bool
:return: path to downloaded file
:rtype: str|unicode
"""
# Validate arguments
if not overwrite and isfile(destination):
raise ValueError("ERROR: Overwrite=%s and outfile exists: %s" % (overwrite, destination))
# Download
response = urllib2.urlopen(url)
with open(destination, 'w') as f:
f.write(response.read())
return destination
#/* ======================================================================= */#
#/* Define name_current_file() function
#/* ======================================================================= */#
def name_current_file(input_name):
"""
Generate the output Current.xlsx name for permanent archival
:param input_name: input file name (e.g. Current.xlsx)
:type input_name: str|unicode
:return: output formatted name
:rtype: str|unicode
"""
dt = datetime.now()
dt = dt.strftime("_%Y-%m-%d_%I:%M:%S")
input_split = input_name.split('.')
input_split[0] += dt
return '.'.join(input_split)
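# Example of the generated name (the timestamp reflects the moment of the call):
#     name_current_file('Current.xlsx')  ->  'Current_2014-08-08_02:15:30.xlsx'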
#/* ======================================================================= */#
#/* Define db_row_count() function
#/* ======================================================================= */#
def db_row_count(cursor, schema_table):
"""
    :param cursor: open cursor for the target database
:type cursor: psycopg2.cursor
:param schema_table: schema.table
:type schema_table: str|unicode
:return: number of rows in the specified schema.table
:rtype: int
"""
query = """SELECT COUNT(1) FROM %s;""" % schema_table
cursor.execute(query)
result = cursor.fetchall()
return int(result[0][0])
#/* ======================================================================= */#
#/* Define process_field_map() function
#/* ======================================================================= */#
def process_field_map(**kwargs):
db_cursor = kwargs['db_cursor']
uid = kwargs['uid']
workbook = kwargs['workbook']
row = kwargs['row']
db_null_value = kwargs['db_null_value']
map_def = kwargs['map_def']
sheet = kwargs['sheet']
all_field_maps = kwargs['all_field_maps']
sheet_seqnos_field = kwargs['sheet_seqnos_field']
db_write_mode = kwargs['db_write_mode']
print_queries = kwargs['print_queries']
execute_queries = kwargs['execute_queries']
raw_sheet_cache = kwargs['raw_sheet_cache']
db_seqnos_field = kwargs['db_seqnos_field']
if map_def['processing'] is None:
try:
value = row[map_def['column']]
except KeyError:
# UID doesn't appear in the specified sheet - populate a NULL value
value = db_null_value
# Pass all necessary information to the processing function in order to get a result
else:
value = map_def['processing']['function'](db_cursor=db_cursor, uid=uid,
workbook=workbook,
row=row, db_null_value=db_null_value,
map_def=map_def,
sheet=sheet,
all_field_maps=all_field_maps,
sheet_seqnos_field=sheet_seqnos_field,
db_write_mode=db_write_mode,
print_queries=print_queries,
execute_queries=execute_queries,
raw_sheet_cache=raw_sheet_cache,
db_seqnos_field=db_seqnos_field)
return value
#/* ======================================================================= */#
#/* Define NrcScrapedReportField() class
#/* ======================================================================= */#
class NrcScrapedReportFields(object):
"""
Some fields in the NRC spreadsheet do not map directly to a column in the
database. These fields require an additional processing step that is
highly specific and cannot be re-used. The field map definition contains
all of the additional arguments and information necessary to execute one
of these processing functions.
A class is used as a namespace to provide better organization and to
prevent having to name functions something like:
'get_NrcScrapedReport_material_name_field'
"""
#/* ----------------------------------------------------------------------- */#
#/* Define material_name() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def material_name(**kwargs):
# Parse arguments
map_def = kwargs['map_def']
print_queries = kwargs['print_queries']
execute_queries = kwargs['execute_queries']
extras_field_maps = map_def['processing']['args']['extras_field_maps']
db_write_mode = kwargs['db_write_mode']
uid = kwargs['uid']
sheet_seqnos_field = kwargs['sheet_seqnos_field']
db_cursor = kwargs['db_cursor']
raw_sheet_cache = kwargs['raw_sheet_cache']
db_seqnos_field = kwargs['db_seqnos_field']
db_null_value = kwargs['db_null_value']
sheet_cache = kwargs['sheet_cache']
# TODO: This currently only reads rows from the sheet specified in the field map and NOT the extra field maps
# specified in the processing args. Currently not a problem since
# Build query
initial_value_to_be_returned = None
for row in raw_sheet_cache[map_def['sheet_name']]:
extra_query_fields = []
extra_query_values = []
# Found a matching row
if row[sheet_seqnos_field] == uid:
# The first instance goes into the table specified in the field map
# This query must be handled by the parent process so this value is
# returned at the very end
if initial_value_to_be_returned is None:
initial_value_to_be_returned = row[map_def['column']]
# ALL occurrences are sent to a different table - specified in the field map arguments
for e_db_map in extras_field_maps:
for e_map_def in extras_field_maps[e_db_map]:
value = process_field_map(db_cursor=db_cursor, uid=uid, workbook=kwargs['workbook'],
row=row, db_null_value=db_null_value, map_def=e_map_def,
sheet=sheet_cache[e_map_def['sheet_name']],
all_field_maps=kwargs['all_field_maps'],
sheet_seqnos_field=sheet_seqnos_field, db_write_mode=db_write_mode,
print_queries=print_queries, execute_queries=execute_queries,
raw_sheet_cache=raw_sheet_cache,
db_seqnos_field=db_seqnos_field)
# Make sure the value is properly quoted
if value not in (None, '', u'', db_null_value):
if isinstance(value, str) or isinstance(value, unicode):
value = value.replace("'", '"') # Single quotes cause problems on insert
try:
if e_map_def['db_field_width']:
value = value[:e_map_def['db_field_width']]
except KeyError:
pass
extra_query_values.append("'%s'" % value) # String value
else:
extra_query_values.append("%s" % value) # int|float value
extra_query_fields.append(e_map_def['db_field'])
# Do something with the query
query = """%s %s.%s (%s) VALUES (%s);""" % (db_write_mode,
e_map_def['db_schema'],
e_map_def['db_table'],
', '.join(extra_query_fields),
', '.join(extra_query_values))
if print_queries:
print("")
print(query)
if execute_queries:
db_cursor.execute(query)
# This processing function handled ALL inserts - tell parent process there's nothing left to do
return initial_value_to_be_returned
#/* ----------------------------------------------------------------------- */#
#/* Define full_report_url() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def full_report_url(**kwargs):
"""
Default value
"""
return 'http://nrc.uscg.mil/'
#/* ----------------------------------------------------------------------- */#
#/* Define materials_url() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def materials_url(**kwargs):
"""
Default value
"""
return NrcScrapedReportFields.full_report_url()
#/* ----------------------------------------------------------------------- */#
#/* Define time_stamp() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def time_stamp(**kwargs):
"""
Required to insert a NULL value
"""
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define ft_id() function
#/* ----------------------------------------------------------------------- */#
@staticmethod
def ft_id(**kwargs):
"""
Required to insert a NULL value
"""
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define _datetime_caller() function
#/* ----------------------------------------------------------------------- */#
@staticmethod
def _datetime_caller(**kwargs):
"""
Several methods require converting a timestamp to a Postgres supported
        timestamp format. This method eliminates repetition.
:param workbook:
:type workbook:
:param row:
:type row:
:param map_def:
:type map_def:
:rtype:
:return:
"""
# TODO: Use 24 hour time
workbook = kwargs['workbook']
row = kwargs['row']
map_def = kwargs['map_def']
return timestamp2datetime(row[map_def['column']], workbook.datemode)
#/* ----------------------------------------------------------------------- */#
    #/* Define recieved_datetime() function
#/* ----------------------------------------------------------------------- */#
@staticmethod
def recieved_datetime(**kwargs):
"""
See documentation for function called in the return statement
"""
return NrcScrapedReportFields._datetime_caller(**kwargs)
#/* ----------------------------------------------------------------------- */#
#/* Define incident_datetime() function
#/* ----------------------------------------------------------------------- */#
@staticmethod
def incident_datetime(**kwargs):
"""
See documentation for function called in the return statement
"""
return NrcScrapedReportFields._datetime_caller(**kwargs)
#/* ----------------------------------------------------------------------- */#
    #/* Define calltype() function
#/* ----------------------------------------------------------------------- */#
@staticmethod
def calltype(**kwargs):
"""
        Database is expecting the full word 'INCIDENT' rather than the spreadsheet's 'INC' abbreviation
"""
map_def = kwargs['map_def']
row = kwargs['row']
value = row[map_def['column']]
if value == 'INC':
value = 'INCIDENT'
return value
#/* ======================================================================= */#
#/* Define NrcParsedReportFields() class
#/* ======================================================================= */#
class NrcParsedReportFields(object):
"""
Some fields in the NRC spreadsheet do not map directly to a column in the
database. These fields require an additional processing step that is
highly specific and cannot be re-used. The field map definition contains
all of the additional arguments and information necessary to execute one
of these processing functions.
A class is used as a namespace to provide better organization and to
prevent having to name functions something like:
'get_NrcScrapedReport_material_name_field'
"""
#/* ----------------------------------------------------------------------- */#
#/* Define areaid() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def areaid(**kwargs):
# TODO: Implement - currently returning NULL
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define blockid() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def blockid(**kwargs):
# TODO: Implement - currently returning NULL
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define platform_letter() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def platform_letter(**kwargs):
# TODO: Implement - currently returning NULL
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define _sheen_handler() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def _sheen_handler(**kwargs):
"""
        Several converters require the same sheen size unit normalization, which is centralized here
"""
row = kwargs['row']
map_def = kwargs['map_def']
db_null_value = kwargs['db_null_value']
value = row[map_def['column']]
unit = row[map_def['processing']['args']['unit_field']]
# If the value is not a float, change it to nothing so the next test fails
try:
value = float(value)
except ValueError:
value = ''
# No sheen size - nothing to do
if value == '' or unit == '':
return db_null_value
# Found a sheen size and unit - perform conversion
else:
multipliers = {
'F': 1,
'FE': 1,
'FEET': 1,
'IN': 0.0833333,
'INCHES': 0.0833333,
'KILOMETERS': 3280.84,
'METER': 3.28084,
'METERS': 3.28084,
'MI': 5280,
'MIL': 5280,
'MILES': 5280,
'NI': 5280, # Assumed mistyping of 'MI'
'UN': 0.0833333, # Assumed mistyping of 'IN'
'YARDS': 3
}
# Database is expecting to handle the normalization by reading from a field containing "1.23 METERS"
# This function takes care of that but must still supply the expected post-normalization format
if unit.upper() not in multipliers:
return db_null_value
return unicode(multipliers[unit.upper()] * value) + ' FEET'
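    # Worked example of the normalization above (made-up row values): a sheen reported as
    # 2 METERS becomes u'6.56168 FEET'; a missing value or unrecognised unit falls back to
    # the configured db_null_value.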
#/* ----------------------------------------------------------------------- */#
#/* Define sheen_size_length() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def sheen_size_length(**kwargs):
"""
See called function documentation
"""
return NrcParsedReportFields._sheen_handler(**kwargs)
#/* ----------------------------------------------------------------------- */#
#/* Define sheen_size_width() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def sheen_size_width(**kwargs):
"""
See called function documentation
"""
return NrcParsedReportFields._sheen_handler(**kwargs)
#/* ----------------------------------------------------------------------- */#
#/* Define affected_area() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def affected_area(**kwargs):
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define time_stamp() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def time_stamp(**kwargs):
"""
Required to insert a NULL value
"""
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define ft_id() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def ft_id(**kwargs):
"""
Required to insert a NULL value
"""
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define _coord_formatter() protected static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def _coord_formatter(**kwargs):
"""
The latitude() and longitude() methods require the same general
logic.
"""
try:
row = kwargs['row']
col_deg = kwargs['map_def']['processing']['args']['col_degrees']
col_min = kwargs['map_def']['processing']['args']['col_minutes']
col_sec = kwargs['map_def']['processing']['args']['col_seconds']
col_quad = kwargs['map_def']['processing']['args']['col_quadrant']
output = dms2dd(row[col_deg], row[col_min], row[col_sec], row[col_quad])
except (ValueError, KeyError):
output = kwargs['db_null_value']
return output
#/* ----------------------------------------------------------------------- */#
#/* Define latitude() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def latitude(**kwargs):
"""
Convert coordinates from DMS to DD
"""
return NrcParsedReportFields._coord_formatter(**kwargs)
#/* ----------------------------------------------------------------------- */#
#/* Define longitude() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def longitude(**kwargs):
"""
Convert coordinates from DMS to DD
"""
return NrcParsedReportFields._coord_formatter(**kwargs)
#/* ======================================================================= */#
#/* Define NrcScrapedMaterialFields() class
#/* ======================================================================= */#
class NrcScrapedMaterialFields(object):
"""
Some fields in the NRC spreadsheet do not map directly to a column in the
database. These fields require an additional processing step that is
highly specific and cannot be re-used. The field map definition contains
all of the additional arguments and information necessary to execute one
of these processing functions.
A class is used as a namespace to provide better organization and to
prevent having to name functions something like:
'get_NrcScrapedReport_material_name_field'
"""
#/* ----------------------------------------------------------------------- */#
#/* Define ft_id() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def ft_id(**kwargs):
return kwargs.get('db_null_value', None)
#/* ----------------------------------------------------------------------- */#
#/* Define st_id() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def st_id(**kwargs):
return kwargs.get('db_null_value', None)
#/* ======================================================================= */#
#/* Define BotTaskStatusFields() class
#/* ======================================================================= */#
class BotTaskStatusFields(object):
"""
Some fields in the NRC spreadsheet do not map directly to a column in the
database. These fields require an additional processing step that is
highly specific and cannot be re-used. The field map definition contains
all of the additional arguments and information necessary to execute one
of these processing functions.
A class is used as a namespace to provide better organization and to
prevent having to name functions something like:
'get_NrcScrapedReport_material_name_field'
"""
#/* ----------------------------------------------------------------------- */#
#/* Define status() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def status(**kwargs):
return 'DONE'
#/* ----------------------------------------------------------------------- */#
#/* Define bot() static method
#/* ----------------------------------------------------------------------- */#
@staticmethod
def bot(**kwargs):
return 'NrcExtractor'
#/* ======================================================================= */#
#/* Define main() function
#/* ======================================================================= */#
def main(args):
"""
Main routine to parse, transform, and insert Current.xlsx into the tables
used by the Alerts system.
http://nrc.uscg.mil/FOIAFiles/Current.xlsx
Before doing any transformations, a set of SEQNOS/reportnum's are gathered
from one of the workbook's sheets. The default column in 'CALLS' but can be
specified by the user. This set of ID's are treated as primary keys and drive
processing.
Rather than process the input document sheet by sheet and row by row, a set
of field map definitions are declared to describe which fields in which
sheets should be inserted into which table in which schema. Each field map
is applied against each ID which means that if ID number 1234 is being
processed, the bare minimum field map example below states that whatever
value is in sheet 'CALLS' and column 'RESPONSIBLE_COMPANY' can be sent to
public."NrcScrapedReport".suspected_responsible_company The more complicated
field map states that a specific function must do more of the heavy lifting.
Field maps are grouped by table and center around the target field. There
should be one map for every field in a table. The structure for field maps
is roughly as follows:
All field maps = {
'table_name': [
{
'db_table': Name of target table,
'db_field': Name of target field,
'db_field_width': Maximum width for this field - used in string slicing
'db_schema': Name of target schema,
'sheet_name': Name of source sheet in input file,
'column': Name of source column in sheet_name,
'processing': { # Optional - should be set to None if not used
'function': Callable object responsible for additional sub-processing
'args': { # Essentially kwargs
'Arg1': parameter,
'Arg2': ...
}
}
},
{
'db_table': Name of target table,
'db_field': Name of target field,
'db_schema': Name of target schema,
'sheet_name': Name of source sheet in input file,
'column': Name of source column in sheet_name,
'processing': { # Optional - should be set to None if not used
'function': Callable object responsible for additional sub-processing
'args': { # Essentially kwargs
'Arg1': parameter,
'Arg2': ...
}
}
},
],
'TABLE_NAME': [
{
'db_table': Name of target table,
'db_field': Name of target field,
'db_schema': Name of target schema,
'sheet_name': Name of source sheet in input file,
'column': Name of source column in sheet_name,
'processing': { # Optional - should be set to None if not used
'function': Callable object responsible for additional sub-processing
'args': { # Essentially kwargs
'Arg1': parameter,
'Arg2': ...
}
}
}
],
}
The order of operations for a given ID is as follows:
1. Get an ID
2. Get a set of field maps for one target table
3. Process all field maps and assemble an insert query
4. Execute the insert statement
5. Repeat steps 2-4 until all tables have been processed
Example bare minimum field map:
The field map below shows that the value in the 'RESPONSIBLE_COMPANY'
column in the 'CALLS' sheet can be sent directly to
public."NrcScrapedReport".suspected_responsible_company without any
additional processing. Note the quotes around the table name.
{
'db_table': '"NrcScrapedReport"',
'db_field': 'suspected_responsible_company',
'db_field_width': 32,
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': 'RESPONSIBLE_COMPANY',
'processing': None
},
Example field map with all options:
This field map shows that no specific column contains the value required
for public."NrcParsedReport".longitude Instead, some information must be
passed to the NrcParsedReportFields.longitude() function where the actual
processing happens. Field maps using additional processing always receive
the following kwargs:
all_field_maps All field maps with keys set to schema.table
db_cursor The cursor to be used for all queries
db_null_value Value to use for NULL
db_seqnos_field The reportnum field in the database
db_write_mode The first part of the SQL statement for writes
(e.g. INSERT INTO)
execute_queries Specifies whether or not queries should actually
be executed
map_def Current map definition being processed (example
below)
print_queries Specifies whether or not queries should be printed
as they are executed
raw_sheet_cache Structured similar to the normal sheet cache,
but with a list of rows instead of a dictionary
containing reportnums as keys and rows as values
row The current row being processed - structured
just like a csv.DictReader row
sheet The entire sheet from which the row was extracted
as described in the field map
sheet_seqnos_field The field in all sheets containing the reportnum
uid The current SEQNOS/reportnum being processed
workbook XLRD workbook object
The callable object specified in map_def['processing']['function'] is
responsible for ALL queries. The processing functions are intended
    to return a final value to be inserted into the target field described
    in the field map, but this behavior is not required. If the function
    itself handles all queries internally, it can return '__NO_QUERY__' in
    order to be excluded from the insert statement for that table.
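    For illustration only, a hypothetical processing callable could look like
    the sketch below ('EXAMPLE_COLUMN' is made up and not a real column):

        def example_processor(row=None, db_null_value='NULL', **kwargs):
            # Derive a value from the cached sheet row, or fall back to NULL
            if row is None or not row.get('EXAMPLE_COLUMN'):
                return db_null_value
            return row['EXAMPLE_COLUMN']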
{
'db_table': '"NrcParsedReport"',
'db_field': 'longitude',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.longitude,
'args': {
'col_degrees': 'LONG_DEG',
'col_minutes': 'LONG_MIN',
'col_seconds': 'LONG_SEC',
'col_quadrant': 'LONG_QUAD'
}
}
},
:param args: arguments from the commandline (sys.argv[1:] in order to drop the script name)
:type args: list
:return: 0 on success and 1 on error
:rtype: int
"""
#/* ----------------------------------------------------------------------- */#
#/* Define Field Maps
#/* ----------------------------------------------------------------------- */#
    field_map_order = ['public."NrcScrapedReport"', 'public."NrcParsedReport"', 'public."BotTaskStatus"']
field_map = {
'public."NrcScrapedReport"': [
{
'db_table': '"NrcScrapedReport"',
'db_field': 'reportnum',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': 'SEQNOS',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'recieved_datetime',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': 'DATE_TIME_RECEIVED',
'processing': {
'function': NrcScrapedReportFields.recieved_datetime
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'calltype',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': 'CALLTYPE',
'processing': {
'function': NrcScrapedReportFields.calltype
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'suspected_responsible_company',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': 'RESPONSIBLE_COMPANY',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'description',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'DESCRIPTION_OF_INCIDENT',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'incident_datetime',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'INCIDENT_DATE_TIME',
'processing': {
'function': NrcScrapedReportFields.incident_datetime
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'incidenttype',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'TYPE_OF_INCIDENT',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'cause',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'INCIDENT_CAUSE',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'location',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'LOCATION_ADDRESS',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'state',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'LOCATION_STATE',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'nearestcity',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'LOCATION_NEAREST_CITY',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'county',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'LOCATION_COUNTY',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'medium_affected',
'db_schema': 'public',
'sheet_name': 'INCIDENT_DETAILS',
'column': 'MEDIUM_DESC',
'processing': None
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'material_name',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'NAME_OF_MATERIAL',
'processing': {
'function': NrcScrapedReportFields.material_name,
'args': {
'extras_table': '"NrcScrapedMaterial"',
'extras_schema': 'public',
'extras_field_maps': {
'public."NrcScrapedReport"': [
{
'db_table': "NrcScrapedMaterial",
'db_field': 'reportnum',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'SEQNOS',
'processing': None
},
{
'db_table': '"NrcScrapedMaterial"',
'db_field': 'name',
'db_field_width': 32,
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'NAME_OF_MATERIAL',
'processing': None
},
{
'db_table': "NrcScrapedMaterial",
'db_field': 'reached_water',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'IF_REACHED_WATER',
'processing': None
},
{
'db_table': '"NrcScrapedMaterial"',
'db_field': 'amt_in_water',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'AMOUNT_IN_WATER',
'processing': None
},
{
'db_table': '"NrcScrapedMaterial"',
'db_field': 'amt_in_water_unit',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'UNIT_OF_MEASURE_REACH_WATER',
'processing': None
},
{
'db_table': '"NrcScrapedMaterial"',
'db_field': 'chris_code',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'CHRIS_CODE',
'processing': None
},
{ # TODO: Not populated
'db_table': '"NrcScrapedMaterial"',
'db_field': 'amount',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'AMOUNT_OF_MATERIAL',
'processing': None
},
{ # TODO: Not populated
'db_table': '"NrcScrapedMaterial"',
'db_field': 'unit',
'db_schema': 'public',
'sheet_name': 'MATERIAL_INVOLVED',
'column': 'UNIT_OF_MEASURE',
'processing': None
},
{
'db_table': '"NrcScrapedMaterial"',
'db_field': 'ft_id',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcScrapedMaterialFields.ft_id
}
},
{
'db_table': '"NrcScrapedMaterial"',
'db_field': 'st_id',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcScrapedMaterialFields.st_id
}
}
]
}
}
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'full_report_url',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcScrapedReportFields.full_report_url
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'materials_url',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcScrapedReportFields.materials_url
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'time_stamp',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcScrapedReportFields.time_stamp
}
},
{
'db_table': '"NrcScrapedReport"',
'db_field': 'ft_id',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcScrapedReportFields.ft_id
}
}
],
'public."NrcParsedReport"': [
{
'db_table': '"NrcParsedReport"',
'db_field': 'reportnum',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'SEQNOS',
'processing': None
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'latitude',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.latitude,
'args': {
'col_degrees': 'LAT_DEG',
'col_minutes': 'LAT_MIN',
'col_seconds': 'LAT_SEC',
'col_quadrant': 'LAT_QUAD'
}
}
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'longitude',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.longitude,
'args': {
'col_degrees': 'LONG_DEG',
'col_minutes': 'LONG_MIN',
'col_seconds': 'LONG_SEC',
'col_quadrant': 'LONG_QUAD'
}
}
},
{ # TODO: Implement - check notes about which column to use
'db_table': '"NrcParsedReport"',
'db_field': 'areaid',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.areaid
}
},
{ # TODO: Implement - check notes about which column to use
'db_table': '"NrcParsedReport"',
'db_field': 'blockid',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.blockid
}
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'zip',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'LOCATION_ZIP',
'processing': None
},
{ # TODO: Implement - check notes about which column to use
'db_table': '"NrcParsedReport"',
'db_field': 'platform_letter',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.platform_letter
}
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'sheen_size_length',
'db_schema': 'public',
'sheet_name': 'INCIDENT_DETAILS',
'column': 'SHEEN_SIZE_LENGTH',
'processing': {
'function': NrcParsedReportFields.sheen_size_length,
'args': {'unit_field': 'SHEEN_SIZE_LENGTH_UNITS'}
}
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'sheen_size_width',
'db_schema': 'public',
'sheet_name': 'INCIDENT_DETAILS',
'column': 'SHEEN_SIZE_WIDTH',
'processing': {
'function': NrcParsedReportFields.sheen_size_width,
'args': {'unit_field': 'SHEEN_SIZE_WIDTH_UNITS'}
}
},
{ # TODO: Implement
'db_table': '"NrcParsedReport"',
'db_field': 'affected_area',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': NrcParsedReportFields.affected_area,
}
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'county',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'LOCATION_COUNTY',
'processing': None
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'time_stamp',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcParsedReportFields.time_stamp,
}
},
{
'db_table': '"NrcParsedReport"',
'db_field': 'ft_id',
'db_schema': 'public',
'sheet_name': 'CALLS',
'column': None,
'processing': {
'function': NrcParsedReportFields.ft_id,
}
}
],
'public."BotTaskStatus"': [
{
'db_table': '"BotTaskStatus"',
'db_field': 'task_id',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': 'SEQNOS',
'processing': None
},
{
'db_table': '"BotTaskStatus"',
'db_field': 'status',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': BotTaskStatusFields.status,
}
},
{
'db_table': '"BotTaskStatus"',
'db_field': 'bot',
'db_schema': 'public',
'sheet_name': 'INCIDENT_COMMONS',
'column': None,
'processing': {
'function': BotTaskStatusFields.bot,
}
},
],
}
#/* ----------------------------------------------------------------------- */#
#/* Define Defaults
#/* ----------------------------------------------------------------------- */#
# Database
db_connection_string = None
db_host = 'localhost'
db_name = 'skytruth'
db_user = getpass.getuser()
db_pass = ''
db_write_mode = 'INSERT INTO'
db_seqnos_field = 'reportnum'
db_null_value = 'NULL'
sheet_seqnos_field = 'SEQNOS'
# NRC file I/O
download_url = 'http://nrc.uscg.mil/FOIAFiles/Current.xlsx'
file_to_process = os.getcwd() + sep + name_current_file(basename(download_url))
overwrite_downloaded_file = False
download_file = True
process_subsample = None
process_subsample_min = 0
# User feedback settings
print_progress = True
print_queries = False
execute_queries = True
final_table_counts = ['public."NrcParsedReport"', 'public."NrcScrapedMaterial"', 'public."NrcScrapedReport"']
#/* ----------------------------------------------------------------------- */#
#/* Parse arguments
#/* ----------------------------------------------------------------------- */#
i = 0
arg_error = False
while i < len(args):
try:
arg = args[i]
# Help arguments
            if arg in ('--help-info', '-help-info', '--helpinfo', '-helpinfo'):
return print_help_info()
elif arg in ('--help', '-help', '--h', '-h'):
return print_help()
elif arg in ('--usage', '-usage'):
return print_usage()
elif arg in ('--version', '-version'):
return print_version()
            elif arg in ('--license', '-license'):
return print_license()
# Spreadsheet I/O
elif arg == '--no-download':
i += 1
download_file = False
elif arg == '--download-url':
i += 2
download_url = args[i - 1]
elif arg == '--file-to-process':
i += 2
file_to_process = abspath(args[i - 1])
# Database connection
elif arg == '--db-connection-string':
i += 2
db_connection_string = args[i - 1]
elif arg == '--db-host':
i += 2
db_host = args[i - 1]
elif arg == '--db-user':
i += 2
db_user = args[i - 1]
elif arg == '--db-name':
i += 2
db_name = args[i - 1]
elif arg == '--db-pass':
i += 2
db_pass = args[i - 1]
# Commandline print-outs
elif arg == '--no-print-progress':
i += 1
print_progress = False
elif arg == '--print-queries':
i += 1
print_queries = True
print_progress = False
elif arg == '--no-execute-queries':
i += 1
execute_queries = False
# Additional options
elif arg == '--overwrite-download':
i += 1
overwrite_downloaded_file = True
elif arg == '--subsample':
i += 2
process_subsample = args[i - 1]
elif arg == '--subsample-min':
i += 2
process_subsample_min = args[i - 1]
# Positional arguments and errors
else:
# Invalid argument
i += 1
arg_error = True
print("ERROR: Invalid argument: %s" % arg)
# This catches three conditions:
# 1. The last argument is a flag that requires parameters but the user did not supply the parameter
# 2. The arg parser did not properly consume all parameters for an argument
# 3. The arg parser did not properly iterate the 'i' variable
except IndexError:
i += 1
arg_error = True
print("ERROR: An argument has invalid parameters")
#/* ----------------------------------------------------------------------- */#
#/* Adjust options
#/* ----------------------------------------------------------------------- */#
# Database - must be done here in order to allow the user to overwrite the default credentials and settings
if db_connection_string is None:
db_connection_string = "host='%s' dbname='%s' user='%s' password='%s'" % (db_host, db_name, db_user, db_pass)
#/* ----------------------------------------------------------------------- */#
#/* Validate parameters
#/* ----------------------------------------------------------------------- */#
bail = False
    # Make sure arguments were properly parsed
if arg_error:
bail = True
print("ERROR: Did not successfully parse arguments")
# Make sure the downloaded file is not going to be accidentally deleted
if download_file and not overwrite_downloaded_file and isfile(file_to_process):
bail = True
print("ERROR: Overwrite=%s and download target exists: %s" % (overwrite_downloaded_file, file_to_process))
# Make sure the user has write permission to the target directory
if not os.access(dirname(file_to_process), os.W_OK):
bail = True
print("ERROR: Need write permission for download directory: %s" % dirname(file_to_process))
# Handle subsample
if process_subsample is not None:
try:
process_subsample = int(process_subsample)
process_subsample_min = int(process_subsample_min)
except ValueError:
bail = True
print("ERROR: Invalid subsample or subsample min - must be an int: %s" % process_subsample)
# Exit if any problems were encountered
if bail:
return 1
#/* ----------------------------------------------------------------------- */#
#/* Prep DB connection and XLRD workbook for processing
#/* ----------------------------------------------------------------------- */#
# Test connection
print("Connecting to DB: %s" % db_connection_string)
try:
connection = psycopg2.connect(db_connection_string)
connection.close()
except psycopg2.OperationalError as e:
print("ERROR: Could not connect to database: %s" % db_connection_string)
print(" Postgres Error: %s" % e)
return 1
#/* ----------------------------------------------------------------------- */#
#/* Download the spreadsheet
#/* ----------------------------------------------------------------------- */#
if download_file:
print("Downloading: %s" % download_url)
print("Target: %s" % file_to_process)
try:
download(download_url, file_to_process)
        except urllib2.URLError as e:
print("ERROR: Could not download from URL: %s" % download_url)
print(" URLLIB Error: %s" % e)
return 1
# Prep workbook
print("Opening workbook: %s" % file_to_process)
with xlrd.open_workbook(file_to_process, 'r') as workbook:
# Establish a DB connection and turn on dict reading
db_conn = psycopg2.connect(db_connection_string)
db_conn.autocommit = True
db_cursor = db_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
#/* ----------------------------------------------------------------------- */#
#/* Validate field map definitions
#/* ----------------------------------------------------------------------- */#
validate_field_map_error = False
print("Validating field mapping ...")
for db_map in field_map_order:
# Check each field definition in the set of mappings
for map_def in field_map[db_map]:
# Attempt to get the sheet to test
if map_def['sheet_name'] is not None and map_def['column'] is not None:
try:
sheet = workbook.sheet_by_name(map_def['sheet_name'])
if map_def['column'] not in column_names(sheet):
validate_field_map_error = True
print("ERROR: Can't find source: %s -> %s.%s"
% (file_to_process, map_def['sheet_name'], map_def['column']))
# Could not get the sheet to test
except xlrd.XLRDError:
validate_field_map_error = True
print("ERROR: Sheet does not exist: %s" % map_def['sheet_name'])
# Make sure schema and table exist in the DB
query = "SELECT * FROM information_schema.columns WHERE table_schema = '%s' AND table_name = '%s' AND column_name = '%s';" \
% (map_def['db_schema'], map_def['db_table'].replace('"', ''), map_def['db_field'])
db_cursor.execute(query)
results = db_cursor.fetchall()
if not results:
validate_field_map_error = True
print("ERROR: Invalid DB target: %s.%s.%s.%s.%s"
% (db_host, db_name, map_def['db_schema'], map_def['db_table'], map_def['db_field']))
# Encountered an error validating the field map
if validate_field_map_error:
db_cursor.close()
db_conn.close()
return 1
#/* ----------------------------------------------------------------------- */#
#/* Cache initial DB row counts for final stat printing
#/* ----------------------------------------------------------------------- */#
initial_db_row_counts = {ts: db_row_count(db_cursor, ts) for ts in final_table_counts}
#/* ----------------------------------------------------------------------- */#
#/* Additional prep
#/* ----------------------------------------------------------------------- */#
# Cache all sheets needed by the field definitions as dictionaries
print("Caching sheets ...")
sheet_cache = {}
raw_sheet_cache = {}
for sname in workbook.sheet_names():
if sname not in sheet_cache:
try:
sheet_dict = sheet2dict(workbook.sheet_by_name(sname))
raw_sheet_cache[sname] = sheet_dict
sheet_cache[sname] = {row[sheet_seqnos_field]: row for row in sheet_dict}
except IndexError:
# Sheet was empty
pass
# Get a list of unique report id's
unique_report_ids = []
for s_name, s_rows in sheet_cache.iteritems():
for reportnum in s_rows.keys():
unique_report_ids.append(reportnum)
unique_report_ids = list(set(unique_report_ids))
# Grab a subsample if necessary
if process_subsample is not None and process_subsample < len(unique_report_ids):
            # TODO: Delete constraining line - needed to verify everything was working
unique_report_ids = [i for i in unique_report_ids if i > 1074683]
unique_report_ids.sort()
unique_report_ids = unique_report_ids[process_subsample_min:process_subsample_min + process_subsample]
#/* ----------------------------------------------------------------------- */#
#/* Process data
#/* ----------------------------------------------------------------------- */#
# Loops:
# Get a report number to process
# Get a set of field maps for a single table to process
# Get a field map to process
print("Processing workbook ...")
num_ids = len(unique_report_ids)
uid_i = 0
# Loop through the primary keys
for uid in unique_report_ids:
# Update user
uid_i += 1
if print_progress:
sys.stdout.write("\r\x1b[K" + " %s/%s" % (uid_i, num_ids))
sys.stdout.flush()
# Get field maps for one table
for db_map in field_map_order:
query_fields = []
query_values = []
# If the report already exists, in the target table, skip everything else
_schema, _table = db_map.split('.')
if not report_exists(db_cursor=db_cursor, reportnum=uid, schema=_schema, table=_table):
# Get a single field map to process
for map_def in field_map[db_map]:
# Don't need to process the reportnum information since it was added to the initial query above
if map_def['db_field'] == db_seqnos_field:
query_fields = [db_seqnos_field]
query_values = [str(uid)]
else:
# Get the row for this sheet
try:
row = sheet_cache[map_def['sheet_name']][uid]
except KeyError:
row = None
# If no additional processing is required, simply grab the value from the sheet and add to the query
if row is not None:
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Value goes from input file straight into DB
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
if map_def['processing'] is None:
try:
value = row[map_def['column']]
except KeyError:
# UID doesn't appear in the specified sheet - populate a NULL value
value = db_null_value
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Value with additional processing
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
# Pass all necessary information to the processing function in order to get a result
else:
value = map_def['processing']['function'](db_cursor=db_cursor, uid=uid, workbook=workbook,
row=row, db_null_value=db_null_value,
map_def=map_def,
sheet=sheet_cache[map_def['sheet_name']],
all_field_maps=field_map,
sheet_seqnos_field=sheet_seqnos_field,
db_write_mode=db_write_mode,
print_queries=print_queries,
execute_queries=execute_queries,
raw_sheet_cache=raw_sheet_cache,
db_seqnos_field=db_seqnos_field,
sheet_cache=sheet_cache)
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Add this field map to the insert statement
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
# Handle NULL values - these should be handled elsewhere so this is more of a safety net
if value is None or not value:
value = db_null_value
# Assemble query
if value not in ('__NO_QUERY__', db_null_value):
query_fields.append(map_def['db_field'])
# Only put quotes around specific values
if isinstance(value, str) or isinstance(value, unicode):
# Having single quotes in the string causes problems on insert because the entire
# value is single quoted
value = value.replace("'", '"')
query_values.append("'%s'" % value)
else:
query_values.append("%s" % value)
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
#/* Execute query
#/* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */#
# Execute query, but not if the report already exists
query = """%s %s (%s) VALUES (%s);""" \
% (db_write_mode, db_map, ", ".join(query_fields), ", ".join(query_values))
if print_queries:
print("")
try:
print(query)
except Exception as e:
print ("Error printing SQL query to console (unicode weirdness?")
print (e.message)
if execute_queries:
db_cursor.execute(query)
# Done processing - update user
if print_progress:
print(" - Done")
#/* ----------------------------------------------------------------------- */#
#/* Cleanup and final return
#/* ----------------------------------------------------------------------- */#
# Update user
padding = max([len(i) for i in field_map.keys()])
indent = " " * 2
print("Initial row counts:")
for schema_table, count in initial_db_row_counts.iteritems():
print("%s%s%s" % (indent, schema_table + ' ' * (padding - len(schema_table) + 4), count))
print("Final row counts:")
final_db_row_counts = {ts: db_row_count(db_cursor, ts) for ts in final_table_counts}
for schema_table, count in final_db_row_counts.iteritems():
print("%s%s%s" % (indent, schema_table + ' ' * (padding - len(schema_table) + 4), count))
print("New rows:")
for schema_table, count in final_db_row_counts.iteritems():
print("%s%s%s" % (indent, schema_table + ' ' * (padding - len(schema_table) + 4),
final_db_row_counts[schema_table] - initial_db_row_counts[schema_table]))
# Success - commit inserts and destroy DB connections
# db_conn.commit() # connection is now set to autocommit
db_cursor.close()
db_conn.close()
return 0
#/* ======================================================================= */#
#/* Commandline Execution
#/* ======================================================================= */#
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 1.78125 | 2 |
s3splitmerge/tests/aws.py | MacHu-GWU/s3splitmerge-project | 0 | 12792925 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
S3 object for testing naming convention::
s3://{bucket}/{prefix}/{module}/{function/method_name}/{filename}
"""
import boto3
# --- manually configure the following settings
aws_profile = None
aws_region = "us-east-1"
bucket = "aws-data-lab-sanhe-aws-etl-solutions"
prefix = "s3splitmerge/tests"
boto_ses = boto3.session.Session(profile_name=aws_profile, region_name=aws_region)
s3_client = boto_ses.client("s3")
| 1.617188 | 2 |
modules/infra/admin_piccolo/tables.py | Fates-List/BotList | 17 | 12792926 | <reponame>Fates-List/BotList<filename>modules/infra/admin_piccolo/tables.py
import datetime
from piccolo.columns.column_types import (UUID, Array, BigInt, Boolean, Float,
ForeignKey, Integer, Secret, Text,
Timestamptz, Varchar)
from piccolo.columns.readable import Readable
from piccolo.table import Table
from modules.models import enums
class Vanity(Table, tablename="vanity"):
type = Integer()
vanity_url = Text(primary_key = True)
redirect = BigInt()
class User(Table, tablename="users"):
vote_epoch = Timestamptz(help_text = "When the user has last voted")
description = Text(default = "This user prefers to be an enigma")
badges = Array(base_column = Text(), help_text = "Custom User Badges. The ones currently on profiles are special and manually handled without using this column.")
username = Text()
profile_css = Text(default = "")
user_css = Text(default = "")
state = Integer(default = 0, choices = enums.UserState)
coins = Integer(default = 0)
js_allowed = Boolean(default = False, help_text = "Is the user allowed to use javascript")
api_token = Text()
class Bot(Table, tablename="bots"):
username_cached = Text()
verifier = BigInt()
state = Integer(choices = enums.BotState, default = 1)
description = Text()
long_description_type = Integer(default = 0, choices = enums.LongDescType)
long_description = Text()
votes = BigInt(default = 0)
guild_count = BigInt(default = 0)
shard_count = BigInt(default = 0)
shards = Array(base_column = Integer())
user_count = BigInt(default = 0)
last_stats_post = Timestamptz(default = datetime.datetime.now())
created_at = Timestamptz(default = datetime.datetime.now())
webhook_type = Integer(choices = enums.WebhookType)
webhook = Text()
bot_library = Text()
css = Text(default = "")
prefix = Varchar(length = 13)
di_text = Text(help_text = "Discord Integration Text")
website = Text()
discord = Text()
banner_card = Text()
banner_page = Text()
github = Text()
donate = Text()
privacy_policy = Text()
nsfw = Boolean(default = False)
api_token = Text()
js_allowed = Boolean(default = True)
invite = Text()
invite_amount = Integer(default = 0)
features = Array(base_column = Text(), default = [])
class BotTag(Table, tablename="bot_tags"):
bot_id = ForeignKey(references=Bot)
tag = Text(null = False)
class Review(Table, tablename="reviews"):
"""Never ever make reviews on your own through this panel"""
id = UUID()
target_type = Integer(choices=enums.ReviewType)
target_id = BigInt()
user_id = ForeignKey(references=User)
star_rating = Float(help_text = "Amount of stars a bot has")
review_text = Text()
review_upvotes = Array(base_column = BigInt(), default = [])
review_downvotes = Array(base_column = BigInt(), default=[])
flagged = Boolean(default=False)
epoch = Array(base_column = BigInt(), default=[])
replies = Array(base_column=UUID(), default=[])
reply = Boolean(default=False)
@classmethod
def get_readable(cls):
return Readable(template="%s", columns=[cls.name])
| 2.203125 | 2 |
01_Python_Basico_Intermediario/Aula027/aula27.py | Joao-Inacio/Curso-de-Python3 | 1 | 12792927 | """
Expressão condicional com operador OR
"""
nome = input('Qual é seu nome: ')
print(nome or 'Você não digitou nada!')
| 3.734375 | 4 |
elements-of-programming-interviews/14.1.sorted-array-intersection.py | vtemian/interviews-prep | 8 | 12792928 | <filename>elements-of-programming-interviews/14.1.sorted-array-intersection.py
from typing import List
def intersection(x: List[int], y: List[int]) -> List[int]:
result = []
idx_x = idx_y = 0
while idx_x < len(x) and idx_y < len(y):
if x[idx_x] == y[idx_y] and (not result or result[-1] != x[idx_x]):
result.append(x[idx_x])
idx_x += 1
idx_y += 1
elif x[idx_x] > y[idx_y]:
idx_y += 1
else:
idx_x += 1
return result
result = intersection([1, 2, 2, 3, 3, 4], [2, 3, 3])
assert result == [2, 3]
result = intersection([1, 2, 2, 3, 3, 4], [4])
assert result == [4]
result = intersection([1, 2, 2, 3, 3, 4], [5])
assert result == []
| 3.828125 | 4 |
src/semver/__init__.py | b0uh/python-semver | 159 | 12792929 | <gh_stars>100-1000
"""
semver package major release 3.
A Python module for semantic versioning. Simplifies comparing versions.
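
Illustrative usage sketch (these helpers are re-exported below; the outputs
shown are what the module-level API is expected to return):

    >>> import semver
    >>> semver.compare("1.0.2", "1.1.0")
    -1
    >>> semver.bump_minor("3.4.5")
    '3.5.0'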
"""
from ._deprecated import (
bump_build,
bump_major,
bump_minor,
bump_patch,
bump_prerelease,
compare,
finalize_version,
format_version,
match,
max_ver,
min_ver,
parse,
parse_version_info,
replace,
cmd_bump,
cmd_compare,
cmd_nextver,
cmd_check,
createparser,
process,
main,
)
from .version import Version, VersionInfo
from .__about__ import (
__version__,
__author__,
__maintainer__,
__author_email__,
__description__,
__maintainer_email__,
SEMVER_SPEC_VERSION,
)
| 0.898438 | 1 |
src/lda_test.py | mpenza19/LatentDirichletAlloc | 0 | 12792930 | # Adapted from:
# https://www.analyticsvidhya.com/blog/2016/08/beginners-guide-to-topic-modeling-in-python/
import read_bibtex
import os, shutil
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
import string
import gensim
from gensim import corpora
from gensim.test.utils import datapath
import numpy as np
stop = set(stopwords.words('english'))
stop.add("exist")
stop.add("because")
stop.add("via")
stop.add("interest")
stop.add("therefore")
stop.add("hence")
stop.add("this")
exclude = set(string.punctuation)
exclude.add("-")
exclude.add("_")
exclude.add(".")
exclude.add(";")
lemma = WordNetLemmatizer()
stemmer = PorterStemmer()
ntopics = 30
npasses = 400
result_dir="doc_results_all_500_30"
model_dir="model_all_500_30"
year_from=1980
# Creating the object for LDA model using gensim library
Lda = gensim.models.ldamodel.LdaModel
def clean(doc):
punc_free = ''.join(ch for ch in doc if ch not in exclude)
lemmatized = " ".join(lemma.lemmatize(word)+" " for word in punc_free.lower().split())
stemmed = " ".join(stemmer.stem(word) for word in lemmatized.split())
stop_free = " ".join([i for i in stemmed.split() if i not in stop])
return stop_free
def main():
if result_dir in os.listdir("."): shutil.rmtree("./"+result_dir)
os.mkdir("./"+result_dir)
# Read and clean data
doc_set = read_bibtex.bibtex_tostring_from(year_from)
doc_clean = [clean(doc).split() for doc in doc_set]
# Creating the term dictionary of our courpus, where every unique term is assigned an index.
dictionary = corpora.Dictionary(doc_clean)
# Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.
doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]
# Loading the LDA model
ldamodel = Lda.load("./"+model_dir+"/all")
# Infer topic distribution for each doc
topic_dist = [ldamodel.get_document_topics(dictionary.doc2bow(doc)) for doc in doc_clean]
# Save results
np.save("./"+result_dir+"/all", np.array(topic_dist))
dist_array = np.array(topic_dist)
    transpose_array = [[] for x in range(ntopics)]
for itr in range(len(dist_array)):
for top, weight in dist_array[itr]:
transpose_array[top].append((itr, weight))
for row in transpose_array:
row.sort(key=lambda x: x[1], reverse=True)
np.save("./"+result_dir+"/all_transpose", np.array(transpose_array))
main() | 3.09375 | 3 |
test_package/conanfile.py | appimage-conan-community/appimagetool_installer | 0 | 12792931 | <reponame>appimage-conan-community/appimagetool_installer
import os
from conans import ConanFile, CMake, tools
class AppimagetoolinstallerTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
exports_sources = ("appimage.svg", "org.appimagecraft.TestApp.desktop")
# build_requires = ("cmake_installer/3.10.0@conan/stable")
def build(self):
cmake = CMake(self)
cmake.definitions["CMAKE_INSTALL_PREFIX"] = self.build_folder + "/AppDir"
cmake.configure()
cmake.build(target="install")
def test(self):
if not tools.cross_building(self.settings):
self.run("appimagetool %s" % (self.build_folder + "/AppDir"), run_environment=True)
self.run(self.build_folder + "/Test_App-x86_64.AppImage --appimage-extract-and-run")
| 1.84375 | 2 |
docs/examples/tutorial/2_split/main3.py | ynikitenko/lena | 4 | 12792932 | <filename>docs/examples/tutorial/2_split/main3.py
from __future__ import print_function
import os
from lena.core import Sequence, Split, Source
from lena.structures import Histogram
from lena.math import mesh
from lena.output import ToCSV, Write, LaTeXToPDF, PDFToPNG
from lena.output import MakeFilename, RenderLaTeX
from lena.variables import Variable
from read_data import ReadData
def main():
data_file = os.path.join("..", "data", "normal_3d.csv")
write = Write("output")
s = Sequence(
ReadData(),
Split([
(
Variable("x", lambda vec: vec[0]),
Histogram(mesh((-10, 10), 10)),
),
(
Variable("y", lambda vec: vec[1]),
Histogram(mesh((-10, 10), 10)),
),
(
Variable("z", lambda vec: vec[2]),
Histogram(mesh((-10, 10), 10)),
),
]),
MakeFilename("{{variable.name}}"),
ToCSV(),
write,
RenderLaTeX("histogram_1d.tex", "templates"),
write,
LaTeXToPDF(),
PDFToPNG(),
)
results = s.run([data_file])
for res in results:
print(res)
if __name__ == "__main__":
main()
| 2.65625 | 3 |
build-support/mini-cluster/relocate_binaries_for_mini_cluster.py | granthenke/kudu | 2 | 12792933 | <reponame>granthenke/kudu
#!/usr/bin/env python
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
################################################################################
# This script makes Kudu release binaries relocatable for easy use by
# integration tests using a mini cluster. The resulting binaries should never
# be deployed to run an actual Kudu service, whether in production or
# development, because all security dependencies are copied from the build
# system and will not be updated if the operating system on the runtime host is
# patched.
################################################################################
import logging
import optparse
import os
import os.path
import re
import shutil
import subprocess
import sys
SOURCE_ROOT = os.path.join(os.path.dirname(__file__), "../..")
# Add the build-support dir to the system path so we can import kudu-util.
sys.path.append(os.path.join(SOURCE_ROOT, "build-support"))
from kudu_util import check_output, Colors, init_logging
from dep_extract import DependencyExtractor
# Constants.
LC_RPATH = 'LC_RPATH'
LC_LOAD_DYLIB = 'LC_LOAD_DYLIB'
KEY_CMD = 'cmd'
KEY_NAME = 'name'
KEY_PATH = 'path'
# Exclude libraries that are GPL-licensed and libraries that are not portable
# across Linux kernel versions.
PAT_LINUX_LIB_EXCLUDE = re.compile(r"""(libpthread|
libc|
libstdc\+\+|
librt|
libdl|
libgcc.*
)\.so""", re.VERBOSE)
# We don't want to ship libSystem because it includes kernel and thread
# routines that we assume may not be portable between macOS versions.
# TODO(mpercy): consider excluding libc++ as well.
PAT_MACOS_LIB_EXCLUDE = re.compile(r"libSystem")
# Config keys.
BUILD_ROOT = 'build_root'
BUILD_BIN_DIR = 'build_bin_dir'
ARTIFACT_ROOT = 'artifact_root'
ARTIFACT_BIN_DIR = 'artifact_bin_dir'
ARTIFACT_LIB_DIR = 'artifact_lib_dir'
IS_MACOS = os.uname()[0] == "Darwin"
IS_LINUX = os.uname()[0] == "Linux"
def check_for_command(command):
"""
Ensure that the specified command is available on the PATH.
"""
try:
subprocess.check_call(['which', command])
except subprocess.CalledProcessError as err:
logging.error("Unable to find %s command", command)
raise err
def objdump_private_headers(binary_path):
"""
Run `objdump -p` on the given binary.
Returns a list with one line of objdump output per record.
"""
check_for_command('objdump')
try:
output = check_output(["objdump", "-p", binary_path])
except subprocess.CalledProcessError as err:
logging.error(err)
return []
return output.strip().decode("utf-8").split("\n")
def parse_objdump_macos(cmd_type, dump):
"""
Parses the output from objdump_private_headers() for macOS.
'cmd_type' must be one of the following:
* LC_RPATH: Returns a list containing the rpath search path, with one
search path per entry.
* LC_LOAD_DYLIB: Returns a list of shared object library dependencies, with
one shared object per entry. They are returned as stored in the MachO
header, without being first resolved to an absolute path, and may look
like: @rpath/Foo.framework/Versions/A/Foo
'dump' is the output from objdump_private_headers().
"""
# Parsing state enum values.
PARSING_NONE = 0
PARSING_NEW_RECORD = 1
PARSING_RPATH = 2
PARSING_LIB_PATHS = 3
state = PARSING_NONE
values = []
for line in dump:
if re.match('^Load command', line):
state = PARSING_NEW_RECORD
continue
splits = re.split('\s+', line.strip().decode("utf-8"), maxsplit=2)
key = splits[0]
val = splits[1] if len(splits) > 1 else None
if state == PARSING_NEW_RECORD:
if key == KEY_CMD and val == LC_RPATH:
state = PARSING_RPATH
continue
if key == KEY_CMD and val == LC_LOAD_DYLIB:
state = PARSING_LIB_PATHS
continue
if state == PARSING_RPATH and cmd_type == LC_RPATH:
if key == KEY_PATH:
# Strip trailing metadata from rpath dump line.
values.append(val)
if state == PARSING_LIB_PATHS and cmd_type == LC_LOAD_DYLIB:
if key == KEY_NAME:
values.append(val)
return values
def get_rpaths_macos(binary_path):
"""
Helper function that returns a list of rpaths parsed from the given binary.
"""
dump = objdump_private_headers(binary_path)
return parse_objdump_macos(LC_RPATH, dump)
def resolve_library_paths_macos(raw_library_paths, rpaths):
"""
Resolve the library paths from parse_objdump_macos(LC_LOAD_DYLIB, ...) to
absolute filesystem paths using the rpath information returned from
get_rpaths_macos().
Returns a mapping from original to resolved library paths on success.
    If any library cannot be resolved, an error is raised.
"""
resolved_paths = {}
for raw_lib_path in raw_library_paths:
if not raw_lib_path.startswith("@rpath"):
resolved_paths[raw_lib_path] = raw_lib_path
continue
resolved = False
for rpath in rpaths:
resolved_path = re.sub('@rpath', rpath, raw_lib_path)
if os.path.exists(resolved_path):
resolved_paths[raw_lib_path] = resolved_path
resolved = True
break
if not resolved:
            raise IOError("Unable to locate library %s in rpath %s" % (raw_lib_path, rpaths))
return resolved_paths
def get_dep_library_paths_macos(binary_path):
"""
Returns a map of symbolic to resolved library dependencies of the given binary.
See resolve_library_paths_macos().
"""
dump = objdump_private_headers(binary_path)
raw_library_paths = parse_objdump_macos(LC_LOAD_DYLIB, dump)
rpaths = parse_objdump_macos(LC_RPATH, dump)
return resolve_library_paths_macos(raw_library_paths, rpaths)
def get_artifact_name():
"""
Create an archive with an appropriate name. Including version, OS, and architecture.
"""
if IS_LINUX:
os_str = "linux"
elif IS_MACOS:
os_str = "osx"
else:
raise NotImplementedError("Unsupported platform")
arch = os.uname()[4]
with open(os.path.join(SOURCE_ROOT, "version.txt"), 'r') as version:
version = version.readline().strip().decode("utf-8")
artifact_name = "kudu-binary-%s-%s-%s" % (version, os_str, arch)
return artifact_name
def mkconfig(build_root, artifact_root):
"""
Build a configuration map for convenient plumbing of path information.
"""
config = {}
config[BUILD_ROOT] = build_root
config[BUILD_BIN_DIR] = os.path.join(build_root, "bin")
config[ARTIFACT_ROOT] = artifact_root
config[ARTIFACT_BIN_DIR] = os.path.join(artifact_root, "bin")
config[ARTIFACT_LIB_DIR] = os.path.join(artifact_root, "lib")
return config
def prep_artifact_dirs(config):
"""
Create any required artifact output directories, if needed.
"""
if not os.path.exists(config[ARTIFACT_ROOT]):
os.makedirs(config[ARTIFACT_ROOT], mode=0755)
if not os.path.exists(config[ARTIFACT_BIN_DIR]):
os.makedirs(config[ARTIFACT_BIN_DIR], mode=0755)
if not os.path.exists(config[ARTIFACT_LIB_DIR]):
os.makedirs(config[ARTIFACT_LIB_DIR], mode=0755)
def copy_file(src, dest):
"""
Copy the file with path 'src' to path 'dest'.
If 'src' is a symlink, the link will be followed and 'dest' will be written
as a plain file.
"""
shutil.copyfile(src, dest)
def chrpath(target, new_rpath):
"""
Change the RPATH or RUNPATH for the specified target. See man chrpath(1).
"""
# Continue with a warning if no rpath is set on the binary.
try:
subprocess.check_call(['chrpath', '-l', target])
except subprocess.CalledProcessError as err:
logging.warning("No RPATH or RUNPATH set on target %s, continuing...", target)
return
# Update the rpath.
try:
subprocess.check_call(['chrpath', '-r', new_rpath, target])
except subprocess.CalledProcessError as err:
logging.warning("Failed to chrpath for target %s", target)
raise err
def relocate_deps_linux(target_src, target_dst, config):
"""
See relocate_deps(). Linux implementation.
"""
NEW_RPATH = '$ORIGIN/../lib'
# Make sure we have the chrpath command available in the Linux build.
check_for_command('chrpath')
# Copy the linked libraries.
dep_extractor = DependencyExtractor()
dep_extractor.set_library_filter(lambda path: False if PAT_LINUX_LIB_EXCLUDE.search(path) else True)
libs = dep_extractor.extract_deps(target_src)
for lib_src in libs:
lib_dst = os.path.join(config[ARTIFACT_LIB_DIR], os.path.basename(lib_src))
copy_file(lib_src, lib_dst)
# We have to set the RUNPATH of the shared objects as well for transitive
# dependencies to be properly resolved. $ORIGIN is always relative to the
# running executable.
chrpath(lib_dst, NEW_RPATH)
# We must also update the RUNPATH of the executable itself to look for its
# dependencies in a relative location.
chrpath(target_dst, NEW_RPATH)
def relocate_deps_macos(target_src, target_dst, config):
"""
See relocate_deps(). macOS implementation.
"""
libs = get_dep_library_paths_macos(target_src)
check_for_command('install_name_tool')
for (search_name, resolved_path) in libs.iteritems():
# Filter out libs we don't want to archive.
if PAT_MACOS_LIB_EXCLUDE.search(resolved_path):
continue
# Archive the rest of the runtime dependencies.
lib_dst = os.path.join(config[ARTIFACT_LIB_DIR], os.path.basename(resolved_path))
copy_file(resolved_path, lib_dst)
# Change library search path or name for each archived library.
modified_search_name = re.sub('^.*/', '@rpath/', search_name)
subprocess.check_call(['install_name_tool', '-change',
search_name, modified_search_name, target_dst])
# Modify the rpath.
rpaths = get_rpaths_macos(target_src)
for rpath in rpaths:
subprocess.check_call(['install_name_tool', '-delete_rpath', rpath, target_dst])
subprocess.check_call(['install_name_tool', '-add_rpath', '@executable_path/../lib',
target_dst])
def relocate_deps(target_src, target_dst, config):
"""
Make the target relocatable and copy all of its dependencies into the
artifact directory.
"""
if IS_LINUX:
return relocate_deps_linux(target_src, target_dst, config)
if IS_MACOS:
return relocate_deps_macos(target_src, target_dst, config)
raise NotImplementedError("Unsupported platform")
def relocate_target(target, config):
"""
Copy all dependencies of the executable referenced by 'target' from the
build directory into the artifact directory, and change the rpath of the
executable so that the copied dependencies will be found when the executable
is invoked.
"""
# Create artifact directories, if needed.
prep_artifact_dirs(config)
# Copy the target into the artifact directory.
target_src = os.path.join(config[BUILD_BIN_DIR], target)
target_dst = os.path.join(config[ARTIFACT_BIN_DIR], target)
copy_file(target_src, target_dst)
# Make the target relocatable and copy all of its dependencies into the
# artifact directory.
return relocate_deps(target_src, target_dst, config)
def main():
if len(sys.argv) < 3:
print("Usage: %s kudu_build_dir target [target ...]" % (sys.argv[0], ))
sys.exit(1)
# Command-line arguments.
build_root = sys.argv[1]
targets = sys.argv[2:]
init_logging()
if not os.path.exists(build_root):
logging.error("Build directory %s does not exist", build_root)
sys.exit(1)
artifact_name = get_artifact_name()
artifact_root = os.path.join(build_root, artifact_name)
# Clear the artifact root to ensure a clean build.
if os.path.exists(artifact_root):
shutil.rmtree(artifact_root)
logging.info("Including targets and their dependencies in archive...")
config = mkconfig(build_root, artifact_root)
for target in targets:
relocate_target(target, config)
if __name__ == "__main__":
main()
| 1.273438 | 1 |
SumOfPrimes.py | kevjames3/CodingChallenges | 0 | 12792934 | '''
Focus of this file is to determine the sum of the first 1000 prime numbers.
'''
import random
import math
def isPrime(num):
result = True
if num > 2 and not (num % 2 == 0):
        # Fermat's little theorem: for a prime n, a^n (mod n) = a (mod n)
        # for any integer 2 <= a < n. A failed check proves num is composite;
        # a passed check only means "probably prime", so the trial division
        # below confirms it.
        a = random.randint(2, num - 1)
        if pow(a, num, num) == (a % num):
sqrt_n = math.sqrt(num)
for divider in range(3, int(math.ceil(sqrt_n) + 1)):
if num % divider == 0:
result = False
break
else:
result = False
elif not num == 2:
result = False
return result
if __name__ == '__main__':
primeList = []
currentNumber = 0
while len(primeList) < 1000:
if isPrime(currentNumber):
primeList.append(currentNumber)
currentNumber += 1
    print(sum(primeList))
src/rnn/text_classification_model_simple.py | jorgemf/kaggle_redefining_cancer_treatment | 20 | 12792935 | import tensorflow as tf
from tensorflow.contrib import slim
import tensorflow.contrib.layers as layers
from ..configuration import *
from .text_classification_train import main
class ModelSimple(object):
"""
Base class to create models for text classification. It uses several layers of GRU cells.
"""
def model(self, input_text_begin, input_text_end, gene, variation, num_output_classes,
batch_size, embeddings, training=True, dropout=TC_MODEL_DROPOUT):
"""
Creates a model for text classification
        :param tf.Tensor input_text_begin: ids of the first words of the text, as
        [batch_size, MAX_WORDS]
        :param tf.Tensor input_text_end: ids of the last words of the text, or None
        :param int num_output_classes: the number of output classes for the classifier
        :param int batch_size: batch size, the same used in the dataset
        :param List[List[float]] embeddings: a matrix with the embeddings for the embedding lookup
:param float dropout: dropout value between layers
:param boolean training: whether the model is built for training or not
:return Dict[str,tf.Tensor]: a dict with logits and prediction tensors
"""
input_text_begin = tf.reshape(input_text_begin, [batch_size, MAX_WORDS])
if input_text_end is not None:
input_text_end = tf.reshape(input_text_end, [batch_size, MAX_WORDS])
embedded_sequence_begin, sequence_length_begin, \
embedded_sequence_end, sequence_length_end, \
gene, variation = \
self.model_embedded_sequence(embeddings, input_text_begin, input_text_end, gene,
variation)
_, max_length, _ = tf.unstack(tf.shape(embedded_sequence_begin))
with tf.variable_scope('text_begin'):
output_begin = self.rnn(embedded_sequence_begin, sequence_length_begin, max_length,
dropout, batch_size, training)
if input_text_end is not None:
with tf.variable_scope('text_end'):
output_end = self.rnn(embedded_sequence_end, sequence_length_end, max_length,
dropout, batch_size, training)
output = tf.concat([output_begin, output_end], axis=1)
else:
output = output_begin
# full connected layer
logits = self.model_fully_connected(output, gene, variation, num_output_classes, dropout,
training)
prediction = tf.nn.softmax(logits)
return {
'logits' : logits,
'prediction': prediction,
}
def rnn(self, sequence, sequence_length, max_length, dropout, batch_size, training,
num_hidden=TC_MODEL_HIDDEN, num_layers=TC_MODEL_LAYERS):
# Recurrent network.
cells = []
for _ in range(num_layers):
cell = tf.nn.rnn_cell.GRUCell(num_hidden)
if training:
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=dropout)
cells.append(cell)
network = tf.nn.rnn_cell.MultiRNNCell(cells)
type = sequence.dtype
sequence_output, _ = tf.nn.dynamic_rnn(network, sequence, dtype=tf.float32,
sequence_length=sequence_length,
initial_state=network.zero_state(batch_size, type))
# get last output of the dynamic_rnn
sequence_output = tf.reshape(sequence_output, [batch_size * max_length, num_hidden])
indexes = tf.range(batch_size) * max_length + (sequence_length - 1)
output = tf.gather(sequence_output, indexes)
return output
def model_fully_connected(self, output, gene, variation, num_output_classes, dropout, training):
output = layers.dropout(output, keep_prob=dropout, is_training=training)
net = tf.concat([output, gene, variation], axis=1)
net = layers.fully_connected(net, 128, activation_fn=tf.nn.relu)
net = layers.dropout(net, keep_prob=dropout, is_training=training)
logits = layers.fully_connected(net, num_output_classes, activation_fn=None)
return logits
def remove_padding(self, input_text):
# calculate max length of the input_text
mask = tf.greater_equal(input_text, 0) # true for words false for padding
sequence_length = tf.reduce_sum(tf.cast(mask, tf.int32), 1)
# truncate the input text to max length
max_sequence_length = tf.reduce_max(sequence_length)
input_text_length = tf.shape(input_text)[1]
        empty_padding_length = input_text_length - max_sequence_length
        input_text, _ = tf.split(input_text, [max_sequence_length, empty_padding_length], axis=1)
return input_text, sequence_length
def model_embedded_sequence(self, embeddings, input_text_begin, input_text_end, gene,
variation):
"""
Given the embeddings and the input text returns the embedded sequence and
the sequence length. The input_text is truncated to the max length of the sequence, so
the output embedded_sequence wont have the same shape as input_text or even a constant shape
:param embeddings:
:param input_text:
:return: (embedded_sequence, sequence_length)
"""
input_text_begin, sequence_length_begin = self.remove_padding(input_text_begin)
if input_text_end is not None:
input_text_end, sequence_length_end = self.remove_padding(input_text_end)
else:
sequence_length_end = None
variation, variation_length = self.remove_padding(variation)
# create the embeddings
# first vector is a zeros vector used for padding
embeddings_dimension = len(embeddings[0])
embeddings = [[0.0] * embeddings_dimension] + embeddings
embeddings = tf.constant(embeddings, name='embeddings', dtype=tf.float32)
# this means we need to add 1 to the input_text
input_text_begin = tf.add(input_text_begin, 1)
if input_text_end is not None:
input_text_end = tf.add(input_text_end, 1)
gene = tf.add(gene, 1)
variation = tf.add(variation, 1)
embedded_sequence_begin = tf.nn.embedding_lookup(embeddings, input_text_begin)
if input_text_end is not None:
embedded_sequence_end = tf.nn.embedding_lookup(embeddings, input_text_end)
else:
embedded_sequence_end = None
embedded_gene = tf.nn.embedding_lookup(embeddings, gene)
embedded_gene = tf.squeeze(embedded_gene, axis=1)
embedded_variation = tf.nn.embedding_lookup(embeddings, variation)
embedded_variation = tf.reduce_mean(embedded_variation, axis=1)
return embedded_sequence_begin, sequence_length_begin, \
embedded_sequence_end, sequence_length_end, \
embedded_gene, embedded_variation
def model_arg_scope(self, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
with slim.arg_scope([slim.batch_norm],
decay=batch_norm_decay,
epsilon=batch_norm_epsilon,
activation_fn=None) as scope:
return scope
def targets(self, labels, output_classes):
"""
Transform a vector of labels into a matrix of one hot encoding labels
:param tf.Tensor labels: an array of labels with dimension [batch_size]
:param int output_classes: the total number of output classes
:return tf.Tensor: a tensorflow tensor
"""
targets = tf.one_hot(labels, axis=-1, depth=output_classes, on_value=1.0, off_value=0.0)
targets = tf.squeeze(targets, axis=1)
return targets
def loss(self, targets, graph_data):
"""
Calculates the softmax cross entropy loss
:param tf.Tensor logits: logits output of the model
:param tf.Tensor targets: targets with the one hot encoding labels
:return tf.Tensor : a tensor with the loss value
"""
logits = graph_data['logits']
loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
return tf.reduce_mean(loss)
def optimize(self, loss, global_step,
learning_rate_initial=TC_LEARNING_RATE_INITIAL,
learning_rate_decay=TC_LEARNING_RATE_DECAY,
learning_rate_decay_steps=TC_LEARNING_RATE_DECAY_STEPS):
"""
Creates a learning rate and an optimizer for the loss
:param tf.Tensor loss: the tensor with the loss of the model
:param tf.Tensor global_step: the global step for training
:param int learning_rate_initial: the initial learning rate
:param int learning_rate_decay: the decay of the learning rate
:param int learning_rate_decay_steps: the number of steps to decay the learning rate
:return (tf.Tensor, tf.Tensor): a tuple with the optimizer and the learning rate
"""
learning_rate = tf.train.exponential_decay(learning_rate_initial, global_step,
learning_rate_decay_steps,
learning_rate_decay,
staircase=True, name='learning_rate')
# optimizer
optimizer = tf.train.RMSPropOptimizer(learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# optimizer = tf.train.AdamOptimizer(learning_rate)
optimizer = optimizer.minimize(loss, global_step=global_step)
return optimizer, learning_rate
if __name__ == '__main__':
main(ModelSimple(), 'simple', batch_size=TC_BATCH_SIZE)
| 3.1875 | 3 |
Programmi/script/print_comandi_c0data.py | lgiacomazzo/SG0-ITA | 1 | 12792936 | <reponame>lgiacomazzo/SG0-ITA<filename>Programmi/script/print_comandi_c0data.py
path_dir = "SG0-2.1.1/immagini_c0data"
print("")
print("")
print(f"cd {path_dir}")
lista_png = ["BG12A.png","BG24A1.png","BG24A3.png","BG24A4.png","BG24A5.png","BG24E1.png","BG24E4.png","BG24N1.png","BG24N3.png","BG24N4.png","BG83A3.png","IBG094.png","IBG099.png","SG0_IBG004A.png","SG0_IBG005A.png","SG0_IBG005C.png","SG0_IBG010A.png","SG0_IBG010B.png","SG0_IBG015A.png","SG0_IBG019A.png","SG0_IBG031A.png","SG0_IBG031B.png","SG0_IBG031C.png","SG0_IBG031D.png","SG0_IBG031E.png","SG0_IBG031F.png","SG0_IBG031G.png","SG0_IBG034A.png","SG0_IBG034B.png","SG0_IBG034C.png","SG0_IBG035A.png","SG0_IBG048A.png","SG0_IBG049A_honorifics.png","SG0_IBG049B_honorifics.png","SG0_IBG052A.png","SG0_IBG056C.png","SG0_IBG058A.png","z_warning.png"]
lista_dds = ["CLEARLIST.dds","CONFIG.dds","DATA01.dds","EXMENU.dds","EXMENU2.dds","GSYSMES.dds","help00.dds","help01.dds","MENUCHIP.dds","MESWIN.dds","PHONE.dds","PHONE_B.dds","PHONE_RINE.dds","SAVEMENU.dds","SYSM_SAVEMENU.dds","SYSM_TIPS.dds","TIPSCHIPS.dds","title_chip.dds"]
#pipeline = ["replace", "add"]
pipeline = ["replace"]
for nome in lista_png:
for op in pipeline:
print("open c0data.mpk")
print(f"{op} {nome}")
print("close c0data.mpk")
for nome in lista_dds:
for op in pipeline:
print("open c0data.mpk")
print(f"{op} {nome}")
print("close c0data.mpk")
print("exit")
print("")
print("")
print(f"copia questi comandi nella shell di Ungelify, ma prima, copia il file c0data.mpk nella directory {path_dir}") | 1.804688 | 2 |
ventura/_hweb.py | HermesPasser/ventura | 0 | 12792937 | import urllib.request as req
import ventura._hpath as hpath
import os
def download(url, file_name):
response = req.urlopen(url)
    # Create folders if needed
hpath.create_dir(file_name)
file = open(file_name,'wb')
file.write(response.read())
file.close()
def get_page(url):
response = req.urlopen(url)
return response.read().decode("utf8")
| 2.9375 | 3 |
pyramid_api/helper_types.py | shawnsarwar/pyramid_analytics_python | 0 | 12792938 | <reponame>shawnsarwar/pyramid_analytics_python
from dataclasses import dataclass
import json
from string import Template
from typing import Any
from dataclasses_json import DataClassJsonMixin
from . import api_types
@dataclass
class MetaData(DataClassJsonMixin):
name: str = None
dstPath: str = None
modified: str = None
@dataclass
class WrappedType(DataClassJsonMixin):
className: str = None
metaData: MetaData = None
data: dict = None
def to_instance(self):
class_ = getattr(api_types, self.className)
return class_.from_json(json.dumps(self.data))
def to_file(self, path_):
with open(path_, 'w') as f:
json.dump(json.loads(self.to_json()), f, indent=2)
@staticmethod
def createFromFile(path_, template_values=None, error_on_missing=True):
with open(path_, 'r') as f:
obj = json.dumps(json.load(f))
if template_values:
obj = Template(obj)
if error_on_missing:
obj = obj.substitute(template_values)
else:
obj = obj.safe_substitute(template_values)
return WrappedType.from_json(obj)
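    # Usage sketch (illustrative only; the file name and template key below are
    # made-up examples, not part of this library):
    #
    #     wrapped = WrappedType.createFromFile(
    #         'role.json', template_values={'tenantId': 'abc123'})
    #     instance = wrapped.to_instance()   # back to the api_types dataclass
    #     wrapped.to_file('role_out.json')   # round-trip back to disk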
@staticmethod
def create(instance: Any) -> 'WrappedType':
# expects an instance of an api_type Class
class_ = type(instance)
return WrappedType(
class_.__qualname__,
MetaData(),
json.loads(instance.to_json())
)
| 2.265625 | 2 |
python/pynet/w01_03.py | bandarji/lekhan | 0 | 12792939 | #!/usr/bin/env python
"""
Create three different variables the first variable should use all lower case
characters with underscore ( _ ) as the word separator. The second variable
should use all upper case characters with underscore as the word separator.
The third variable should use numbers, letters, and underscore, but still be a
valid variable Python variable name.
Make all three variables be strings that refer to IPv6 addresses.
Use the from future technique so that any string literals in Python2 are
unicode.
compare if variable1 equals variable2
compare if variable1 is not equal to variable3
"""
from __future__ import unicode_literals


def main():
    # One possible solution following the exercise description above;
    # the IPv6 values are arbitrary documentation-prefix examples.
    first_address = '2001:db8::1'
    SECOND_ADDRESS = '2001:db8::1'
    address_3 = '2001:db8::2'

    print(first_address == SECOND_ADDRESS)
    print(first_address != address_3)

if __name__ == '__main__':
main()
| 4.125 | 4 |
tsserver/configutils.py | m4tx/techswarm-server | 1 | 12792940 | import os
from tsserver import app
def get_upload_dir():
return os.path.join(app.root_path, app.config['PHOTOS_UPLOAD_FOLDER'])
| 1.742188 | 2 |
carts/views.py | cristinacorghi/BeautyProject | 0 | 12792941 | from django.shortcuts import render, HttpResponseRedirect
from django.urls import reverse
import Store.views
from Store.models.productModel import *
from .forms import CustomerPaymentForm
from .models import *
from Store.models.productModel import CustomerOrders
def cart_view(request):
try:
        the_id = request.session['cart_id']  # get the id of the current cart from the session
cart = Cart.objects.get(id=the_id)
except:
the_id = None
if the_id is None:
empty_message = "Your cart is empty, please keep shopping"
context = {"empty": True, 'empty_message': empty_message}
else:
        new_total = 0.00  # total price
for item in cart.cartitem_set.all():
            line_total = float(item.product.price) * item.quantity  # price * quantity
new_total += line_total
request.session['items_total'] = cart.cartitem_set.count()
        cart.total = new_total  # total price
cart.save()
context = {"cart": cart}
template = "cart.html"
return render(request, template, context)
def add_to_cart(request, pk):
try:
the_id = request.session['cart_id']
except:
new_cart = Cart()
new_cart.save()
request.session['cart_id'] = new_cart.id
the_id = new_cart.id
cart = Cart.objects.get(id=the_id)
try:
product = Product.objects.get(id=pk)
except Product.DoesNotExist:
pass
except:
pass
if request.method == 'POST':
if product.quantity == 0:
lista_attesa = WaitingListModel.objects.create(product=product, user=request.user)
lista_attesa.save()
return render(request, 'finished_perfumes.html', {'product': product})
elif int(request.POST['qty']) > product.quantity:
return render(request, 'finished_perfumes.html', {'product': product})
else:
            qty = request.POST['qty']  # quantity of this perfume added to the cart
cart_item = CartItem.objects.create(cart=cart, product=product)
cart_item.quantity = qty
cart_item.save()
return HttpResponseRedirect(reverse("carts:cart_view"))
return HttpResponseRedirect(reverse("carts:cart_view"))
def remove_from_cart(request, id):
try:
the_id = request.session['cart_id']
cart = Cart.objects.get(id=the_id)
except:
the_id = None
return HttpResponseRedirect(reverse("carts:cart_view"))
cartitem = CartItem.objects.get(id=id)
cartitem.delete()
return HttpResponseRedirect(reverse("carts:cart_view"))
def customer_payment(request):
if request.method == 'POST':
form = CustomerPaymentForm(request.POST, instance=request.user)
if not form.is_valid():
return render(request, 'payment.html', {'form': form})
else:
form.save()
cartitem = CartItem.objects.all()
for item in cartitem:
orderdetail = CustomerOrders(user=request.user, product=item.product)
orderdetail.save()
item.product.quantity -= item.quantity
                # update the quantity in the database
Product.objects.filter(id=item.product.pk).update(quantity=item.product.quantity)
            cartitem.delete()  # once payment is made the cart goes back to being empty
request.session['items_total'] = 0
template = "success_payment.html"
context = {"empty": True, 'form': form, 'cartitem': cartitem}
return render(request, template, context)
form = CustomerPaymentForm()
return render(request, 'payment.html', {'form': form})
def success_payment(request):
return render(request, 'success_payment.html')
| 2.390625 | 2 |
sessao03/04_56-FuncoesPart3/aula56.py | Ruteski/CursoPythonOM | 0 | 12792942 | """
functions - *args and **kwargs
"""
# def func(a1,a2,a3,a4,a5, nome=None, a6=None):
# print(a1,a2,a3,a4,a5,nome, a6)
# return nome, a6
#
#
# var = func(1,2,3,4,5, nome='lincoln', a6='5')
# print(var[0], var[1])
# **kwargs = keyword args - named arguments, basically a JSON-like dict
def func(*args, **kwargs):
# print(args)
# print(args[0])
    # print(args[-1])  # accesses the last item of the list
# print(len(args))
# for v in args:
# print(v)
# print(args[0])
print(args)
print(kwargs)
# print(kwargs['nome'])
# print(kwargs['sobrenome'])
nome = kwargs.get('nome')
print(nome)
idade = kwargs.get('idade')
if idade is not None:
print(idade)
else:
        print('idade does not exist')
# lista = [1,2,3,4,5]
# n1, n2, *n = lista
# print(n1, n2, n)
# print(*lista, sep='\n')
# func(1,2,3,4,5,6)
lista = [1,2,3,4,5]
lista2 = [10,20,30,40,50]
func(*lista, *lista2, nome='lincoln', sobrenome='ruteski')
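# a dict can also be unpacked into keyword arguments with ** at the call site
# (extra illustrative example; the dict below is arbitrary)
dados = {'nome': 'lincoln', 'idade': 30}
func(**dados)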
| 4.0625 | 4 |
clusterwrapper/clustermetrics.py | opennlp/DeepPhrase | 2 | 12792943 | <reponame>opennlp/DeepPhrase<filename>clusterwrapper/clustermetrics.py
from sklearn.metrics import silhouette_score, calinski_harabaz_score
def get_silhouette_coefficient(cluster_train_data,labels_assigned):
return silhouette_score(cluster_train_data,labels_assigned)
def get_calinski_harabaz_coefficient(cluster_train_data, labels_assigned):
return calinski_harabaz_score(cluster_train_data, labels_assigned) | 2.046875 | 2 |
Python/Strings/designer_door.py | abivilion/Hackerank-Solutions- | 0 | 12792944 | <gh_stars>0
# Enter your code here. Read input from STDIN. Print output to STDOUT
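# Designer Door Mat pattern: '.|.' repeated 1, 3, 5, ... times per row,
# mirrored around a middle row that prints 'WELCOME' instead, with every
# row centered to width y using '-' as padding.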
x, y = map(int, input().split())
items = list(range(1, x + 1, 2))
items = items + items[::-1][1:]
for i in items:
    text = "WELCOME" if i == x else '.|.' * i
    print(text.center(y, '-'))
| 3.65625 | 4 |
deployment_tool/middleware.py | Onyxnetworks/deployment_tool | 0 | 12792945 | <filename>deployment_tool/middleware.py
import re
from django.shortcuts import render, redirect
from django.conf import settings
EXEMPT_URL = settings.LOGIN_URL.lstrip('/')
# Class to redirect user to login unless they have a valid APIC Cookie
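# Hypothetical registration sketch (the settings entry below is inferred from
# this module's path, not taken from the project's settings.py):
#
#     MIDDLEWARE = [
#         # ... Django's default middleware ...
#         'deployment_tool.middleware.LoginRequiredMiddleware',
#     ]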
class LoginRequiredMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
return response
def process_view(self, request, view_func, view_args, view_kwargs):
path = request.path_info.lstrip('/')
        if 'APIC_COOKIE' in request.session and path in EXEMPT_URL:
            return redirect(settings.LOGIN_REDIRECT_URL)
        elif 'APIC_COOKIE' in request.session or path in EXEMPT_URL:
return None
else:
return redirect(settings.LOGIN_URL)
| 2.1875 | 2 |
soam/core/__init__.py | MuttData/soam | 1 | 12792946 | """SoaM core."""
from soam.core.runner import SoamFlow
from soam.core.step import Step
| 0.898438 | 1 |
weixin/framework/tornado.py | 500-Error/weixin-SDK | 1 | 12792947 | <gh_stars>1-10
# encoding=utf-8
import functools
import tornado.web
from ..utils import is_valid_request
def weixin_request_only(func):
@functools.wraps(func)
def _wrapper_(req):
nts = map(lambda k: req.get_query_argument(k, ""),
['nonce', 'timestamp', 'signature'])
nonce, timestamp, sig = nts
token = req.config.token
if is_valid_request(token, nonce, timestamp, sig):
return func(req)
req.set_status(403)
return _wrapper_
def make_handler(weapp):
weapp.initialize()
class WeixinRequestHandler(tornado.web.RequestHandler):
def initialize(self):
self.weapp = weapp
self.config = weapp.config
def compute_etag(self):
return
def set_default_headers(self):
self.clear_header('Server')
@weixin_request_only
def get(self):
            echo_str = self.get_argument('echostr', "")
self.finish(echo_str)
@weixin_request_only
async def post(self):
xml = self.weapp.reply(self.request.body) or ""
self.write(xml)
return WeixinRequestHandler
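# Usage sketch (illustrative only; the weapp object, URL path and port are
# assumptions, not defined in this module):
#
#     application = tornado.web.Application([
#         (r"/weixin", make_handler(weapp)),
#     ])
#     application.listen(8080)
#     tornado.ioloop.IOLoop.current().start()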
| 2.125 | 2 |
Mixin/Search.py | parente/clique | 3 | 12792948 | <filename>Mixin/Search.py<gh_stars>1-10
'''
Defines search related mixins.
@author: <NAME> <<EMAIL>>
@copyright: Copyright (c) 2008 <NAME>
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
class CircularSearchMixin(object):
'''
    Mixin providing a generic method that uses callbacks to perform a circular
search through a body of items. How the search proceeds is entirely defined
by the callbacks.
'''
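    # Usage sketch (illustrative only; the list-based callbacks below are an
    # assumption, not part of this module):
    #
    #     class ListSearcher(CircularSearchMixin):
    #         def __init__(self, items):
    #             self.items = items
    #
    #         def _move(self, curr):
    #             i = self.items.index(curr) + 1
    #             if i >= len(self.items):
    #                 raise ValueError('past the end')  # triggers the wrap
    #             return self.items[i]
    #
    #         def find(self, text):
    #             return self.CircularSearch(
    #                 lambda: self.items[0],              # start_cb
    #                 lambda: None,                       # end_cb
    #                 self._move,                         # move_cb
    #                 lambda item, t: t in item,          # test_cb
    #                 lambda item: print('found', item),  # found_cb
    #                 lambda curr: self.items[0],         # reset_cb
    #                 text, current=True)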
def CircularSearch(self, start_cb, end_cb, move_cb, test_cb, found_cb,
reset_cb, text, current):
'''
Method for performing a generic, wrapping search over an entire collection.
@param start_cb: Function to call when the search is starting
@type start_cb: callable
@param end_cb: Function to call when the search is ending
@type end_cb: callable
@param move_cb: Function to call to move to another item to test
@type move_cb: callable
@param test_cb: Function to call to test if an item contains the text
@type test_cb: callable
@param found_cb: Function to call when text is found in an item
@type found_cb: callable
@param reset_cb: Function to call when wrapping during search
@type reset_cb: callable
@param text: Text to locate
@type text: string
@param current: Start the search on the current item?
@type current: boolean
@return: True if wrapped, False if not wrapped, None if not found
@rtype: boolean
'''
# set to first item to test
curr = start_cb()
try:
if not current:
curr = move_cb(start_cb())
except (ValueError, AttributeError):
# ignore any errors here, we might need to wrap
pass
else:
while 1:
if test_cb(curr, text):
found_cb(curr)
end_cb()
return False
# seek in desired direction
try:
curr = move_cb(curr)
except ValueError:
break
except AttributeError:
end_cb()
return None
try:
# reset to endpoint
curr = reset_cb(curr)
except (ValueError, AttributeError):
end_cb()
return None
while 1:
if test_cb(curr, text):
found_cb(curr)
end_cb()
return True
# seek in desired direction
try:
curr = move_cb(curr)
except (ValueError, AttributeError):
break
end_cb()
return None
| 2.828125 | 3 |
projectmanager/migrations/0010_client_email.py | gregplaysguitar/django-projectmanager | 8 | 12792949 | <reponame>gregplaysguitar/django-projectmanager
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projectmanager', '0009_auto_20150414_1019'),
]
operations = [
migrations.AddField(
model_name='client',
name='email',
field=models.EmailField(default=b'', max_length=254, blank=True),
),
]
| 1.398438 | 1 |
barcodes/dxfwrite/dxfwrite/htmlcolors.py | sbarton272/AcousticBarcodes-Explorations | 2 | 12792950 | capitalized_html_colors = {
"Blue": (
0,
0,
255
),
"Pink": (
255,
192,
203
),
"Darkorange": (
255,
140,
0
),
"Fuchsia": (
255,
0,
255
),
"LawnGreen": (
124,
252,
0
),
"AliceBlue": (
240,
248,
255
),
"Crimson": (
220,
20,
60
),
"White": (
255,
255,
255
),
"NavajoWhite": (
255,
222,
173
),
"Cornsilk": (
255,
248,
220
),
"Bisque": (
255,
228,
196
),
"PaleGreen": (
152,
251,
152
),
"Brown": (
165,
42,
42
),
"DarkTurquoise": (
0,
206,
209
),
"DarkGreen": (
0,
100,
0
),
"DarkGoldenRod": (
184,
134,
11
),
"MediumOrchid": (
186,
85,
211
),
"Chocolate": (
210,
105,
30
),
"Purple": (
128,
0,
128
),
"PapayaWhip": (
255,
239,
213
),
"Olive": (
128,
128,
0
),
"LightSlateGray": (
119,
136,
153
),
"PeachPuff": (
255,
218,
185
),
"Plum": (
221,
160,
221
),
"MediumAquaMarine": (
102,
205,
170
),
"MintCream": (
245,
255,
250
),
"CornflowerBlue": (
100,
149,
237
),
"HotPink": (
255,
105,
180
),
"DarkBlue": (
0,
0,
139
),
"LimeGreen": (
50,
205,
50
),
"DeepSkyBlue": (
0,
191,
255
),
"DarkKhaki": (
189,
183,
107
),
"LightGrey": (
211,
211,
211
),
"Yellow": (
255,
255,
0
),
"Gainsboro": (
220,
220,
220
),
"MistyRose": (
255,
228,
225
),
"SandyBrown": (
244,
164,
96
),
"DeepPink": (
255,
20,
147
),
"SeaShell": (
255,
245,
238
),
"Magenta": (
255,
0,
255
),
"DarkGrey": (
169,
169,
169
),
"DarkCyan": (
0,
139,
139
),
"DarkSlateGrey": (
47,
79,
79
),
"GreenYellow": (
173,
255,
47
),
"DarkOrchid": (
153,
50,
204
),
"LightGoldenRodYellow": (
250,
250,
210
),
"OliveDrab": (
107,
142,
35
),
"Chartreuse": (
127,
255,
0
),
"Peru": (
205,
133,
63
),
"MediumTurquoise": (
72,
209,
204
),
"Orange": (
255,
165,
0
),
"Red": (
255,
0,
0
),
"Wheat": (
245,
222,
179
),
"LightCyan": (
224,
255,
255
),
"LightSeaGreen": (
32,
178,
170
),
"BlueViolet": (
138,
43,
226
),
"LightSlateGrey": (
119,
136,
153
),
"Cyan": (
0,
255,
255
),
"MediumPurple": (
147,
112,
216
),
"MidnightBlue": (
25,
25,
112
),
"Coral": (
255,
127,
80
),
"PaleTurquoise": (
175,
238,
238
),
"Gray": (
128,
128,
128
),
"MediumSeaGreen": (
60,
179,
113
),
"Moccasin": (
255,
228,
181
),
"Turquoise": (
64,
224,
208
),
"DarkSlateBlue": (
72,
61,
139
),
"Green": (
0,
128,
0
),
"Beige": (
245,
245,
220
),
"Teal": (
0,
128,
128
),
"Azure": (
240,
255,
255
),
"LightSteelBlue": (
176,
196,
222
),
"DimGrey": (
105,
105,
105
),
"Tan": (
210,
180,
140
),
"AntiqueWhite": (
250,
235,
215
),
"SkyBlue": (
135,
206,
235
),
"GhostWhite": (
248,
248,
255
),
"HoneyDew": (
240,
255,
240
),
"FloralWhite": (
255,
250,
240
),
"LavenderBlush": (
255,
240,
245
),
"SeaGreen": (
46,
139,
87
),
"Lavender": (
230,
230,
250
),
"BlanchedAlmond": (
255,
235,
205
),
"DarkOliveGreen": (
85,
107,
47
),
"DarkSeaGreen": (
143,
188,
143
),
"SpringGreen": (
0,
255,
127
),
"Navy": (
0,
0,
128
),
"Orchid": (
218,
112,
214
),
"Salmon": (
250,
128,
114
),
"IndianRed": (
205,
92,
92
),
"Snow": (
255,
250,
250
),
"SteelBlue": (
70,
130,
180
),
"MediumSlateBlue": (
123,
104,
238
),
"Black": (
0,
0,
0
),
"LightBlue": (
173,
216,
230
),
"Ivory": (
255,
255,
240
),
"MediumVioletRed": (
199,
21,
133
),
"DarkViolet": (
148,
0,
211
),
"DarkGray": (
169,
169,
169
),
"SaddleBrown": (
139,
69,
19
),
"DarkMagenta": (
139,
0,
139
),
"Tomato": (
255,
99,
71
),
"WhiteSmoke": (
245,
245,
245
),
"MediumSpringGreen": (
0,
250,
154
),
"DodgerBlue": (
30,
144,
255
),
"Aqua": (
0,
255,
255
),
"ForestGreen": (
34,
139,
34
),
"LemonChiffon": (
255,
250,
205
),
"Silver": (
192,
192,
192
),
"LightGray": (
211,
211,
211
),
"GoldenRod": (
218,
165,
32
),
"Indigo": (
75,
0,
130
),
"CadetBlue": (
95,
158,
160
),
"LightYellow": (
255,
255,
224
),
"PowderBlue": (
176,
224,
230
),
"RoyalBlue": (
65,
105,
225
),
"Sienna": (
160,
82,
45
),
"Thistle": (
216,
191,
216
),
"Lime": (
0,
255,
0
),
"SlateGray": (
112,
128,
144
),
"DarkRed": (
139,
0,
0
),
"LightSkyBlue": (
135,
206,
250
),
"SlateBlue": (
106,
90,
205
),
"YellowGreen": (
154,
205,
50
),
"Aquamarine": (
127,
255,
212
),
"LightCoral": (
240,
128,
128
),
"DarkSlateGray": (
47,
79,
79
),
"Khaki": (
240,
230,
140
),
"BurlyWood": (
222,
184,
135
),
"MediumBlue": (
0,
0,
205
),
"DarkSalmon": (
233,
150,
122
),
"RosyBrown": (
188,
143,
143
),
"LightSalmon": (
255,
160,
122
),
"PaleVioletRed": (
216,
112,
147
),
"FireBrick": (
178,
34,
34
),
"Violet": (
238,
130,
238
),
"Grey": (
128,
128,
128
),
"LightGreen": (
144,
238,
144
),
"Linen": (
250,
240,
230
),
"OrangeRed": (
255,
69,
0
),
"PaleGoldenRod": (
238,
232,
170
),
"DimGray": (
105,
105,
105
),
"Maroon": (
128,
0,
0
),
"LightPink": (
255,
182,
193
),
"SlateGrey": (
112,
128,
144
),
"Gold": (
255,
215,
0
),
"OldLace": (
253,
245,
230
)
}
lowercase_html_colors = dict( ((key.lower(), value) for key, value in capitalized_html_colors.items()) )
#lowercase_html_colors = { key.lower(): value for key, value in capitalized_html_colors.items() }
def get_color_tuple_by_name(colorname):
return lowercase_html_colors[colorname.lower()]
| 1.625 | 2 |