id (string, 1 to 265 chars) | text (string, 6 to 5.19M chars) | dataset_id (string, 7 classes) |
---|---|---|
178218 | from .l2_loss import L2Loss
__all__ = ['L2Loss']
| StarcoderdataPython |
3360279 | import copy
import random
import numpy as np
class NPuzzle:
"""N-Puzzle simulator"""
def __init__(self, n=15, start_blank=None):
self.n = int(n)
self.width = int(np.round(np.sqrt(n+1)))
assert self.width**2-1 == self.n
self.state = np.arange(self.n+1).reshape(self.width, self.width)
self.blank_idx = (self.width-1, self.width-1)
self.labels = list(range(1, self.n+1))+[0]
self.sequence = []
if start_blank is not None:
while start_blank[0] < self.blank_idx[0]:
self.transition(self.above())
while start_blank[1] < self.blank_idx[1]:
self.transition(self.left())
assert self.blank_idx == start_blank
def __len__(self):
return len(self.state.reshape(-1))
def __getitem__(self, key):
return self.labels[self.state.reshape(-1)[key]]
def __iter__(self):
values = self.state.reshape(-1)
for v in values:
yield self.labels[v]
def all_atoms(self):
atoms = set([])
for pos, _ in enumerate(self):
for _, val in enumerate(self.labels):
atoms.add((pos, val))
return atoms
def actions(self):
"""Return a list of actions for the current state"""
directions = [self.above, self.below, self.left, self.right]
return [d(self.blank_idx) for d in directions if d(self.blank_idx) is not None]
def above(self, loc=None):
"""Return the tile index above the given (row, col) location tuple, or None
The default behavior uses the current blank index as the location
"""
if loc is None:
loc = self.blank_idx
row, col = loc
row = row-1
if row >= 0:
return row, col
return None
def below(self, loc=None):
"""Return the tile index below the given (row, col) location tuple, or None
The default behavior uses the current blank index as the location
"""
if loc is None:
loc = self.blank_idx
row, col = loc
row = row+1
if row < self.width:
return row, col
return None
def left(self, loc=None):
"""Return the tile index left of the given (row, col) location tuple, or None
The default behavior uses the current blank index as the location
"""
if loc is None:
loc = self.blank_idx
row, col = loc
col = col-1
if col >= 0:
return row, col
return None
def right(self, loc=None):
"""Return the tile index right of the given (row, col) location tuple, or None
The default behavior uses the current blank index as the location
"""
if loc is None:
loc = self.blank_idx
row, col = loc
col = col+1
if col < self.width:
return row, col
return None
def reset(self):
"""Reset the NPuzzle to the canonical 'solved' state"""
self.state = np.arange(self.n+1).reshape(self.width, self.width)
self.blank_idx = (self.width-1, self.width-1)
return self
def scramble(self, seed=None, n_steps=None):
"""Scramble the NPuzzle with randomly selected actions
Specify a random seed for repeatable results.
"""
if seed is not None:
py_st = random.getstate()
np_st = np.random.get_state()
random.seed(seed)
np.random.seed(seed)
# need both even and odd n_steps for blank to reach every space
        if n_steps is None:
n_steps = random.choice([self.n**2, self.n**2+1])
for _ in range(n_steps):
action = random.choice(self.actions())
self.transition(action)
self.sequence.append(action)
if seed is not None:
random.setstate(py_st)
np.random.set_state(np_st)
return self
def transition(self, tile_idx):
"""Transform the NPuzzle with a single action
The action must be specified as a tile index and must be within the
bounds of the NPuzzle and adjacent to the current blank index.
"""
t_row, t_col = tile_idx
b_row, b_col = self.blank_idx
# Within bounds
assert 0 <= t_row < self.width
assert 0 <= t_col < self.width
# Adjacent tile
assert sum([np.abs(b_row-t_row), np.abs(b_col-t_col)]) == 1
self._unchecked_transition(tile_idx, self.blank_idx)
self.blank_idx = tile_idx
return self
def _unchecked_transition(self, tile_idx, blank_idx):
self.state[tile_idx], self.state[blank_idx] = self.state[blank_idx], self.state[tile_idx]
def __repr__(self):
string_form = np.asarray(list(map(lambda x: self.labels[x],
self.state.flatten()))
).reshape(self.width, self.width)
return '{}-Puzzle(\n{})'.format(self.n, string_form)
def __hash__(self):
return hash(repr(self))
def __eq__(self, another):
assert self.n == another.n, 'Instances must have same n_var'
assert self.width == another.width, 'Instances must have same n_values'
return np.all(self.state == another.state) and np.all(self.blank_idx == another.blank_idx)
def __ne__(self, another):
return not self.__eq__(another)
def apply_macro(self, sequence=None, model=None):
"""Apply a sequence of actions or an effect model to transform the NPuzzle
If using a model, it should be specified as a tuple (swap_list, blank_idx)
which represents a list of tile position swaps and the required blank index
for satisfying the model's precondition.
"""
assert sequence is not None or model is not None
if model is not None:
swap_list, starting_blank_idx = model
if self.blank_idx == starting_blank_idx:
old_state = self.state.flatten()
new_state = self.state.flatten()
for (src_idx, dst_idx) in swap_list:
new_state[dst_idx] = old_state[src_idx]
self.state = new_state.reshape(self.width, self.width)
self.blank_idx = tuple(np.argwhere(self.state == self.n)[0])
else: # starting blanks don't line up
pass # cannot execute macro
elif sequence is not None:
for move in sequence:
self.transition(move)
if sequence:
self.sequence += sequence
return self
def summarize_effects(self, baseline=None):
"""Summarize the position changes in the NPuzzle relative to a baseline NPuzzle
The default behavior compares the current NPuzzle against a solved NPuzzle.
Returns:
An effect model tuple (swap_list, blank_idx), where swap_list is a
tuple of (source_idx, destination_idx) pairs, and blank_idx is the
starting blank index (i.e. the one from the baseline NPuzzle).
"""
if baseline is None:
baseline = copy.deepcopy(self).reset()
src_indices = np.arange(self.n+1)
src_tiles = baseline.state.flatten()
src_dict = dict(zip(src_tiles, src_indices))
dst_indices = [src_dict[tile] for tile in self.state.flatten()]
swap_list = list(zip(dst_indices, src_indices))
swap_list = tuple([swap for swap in swap_list if swap[0] != swap[1]])
return swap_list, baseline.blank_idx
def test_default_baseline():
"""Test NPuzzle when building models with the default baseline"""
puz = NPuzzle(15)
puz.scramble()
baseline = copy.deepcopy(puz).reset()
assert baseline == NPuzzle(15)
assert puz != baseline
baseline.apply_macro(sequence=puz.sequence)
assert baseline == puz
baseline.reset()
assert baseline != puz
baseline.apply_macro(model=puz.summarize_effects())
assert baseline == puz
def test_custom_baseline():
"""Test NPuzzle when building models with a custom baseline"""
puz = NPuzzle(15)
puz.transition(puz.left())
puz.transition(puz.left())
puz.transition(puz.left())
model = puz.summarize_effects()
assert model == (((15, 12), (12, 13), (13, 14), (14, 15)), (3, 3))
baseline = NPuzzle(15)
baseline.scramble(seed=40) # Seed 40 has blank in lower right corner
assert baseline.blank_idx == (3, 3)
newpuz = copy.deepcopy(baseline)
newpuz.transition(newpuz.left())
newpuz.transition(newpuz.left())
newpuz.transition(newpuz.left())
assert newpuz.blank_idx == puz.blank_idx
new_model = newpuz.summarize_effects(baseline=baseline)
assert new_model == model
assert NPuzzle(15).apply_macro(model=model) == puz
assert copy.deepcopy(baseline).apply_macro(model=new_model) == newpuz
assert puz != newpuz
def test():
"""Test NPuzzle functionality"""
test_default_baseline()
test_custom_baseline()
print('All tests passed.')
if __name__ == '__main__':
test()
| StarcoderdataPython |
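For illustration, a minimal usage sketch of the effect-model workflow in the NPuzzle sample above, mirroring its own test_default_baseline test (not part of the dataset row):

puz = NPuzzle(15)
puz.scramble(seed=1)                      # repeatable scramble
model = puz.summarize_effects()           # (swap_list, starting blank index)
restored = NPuzzle(15).apply_macro(model=model)
assert restored == puz                    # the model reproduces the scrambled state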
172355 | """
time: n^3
space: n
"""
from typing import List
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
dp = [True] + [False] * len(s)
for i in range(1, len(s)+1):
for w in wordDict:
if s[:i].endswith(w):
dp[i] |= dp[i-len(w)]
return dp[-1]
"""
time: n^2
space: n
"""
class Solution:
def wordBreak1(self, s: str, wordDict: List[str]) -> bool:
q = [0]
visited = set()
while q:
i = q.pop()
visited.add(i)
for w in wordDict:
wend = i+len(w)
if s[i:wend] == w:
if wend == len(s):
return True
else:
if wend not in visited:
q.append(wend)
return False | StarcoderdataPython |
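A quick check of the breadth-first solution above (the sample defines two classes both named Solution, so the second, BFS definition is the one left bound to the name):

s = Solution()
print(s.wordBreak1("leetcode", ["leet", "code"]))                          # True
print(s.wordBreak1("catsandog", ["cats", "dog", "sand", "and", "cat"]))    # False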
1706149 | from home.models import task, task_steps,resource,server
from home.config import test_type
from datetime import datetime
def find_resource(name,target):
for i in range(len(target)):
if target[i]['name'] == name:
return i
return -1
def get_resources(resource_type=0):
resource_list = resource.objects.filter(resource_type=resource_type)
resources = []
for resource_obj in resource_list:
resources.append({'name': '%s%s' % ('Level ' if resource_type==1 else '', resource_obj.name)})
return resources
def get_levels():
return [{'name':'Level 0'}, {'name':'Level 1'}, {'name':'Level 2'}, {'name':'Level 3'}, {'name':'Level 4'}, ]
def form_matrix(ttype,ttime=None):
datas = {
'servers':[],
'resources':[],
'links':[]
}
if ttype == test_type['FHIR_TEST']:
datas['resources'] = get_resources(0)
elif ttype == test_type['STANDARD_TEST']:
datas['resources'] = get_resources(1)
else:
return datas
datetime_obj = None
if ttime and len(ttime) > 0:
datetime_obj = datetime.strptime(ttime, '%Y-%m-%d %H:%M:%S')
server_list = server.objects.filter(is_delete=False)
server_index = 0
for server_obj in server_list:
datas['servers'].append({'name':server_obj.server_name})
#get task id
task_id = None
if datetime_obj:
task_list = task.objects.filter(task_type=ttype,status="finished",target_server=server_obj,create_time=datetime_obj).values_list('task_id',flat=True)
if len(task_list) != 0:
task_id =task_list[0]
else:
server_index += 1
continue
else:
task_list = task.objects.filter(task_type=ttype,status="finished",target_server=server_obj).order_by('-create_time').values_list('task_id',flat=True)
if len(task_list) != 0:
task_id =task_list[0]
else:
server_index += 1
continue
if task_id:
task_step_list = task_steps.objects.filter(task_id=task_id).exclude(name="Setup")
for task_step_obj in task_step_list:
if task_step_obj.name == None or len(task_step_obj.name) == 0:
continue
source = server_index
target = find_resource(task_step_obj.name,datas['resources'])
if target == -1:
continue
datas['links'].append({
'source':source,
'target':target,
'value':0 if 'success' in task_step_obj.step_desc.lower() else 1
})
server_index += 1
return datas
def form_resource_martix():
datas = {
'servers':[],
'resources':[],
'links':[]
}
#get resources
resource_list = resource.objects.filter(resource_type=0)
for resource_obj in resource_list:
datas['resources'].append({'name':resource_obj.name})
server_list = server.objects.filter(is_delete=False)
server_index = 0
for server_obj in server_list:
datas['servers'].append({'name':server_obj.server_name})
task_list = task.objects.filter(task_type=3,status="finished",target_server=server_obj).order_by('-create_time').values_list('task_id',flat=True)
task_id = None
if len(task_list) != 0:
task_id = task_list[0]
else:
server_index += 1
continue
if task_id != None:
task_step_list = task_steps.objects.filter(task_id=task_id).exclude(name="Setup")
for task_step_obj in task_step_list:
source = server_index
target = find_resource(task_step_obj.name,datas['resources'])
if target == -1:
continue
datas['links'].append({
'source':source,
'target':target,
'value':0 if 'success' in task_step_obj.step_desc.lower() else 1
})
server_index += 1
return datas
def form_level_martix():
pass | StarcoderdataPython |
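A minimal sketch, with assumed names, of consuming the structure returned by form_matrix() or form_resource_martix(): the 'links' list is flattened into a server-by-resource grid using the sample's convention of 0 for success and 1 for failure:

def links_to_grid(datas):
    grid = [[None] * len(datas['resources']) for _ in datas['servers']]
    for link in datas['links']:
        grid[link['source']][link['target']] = link['value']
    return grid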
20404 | """Competitions for parameter tuning using Monte-carlo tree search."""
from __future__ import division
import operator
import random
from heapq import nlargest
from math import exp, log, sqrt
from gomill import compact_tracebacks
from gomill import game_jobs
from gomill import competitions
from gomill import competition_schedulers
from gomill.competitions import (
Competition, NoGameAvailable, CompetitionError, ControlFileError,
Player_config)
from gomill.settings import *
class Node(object):
"""A MCTS node.
Public attributes:
children -- list of Nodes, or None for unexpanded
wins
visits
value -- wins / visits
rsqrt_visits -- 1 / sqrt(visits)
"""
def count_tree_size(self):
if self.children is None:
return 1
return sum(child.count_tree_size() for child in self.children) + 1
def recalculate(self):
"""Update value and rsqrt_visits from changed wins and visits."""
self.value = self.wins / self.visits
self.rsqrt_visits = sqrt(1 / self.visits)
def __getstate__(self):
return (self.children, self.wins, self.visits)
def __setstate__(self, state):
self.children, self.wins, self.visits = state
self.recalculate()
__slots__ = (
'children',
'wins',
'visits',
'value',
'rsqrt_visits',
)
def __repr__(self):
return "<Node:%.2f{%s}>" % (self.value, repr(self.children))
class Tree(object):
"""A tree of MCTS nodes representing N-dimensional parameter space.
Parameters (available as read-only attributes):
splits -- subdivisions of each dimension
(list of integers, one per dimension)
max_depth -- number of generations below the root
initial_visits -- visit count for newly-created nodes
initial_wins -- win count for newly-created nodes
exploration_coefficient -- constant for UCT formula (float)
Public attributes:
root -- Node
dimensions -- number of dimensions in the parameter space
All changing state is in the tree of Node objects started at 'root'.
References to 'optimiser_parameters' below mean a sequence of length
'dimensions', whose values are floats in the range 0.0..1.0 representing
a point in this space.
Each node in the tree represents an N-cuboid of parameter space. Each
expanded node has prod(splits) children, tiling its cuboid.
(The splits are the same in each generation.)
Instantiate with:
all parameters listed above
parameter_formatter -- function optimiser_parameters -> string
"""
def __init__(self, splits, max_depth,
exploration_coefficient,
initial_visits, initial_wins,
parameter_formatter):
self.splits = splits
self.dimensions = len(splits)
self.branching_factor = reduce(operator.mul, splits)
self.max_depth = max_depth
self.exploration_coefficient = exploration_coefficient
self.initial_visits = initial_visits
self.initial_wins = initial_wins
self._initial_value = initial_wins / initial_visits
self._initial_rsqrt_visits = 1 / sqrt(initial_visits)
self.format_parameters = parameter_formatter
# map child index -> coordinate vector
# coordinate vector -- tuple length 'dimensions' with values in
# range(splits[d])
# The first dimension changes most slowly.
self._cube_coordinates = []
for child_index in xrange(self.branching_factor):
v = []
i = child_index
for split in reversed(splits):
i, coord = divmod(i, split)
v.append(coord)
v.reverse()
self._cube_coordinates.append(tuple(v))
def new_root(self):
"""Initialise the tree with an expanded root node."""
self.node_count = 1 # For description only
self.root = Node()
self.root.children = None
self.root.wins = self.initial_wins
self.root.visits = self.initial_visits
self.root.value = self.initial_wins / self.initial_visits
self.root.rsqrt_visits = self._initial_rsqrt_visits
self.expand(self.root)
def set_root(self, node):
"""Use the specified node as the tree's root.
This is used when restoring serialised state.
Raises ValueError if the node doesn't have the expected number of
children.
"""
if not node.children or len(node.children) != self.branching_factor:
raise ValueError
self.root = node
self.node_count = node.count_tree_size()
def expand(self, node):
"""Add children to the specified node."""
assert node.children is None
node.children = []
child_count = self.branching_factor
for _ in xrange(child_count):
child = Node()
child.children = None
child.wins = self.initial_wins
child.visits = self.initial_visits
child.value = self._initial_value
child.rsqrt_visits = self._initial_rsqrt_visits
node.children.append(child)
self.node_count += child_count
def is_ripe(self, node):
"""Say whether a node has been visted enough times to be expanded."""
return node.visits != self.initial_visits
def parameters_for_path(self, choice_path):
"""Retrieve the point in parameter space given by a node.
choice_path -- sequence of child indices
Returns optimiser_parameters representing the centre of the region
of parameter space represented by the node of interest.
choice_path must represent a path from the root to the node of interest.
"""
lo = [0.0] * self.dimensions
breadths = [1.0] * self.dimensions
for child_index in choice_path:
cube_pos = self._cube_coordinates[child_index]
breadths = [f / split for (f, split) in zip(breadths, self.splits)]
for d, coord in enumerate(cube_pos):
lo[d] += breadths[d] * coord
return [f + .5 * breadth for (f, breadth) in zip(lo, breadths)]
def retrieve_best_parameters(self):
"""Find the parameters with the most promising simulation results.
Returns optimiser_parameters
This walks the tree from the root, at each point choosing the node with
most wins, and returns the parameters corresponding to the leaf node.
"""
simulation = self.retrieve_best_parameter_simulation()
return simulation.get_parameters()
def retrieve_best_parameter_simulation(self):
"""Return the Greedy_simulation used for retrieve_best_parameters."""
simulation = Greedy_simulation(self)
simulation.walk()
return simulation
def get_test_parameters(self):
"""Return a 'typical' optimiser_parameters."""
return self.parameters_for_path([0])
def describe_choice(self, choice):
"""Return a string describing a child's coordinates in its parent."""
return str(self._cube_coordinates[choice]).replace(" ", "")
def describe(self):
"""Return a text description of the current state of the tree.
This currently dumps the full tree to depth 2.
"""
def describe_node(node, choice_path):
parameters = self.format_parameters(
self.parameters_for_path(choice_path))
choice_s = self.describe_choice(choice_path[-1])
return "%s %s %.3f %3d" % (
choice_s, parameters, node.value,
node.visits - self.initial_visits)
root = self.root
wins = root.wins - self.initial_wins
visits = root.visits - self.initial_visits
try:
win_rate = "%.3f" % (wins / visits)
except ZeroDivisionError:
win_rate = "--"
result = [
"%d nodes" % self.node_count,
"Win rate %d/%d = %s" % (wins, visits, win_rate)
]
for choice, node in enumerate(self.root.children):
result.append(" " + describe_node(node, [choice]))
if node.children is None:
continue
for choice2, node2 in enumerate(node.children):
result.append(" " + describe_node(node2, [choice, choice2]))
return "\n".join(result)
def summarise(self, out, summary_spec):
"""Write a summary of the most-visited parts of the tree.
out -- writeable file-like object
summary_spec -- list of ints
summary_spec says how many nodes to describe at each depth of the tree
(so to show only direct children of the root, pass a list of length 1).
"""
def p(s):
print >> out, s
def describe_node(node, choice_path):
parameters = self.format_parameters(
self.parameters_for_path(choice_path))
choice_s = " ".join(map(self.describe_choice, choice_path))
return "%s %-40s %.3f %3d" % (
choice_s, parameters, node.value,
node.visits - self.initial_visits)
def most_visits((child_index, node)):
return node.visits
last_generation = [([], self.root)]
for i, n in enumerate(summary_spec):
depth = i + 1
p("most visited at depth %s" % (depth))
this_generation = []
for path, node in last_generation:
if node.children is not None:
this_generation += [
(path + [child_index], child)
for (child_index, child) in enumerate(node.children)]
for path, node in sorted(
nlargest(n, this_generation, key=most_visits)):
p(describe_node(node, path))
last_generation = this_generation
p("")
class Simulation(object):
"""A single monte-carlo simulation.
Instantiate with the Tree the simulation will run in.
Use the methods in the following order:
run()
get_parameters()
update_stats(b)
describe()
"""
def __init__(self, tree):
self.tree = tree
# list of Nodes
self.node_path = []
# corresponding list of child indices
self.choice_path = []
# bool
self.candidate_won = None
def _choose_action(self, node):
"""Choose the best action from the specified node.
Returns a pair (child index, node)
"""
uct_numerator = (self.tree.exploration_coefficient *
sqrt(log(node.visits)))
def urgency((i, child)):
return child.value + uct_numerator * child.rsqrt_visits
start = random.randrange(len(node.children))
children = list(enumerate(node.children))
return max(children[start:] + children[:start], key=urgency)
def walk(self):
"""Choose a node sequence, without expansion."""
node = self.tree.root
while node.children is not None:
choice, node = self._choose_action(node)
self.node_path.append(node)
self.choice_path.append(choice)
def run(self):
"""Choose the node sequence for this simulation.
This walks down from the root, using _choose_action() at each level,
until it reaches a leaf; if the leaf has already been visited, this
expands it and chooses one more action.
"""
self.walk()
node = self.node_path[-1]
if (len(self.node_path) < self.tree.max_depth and
self.tree.is_ripe(node)):
self.tree.expand(node)
choice, child = self._choose_action(node)
self.node_path.append(child)
self.choice_path.append(choice)
def get_parameters(self):
"""Retrieve the parameters corresponding to the simulation's leaf node.
Returns optimiser_parameters
"""
return self.tree.parameters_for_path(self.choice_path)
def update_stats(self, candidate_won):
"""Update the tree's node statistics with the simulation's results.
This updates visits (and wins, if appropriate) for each node in the
simulation's node sequence.
"""
self.candidate_won = candidate_won
for node in self.node_path:
node.visits += 1
if candidate_won:
node.wins += 1
node.recalculate()
self.tree.root.visits += 1
if candidate_won:
self.tree.root.wins += 1 # For description only
self.tree.root.recalculate()
def describe_steps(self):
"""Return a text description of the simulation's node sequence."""
return " ".join(map(self.tree.describe_choice, self.choice_path))
def describe(self):
"""Return a one-line-ish text description of the simulation."""
result = "%s [%s]" % (
self.tree.format_parameters(self.get_parameters()),
self.describe_steps())
if self.candidate_won is not None:
result += (" lost", " won")[self.candidate_won]
return result
def describe_briefly(self):
"""Return a shorter description of the simulation."""
return "%s %s" % (self.tree.format_parameters(self.get_parameters()),
("lost", "won")[self.candidate_won])
class Greedy_simulation(Simulation):
"""Variant of simulation that chooses the node with most wins.
This is used to pick the 'best' parameters from the current state of the
tree.
"""
def _choose_action(self, node):
def wins((i, node)):
return node.wins
return max(enumerate(node.children), key=wins)
parameter_settings = [
Setting('code', interpret_identifier),
Setting('scale', interpret_callable),
Setting('split', interpret_positive_int),
Setting('format', interpret_8bit_string, default=None),
]
class Parameter_config(Quiet_config):
"""Parameter (ie, dimension) description for use in control files."""
# positional or keyword
positional_arguments = ('code',)
# keyword-only
keyword_arguments = tuple(setting.name for setting in parameter_settings
if setting.name != 'code')
class Parameter_spec(object):
"""Internal description of a parameter spec from the configuration file.
Public attributes:
code -- identifier
split -- integer
scale -- function float(0.0..1.0) -> player parameter
format -- string for use with '%'
"""
class Scale_fn(object):
"""Callable implementing a scale function.
Scale_fn classes are used to provide a convenient way to describe scale
functions in the control file (LINEAR, LOG, ...).
"""
class Linear_scale_fn(Scale_fn):
"""Linear scale function.
Instantiate with
lower_bound -- float
upper_bound -- float
integer -- bool (means 'round result to nearest integer')
"""
def __init__(self, lower_bound, upper_bound, integer=False):
self.lower_bound = float(lower_bound)
self.upper_bound = float(upper_bound)
self.range = float(upper_bound - lower_bound)
self.integer = bool(integer)
def __call__(self, f):
result = (f * self.range) + self.lower_bound
if self.integer:
result = int(result + .5)
return result
class Log_scale_fn(Scale_fn):
"""Log scale function.
Instantiate with
lower_bound -- float
upper_bound -- float
integer -- bool (means 'round result to nearest integer')
"""
def __init__(self, lower_bound, upper_bound, integer=False):
if lower_bound == 0.0:
raise ValueError("lower bound is zero")
self.rate = log(upper_bound / lower_bound)
self.lower_bound = lower_bound
self.integer = bool(integer)
def __call__(self, f):
result = exp(self.rate * f) * self.lower_bound
if self.integer:
result = int(result + .5)
return result
class Explicit_scale_fn(Scale_fn):
"""Scale function that returns elements from a list.
Instantiate with the list of values to use.
Normally use this with 'split' equal to the length of the list
(more generally, split**max_depth equal to the length of the list).
"""
def __init__(self, values):
if not values:
raise ValueError("empty value list")
self.values = tuple(values)
self.n = len(values)
def __call__(self, f):
return self.values[int(self.n * f)]
class LINEAR(Config_proxy):
underlying = Linear_scale_fn
class LOG(Config_proxy):
underlying = Log_scale_fn
class EXPLICIT(Config_proxy):
underlying = Explicit_scale_fn
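# Illustrative values for the scale functions above (a sketch, not part of the
# original module; arguments chosen only to show the mapping from 0.0..1.0):
#   Linear_scale_fn(0.0, 100.0)(0.25)    -> 25.0
#   Log_scale_fn(0.01, 10.0)(0.5)        -> ~0.316  (exp(0.5 * log(1000)) * 0.01)
#   Explicit_scale_fn([2, 4, 8])(0.5)    -> 4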
def interpret_candidate_colour(v):
if v in ('r', 'random'):
return 'random'
else:
return interpret_colour(v)
class Mcts_tuner(Competition):
"""A Competition for parameter tuning using the Monte-carlo tree search.
The game ids are strings containing integers starting from zero.
"""
def __init__(self, competition_code, **kwargs):
Competition.__init__(self, competition_code, **kwargs)
self.outstanding_simulations = {}
self.halt_on_next_failure = True
def control_file_globals(self):
result = Competition.control_file_globals(self)
result.update({
'Parameter': Parameter_config,
'LINEAR': LINEAR,
'LOG': LOG,
'EXPLICIT': EXPLICIT,
})
return result
global_settings = (Competition.global_settings +
competitions.game_settings + [
Setting('number_of_games', allow_none(interpret_int), default=None),
Setting('candidate_colour', interpret_candidate_colour),
Setting('log_tree_to_history_period',
allow_none(interpret_positive_int), default=None),
Setting('summary_spec', interpret_sequence_of(interpret_int),
default=(30,)),
Setting('number_of_running_simulations_to_show', interpret_int,
default=12),
])
special_settings = [
Setting('opponent', interpret_identifier),
Setting('parameters',
interpret_sequence_of_quiet_configs(Parameter_config)),
Setting('make_candidate', interpret_callable),
]
# These are used to instantiate Tree; they don't turn into Mcts_tuner
# attributes.
tree_settings = [
Setting('max_depth', interpret_positive_int, default=1),
Setting('exploration_coefficient', interpret_float),
Setting('initial_visits', interpret_positive_int),
Setting('initial_wins', interpret_positive_int),
]
def parameter_spec_from_config(self, parameter_config):
"""Make a Parameter_spec from a Parameter_config.
Raises ControlFileError if there is an error in the configuration.
Returns a Parameter_spec with all attributes set.
"""
arguments = parameter_config.resolve_arguments()
interpreted = load_settings(parameter_settings, arguments)
pspec = Parameter_spec()
for name, value in interpreted.iteritems():
setattr(pspec, name, value)
optimiser_param = 1.0 / (pspec.split * 2)
try:
scaled = pspec.scale(optimiser_param)
except Exception:
raise ValueError(
"error from scale (applied to %s)\n%s" %
(optimiser_param, compact_tracebacks.format_traceback(skip=1)))
if pspec.format is None:
pspec.format = pspec.code + ":%s"
try:
pspec.format % scaled
except Exception:
raise ControlFileError("'format': invalid format string")
return pspec
def initialise_from_control_file(self, config):
Competition.initialise_from_control_file(self, config)
if self.komi == int(self.komi):
raise ControlFileError("komi: must be fractional to prevent jigos")
competitions.validate_handicap(
self.handicap, self.handicap_style, self.board_size)
try:
specials = load_settings(self.special_settings, config)
except ValueError, e:
raise ControlFileError(str(e))
try:
self.opponent = self.players[specials['opponent']]
except KeyError:
raise ControlFileError(
"opponent: unknown player %s" % specials['opponent'])
self.parameter_specs = []
if not specials['parameters']:
raise ControlFileError("parameters: empty list")
seen_codes = set()
for i, parameter_spec in enumerate(specials['parameters']):
try:
pspec = self.parameter_spec_from_config(parameter_spec)
except StandardError, e:
code = parameter_spec.get_key()
if code is None:
code = i
raise ControlFileError("parameter %s: %s" % (code, e))
if pspec.code in seen_codes:
raise ControlFileError(
"duplicate parameter code: %s" % pspec.code)
seen_codes.add(pspec.code)
self.parameter_specs.append(pspec)
self.candidate_maker_fn = specials['make_candidate']
try:
tree_arguments = load_settings(self.tree_settings, config)
except ValueError, e:
raise ControlFileError(str(e))
self.tree = Tree(splits=[pspec.split for pspec in self.parameter_specs],
parameter_formatter=self.format_optimiser_parameters,
**tree_arguments)
# State attributes (*: in persistent state):
# *scheduler -- Simple_scheduler
# *tree -- Tree (root node is persisted)
# outstanding_simulations -- map game_number -> Simulation
# halt_on_next_failure -- bool
# *opponent_description -- string (or None)
def set_clean_status(self):
self.scheduler = competition_schedulers.Simple_scheduler()
self.tree.new_root()
self.opponent_description = None
# Can bump this to prevent people loading incompatible .status files.
status_format_version = 0
def get_status(self):
# path0 is stored for consistency check
return {
'scheduler': self.scheduler,
'tree_root': self.tree.root,
'opponent_description': self.opponent_description,
'path0': self.scale_parameters(self.tree.parameters_for_path([0])),
}
def set_status(self, status):
root = status['tree_root']
try:
self.tree.set_root(root)
except ValueError:
raise CompetitionError(
"status file is inconsistent with control file")
expected_path0 = self.scale_parameters(
self.tree.parameters_for_path([0]))
if status['path0'] != expected_path0:
raise CompetitionError(
"status file is inconsistent with control file")
self.scheduler = status['scheduler']
self.scheduler.rollback()
self.opponent_description = status['opponent_description']
def scale_parameters(self, optimiser_parameters):
l = []
for pspec, v in zip(self.parameter_specs, optimiser_parameters):
try:
l.append(pspec.scale(v))
except Exception:
raise CompetitionError(
"error from scale for %s\n%s" %
(pspec.code, compact_tracebacks.format_traceback(skip=1)))
return tuple(l)
def format_engine_parameters(self, engine_parameters):
l = []
for pspec, v in zip(self.parameter_specs, engine_parameters):
try:
s = pspec.format % v
except Exception:
s = "[%s?%s]" % (pspec.code, v)
l.append(s)
return "; ".join(l)
def format_optimiser_parameters(self, optimiser_parameters):
return self.format_engine_parameters(self.scale_parameters(
optimiser_parameters))
def make_candidate(self, player_code, engine_parameters):
"""Make a player using the specified engine parameters.
Returns a game_jobs.Player.
"""
try:
candidate_config = self.candidate_maker_fn(*engine_parameters)
except Exception:
raise CompetitionError(
"error from make_candidate()\n%s" %
compact_tracebacks.format_traceback(skip=1))
if not isinstance(candidate_config, Player_config):
raise CompetitionError(
"make_candidate() returned %r, not Player" %
candidate_config)
try:
candidate = self.game_jobs_player_from_config(
player_code, candidate_config)
except Exception, e:
raise CompetitionError(
"bad player spec from make_candidate():\n"
"%s\nparameters were: %s" %
(e, self.format_engine_parameters(engine_parameters)))
return candidate
def get_player_checks(self):
test_parameters = self.tree.get_test_parameters()
engine_parameters = self.scale_parameters(test_parameters)
candidate = self.make_candidate('candidate', engine_parameters)
result = []
for player in [candidate, self.opponent]:
check = game_jobs.Player_check()
check.player = player
check.board_size = self.board_size
check.komi = self.komi
result.append(check)
return result
def choose_candidate_colour(self):
if self.candidate_colour == 'random':
return random.choice('bw')
else:
return self.candidate_colour
def get_game(self):
if (self.number_of_games is not None and
self.scheduler.issued >= self.number_of_games):
return NoGameAvailable
game_number = self.scheduler.issue()
simulation = Simulation(self.tree)
simulation.run()
optimiser_parameters = simulation.get_parameters()
engine_parameters = self.scale_parameters(optimiser_parameters)
candidate = self.make_candidate("#%d" % game_number, engine_parameters)
self.outstanding_simulations[game_number] = simulation
job = game_jobs.Game_job()
job.game_id = str(game_number)
job.game_data = game_number
if self.choose_candidate_colour() == 'b':
job.player_b = candidate
job.player_w = self.opponent
else:
job.player_b = self.opponent
job.player_w = candidate
job.board_size = self.board_size
job.komi = self.komi
job.move_limit = self.move_limit
job.handicap = self.handicap
job.handicap_is_free = (self.handicap_style == 'free')
job.use_internal_scorer = (self.scorer == 'internal')
job.internal_scorer_handicap_compensation = \
self.internal_scorer_handicap_compensation
job.sgf_event = self.competition_code
job.sgf_note = ("Candidate parameters: %s" %
self.format_engine_parameters(engine_parameters))
return job
def process_game_result(self, response):
self.halt_on_next_failure = False
self.opponent_description = response.engine_descriptions[
self.opponent.code].get_long_description()
game_number = response.game_data
self.scheduler.fix(game_number)
# Counting no-result as loss for the candidate
candidate_won = (
response.game_result.losing_player == self.opponent.code)
simulation = self.outstanding_simulations.pop(game_number)
simulation.update_stats(candidate_won)
self.log_history(simulation.describe())
if (self.log_tree_to_history_period is not None and
self.scheduler.fixed % self.log_tree_to_history_period == 0):
self.log_history(self.tree.describe())
return "%s %s" % (simulation.describe(),
response.game_result.sgf_result)
def process_game_error(self, job, previous_error_count):
## If the very first game to return a response gives an error, halt.
## If two games in a row give an error, halt.
## Otherwise, forget about the failed game
stop_competition = False
retry_game = False
game_number = job.game_data
del self.outstanding_simulations[game_number]
self.scheduler.fix(game_number)
if self.halt_on_next_failure:
stop_competition = True
else:
self.halt_on_next_failure = True
return stop_competition, retry_game
def write_static_description(self, out):
def p(s):
print >> out, s
p("MCTS tuning event: %s" % self.competition_code)
if self.description:
p(self.description)
p("board size: %s" % self.board_size)
p("komi: %s" % self.komi)
def _write_main_report(self, out):
games_played = self.scheduler.fixed
if self.number_of_games is None:
print >> out, "%d games played" % games_played
else:
print >> out, "%d/%d games played" % (
games_played, self.number_of_games)
print >> out
best_simulation = self.tree.retrieve_best_parameter_simulation()
print >> out, "Best parameters: %s" % best_simulation.describe()
print >> out
self.tree.summarise(out, self.summary_spec)
def write_screen_report(self, out):
self._write_main_report(out)
if self.outstanding_simulations:
print >> out, "In progress:"
to_show = sorted(self.outstanding_simulations.iteritems()) \
[:self.number_of_running_simulations_to_show]
for game_id, simulation in to_show:
print >> out, "game %s: %s" % (game_id, simulation.describe())
def write_short_report(self, out):
self.write_static_description(out)
self._write_main_report(out)
if self.opponent_description:
print >> out, "opponent (%s): %s" % (
self.opponent.code, self.opponent_description)
else:
print >> out, "opponent: %s" % self.opponent.code
print >> out
write_full_report = write_short_report
| StarcoderdataPython |
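A hedged standalone sketch of the MCTS machinery in the sample above, driven by a fake win probability instead of real games (Python 2, matching the sample; the parameter values are arbitrary):

import random
tree = Tree(splits=[3, 3], max_depth=2,
            exploration_coefficient=0.45,
            initial_visits=10, initial_wins=5,
            parameter_formatter=str)
tree.new_root()
for _ in xrange(200):
    sim = Simulation(tree)
    sim.run()
    x, y = sim.get_parameters()                     # point in [0, 1]^2
    sim.update_stats(candidate_won=(random.random() < x))
print tree.retrieve_best_parameters()               # drifts towards large x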
3239977 | #
# Init
#
# import global modules
import configparser
import importlib
def init():
global config
config = configparser.ConfigParser()
config.read('config.ini')
global commands
commands = {}
def addCommandAction(command, instance, function):
if command in commands.keys():
commands[command].append([instance, function])
else:
commands[command] = [[instance, function]]
def executeCommandAction(command, message):
if command in commands.keys():
for instanceArray in commands[command]:
instance = instanceArray[0]
callableMethod = getattr(instance, instanceArray[1])
return callableMethod(message)
| StarcoderdataPython |
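A minimal sketch, with assumed handler names, of registering and dispatching a command with the helpers above:

class EchoPlugin:
    def on_echo(self, message):
        return message

init()
plugin = EchoPlugin()
addCommandAction('echo', plugin, 'on_echo')
print(executeCommandAction('echo', 'hello'))   # -> 'hello'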
1737211 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from numpy import array, append, nan, full
from numpy.testing import assert_almost_equal
import pandas as pd
from pandas.tslib import Timedelta
from catalyst.assets import Equity, Future
from catalyst.data.data_portal import HISTORY_FREQUENCIES, OHLCV_FIELDS
from catalyst.data.minute_bars import (
FUTURES_MINUTES_PER_DAY,
US_EQUITIES_MINUTES_PER_DAY,
)
from catalyst.testing import parameter_space
from catalyst.testing.fixtures import (
CatalystTestCase,
WithTradingSessions,
WithDataPortal,
alias,
)
from catalyst.testing.predicates import assert_equal
from catalyst.utils.numpy_utils import float64_dtype
class DataPortalTestBase(WithDataPortal,
WithTradingSessions,
CatalystTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
START_DATE = pd.Timestamp('2016-08-01')
END_DATE = pd.Timestamp('2016-08-08')
TRADING_CALENDAR_STRS = ('NYSE', 'us_futures')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
# Since the future with sid 10001 has a tick size of 0.0001, its prices
# should be rounded out to 4 decimal places. To test that this rounding
# occurs correctly, store its prices out to 5 decimal places by using a
# multiplier of 100,000 when writing its values.
OHLC_RATIOS_PER_SID = {10001: 100000}
@classmethod
    def make_root_symbols_info(cls):
return pd.DataFrame({
'root_symbol': ['BAR', 'BUZ'],
'root_symbol_id': [1, 2],
'exchange': ['CME', 'CME'],
})
@classmethod
def make_futures_info(cls):
trading_sessions = cls.trading_sessions['us_futures']
return pd.DataFrame({
'sid': [10000, 10001],
'root_symbol': ['BAR', 'BUZ'],
'symbol': ['BARA', 'BUZZ'],
'start_date': [trading_sessions[1], trading_sessions[0]],
'end_date': [cls.END_DATE, cls.END_DATE],
# TODO: Make separate from 'end_date'
'notice_date': [cls.END_DATE, cls.END_DATE],
'expiration_date': [cls.END_DATE, cls.END_DATE],
'tick_size': [0.01, 0.0001],
'multiplier': [500, 50000],
'exchange': ['CME', 'CME'],
})
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
# No data on first day.
dts = trading_calendar.minutes_for_session(cls.trading_days[0])
dfs = []
dfs.append(pd.DataFrame(
{
'open': full(len(dts), nan),
'high': full(len(dts), nan),
'low': full(len(dts), nan),
'close': full(len(dts), nan),
'volume': full(len(dts), 0),
},
index=dts))
dts = trading_calendar.minutes_for_session(cls.trading_days[1])
dfs.append(pd.DataFrame(
{
'open': append(100.5, full(len(dts) - 1, nan)),
'high': append(100.9, full(len(dts) - 1, nan)),
'low': append(100.1, full(len(dts) - 1, nan)),
'close': append(100.3, full(len(dts) - 1, nan)),
'volume': append(1000, full(len(dts) - 1, nan)),
},
index=dts))
dts = trading_calendar.minutes_for_session(cls.trading_days[2])
dfs.append(pd.DataFrame(
{
'open': [nan, 103.50, 102.50, 104.50, 101.50, nan],
'high': [nan, 103.90, 102.90, 104.90, 101.90, nan],
'low': [nan, 103.10, 102.10, 104.10, 101.10, nan],
'close': [nan, 103.30, 102.30, 104.30, 101.30, nan],
'volume': [0, 1003, 1002, 1004, 1001, 0]
},
index=dts[:6]
))
dts = trading_calendar.minutes_for_session(cls.trading_days[3])
dfs.append(pd.DataFrame(
{
'open': full(len(dts), nan),
'high': full(len(dts), nan),
'low': full(len(dts), nan),
'close': full(len(dts), nan),
'volume': full(len(dts), 0),
},
index=dts))
asset1_df = pd.concat(dfs)
yield 1, asset1_df
asset2_df = pd.DataFrame(
{
'open': 1.0055,
'high': 1.0059,
'low': 1.0051,
'close': 1.0055,
'volume': 100,
},
index=asset1_df.index,
)
yield 2, asset2_df
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
trading_sessions = cls.trading_sessions['us_futures']
# No data on first day, future asset intentionally not on the same
        # dates as equities, so that cross-wiring of results does not create a
# false positive.
dts = trading_calendar.minutes_for_session(trading_sessions[1])
dfs = []
dfs.append(pd.DataFrame(
{
'open': full(len(dts), nan),
'high': full(len(dts), nan),
'low': full(len(dts), nan),
'close': full(len(dts), nan),
'volume': full(len(dts), 0),
},
index=dts))
dts = trading_calendar.minutes_for_session(trading_sessions[2])
dfs.append(pd.DataFrame(
{
'open': append(200.5, full(len(dts) - 1, nan)),
'high': append(200.9, full(len(dts) - 1, nan)),
'low': append(200.1, full(len(dts) - 1, nan)),
'close': append(200.3, full(len(dts) - 1, nan)),
'volume': append(2000, full(len(dts) - 1, nan)),
},
index=dts))
dts = trading_calendar.minutes_for_session(trading_sessions[3])
dfs.append(pd.DataFrame(
{
'open': [nan, 203.50, 202.50, 204.50, 201.50, nan],
'high': [nan, 203.90, 202.90, 204.90, 201.90, nan],
'low': [nan, 203.10, 202.10, 204.10, 201.10, nan],
'close': [nan, 203.30, 202.30, 204.30, 201.30, nan],
'volume': [0, 2003, 2002, 2004, 2001, 0]
},
index=dts[:6]
))
dts = trading_calendar.minutes_for_session(trading_sessions[4])
dfs.append(pd.DataFrame(
{
'open': full(len(dts), nan),
'high': full(len(dts), nan),
'low': full(len(dts), nan),
'close': full(len(dts), nan),
'volume': full(len(dts), 0),
},
index=dts))
asset10000_df = pd.concat(dfs)
yield 10000, asset10000_df
missing_dts = trading_calendar.minutes_for_session(trading_sessions[0])
asset10001_df = pd.DataFrame(
{
'open': 1.00549,
'high': 1.00591,
'low': 1.00507,
'close': 1.0055,
'volume': 100,
},
index=missing_dts.append(asset10000_df.index),
)
yield 10001, asset10001_df
def test_get_last_traded_equity_minute(self):
trading_calendar = self.trading_calendars[Equity]
# Case: Missing data at front of data set, and request dt is before
# first value.
dts = trading_calendar.minutes_for_session(self.trading_days[0])
asset = self.asset_finder.retrieve_asset(1)
self.assertTrue(pd.isnull(
self.data_portal.get_last_traded_dt(
asset, dts[0], 'minute')))
# Case: Data on requested dt.
dts = trading_calendar.minutes_for_session(self.trading_days[2])
self.assertEqual(dts[1],
self.data_portal.get_last_traded_dt(
asset, dts[1], 'minute'))
        # Case: No data on dt, but data occurring before dt.
self.assertEqual(dts[4],
self.data_portal.get_last_traded_dt(
asset, dts[5], 'minute'))
def test_get_last_traded_future_minute(self):
asset = self.asset_finder.retrieve_asset(10000)
trading_calendar = self.trading_calendars[Future]
# Case: Missing data at front of data set, and request dt is before
# first value.
dts = trading_calendar.minutes_for_session(self.trading_days[0])
self.assertTrue(pd.isnull(
self.data_portal.get_last_traded_dt(
asset, dts[0], 'minute')))
# Case: Data on requested dt.
dts = trading_calendar.minutes_for_session(self.trading_days[3])
self.assertEqual(dts[1],
self.data_portal.get_last_traded_dt(
asset, dts[1], 'minute'))
        # Case: No data on dt, but data occurring before dt.
self.assertEqual(dts[4],
self.data_portal.get_last_traded_dt(
asset, dts[5], 'minute'))
def test_get_last_traded_dt_equity_daily(self):
# Case: Missing data at front of data set, and request dt is before
# first value.
asset = self.asset_finder.retrieve_asset(1)
self.assertTrue(pd.isnull(
self.data_portal.get_last_traded_dt(
asset, self.trading_days[0], 'daily')))
# Case: Data on requested dt.
self.assertEqual(self.trading_days[1],
self.data_portal.get_last_traded_dt(
asset, self.trading_days[1], 'daily'))
        # Case: No data on dt, but data occurring before dt.
self.assertEqual(self.trading_days[2],
self.data_portal.get_last_traded_dt(
asset, self.trading_days[3], 'daily'))
def test_get_spot_value_equity_minute(self):
trading_calendar = self.trading_calendars[Equity]
asset = self.asset_finder.retrieve_asset(1)
dts = trading_calendar.minutes_for_session(self.trading_days[2])
# Case: Get data on exact dt.
dt = dts[1]
expected = OrderedDict({
'open': 103.5,
'high': 103.9,
'low': 103.1,
'close': 103.3,
'volume': 1003,
'price': 103.3
})
result = [self.data_portal.get_spot_value(asset,
field,
dt,
'minute')
for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
# Case: Get data on empty dt, return nan or most recent data for price.
dt = dts[100]
expected = OrderedDict({
'open': nan,
'high': nan,
'low': nan,
'close': nan,
'volume': 0,
'price': 101.3
})
result = [self.data_portal.get_spot_value(asset,
field,
dt,
'minute')
for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
def test_get_spot_value_future_minute(self):
trading_calendar = self.trading_calendars[Future]
asset = self.asset_finder.retrieve_asset(10000)
dts = trading_calendar.minutes_for_session(self.trading_days[3])
# Case: Get data on exact dt.
dt = dts[1]
expected = OrderedDict({
'open': 203.5,
'high': 203.9,
'low': 203.1,
'close': 203.3,
'volume': 2003,
'price': 203.3
})
result = [self.data_portal.get_spot_value(asset,
field,
dt,
'minute')
for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
# Case: Get data on empty dt, return nan or most recent data for price.
dt = dts[100]
expected = OrderedDict({
'open': nan,
'high': nan,
'low': nan,
'close': nan,
'volume': 0,
'price': 201.3
})
result = [self.data_portal.get_spot_value(asset,
field,
dt,
'minute')
for field in expected.keys()]
assert_almost_equal(array(list(expected.values())), result)
def test_get_spot_value_multiple_assets(self):
equity = self.asset_finder.retrieve_asset(1)
future = self.asset_finder.retrieve_asset(10000)
trading_calendar = self.trading_calendars[Future]
dts = trading_calendar.minutes_for_session(self.trading_days[3])
# We expect the outputs to be lists of spot values.
expected = pd.DataFrame(
{
equity: [nan, nan, nan, nan, 0, 101.3],
future: [203.5, 203.9, 203.1, 203.3, 2003, 203.3],
},
index=['open', 'high', 'low', 'close', 'volume', 'price'],
)
result = [
self.data_portal.get_spot_value(
assets=[equity, future],
field=field,
dt=dts[1],
data_frequency='minute',
)
for field in expected.index
]
assert_almost_equal(expected.values.tolist(), result)
def test_bar_count_for_simple_transforms(self):
# July 2015
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
# half an hour into july 9, getting a 4-"day" window should get us
# all the minutes of 7/6, 7/7, 7/8, and 31 minutes of 7/9
july_9_dt = self.trading_calendar.open_and_close_for_session(
pd.Timestamp("2015-07-09", tz='UTC')
)[0] + Timedelta("30 minutes")
self.assertEqual(
(3 * 390) + 31,
self.data_portal._get_minute_count_for_transform(july_9_dt, 4)
)
# November 2015
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
# nov 26th closed
# nov 27th was an early close
# half an hour into nov 30, getting a 4-"day" window should get us
# all the minutes of 11/24, 11/25, 11/27 (half day!), and 31 minutes
# of 11/30
nov_30_dt = self.trading_calendar.open_and_close_for_session(
pd.Timestamp("2015-11-30", tz='UTC')
)[0] + Timedelta("30 minutes")
self.assertEqual(
390 + 390 + 210 + 31,
self.data_portal._get_minute_count_for_transform(nov_30_dt, 4)
)
def test_get_last_traded_dt_minute(self):
minutes = self.nyse_calendar.minutes_for_session(
self.trading_days[2])
equity = self.asset_finder.retrieve_asset(1)
result = self.data_portal.get_last_traded_dt(equity,
minutes[3],
'minute')
self.assertEqual(minutes[3], result,
"Asset 1 had a trade on third minute, so should "
"return that as the last trade on that dt.")
result = self.data_portal.get_last_traded_dt(equity,
minutes[5],
'minute')
self.assertEqual(minutes[4], result,
"Asset 1 had a trade on fourth minute, so should "
"return that as the last trade on the fifth.")
future = self.asset_finder.retrieve_asset(10000)
calendar = self.trading_calendars[Future]
minutes = calendar.minutes_for_session(self.trading_days[3])
result = self.data_portal.get_last_traded_dt(future,
minutes[3],
'minute')
self.assertEqual(minutes[3], result,
"Asset 10000 had a trade on the third minute, so "
"return that as the last trade on that dt.")
result = self.data_portal.get_last_traded_dt(future,
minutes[5],
'minute')
self.assertEqual(minutes[4], result,
"Asset 10000 had a trade on fourth minute, so should "
"return that as the last trade on the fifth.")
def test_get_empty_splits(self):
splits = self.data_portal.get_splits([], self.trading_days[2])
self.assertEqual([], splits)
@parameter_space(frequency=HISTORY_FREQUENCIES, field=OHLCV_FIELDS)
def _test_price_rounding(self, frequency, field):
equity = self.asset_finder.retrieve_asset(2)
future = self.asset_finder.retrieve_asset(10001)
cf = self.data_portal.asset_finder.create_continuous_future(
'BUZ', 0, 'calendar', None,
)
minutes = self.nyse_calendar.minutes_for_session(self.trading_days[0])
if frequency == '1m':
minute = minutes[0]
expected_equity_volume = 100
expected_future_volume = 100
data_frequency = 'minute'
else:
minute = minutes[0].normalize()
expected_equity_volume = 100 * US_EQUITIES_MINUTES_PER_DAY
expected_future_volume = 100 * FUTURES_MINUTES_PER_DAY
data_frequency = 'daily'
# Equity prices should be floored to three decimal places.
expected_equity_values = {
'open': 1.005,
'high': 1.005,
'low': 1.005,
'close': 1.005,
'volume': expected_equity_volume,
}
# Futures prices should be rounded to four decimal places.
expected_future_values = {
'open': 1.0055,
'high': 1.0059,
'low': 1.0051,
'close': 1.0055,
'volume': expected_future_volume,
}
result = self.data_portal.get_history_window(
assets=[equity, future, cf],
end_dt=minute,
bar_count=1,
frequency=frequency,
field=field,
data_frequency=data_frequency,
)
expected_result = pd.DataFrame(
{
equity: expected_equity_values[field],
future: expected_future_values[field],
cf: expected_future_values[field],
},
index=[minute],
dtype=float64_dtype,
)
assert_equal(result, expected_result)
class TestDataPortal(DataPortalTestBase):
DATA_PORTAL_LAST_AVAILABLE_SESSION = None
DATA_PORTAL_LAST_AVAILABLE_MINUTE = None
class TestDataPortalExplicitLastAvailable(DataPortalTestBase):
DATA_PORTAL_LAST_AVAILABLE_SESSION = alias('START_DATE')
DATA_PORTAL_LAST_AVAILABLE_MINUTE = alias('END_DATE')
| StarcoderdataPython |
24692 | <gh_stars>0
import sys
import copy
def find_available(graph, steps):
return [s for s in steps if s not in graph]
def trim_graph(graph, item):
removed_keys = []
for step, items in graph.items():
if item in items:
items.remove(item)
if len(items) == 0:
removed_keys.append(step)
else:
graph[step] = items
for key in removed_keys:
del graph[key]
return graph
def assembly():
steps = set()
rules = []
for directions in sys.stdin:
parts = directions.split()
rules.append((parts[1], parts[7]))
steps.add(parts[1])
steps.add(parts[7])
graph = {}
for requirement, step in rules:
if step in graph:
graph[step].append(requirement)
else:
graph[step] = [requirement]
# Iterate through until no steps are remaining
solution = ""
current_graph = copy.deepcopy(graph)
remaining_steps = steps.copy()
while(current_graph):
options = find_available(current_graph, remaining_steps)
choice = min(options)
#Add to solution and remove from graph and choices
solution += choice
remaining_steps.remove(choice)
current_graph = trim_graph(current_graph, choice)
# Add final item
solution += remaining_steps.pop()
print(f'The correct order is {solution}')
# Setup variables to simulate progress
base_seconds = 60
workers = 5
total_seconds = 0
workers = ['free']*workers
remaining_time = {step: ord(step)-64+base_seconds for step in steps}
# Process until all steps are completed
while(steps):
# Assign workers
if 'free' in workers:
options = sorted(find_available(graph, steps))
for step in options:
if 'free' in workers and step not in workers:
workers[workers.index('free')] = step
# Decrement time on all active steps
for step in remaining_time:
            # Only decrement steps currently being worked on
if step not in workers:
continue
remaining_time[step] -= 1
# If the step is finished, remove it from the graph
if remaining_time[step] == 0:
#print(f'Finished {step}')
steps.remove(step)
graph = trim_graph(graph, step)
workers[workers.index(step)] = 'free'
print(total_seconds,workers,remaining_time)
total_seconds += 1
print(f'Assembly took {total_seconds} seconds')
assembly() | StarcoderdataPython |
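The parser above expects Advent-of-Code-style dependency lines on stdin, where parts[1] is the prerequisite step and parts[7] the dependent step, for example:

Step C must be finished before step A can begin.
Step A must be finished before step B can begin.

so a run might look like python assembly.py < instructions.txt (both filenames are placeholders).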
3249760 | <filename>deepsim/deepsim/core/material.py
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for material."""
from typing import Optional
from deepsim.core.color import Color
class Material:
"""
Material class
"""
def __init__(self,
ambient: Optional[Color] = None,
diffuse: Optional[Color] = None,
specular: Optional[Color] = None,
emissive: Optional[Color] = None) -> None:
"""
Initialize Material class
Args:
ambient (Optional[Color]): ambient color
diffuse (Optional[Color]): diffuse color
specular (Optional[Color]): specular color
emissive (Optional[Color]): emissive color
"""
self._ambient = ambient.copy() if ambient else Color()
self._diffuse = diffuse.copy() if diffuse else Color()
self._specular = specular.copy() if specular else Color()
self._emissive = emissive.copy() if emissive else Color()
@property
def ambient(self) -> Color:
"""
Returns the copy of ambient color
Returns:
Color: the copy of ambient color of the material
"""
return self._ambient.copy()
@ambient.setter
def ambient(self, value: Color) -> None:
"""
Set ambient color
Args:
value (Color): ambient color
"""
self._ambient = value.copy()
@property
def diffuse(self) -> Color:
"""
Returns the copy of diffuse color
Returns:
Color: the copy of diffuse color of the material
"""
return self._diffuse.copy()
@diffuse.setter
def diffuse(self, value: Color) -> None:
"""
Set diffuse color
Args:
value (Color): diffuse color
"""
self._diffuse = value.copy()
@property
def specular(self) -> Color:
"""
Returns the copy of specular color
Returns:
Color: the copy of specular color of the material
"""
return self._specular.copy()
@specular.setter
def specular(self, value: Color) -> None:
"""
Set specular color
Args:
value (Color): specular color
"""
self._specular = value.copy()
@property
def emissive(self) -> Color:
"""
Returns the copy of emissive color
Returns:
Color: the copy of emissive color of the material
"""
return self._emissive.copy()
@emissive.setter
def emissive(self, value: Color) -> None:
"""
Set emissive color
Args:
value (Color): emissive color
"""
self._emissive = value.copy()
def copy(self) -> 'Material':
"""
Returns a copy.
Returns:
Material: the copied material
"""
return Material(ambient=self._ambient,
diffuse=self._diffuse,
specular=self._specular,
emissive=self._emissive)
def __eq__(self, other: 'Material') -> bool:
"""
Equality of Material.
Args:
other (Material): other to compare
Returns:
bool: True if the differences of all components are within epsilon, Otherwise False.
"""
return (self._ambient == other._ambient and self._diffuse == other._diffuse
and self._specular == other._specular and self._emissive == other._emissive)
def __ne__(self, other: 'Material') -> bool:
"""
Inequality of points is inequality of any coordinates
Args:
other (Material): other to compare
Returns:
bool: False if the differences of all components are within epsilon, Otherwise True.
"""
return not self.__eq__(other)
def __str__(self) -> str:
"""
        String representation of the material
        Returns:
            str: String representation of the material
"""
return "(ambient=%s, diffuse=%s, specular=%s, emissive=%s)" % (repr(self._ambient),
repr(self._diffuse),
repr(self._specular),
repr(self._emissive))
def __repr__(self) -> str:
"""
String representation including class
Returns:
str: String representation including class
"""
return "Material" + str(self)
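# --- Illustrative usage sketch (added; not part of the original module) ---
# Relies only on the API above: default-constructible Color components and
# accessors/setters that always hand back or store copies.
if __name__ == '__main__':
    material = Material()              # every component defaults to Color()
    duplicate = material.copy()        # copies each component color
    assert material == duplicate       # equality compares all four components
    duplicate.ambient = Color()        # the setter stores its own copy
    print(repr(material))              # Material(ambient=..., diffuse=..., ...)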
| StarcoderdataPython |
from pypdnsrest.dnsrecords import DNSMxRecord
from pypdnsrest.dnsrecords import DNSMxRecordData
from pypdnsrest.dnsrecords import InvalidDNSRecordException
from datetime import timedelta
from tests.records.test_records import TestRecords
class TestMxRecord(TestRecords):
def test_record(self):
mxdata = DNSMxRecordData(u"mail.{0}".format(self.zone), 10)
rec = DNSMxRecord(self.zone)
self.assertTrue(rec.set_data(mxdata))
def test_record2(self):
mxdata = DNSMxRecordData(u"mail.{0}".format(self.zone), 10, timedelta(hours=1))
rec = DNSMxRecord(self.zone)
self.assertTrue(rec.set_data(mxdata))
def test_record_wrong_priority(self):
mxdata = DNSMxRecordData(u"mail.{0}".format(self.zone), -1)
rec = DNSMxRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(mxdata)
def test_invalid_server_type(self):
mxdata = DNSMxRecordData(int(1), 10)
rec = DNSMxRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(mxdata)
def test_invalid_server(self):
mxdata = DNSMxRecordData(u"invalid", 10)
rec = DNSMxRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(mxdata)
def test_invalid_priority_type(self):
mxdata = DNSMxRecordData(u"mail.{0}".format(self.zone), u"invalid")
rec = DNSMxRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(mxdata)
def test_data_none(self):
rec = DNSMxRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
rec.set_data(None)
def test_data_invalid(self):
rec = DNSMxRecord(self.zone)
with self.assertRaises(InvalidDNSRecordException) as context:
            rec.set_data(u"invalid")
| StarcoderdataPython |
<reponame>Berailitz/library_monitor
"""Monitor book state in BUPT's library, send notice if available."""
import json
import logging
from typing import List, Dict
import requests
from .config import BOOK_PAGE_REFERER, BOOK_STATE_API, DAILY_REPORT_TEMPLATE, MESSAGE_TEMPLATE, NOTICE_COUNTER, TARGET_STATE
from .models import Book
from .queued_bot import create_queued_bot
from .sql_handler import SQLHandler, SQLManager
class LibraryMonitor(object):
"""
    Poll the BUPT library for each monitored book and notify chats via Telegram.
    Every monitored book entry carries 'name', 'id' and 'location' fields."""
def __init__(self, bot_token: str):
self.bot = create_queued_bot(bot_token)
self.sql_handler = SQLHandler(SQLManager())
@staticmethod
def update_book_states(target_book: Book) -> List[Dict]:
"""
Download and simplify book state dicts from server."""
headers = {
'Referer': BOOK_PAGE_REFERER.format(book_id=target_book.id)
}
params = {
'rec_ctrl_id': target_book.id
}
books = []
try:
state_response = requests.post(
BOOK_STATE_API, headers=headers, params=params)
full_states = json.loads(state_response.text.split('@')[0])[0]['A']
books = [
{'state': current_book['circul_status'],
'location': current_book['guancang_dept'],
'due_date': current_book['due_date']}
for current_book in full_states
if target_book.location in current_book['guancang_dept']
and current_book['circul_status'] == TARGET_STATE
]
except Exception as identifier:
logging.exception(identifier)
logging.error(
f'LibraryMonitor: Failed to update book state. (ID: {target_book.id})')
return books
def send_message(self, *, chat_id: int, text: str):
"""
        Send message to the given chat."""
logging.info(f"Send message: `{text}`")
self.bot.send_message(chat_id=chat_id, text=text)
def report_status(self):
"""
Send monitor report to users."""
for chat in self.sql_handler.get_chats():
self.send_message(
chat_id=chat.id,
text=DAILY_REPORT_TEMPLATE.format(
book_counter=len(chat.books),
book_names="、".join(
[f'《{book.name}》' for book in chat.books])
))
def run(self) -> None:
for chat in self.sql_handler.get_chats():
for target_book in chat.books:
logging.info(
f"LibraryMonitor: Updating book state. ({target_book})")
book_states = self.update_book_states(target_book)
book_counter = len(book_states)
if book_counter > 0:
logging.info(
f"LibraryMonitor: Book found. ({target_book})")
self.sql_handler.count_notice(target_book)
self.send_message(
chat_id=chat.id,
text=MESSAGE_TEMPLATE.format(
book_location=book_states[0]['location'],
book_name=target_book.name,
book_id=target_book.id,
book_counter=book_counter,
notice_index=NOTICE_COUNTER - target_book.notice_counter + 1,
max_notice_index=NOTICE_COUNTER))
else:
self.sql_handler.reset_notice_counter(target_book)
logging.info(
f"LibraryMonitor: Book NOT found. ({target_book})")
def stop(self):
"""
Stop bot and return."""
self.bot.stop()
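# --- Illustrative entry point (added sketch; not part of the original module) ---
# Assumes a valid Telegram bot token and that the config/SQL modules above are
# already set up; kept commented out because it needs real credentials.
# if __name__ == '__main__':
#     monitor = LibraryMonitor('YOUR_TELEGRAM_BOT_TOKEN')
#     monitor.run()    # poll every monitored book once and notify its chat
#     monitor.stop()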
| StarcoderdataPython |
import os
import shutil
import tempfile
from lms import notifications
from lms.lmsdb import models
from lms.lmstests.public.flake8 import tasks
INVALID_CODE = 'print "Hello Word" '
INVALID_CODE_MESSAGE = 'כשהבודק שלנו ניסה להריץ את הקוד שלך, הוא ראה שלפייתון יש בעיה להבין אותו. כדאי לוודא שהקוד רץ כהלכה לפני שמגישים אותו.' # noqa E501
INVALID_CODE_KEY = 'E999'
VALID_CODE = 'print(0)'
EXECUTE_CODE = ('import os\n'
'eval(\'os.system("touch {}")\')')
class TestAutoFlake8:
test_directory = None
@classmethod
def setup_class(cls):
cls.test_directory = tempfile.mkdtemp()
cls.file_path = os.path.join(cls.test_directory, 'some-file')
cls.execute_script = EXECUTE_CODE.format(cls.file_path)
@classmethod
def teardown_class(cls):
if cls.test_directory is not None:
shutil.rmtree(cls.test_directory)
def test_pyflake_wont_execute_code(self, solution: models.Solution):
solution.json_data_str = self.execute_script
solution.save()
tasks.run_flake8_on_solution(solution.id)
comments = tuple(
models.Comment.filter(models.Comment.solution == solution))
assert not os.listdir(self.test_directory)
assert len(comments) == 2
exec(compile(self.execute_script, '', 'exec')) # noqa S102
assert os.listdir(self.test_directory) == ['some-file']
def test_invalid_solution(self, solution: models.Solution):
solution.json_data_str = INVALID_CODE
solution.save()
tasks.run_flake8_on_solution(solution.id)
comments = tuple(
models.Comment.filter(models.Comment.solution == solution))
assert comments
assert len(comments) == 1
comment = comments[0].comment
assert comment.text == INVALID_CODE_MESSAGE
assert comment.flake8_key == INVALID_CODE_KEY
user_notifications = notifications.get_notifications_for_user(
for_user=solution.solver)
assert len(user_notifications) == 1
assert user_notifications
parameters = user_notifications[0]['message_parameters']
subject = parameters['exercise_name']
errors = parameters['errors']
assert solution.exercise.subject == subject
assert 1 == errors
def test_valid_solution(self, solution: models.Solution):
solution.json_data_str = VALID_CODE
solution.save()
tasks.run_flake8_on_solution(solution.id)
comments = tuple(
models.Comment.filter(models.Comment.solution == solution))
assert not comments
| StarcoderdataPython |
<filename>mavsim_python/parameters/planner_parameters.py
import sys
sys.path.append('..')
import numpy as np
import parameters.aerosonde_parameters as MAV
# size of the waypoint array used for the path planner. This is the
# maximum number of waypoints that might be transmitted to the path
# manager.
size_waypoint_array = 100
# airspeed commanded by planner
Va0 = MAV.u0
# max possible roll angle
phi_max = np.radians(20)
# minimum turn radius
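# (coordinated-turn kinematics: R = V^2 / (g * tan(phi)), evaluated at Va0 and phi_max)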
R_min = Va0**2 / MAV.gravity / np.tan(phi_max)
# # create random city map
# city_width = 2000; # the city is of size (width)x(width)
# building_height = 300; # maximum height of buildings
# #building_height = 1; # maximum height of buildings (for camera)
# num_blocks = 5; # number of blocks in city
# street_width = .8; # percent of block that is street.
# P.map = createWorld(city_width, building_height, num_blocks, street_width);
| StarcoderdataPython |
<gh_stars>0
# Python
# Django
# Try Django
# Getting Started (Level 1)
# Challenge 03 - Refactor the existing URL Dispatchers
from django.conf.urls import url
from . import views
urlpatterns = [
# TODO: Add a url() object whose regex parameter takes an empty path that
# terminates, and goes to views.home
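    # One possible answer, left commented out so the challenge stays open
    # (it assumes a `home` view exists in the sibling views module):
    # url(r'^$', views.home),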
]
| StarcoderdataPython |
<filename>server/app.py
from s3 import app
from s3.api import *
if __name__ == '__main__':
    app.run(host='localhost', port=5000, debug=True)
| StarcoderdataPython |
<filename>main.py
from typing import Optional
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
id: int = None
price: float
name: str
description: str
deleted: bool = False
database = []
@app.get('/')
def index():
return {'hello': 'world'}
@app.post('/items')
def create_item(item: Item):
new_item = item
database.append(new_item)
index = database.index(new_item)
new_item.id = index
return new_item
@app.get('/items')
def list_items():
all_items = []
for item in database:
if not item.deleted:
all_items.append(item)
return all_items
@app.delete('/items/{id_item}')
def delete_item(id_item: int):
item = database[id_item]
item.deleted = True
return {'success': 'item deleted'}
@app.get('/items/{id_item}')
def get_item(id_item: int):
try:
item = database[id_item]
    except IndexError:
return {'error': 'item not found.'}
if not item.deleted:
return item
else:
return {'error': 'item deleted.'}
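# --- Quick smoke test (added sketch; not part of the original file) ---
# Uses FastAPI's bundled TestClient, so no running server is needed.
if __name__ == '__main__':
    from fastapi.testclient import TestClient
    client = TestClient(app)
    created = client.post('/items', json={'price': 9.9, 'name': 'pen', 'description': 'blue ink'}).json()
    assert client.get(f"/items/{created['id']}").json()['name'] == 'pen'
    client.delete(f"/items/{created['id']}")
    assert client.get(f"/items/{created['id']}").json() == {'error': 'item deleted.'}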
| StarcoderdataPython |
import logging
import sqlite3
import time
from datetime import date, datetime
from .indices import IndicesClient
logger = logging.getLogger("elasticsearch")
def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
# make sequences into comma-separated stings
if isinstance(value, (list, tuple)):
value = ",".join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# don't decode bytestrings
elif isinstance(value, bytes):
return value
return str(value)
class Locasticsearch:
def __init__(self, database=":memory:", **kwargs):
self.db = sqlite3.connect(database)
self.db.row_factory = sqlite3.Row
self.cursor = self.db.cursor()
self.indices = IndicesClient(self.cursor)
def _get_columns(self, index):
if not self.indices.exists(index):
return []
sql = f'select * from "{index}"'
self.cursor.execute(sql)
return [col[0] for col in self.cursor.description]
def index(self, index, body, id=None, **kwargs):
"""
Creates or updates a document in an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html>`_
:arg index: The name of the index
:arg body: The document
:arg id: Document ID
"""
query_fields = list(body.keys())
# check if new columns need to be added
columns = self._get_columns(index)
if not set(columns) == set(query_fields):
self.indices.delete(index)
_template = {
"mappings": {
"properties": {field: {"type": "text"} for field in query_fields}
},
}
self.indices.create(index=index, body=_template)
if id is not None:
query_fields = ["rowid"] + query_fields
values = tuple(_escape(val) for val in body.values())
if id is not None:
values = [str(id)] + list(values)
values = tuple(values)
query_fields = ", ".join(query_fields)
sql = f'INSERT INTO "{index}"({query_fields}) VALUES {values};'
try:
self.cursor.execute(sql)
except sqlite3.IntegrityError:
pass
response = {
"_index": index,
"_type": "_doc",
"_id": str(id) if id is not None else str(self.cursor.lastrowid),
"_version": 1,
"result": "created",
"_shards": {"total": 1, "successful": 1, "failed": 0},
}
return response
def get(self, index, id, **kwargs):
sql = f'SELECT * FROM "{index}" WHERE rowid={id};'
self.cursor.execute(sql)
record = dict(self.cursor.fetchone())
response = {
"_index": index,
"_type": "_doc",
"_id": str(id),
"_version": 1,
"_seq_no": 10,
"_primary_term": 1,
"found": True,
"_source": record,
}
return response
def search(self, index=None, body=None):
"""
Returns results matching a query.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html>`_
:arg body: The search definition using the Query DSL
:arg index: A comma-separated list of index names to search; use
`_all` or empty string to perform the operation on all indices
"""
# match_all
if "match_all" in body["query"]:
sql = f'SELECT * FROM "{index}";'
if "multi_match" in body["query"]:
multi_match = body["query"]["multi_match"]["query"]
# fields = body["query"]["multi_match"]["fields"]
sql = f'SELECT * FROM "{index}" WHERE "{index}" MATCH "{multi_match}";'
start = time.process_time()
rows = self.cursor.execute(sql).fetchall()
        elapsed_time = round((time.process_time() - start) * 1000)  # "took" is expressed in milliseconds
response = {
"took": elapsed_time,
"timed_out": False,
"_shards": {"total": 1, "successful": 1, "skipped": 0, "failed": 0},
"hits": {
"total": {"value": len(rows), "relation": "eq"},
"max_score": "fix this",
"hits": [
{
"_index": index,
"_type": "_doc",
"_id": "fix this",
"_score": "fix this",
"_source": dict(row),
}
for row in rows
],
},
}
return response
def bulk(self, body, index=None, doc_type=None, params=None, headers=None):
"""
Allows to perform multiple index/update/delete operations in a single request.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html>`_
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg index: Default index for items which don't provide one
:arg doc_type: Default document type for items which don't
provide one
:arg _source: True or false to return the _source field or not,
or default list of fields to return, can be overridden on each sub-
request
:arg _source_excludes: Default list of fields to exclude from
the returned _source field, can be overridden on each sub-request
:arg _source_includes: Default list of fields to extract and
return from the _source field, can be overridden on each sub-request
:arg pipeline: The pipeline id to preprocess incoming documents
with
:arg refresh: If `true` then refresh the affected shards to make
this operation visible to search, if `wait_for` then wait for a refresh
to make this operation visible to search, if `false` (the default) then
do nothing with refreshes. Valid choices: true, false, wait_for
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg wait_for_active_shards: Sets the number of shard copies
that must be active before proceeding with the bulk operation. Defaults
to 1, meaning the primary shard only. Set to `all` for all shard copies,
otherwise set to any non-negative value less than or equal to the total
number of copies for the shard (number of replicas + 1)
"""
pass
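# --- Minimal usage sketch (added; not part of the original file) ---
# Exercises only the API defined above; index() creates the backing table on
# first use and this search() call goes through the match_all branch.
if __name__ == '__main__':
    es = Locasticsearch()  # in-memory SQLite database
    es.index('books', body={'title': 'Dune', 'author': 'Frank Herbert'}, id=1)
    print(es.get('books', 1)['_source']['title'])
    print(es.search('books', body={'query': {'match_all': {}}})['hits']['total'])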
| StarcoderdataPython |
from core.advbase import *
from slot.a import *
from slot.d import Leviathan
import adv.fjorm
def module():
return Fjorm
class Fjorm(adv.fjorm.Fjorm):
comment = '4x Fjorm in 20.55s with sufficient dprep'
a3 = [('prep',1.00), ('scharge_all', 0.05)]
a2 = [('dp',50)] # team dprep
conf = {}
conf['slots.a'] = Unexpected_Requests()+Valiant_Crown()
conf['slots.frostbite.a'] = Unexpected_Requests()+Valiant_Crown()
conf['slots.d'] = Leviathan()
conf['acl'] = "`rotation"
conf['rotation'] = """
s2 s1 dragon end
"""
coab = ['Blade', 'Tiki', 'Axe2']
conf['afflict_res.bog'] = 80
def prerun(self):
pass
def s2_before(self, e):
for _ in range(4):
Selfbuff('last_bravery',0.3,15).on()
Event('defchain')()
def s2_proc(self, e):
self.dmg_make(f'{e.name}_reflect', 3792*8, fixed=True)
if __name__ == '__main__':
from core.simulate import test_with_argv
    test_with_argv(Fjorm, *sys.argv)
| StarcoderdataPython |
<filename>homeassistant/components/denonavr/media_player.py
"""Support for Denon AVR receivers using their HTTP interface."""
import logging
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_MAC,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import CONF_RECEIVER
from .config_flow import (
CONF_MANUFACTURER,
CONF_MODEL,
CONF_SERIAL_NUMBER,
CONF_TYPE,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
ATTR_SOUND_MODE_RAW = "sound_mode_raw"
SUPPORT_DENON = (
SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_VOLUME_SET
)
SUPPORT_MEDIA_MODES = (
SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the DenonAVR receiver from a config entry."""
entities = []
receiver = hass.data[DOMAIN][config_entry.entry_id][CONF_RECEIVER]
for receiver_zone in receiver.zones.values():
if config_entry.data[CONF_SERIAL_NUMBER] is not None:
unique_id = f"{config_entry.unique_id}-{receiver_zone.zone}"
else:
unique_id = None
entities.append(DenonDevice(receiver_zone, unique_id, config_entry))
_LOGGER.debug(
"%s receiver at host %s initialized", receiver.manufacturer, receiver.host
)
async_add_entities(entities)
class DenonDevice(MediaPlayerEntity):
"""Representation of a Denon Media Player Device."""
def __init__(self, receiver, unique_id, config_entry):
"""Initialize the device."""
self._receiver = receiver
self._name = self._receiver.name
self._unique_id = unique_id
self._config_entry = config_entry
self._muted = self._receiver.muted
self._volume = self._receiver.volume
self._current_source = self._receiver.input_func
self._source_list = self._receiver.input_func_list
self._state = self._receiver.state
self._power = self._receiver.power
self._media_image_url = self._receiver.image_url
self._title = self._receiver.title
self._artist = self._receiver.artist
self._album = self._receiver.album
self._band = self._receiver.band
self._frequency = self._receiver.frequency
self._station = self._receiver.station
self._sound_mode_support = self._receiver.support_sound_mode
if self._sound_mode_support:
self._sound_mode = self._receiver.sound_mode
self._sound_mode_raw = self._receiver.sound_mode_raw
self._sound_mode_list = self._receiver.sound_mode_list
else:
self._sound_mode = None
self._sound_mode_raw = None
self._sound_mode_list = None
self._supported_features_base = SUPPORT_DENON
self._supported_features_base |= (
self._sound_mode_support and SUPPORT_SELECT_SOUND_MODE
)
async def async_added_to_hass(self):
"""Register signal handler."""
self.async_on_remove(
async_dispatcher_connect(self.hass, DOMAIN, self.signal_handler)
)
def signal_handler(self, data):
"""Handle domain-specific signal by calling appropriate method."""
entity_ids = data[ATTR_ENTITY_ID]
if entity_ids == ENTITY_MATCH_NONE:
return
if entity_ids == ENTITY_MATCH_ALL or self.entity_id in entity_ids:
params = {
key: value
for key, value in data.items()
if key not in ["entity_id", "method"]
}
getattr(self, data["method"])(**params)
def update(self):
"""Get the latest status information from device."""
self._receiver.update()
self._name = self._receiver.name
self._muted = self._receiver.muted
self._volume = self._receiver.volume
self._current_source = self._receiver.input_func
self._source_list = self._receiver.input_func_list
self._state = self._receiver.state
self._power = self._receiver.power
self._media_image_url = self._receiver.image_url
self._title = self._receiver.title
self._artist = self._receiver.artist
self._album = self._receiver.album
self._band = self._receiver.band
self._frequency = self._receiver.frequency
self._station = self._receiver.station
if self._sound_mode_support:
self._sound_mode = self._receiver.sound_mode
self._sound_mode_raw = self._receiver.sound_mode_raw
@property
def unique_id(self):
"""Return the unique id of the zone."""
return self._unique_id
@property
def device_info(self):
"""Return the device info of the receiver."""
if self._config_entry.data[CONF_SERIAL_NUMBER] is None:
return None
device_info = {
"identifiers": {(DOMAIN, self._config_entry.unique_id)},
"manufacturer": self._config_entry.data[CONF_MANUFACTURER],
"name": self._config_entry.title,
"model": f"{self._config_entry.data[CONF_MODEL]}-{self._config_entry.data[CONF_TYPE]}",
}
if self._config_entry.data[CONF_MAC] is not None:
device_info["connections"] = {
(dr.CONNECTION_NETWORK_MAC, self._config_entry.data[CONF_MAC])
}
return device_info
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
return self._muted
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
# Volume is sent in a format like -50.0. Minimum is -80.0,
# maximum is 18.0
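        # e.g. a receiver value of -50.0 maps to (-50.0 + 80) / 100 = 0.3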
return (float(self._volume) + 80) / 100
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""Return a list of available input sources."""
return self._source_list
@property
def sound_mode(self):
"""Return the current matched sound mode."""
return self._sound_mode
@property
def sound_mode_list(self):
"""Return a list of available sound modes."""
return self._sound_mode_list
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._current_source in self._receiver.netaudio_func_list:
return self._supported_features_base | SUPPORT_MEDIA_MODES
return self._supported_features_base
@property
def media_content_id(self):
"""Content ID of current playing media."""
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._state == STATE_PLAYING or self._state == STATE_PAUSED:
return MEDIA_TYPE_MUSIC
return MEDIA_TYPE_CHANNEL
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._current_source in self._receiver.playing_func_list:
return self._media_image_url
return None
@property
def media_title(self):
"""Title of current playing media."""
if self._current_source not in self._receiver.playing_func_list:
return self._current_source
if self._title is not None:
return self._title
return self._frequency
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._artist is not None:
return self._artist
return self._band
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
if self._album is not None:
return self._album
return self._station
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return None
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return None
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return None
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return None
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return None
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
if (
self._sound_mode_raw is not None
and self._sound_mode_support
and self._power == "ON"
):
return {ATTR_SOUND_MODE_RAW: self._sound_mode_raw}
return {}
def media_play_pause(self):
"""Play or pause the media player."""
return self._receiver.toggle_play_pause()
def media_play(self):
"""Send play command."""
return self._receiver.play()
def media_pause(self):
"""Send pause command."""
return self._receiver.pause()
def media_previous_track(self):
"""Send previous track command."""
return self._receiver.previous_track()
def media_next_track(self):
"""Send next track command."""
return self._receiver.next_track()
def select_source(self, source):
"""Select input source."""
# Ensure that the AVR is turned on, which is necessary for input
# switch to work.
self.turn_on()
return self._receiver.set_input_func(source)
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
return self._receiver.set_sound_mode(sound_mode)
def turn_on(self):
"""Turn on media player."""
if self._receiver.power_on():
self._state = STATE_ON
def turn_off(self):
"""Turn off media player."""
if self._receiver.power_off():
self._state = STATE_OFF
def volume_up(self):
"""Volume up the media player."""
return self._receiver.volume_up()
def volume_down(self):
"""Volume down media player."""
return self._receiver.volume_down()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
# Volume has to be sent in a format like -50.0. Minimum is -80.0,
# maximum is 18.0
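        # e.g. a 0.3 level from the UI maps to (0.3 * 100) - 80 = -50.0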
volume_denon = float((volume * 100) - 80)
if volume_denon > 18:
volume_denon = float(18)
try:
if self._receiver.set_volume(volume_denon):
self._volume = volume_denon
except ValueError:
pass
def mute_volume(self, mute):
"""Send mute command."""
return self._receiver.mute(mute)
def get_command(self, command, **kwargs):
"""Send generic command."""
self._receiver.send_get_command(command)
| StarcoderdataPython |
#/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
from enum import IntEnum
from framework import test_config
from proxies.bootstrap_server_xml_rpc import BootstrapServerXmlRpc
from proxies.gateway_server_xml_rpc import GWServerXmlRpc
from proxies.gateway_client_xml_rpc import GWClientXmlRpc
from proxies.device_server_client_http import DeviceServerClientHttp
from proxies.constrained_client_xml_rpc import ConstrainedClientXmlRpc
from helpers.httphelper import HttpHelper
class UnsupportedProxyProtocolException(Exception):
pass
class ProxyProtocol(IntEnum):
XMLRPC = 0
class TopologyManager(object):
def __init__(self):
self.deviceServerClients = []
self.bootstrapServers = []
self.gatewayServers = []
self.gatewayClients = []
self.constrainedClients = []
@classmethod
def custom(cls, deviceServers=None, bootstrapServers=None, gatewayServers=None, gatewayClients=None, constrainedClients=None):
return cls._loadTopology(deviceServers, bootstrapServers, gatewayServers, gatewayClients, constrainedClients)
@classmethod
def fromConfigFile(cls, topologyName):
topologyConfig = test_config.config["topologies"][topologyName]
print 'Loading Topology: ' + topologyName
return cls._loadTopology(topologyConfig.get("device-servers", None), \
topologyConfig.get("bootstrap-servers", None), \
topologyConfig.get("gateway-servers", None), \
topologyConfig.get("gateway-clients", None), \
topologyConfig.get("constrained-clients", None))
@classmethod
def _loadTopology(cls, deviceServers, bootstrapServers, gatewayServers, gatewayClients, constrainedClients):
topology = cls()
#print("Loading topology: ", topologyConfig)
if deviceServers is not None:
deviceServers = deviceServers.split(",")
for deviceServer in deviceServers:
deviceServerConfig = test_config.config["device-servers"][deviceServer.strip()]
if deviceServerConfig is not None:
topology.deviceServerClients.append(DeviceServerClientHttp(deviceServerConfig))
if constrainedClients is not None:
constrainedClients = constrainedClients.split(",")
for constrainedClient in constrainedClients:
constrainedClientConfig = test_config.config["constrained-clients"][constrainedClient.strip()]
proxyProtocol = topology._getProxyProtocolFromConfig(constrainedClientConfig)
if proxyProtocol == ProxyProtocol.XMLRPC:
topology.constrainedClients.append(ConstrainedClientXmlRpc(constrainedClientConfig))
else:
raise UnsupportedProxyProtocolException(proxyProtocol)
if bootstrapServers is not None:
bootstrapServers = bootstrapServers.split(",")
for bootstrapServer in bootstrapServers:
bootstrapServerConfig = test_config.config["bootstrap-servers"][bootstrapServer.strip()]
proxyProtocol = topology._getProxyProtocolFromConfig(bootstrapServerConfig)
if proxyProtocol == ProxyProtocol.XMLRPC:
topology.bootstrapServers.append(BootstrapServerXmlRpc(bootstrapServerConfig))
else:
raise UnsupportedProxyProtocolException(proxyProtocol)
if gatewayServers is not None:
gatewayServers = gatewayServers.split(",")
for gatewayServer in gatewayServers:
gatewayServerConfig = test_config.config["gateway-servers"][gatewayServer.strip()]
proxyProtocol = topology._getProxyProtocolFromConfig(gatewayServerConfig)
if proxyProtocol == ProxyProtocol.XMLRPC:
topology.gatewayServers.append(GWServerXmlRpc(gatewayServerConfig))
else:
raise UnsupportedProxyProtocolException(proxyProtocol)
if gatewayClients is not None:
gatewayClients = gatewayClients.split(",")
for gatewayClient in gatewayClients:
gatewayClientConfig = test_config.config["gateway-clients"][gatewayClient.strip()]
proxyProtocol = topology._getProxyProtocolFromConfig(gatewayClientConfig)
if proxyProtocol == ProxyProtocol.XMLRPC:
topology.gatewayClients.append(GWClientXmlRpc(gatewayClientConfig))
else:
raise UnsupportedProxyProtocolException(proxyProtocol)
return topology
def __del__(self):
pass
def _getProxyProtocolFromConfig(self, config):
proxyProtocol = test_config.config["proxies"][config["proxy"]]["protocol"]
return ProxyProtocol.__members__[proxyProtocol]
| StarcoderdataPython |
<filename>streamlit_files/utils.py
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
# Replicating the Transformer class to preprocess the client's information
class Transformer(BaseEstimator, TransformerMixin):
def __init__(self, quant_columns, cat_columns):
self.quant_columns = quant_columns
self.cat_columns = cat_columns
self.enc = OneHotEncoder()
self.scaler = MinMaxScaler()
    def fit(self, x, y=None): # class functions must keep the same name as the method being used
self.enc.fit(x[self.cat_columns])
self.scaler.fit(x[self.quant_columns])
return self
    def transform(self, x, y=None): # class functions must keep the same name as the method being used
x_categoricals = pd.DataFrame(data=self.enc.transform(x[self.cat_columns]).toarray(),
columns=self.enc.get_feature_names(self.cat_columns))
x_quants = pd.DataFrame(data=self.scaler.transform(x[self.quant_columns]),
columns=self.quant_columns)
x = pd.concat([x_quants , x_categoricals], axis=1)
return x
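# --- Illustrative fit/transform sketch (added; the column names below are made up) ---
if __name__ == '__main__':
    df = pd.DataFrame({'age': [25, 40, 33],
                       'income': [2500.0, 8000.0, 4300.0],
                       'gender': ['F', 'M', 'F']})
    pre = Transformer(quant_columns=['age', 'income'], cat_columns=['gender'])
    print(pre.fit(df).transform(df))  # scaled quantitatives plus one-hot gender columns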
| StarcoderdataPython |
import socket
from django.conf import settings
from djutils.dashboard.provider import PanelProvider
from djutils.dashboard.registry import registry
try:
import psycopg2
except ImportError:
psycopg2 = None
def get_db_setting(key):
try:
return settings.DATABASES['default'][key]
except KeyError:
return getattr(settings, 'DATABASE_%s' % key)
# stored as either 'localhost:6379' or 'localhost:6379:0'
REDIS_SERVER = getattr(settings, 'DASHBOARD_REDIS_CONNECTION', None)
# stored as 'localhost:11211'
MEMCACHED_SERVER = getattr(settings, 'DASHBOARD_MEMCACHED_CONNECTION', None)
class PostgresPanelProvider(PanelProvider):
conn = None
def connect(self):
self.conn = self.get_conn()
def get_conn(self):
return psycopg2.connect(
database=get_db_setting('NAME'),
user=get_db_setting('USER') or 'postgres',
host=get_db_setting('HOST') or 'localhost',
)
def execute(self, sql, params=None):
if not self.conn:
self.connect()
cursor = self.conn.cursor()
res = cursor.execute(sql, params or ())
return cursor
class PostgresQueryPanel(PostgresPanelProvider):
def get_title(self):
return 'Postgres Queries'
def get_data(self):
cursor = self.execute('SELECT datname, current_query, query_start FROM pg_stat_activity ORDER BY query_start;')
rows = cursor.fetchall()
idle = idle_trans = queries = 0
for (database, query, start) in rows:
if query == '<IDLE>':
idle += 1
elif query == '<IDLE> in transaction':
idle_trans += 1
else:
queries += 1
return {
'idle': idle,
'idle_in_trans': idle_trans,
'queries': queries
}
class PostgresUserPanel(PostgresPanelProvider):
def get_title(self):
return 'Postgres connections by User'
def get_data(self):
cursor = self.execute('SELECT usename, count(*) FROM pg_stat_activity WHERE procpid != pg_backend_pid() GROUP BY usename ORDER BY 1;')
data_dict = {}
for row in cursor.fetchall():
data_dict[row[0]] = row[1]
return data_dict
class PostgresConnectionsPanel(PostgresPanelProvider):
def get_title(self):
return 'Postgres connections by Type'
def get_data(self):
sql = """
SELECT tmp.state, COALESCE(count, 0) FROM
(VALUES ('active'), ('waiting'), ('idle'), ('idletransaction'), ('unknown')) AS tmp(state)
LEFT JOIN (
SELECT CASE
WHEN waiting THEN 'waiting'
WHEN current_query='<IDLE>' THEN 'idle'
WHEN current_query='<IDLE> in transaction' THEN 'idletransaction'
WHEN current_query='<insufficient privilege>' THEN 'unknown'
ELSE 'active' END AS state,
count(*) AS count
FROM pg_stat_activity WHERE procpid != pg_backend_pid()
GROUP BY CASE WHEN waiting THEN 'waiting' WHEN current_query='<IDLE>' THEN 'idle' WHEN current_query='<IDLE> in transaction' THEN 'idletransaction' WHEN current_query='<insufficient privilege>' THEN 'unknown' ELSE 'active' END
) AS tmp2
ON tmp.state=tmp2.state
ORDER BY 1
"""
cursor = self.execute(sql)
data_dict = {}
total = 0
for row in cursor.fetchall():
data_dict[row[0]] = row[1]
total += row[1]
data_dict['total'] = total
return data_dict
class PostgresConnectionsForDatabase(PostgresPanelProvider):
def get_title(self):
return 'Connections for site db'
def get_data(self):
sql = """
SELECT pg_database.datname, COALESCE(count,0) AS count
FROM pg_database
LEFT JOIN (
SELECT datname, count(*)
FROM pg_stat_activity
WHERE procpid != pg_backend_pid()
GROUP BY datname
) AS tmp ON pg_database.datname=tmp.datname
WHERE datallowconn AND pg_database.datname=%s ORDER BY 1
"""
cursor = self.execute(sql, (get_db_setting('NAME'),))
data_dict = {}
for row in cursor.fetchall():
data_dict[row[0]] = row[1]
return data_dict
class RedisPanelProvider(PanelProvider):
def get_info(self):
host, port = REDIS_SERVER.split(':')[:2]
sock = socket.socket()
try:
sock.connect((host, int(port)))
except:
return {}
sock.send('INFO\r\n')
data = sock.recv(4096)
data_dict = {}
for line in data.splitlines():
if ':' in line:
key, val = line.split(':', 1)
data_dict[key] = val
return data_dict
def get_key(self, key):
return self.get_info().get(key, 0)
class RedisConnectedClients(RedisPanelProvider):
def get_title(self):
return 'Redis connections'
def get_data(self):
return {'clients': self.get_key('connected_clients')}
class RedisMemoryUsage(RedisPanelProvider):
def get_title(self):
return 'Redis memory usage'
def get_data(self):
return {'memory': self.get_key('used_memory')}
class CPUInfo(PanelProvider):
def get_title(self):
return 'CPU Usage'
def get_data(self):
fh = open('/proc/loadavg', 'r')
contents = fh.read()
fh.close()
# grab the second value
second = contents.split()[1]
return {'loadavg': second}
class MemcachedPanelProvider(PanelProvider):
def get_stats(self):
host, port = MEMCACHED_SERVER.split(':')[:2]
sock = socket.socket()
try:
sock.connect((host, int(port)))
except:
return {}
sock.send('stats\r\n')
data = sock.recv(8192)
data_dict = {}
for line in data.splitlines():
if line.startswith('STAT'):
key, val = line.split()[1:]
data_dict[key] = val
return data_dict
class MemcachedHitMiss(MemcachedPanelProvider):
def get_title(self):
return 'Memcached hit/miss ratio'
def get_data(self):
memcached_stats = self.get_stats()
get_hits = float(memcached_stats.get('get_hits', 0))
get_misses = float(memcached_stats.get('get_misses', 0))
if get_hits and get_misses:
return {'hit-miss ratio': get_hits / get_misses}
return {'hit-miss-ration': 0}
class MemcachedMemoryUsage(MemcachedPanelProvider):
def get_title(self):
return 'Memcached memory usage'
def get_data(self):
memcached_stats = self.get_stats()
return {'bytes': memcached_stats.get('bytes', 0)}
class MemcachedItemsInCache(MemcachedPanelProvider):
def get_title(self):
return 'Memcached items in cache'
def get_data(self):
memcached_stats = self.get_stats()
return {'items': memcached_stats.get('curr_items', 0)}
registry.register(CPUInfo)
if REDIS_SERVER:
registry.register(RedisConnectedClients)
registry.register(RedisMemoryUsage)
if MEMCACHED_SERVER:
registry.register(MemcachedHitMiss)
registry.register(MemcachedMemoryUsage)
registry.register(MemcachedItemsInCache)
if 'psycopg2' in get_db_setting('ENGINE'):
registry.register(PostgresQueryPanel)
registry.register(PostgresUserPanel)
registry.register(PostgresConnectionsPanel)
registry.register(PostgresConnectionsForDatabase)
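# --- Sketch of a custom panel following the same pattern (added; illustrative only) ---
# A provider only needs get_title() and get_data(); registering it makes the
# dashboard poll it alongside the panels above.
class UptimePanel(PanelProvider):
    def get_title(self):
        return 'System uptime'
    def get_data(self):
        with open('/proc/uptime', 'r') as fh:
            return {'uptime_seconds': float(fh.read().split()[0])}
# registry.register(UptimePanel)  # enable on hosts that expose /proc/uptime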
| StarcoderdataPython |
<reponame>ji3g4m6zo6/JioNLP
# -*- coding=utf-8 -*-
# library: jionlp
# author: dongrixinyu
# license: Apache License 2.0
# Email: <EMAIL>
# github: https://github.com/dongrixinyu/JioNLP
# description: Preprocessing tool for Chinese NLP
from .extract_summary import ChineseSummaryExtractor
extract_summary = ChineseSummaryExtractor()
| StarcoderdataPython |
"""
Structure for parsed as dict `:type:` or `:rtype:` nested lines.
"""
from typing import Iterable, List, Any
class TypeDocLine:
"""
Structure for parsed as dict `:type:` or `:rtype:` nested lines.
Arguments:
name -- Argument or TypedDict key name
type_name -- Argument or TypedDict key type string.
line -- Raw original line parts.
description -- Rest of line for argument or TypedDict key definition.
indented -- Intended lines.
"""
def __init__(
self,
name: str = "",
type_name: str = "",
line: Iterable[str] = tuple(),
description: str = "",
indented: Iterable[Any] = tuple(),
) -> None:
self.line = "".join(line)
self.name = name
self.type_name = type_name
self.description = description
self._indented = indented
@property
def indented(self) -> List["TypeDocLine"]:
"""
Get indented lines list.
Returns:
A list of `TypeDocLine`.
"""
result: List[TypeDocLine] = []
for line in self._indented:
result.append(TypeDocLine(**line))
return result
@property
def required(self) -> bool:
"""
Whether the argument or TypedDict key is required.
"""
return "REQUIRED" in self.description or "**must**" in self.description
def render(self) -> str:
"""
Get original string with indentation.
Returns:
A string as close as possible to original.
"""
result: List[str] = []
indent = " " if self.line else ""
if self.line:
result.append(self.line)
result.append("")
for indented_line in self.indented:
for indented_line_line in indented_line.render().splitlines():
result.append(f"{indent}{indented_line_line}")
return "\n".join(result)
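# --- Illustrative round-trip sketch (added; not part of the original module) ---
# Nested lines are supplied as plain dicts and materialized lazily by `indented`.
if __name__ == '__main__':
    line = TypeDocLine(
        name='Config',
        type_name='dict',
        line=('Config (dict) -- service settings.',),
        description='service settings.',
        indented=({'name': 'Region', 'type_name': 'str',
                   'line': ('Region (str) -- **must** be set.',),
                   'description': '**must** be set.'},),
    )
    print(line.required)              # False: only the nested key says **must**
    print(line.indented[0].required)  # True
    print(line.render())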
| StarcoderdataPython |
import discord
from discord.ext import commands
import asyncio
import aiohttp
from datetime import datetime, timedelta
import random
import json
import re
# Owner only command
class Owner(commands.Cog):
"""Owner only commands."""
def __init__(self, bot):
self.bot = bot
self.color = 0x9842f4
def isOwner(): # for decorators
async def predicate(ctx):
return ctx.bot.isOwner(ctx)
return commands.check(predicate)
# get the general channel of a server, if any
def getGeneral(self, guild):
for c in guild.text_channels:
if c.name.lower() == 'general':
return c
return None
async def guildList(self):
embed = discord.Embed(title=self.bot.user.name, color=self.color)
embed.set_thumbnail(url=self.bot.user.avatar_url)
msg = ""
for s in self.bot.guilds:
msg += "**{}:** {} owned by {} ({})\n".format(s.name, s.id, s.owner.name, s.owner.id)
if msg == "": msg = "None"
embed.add_field(name="Server List", value=msg, inline=False)
msg = ""
for s in self.bot.newserver['pending']:
msg += "**{}:** {}\n".format(s, self.bot.newserver['pending'][s])
if msg == "": msg = "None"
embed.add_field(name="Pending Servers", value=msg, inline=False)
msg = ""
for s in self.bot.newserver['servers']:
msg += "[{}] ".format(s)
if msg == "": msg = "None"
embed.add_field(name="Banned Servers", value=msg, inline=False)
msg = ""
for s in self.bot.newserver['owners']:
msg += "[{}] ".format(s)
if msg == "": msg = "None"
embed.add_field(name="Banned owners", value=msg, inline=False)
await self.bot.send('debug', embed=embed)
@commands.command(no_pm=True)
@isOwner()
async def clear(self, ctx):
"""Clear the debug channel"""
try:
await self.bot.channels['debug'].purge()
except Exception as e:
await self.bot.sendError('clear', str(e))
@commands.command(no_pm=True)
@isOwner()
async def leave(self, ctx, id: int):
"""Make the bot leave a server (Owner only)"""
try:
toleave = self.bot.get_guild(id)
await toleave.leave()
await ctx.message.add_reaction('✅') # white check mark
await self.guildList()
except Exception as e:
await self.bot.sendError('leave', str(e))
@commands.command(no_pm=True, aliases=['banS', 'ban', 'bs'])
@isOwner()
async def ban_server(self, ctx, id: int):
"""Command to leave and ban a server (Owner only)"""
id = str(id)
try:
if id not in self.bot.newserver['servers']:
self.bot.newserver['servers'].append(id)
self.bot.savePending = True
try:
toleave = self.bot.get_guild(id)
await toleave.leave()
except:
pass
await ctx.message.add_reaction('✅') # white check mark
await self.guildList()
except Exception as e:
await self.bot.sendError('ban_server', str(e))
@commands.command(no_pm=True, aliases=['banO', 'bo'])
@isOwner()
async def ban_owner(self, ctx, id: int):
"""Command to ban a server owner and leave all its servers (Owner only)"""
id = str(id)
try:
if id not in self.bot.newserver['owners']:
self.bot.newserver['owners'].append(id)
self.bot.savePending = True
for g in self.bot.guilds:
try:
if str(g.owner.id) == id:
await g.leave()
except:
pass
await ctx.message.add_reaction('✅') # white check mark
await self.guildList()
except Exception as e:
await self.bot.sendError('ban_owner', str(e))
@commands.command(no_pm=True, aliases=['a'])
@isOwner()
async def accept(self, ctx, id: int):
"""Command to accept a pending server (Owner only)"""
sid = str(id)
try:
if sid in self.bot.newserver['pending']:
self.bot.newserver['pending'].pop(sid)
self.bot.savePending = True
guild = self.bot.get_guild(id)
if guild:
await guild.owner.send(embed=self.bot.buildEmbed(title="I'm now available for use in {}".format(guild.name), description="Use `$help` for my list of commands, `$help Management` for mod only commands.\nUse `$setPrefix` to change the command prefix (default: `$`)\nIf you encounter an issue, use `$bug_report` and describe the problem.\nIf I'm down or slow, I might be rebooting, in maintenance or Discord itself might be acting up.", thumbnail=guild.icon_url))
await ctx.message.add_reaction('✅') # white check mark
await self.guildList()
except Exception as e:
await self.bot.sendError('accept', str(e))
@commands.command(no_pm=True, aliases=['r'])
@isOwner()
async def refuse(self, ctx, id: int):
"""Command to refuse a pending server (Owner only)"""
id = str(id)
try:
if id in self.bot.newserver['pending']:
self.bot.newserver['pending'].pop(id)
self.bot.savePending = True
                guild = self.bot.get_guild(int(id))
if guild:
await guild.leave()
await ctx.message.add_reaction('✅') # white check mark
await self.guildList()
except Exception as e:
await self.bot.sendError('refuse', str(e))
@commands.command(name='save', no_pm=True, aliases=['s'])
@isOwner()
async def _save(self, ctx):
"""Command to make a snapshot of the bot's settings (Owner only)"""
await self.bot.autosave(True)
await ctx.message.add_reaction('✅') # white check mark
@commands.command(name='load', no_pm=True, aliases=['l'])
@isOwner()
async def _load(self, ctx, drive : str = ""):
"""Command to reload the bot settings (Owner only)"""
self.bot.cancelTask('check_buff')
if drive == 'drive':
if not self.bot.drive.load():
await self.bot.send('debug', embed=self.bot.buildEmbed(title=ctx.guild.me.name, description="Failed to retrieve save.json on the Google Drive", color=self.color))
if self.bot.load():
self.bot.savePending = False
self.bot.runTask('check_buff', self.bot.get_cog('GuildWar').checkGWBuff)
await self.bot.send('debug', embed=self.bot.buildEmbed(title=ctx.guild.me.name, description="save.json reloaded", color=self.color))
else:
await self.bot.send('debug', embed=self.bot.buildEmbed(title=ctx.guild.me.name, description="save.json loading failed", color=self.color))
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, aliases=['server'])
@isOwner()
async def servers(self, ctx):
"""List all servers (Owner only)"""
await self.guildList()
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, aliases=['checkbuff'])
@isOwner()
async def buffcheck(self, ctx): # debug stuff
"""List the GW buff list for (You) (Owner only)"""
await ctx.message.add_reaction('✅') # white check mark
msg = ""
for b in self.bot.gw['buffs']:
msg += '{0:%m/%d %H:%M}: '.format(b[0])
if b[1]: msg += '[Normal Buffs] '
if b[2]: msg += '[FO Buffs] '
if b[3]: msg += '[Warning] '
if b[4]: msg += '[Double duration] '
msg += '\n'
await self.bot.send('debug', embed=self.bot.buildEmbed(title="{} Guild War (You) Buff debug check".format(self.bot.getEmote('gw')), description=msg, color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def setMaintenance(self, ctx, day : int, month : int, hour : int, duration : int):
"""Set a maintenance date (Owner only)"""
try:
self.bot.maintenance['time'] = datetime.now().replace(month=month, day=day, hour=hour, minute=0, second=0, microsecond=0)
self.bot.maintenance['duration'] = duration
self.bot.maintenance['state'] = True
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
except Exception as e:
await self.bot.sendError('setmaintenance', str(e))
@commands.command(no_pm=True)
@isOwner()
async def delMaintenance(self, ctx):
"""Delete the maintenance date (Owner only)"""
self.bot.maintenance = {"state" : False, "time" : None, "duration" : 0}
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, aliases=['as'])
@isOwner()
async def addStream(self, ctx, *, txt : str):
"""Append a line to the stream command text (Owner only)
separate with ';' for multiple lines"""
strs = txt.split(';')
msg = ""
for s in strs:
self.bot.stream['content'].append(s)
msg += "`" + s + "`\n"
self.bot.savePending = True
await ctx.send(embed=self.bot.buildEmbed(title="Stream Settings", description="Appended the following lines:\n" + msg, color=self.color))
@commands.command(no_pm=True, aliases=['sst'])
@isOwner()
async def setStreamTime(self, ctx, day : int, month : int, year : int, hour : int):
"""Set the stream time (Owner only)
The text needs to contain {} for the cooldown to show up"""
try:
self.bot.stream['time'] = datetime.now().replace(year=year, month=month, day=day, hour=hour, minute=0, second=0, microsecond=0)
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
except Exception as e:
await self.bot.sendError('setstreamtime', str(e))
@commands.command(no_pm=True, aliases=['cs'])
@isOwner()
async def clearStream(self, ctx):
"""Clear the stream command text (Owner only)"""
self.bot.stream['content'] = []
self.bot.stream['time'] = None
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, aliases=['dsl'])
@isOwner()
async def delStreamLine(self, ctx, line : int = 0, many : int = 1):
"""Delete a line from stream command text (Owner only)
By default, the first line is deleted
You can specify how many you want to delete"""
if many < 1:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="You can't delete less than one line", color=self.color))
elif line < len(self.bot.stream['content']) and line >= 0:
if many + line > len(self.bot.stream['content']):
many = len(self.bot.stream['content']) - line
msg = ""
for i in range(0, many):
msg += self.bot.stream['content'].pop(line) + "\n"
self.bot.savePending = True
await ctx.send(embed=self.bot.buildEmbed(title="Stream Settings", description="Removed the following lines:\n" + msg, color=self.color))
else:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="Invalid line number", color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def setSchedule(self, ctx, *, txt : str):
"""Set the GBF schedule for the month (Owner only)
Use ; to separate elements"""
self.bot.schedule = txt.split(';')
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def getSchedule(self, ctx):
"""Retrieve the monthly schedule from @granble_en (Owner only / Tweepy only)
The tweet must be recent"""
tw = self.bot.getTwitterTimeline('granblue_en')
if tw is not None:
for t in tw:
txt = t.text
if txt.find(" = ") != -1 and txt.find("chedule") != -1:
s = txt.find("https://t.co/")
if s != -1: txt = txt[:-1]
lines = txt.split('\n')
msg = lines[0] + '\n`'
for i in range(1, len(lines)):
if lines[i] != "":
msg += lines[i].replace(" = ", ";") + ";"
msg = msg[:-1]
msg += "`"
await self.bot.send('debug', embed=self.bot.buildEmbed(title="Automatic schedule detection", description=msg, color=self.color))
await ctx.message.add_reaction('✅') # white check mark
return
await ctx.message.add_reaction('❎') # white negative mark
@commands.command(no_pm=True)
@isOwner()
async def cleanSchedule(self, ctx):
"""Remove expired entries from the schedule (Owner only)"""
c = self.bot.getJST()
new_schedule = []
for i in range(0, len(self.bot.schedule), 2):
try:
date = self.bot.schedule[i].replace(" ", "").split("-")[-1].split("/")
x = c.replace(month=int(date[0]), day=int(date[1])+1, microsecond=0)
if c - x > timedelta(days=160):
x = x.replace(year=x.year+1)
if c >= x:
continue
except:
pass
new_schedule.append(self.bot.schedule[i])
new_schedule.append(self.bot.schedule[i+1])
self.bot.schedule = new_schedule
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def setStatus(self, ctx, *, terms : str):
"""Change the bot status (Owner only)"""
await self.bot.change_presence(status=discord.Status.online, activity=discord.activity.Game(name=terms))
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def banRollID(self, ctx, id: int):
"""ID based Ban for $rollranking (Owner only)"""
id = str(id)
if id not in self.bot.spark[1]:
self.bot.spark[1].append(id)
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, aliases=['unbanspark'])
@isOwner()
async def unbanRoll(self, ctx, id : int):
"""Unban an user from all the roll ranking (Owner only)
Ask me for an unban (to avoid abuses)"""
id = str(id)
if id in self.bot.spark[1]:
i = 0
while i < len(self.bot.spark[1]):
if id == self.bot.spark[1][i]: self.bot.spark[1].pop(i)
else: i += 1
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def cleanRoll(self, ctx):
"""Remove users with 0 rolls (Owner only)"""
count = 0
for k in list(self.bot.spark[0].keys()):
sum = self.bot.spark[0][k][0] + self.bot.spark[0][k][1] + self.bot.spark[0][k][2]
if sum == 0:
self.bot.spark[0].pop(k)
count += 1
if count > 0:
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def resetGacha(self, ctx):
"""Reset the gacha settings"""
self.bot.gbfdata['gachabanner'] = None
self.bot.gbfdata['gachacontent'] = None
self.bot.gbfdata['gachatime'] = None
self.bot.gbfdata['gachatimesub'] = None
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def logout(self, ctx):
"""Make the bot quit (Owner only)"""
await self.bot.autosave()
self.bot.running = False
await self.bot.logout()
@commands.command(no_pm=True)
@isOwner()
async def config(self, ctx):
"""Post the current config file in the debug channel (Owner only)"""
try:
with open('config.json', 'rb') as infile:
await self.bot.send('debug', 'config.json', file=discord.File(infile))
except Exception as e:
await self.bot.sendError('config', str(e))
@commands.command(no_pm=True)
@isOwner()
async def cleanSave(self, ctx):
"""Do some clean up (Owner only)"""
guild_ids = []
for s in self.bot.guilds:
guild_ids.append(str(s.id))
for k in list(self.bot.permitted.keys()):
if k not in guild_ids:
self.bot.permitted.pop(k)
self.bot.savePending = True
for k in list(self.bot.news.keys()):
if k not in guild_ids or len(self.bot.news[k]) == 0:
self.bot.news.pop(k)
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def broadcast(self, ctx, *, terms):
"""Broadcast a message (Owner only)"""
if len(terms) == 0:
return
embed=discord.Embed(title="{} Broadcast".format(ctx.guild.me.display_name), description=terms, thumbnail=ctx.guild.me.avatar_url, color=self.color)
for g in self.bot.news:
for id in self.bot.news[g]:
try:
channel = self.bot.get_channel(id)
await channel.send(embed=embed)
except Exception as e:
self.bot.sendError('broadcast', str(e))
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
async def newgwtask(self, ctx):
"""Start a new checkGWBuff() task (Owner only)"""
self.bot.runTask('check_buff', self.bot.get_cog('GuildWar').checkGWBuff)
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True)
@isOwner()
@commands.cooldown(1, 10, commands.BucketType.guild)
async def invite(self, ctx):
"""Post the invite link (Owner only)"""
await self.bot.send('debug', embed=self.bot.buildEmbed(title="Invite Request", description="{} ▫️ {}".format(ctx.author.name, ctx.author.id), thumbnail=ctx.author.avatar_url, timestamp=datetime.utcnow(), color=self.color))
await ctx.author.send(embed=self.bot.buildEmbed(title=ctx.guild.me.name, description="{}\nYou'll have to wait for my owner approval.\nMisuses will result in a ban.".format(self.bot.strings["invite()"]), thumbnail=ctx.guild.me.avatar_url, timestamp=datetime.utcnow(), color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def nitro(self, ctx):
"""Get the nitro boost status of the guild (Owner only)"""
guild = ctx.guild
await ctx.send(embed=self.bot.buildEmbed(title=guild.name + " status", description="Premium Tier: {}\nBoosted members: {}\nIcon animated: {}".format(guild.premium_tier, guild.premium_subscription_count, guild.is_icon_animated()), thumbnail=guild.icon_url, footer=str(guild.id), color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def purgeUbhl(self, ctx):
"""Remove inactive users from the /gbfg/ ubaha-hl channel (Owner only)"""
ubhl_c = self.bot.get_channel(self.bot.ids['gbfg_ubhl'])
gbfg_g = self.bot.get_guild(self.bot.ids['gbfg'])
whitelist = {}
await self.bot.react(ctx, 'time')
async for message in ubhl_c.history(limit=10000):
            if str(message.author) not in whitelist:
                whitelist[str(message.author)] = 0
i = 0
for member in gbfg_g.members:
for r in member.roles:
if r.name == 'UBaha HL':
if str(member) in whitelist:
pass
else:
await member.remove_roles(r)
i += 1
break
await self.bot.unreact(ctx, 'time')
await ctx.send(embed=self.bot.buildEmbed(title="*ubaha-hl* purge results", description="{} inactive user(s)".format(i), color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def purgeLucilius(self, ctx):
"""Remove inactive users from the /gbfg/ lucilius-hard channel (Owner only)"""
        luci_c = self.bot.get_channel(self.bot.lucilius['main'])
gbfg_g = self.bot.get_guild(self.bot.ids['gbfg'])
whitelist = {}
await self.bot.react(ctx, 'time')
async for message in luci_c.history(limit=10000):
            if str(message.author) not in whitelist:
                whitelist[str(message.author)] = 0
i = 0
for member in gbfg_g.members:
for r in member.roles:
if r.name == 'Lucilius HL':
if str(member) in whitelist:
pass
else:
await member.remove_roles(r)
i += 1
break
await self.bot.unreact(ctx, 'time')
        await ctx.send(embed=self.bot.buildEmbed(title="*lucilius-hard* purge results", description="{} inactive user(s)".format(i), color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def gbfg_inactive(self, ctx):
"""Remove inactive users from the /gbfg/ server (Owner only)"""
g = self.bot.get_guild(self.bot.ids['gbfg'])
t = datetime.utcnow() - timedelta(days=30)
whitelist = {}
await self.bot.react(ctx, 'time')
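        # Editor note: the loop below whitelists every author seen in the last 30
        # days of channel history; members missing from that whitelist are kicked.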
for c in g.channels:
try:
async for message in c.history(limit=50000):
if message.created_at < t:
break
whitelist[message.author.id] = 0
except:
pass
await ctx.send(embed=self.bot.buildEmbed(title="/gbfg/ purge starting", description="Kicking {} inactive user(s)".format(len(g.members) - len(whitelist)), color=self.color))
i = 0
for member in g.members:
try:
if member.id not in whitelist:
await member.kick()
i += 1
except:
pass
await self.bot.unreact(ctx, 'time')
await ctx.send(embed=self.bot.buildEmbed(title="/gbfg/ purge results", description="{} inactive user(s) successfully kicked".format(i), color=self.color))
@commands.command(no_pm=True)
@isOwner()
async def gbfg_emote(self, ctx):
"""Check emote usage in the /gbfg/ server (Owner only)"""
await self.bot.react(ctx, 'time')
g = self.bot.get_guild(self.bot.ids['gbfg'])
u = self.bot.get_user(156948874630660096) # snak
emotecount = {}
for e in g.emojis:
emotecount[str(e)] = 0
        regex = re.compile(r"^<:\w*:\d+>$|^:\w*:$")
for c in g.channels:
try:
async for message in c.history(limit=50000):
try:
res = regex.findall(message.content)
for r in res:
if r in emotecount:
emotecount[r] += 1
except:
pass
except:
pass
msg = ""
for e in emotecount:
msg += "{} ▫️ {} use(s)\n".format(e, emotecount[e])
if len(msg) > 1800:
await u.send(msg)
msg = ""
await self.bot.unreact(ctx, 'time')
if len(msg) > 0:
await u.send(msg)
await ctx.send('DONE')
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isOwner()
async def getfile(self, ctx, *, filename: str):
"""Retrieve a bot file (Owner only)"""
try:
with open(filename, 'rb') as infile:
await self.bot.send('debug', file=discord.File(infile))
await ctx.message.add_reaction('✅') # white check mark
except Exception as e:
await self.bot.sendError('getfile', str(e))
@commands.command(no_pm=True)
@isOwner()
async def punish(self, ctx):
"""Punish the bot"""
await ctx.send("Please, Master, make it hurt.")
@commands.command(no_pm=True)
@isOwner()
async def snackcount(self, ctx):
"""Count Snacks mono emote posts"""
await self.bot.react(ctx, 'time')
sc = 0
mc = 0
        regex = re.compile(r"^<a?:\w*:\d+>$|^:\w*:$")
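        # Editor note: this pattern matches messages consisting of nothing but a
        # single custom emote, e.g. "<:name:123456>", "<a:name:123456>" or ":name:".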
c = ctx.channel
try:
async for message in c.history(limit=50000):
try:
if message.author.id == self.bot.ids['snacks']:
sc += 1
if regex.search(message.content):
mc += 1
except:
pass
except:
pass
        percentage = (mc / sc * 100) if sc else 0
        await ctx.send(embed=self.bot.buildEmbed(title="Results", description="{} message(s) from Snacks in the last 50000 messages of this channel.\n{} are mono-emotes ({:.2f}%).".format(sc, mc, percentage), color=self.color)) | StarcoderdataPython |
3201820 | <reponame>serhankk/Kac-Sayfa
"""
Returns the page count of a book based on the
author name and book title entered by the user.
"""
import requests
import sys
from bs4 import BeautifulSoup
headers_param = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"}
print("Kaç-Sayfa".center(50, '-'))
print("""Hoşgeldiniz. Lütfen kaç sayfa olduğunu
öğrenmek istediğiniz kitabın adını ve daha tutarlı
bilgi gelmesi için yazar adını da giriniz.
Ardından program sizin için arama yapacaktır.
Programdan çıkmak için 0 yazmanız yeterlidir.""")
print("-" * 50)
while True:
USER_INPUT = str(input("Yazar ve Kitap Adı: ")).replace(" ", "+")
if USER_INPUT == "0":
print("Hoşçakalın!")
sys.exit("-" * 50)
SEARCH = "https://www.google.com/search?q=" + USER_INPUT + "+kac+sayfa"
print("Sorgu\t\t : " + SEARCH)
response = requests.get(SEARCH, headers=headers_param)
soup = BeautifulSoup(response.content, "lxml", from_encoding="utf-8")
BOOK_INFORMATION_FLAG = False
PAPER_FLAG = False
try:
book_name = soup.find("div", attrs={"class": "iKJnec"}).text
        BOOK_INFORMATION_FLAG = True
print("Kitap Bilgisi\t : " + book_name)
except AttributeError:
print("Kitap Bilgisi\t :", None)
book_information = soup.find_all("div", attrs={"class": "Crs1tb"})
for i, _ in enumerate(book_information):
tds = book_information[i].find_all("td")
for j, _ in enumerate(tds):
if tds[j].text in ("Sayfa Sayısı:", "Sayfa", "Sayfa Sayısı"):
book_paper = tds[j + 1].text
PAPER_FLAG = True
print("Sayfa Sayısı\t : " + book_paper)
if BOOK_INFORMATION_FLAG is False and PAPER_FLAG is False:
print('Kitap bilgisi bulunamadı..')
print("-" * 50)
| StarcoderdataPython |
3359765 | <reponame>brefra/python-plugwise
"""
Use of this source code is governed by the MIT license found in the LICENSE file.
Base for Plugwise messages
"""
from plugwise.constants import (
MESSAGE_FOOTER,
MESSAGE_HEADER,
UTF8_DECODE,
)
from plugwise.util import crc_fun
class ParserError(Exception):
"""
Error when invalid message is received
"""
pass
class PlugwiseMessage(object):
def serialize(self):
"""
return message in a serialized format that can be sent out on wire
return: bytes
"""
args = b"".join(a.serialize() for a in self.args)
msg = self.ID
if self.mac != "":
msg += self.mac
msg += args
checksum = self.calculate_checksum(msg)
return MESSAGE_HEADER + msg + checksum + MESSAGE_FOOTER
def calculate_checksum(self, s):
return bytes("%04X" % crc_fun(s), UTF8_DECODE)
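# --- Illustrative sketch (editor addition, not part of the original module) ---
# A minimal concrete message built on PlugwiseMessage, showing how serialize()
# concatenates header + ID + MAC + argument payload + CRC16 + footer. The ID and
# MAC values below are made-up examples, not real Plugwise message definitions.
class _ExampleRequest(PlugwiseMessage):
    ID = b"000A"

    def __init__(self, mac=b"000D6F0000000000"):
        self.mac = mac
        self.args = []

# _ExampleRequest().serialize() would yield the framed, checksummed byte string.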
| StarcoderdataPython |
3286585 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
import tensorflow as tf
from cleverhans.devtools.checks import CleverHansTest
from runner import RunnerMultiGPU
class TestRunnerMultiGPU(CleverHansTest):
def setUp(self):
super(TestRunnerMultiGPU, self).setUp()
self.sess = tf.Session()
inputs = []
outputs = []
self.niter = 10
niter = self.niter
# A Simple graph with `niter` sub-graphs.
with tf.variable_scope(None, 'runner'):
for i in range(niter):
v = tf.get_variable('v%d' % i, shape=(100, 10))
w = tf.get_variable('w%d' % i, shape=(100, 1))
inputs += [{'v': v, 'w': w}]
outputs += [{'v': v, 'w': w}]
self.runner = RunnerMultiGPU(inputs, outputs, sess=self.sess)
def help_test_runner(self, ninputs, niter):
"""
Tests the MultiGPU runner by feeding in random Tensors for `ninputs`
steps. Then validating the output after `niter-1` steps.
"""
v_val = []
w_val = []
for i in range(ninputs):
v_val += [np.random.rand(100, 10)]
w_val += [np.random.rand(100, 1)]
fvals = self.runner.run({'v': v_val[i], 'w': w_val[i]})
self.assertTrue(len(fvals) == 0)
self.assertFalse(self.runner.is_finished())
for i in range(niter-ninputs-1):
self.assertFalse(self.runner.is_finished())
fvals = self.runner.run()
self.assertTrue(len(fvals) == 0)
self.assertFalse(self.runner.is_finished())
for i in range(ninputs):
self.assertFalse(self.runner.is_finished())
fvals = self.runner.run()
self.assertTrue('v' in fvals and 'w' in fvals)
self.assertTrue(np.allclose(fvals['v'], v_val[i]))
self.assertTrue(np.allclose(fvals['w'], w_val[i]))
self.assertTrue(self.runner.is_finished())
def test_queue_full(self):
self.help_test_runner(self.niter-1, self.niter)
def test_queue_half(self):
self.help_test_runner(self.niter//2, self.niter)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1768864 | <gh_stars>0
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn_porter import Porter
# Load data and train the classifier:
iris_data = load_iris()
X, y = iris_data.data, iris_data.target
clf = tree.DecisionTreeClassifier()
clf.fit(X, y)
# Export:
porter = Porter(clf, language='java')
output = porter.export()
with open(file="import.java", mode='w') as f:
f.write(output) | StarcoderdataPython |
3324581 | from .api import MojangAPI
from .user import MojangUser
| StarcoderdataPython |
3264952 | from pyspark.sql import SparkSession
class Extract:
"""
Functions to extract data from the source and return the spark dataframe.
"""
def __init__(self, spark, paths):
self.spark = spark
self.paths = paths
def _get_standard_csv(self, filepath, delimiter=","):
"""
Function to extract a dataframe from csv file.
Input: .csv file path
Option: delimiter
Output: spark dataframe
"""
return self.spark.read.format("csv").option("header", "true").option("delimiter", delimiter).load(filepath)
def get_cities_demographics(self):
return self._get_standard_csv(filepath=self.paths["us_cities_demographics"], delimiter=";")
def get_airports_codes(self):
return self._get_standard_csv(self.paths["airport_codes"])
def get_immigration(self):
return self.spark.read.parquet(self.paths["sas_data"])
def get_temperature_data(self):
return self._get_standard_csv(self.paths["temperature_data"]) | StarcoderdataPython |
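# --- Illustrative sketch (editor addition): driving the Extract helper above.
# The SparkSession settings and the file locations in `paths` are assumptions
# made purely for demonstration; the real pipeline supplies its own configuration.
if __name__ == "__main__":
    spark = SparkSession.builder.appName("extract-demo").getOrCreate()
    paths = {
        "us_cities_demographics": "data/us-cities-demographics.csv",
        "airport_codes": "data/airport-codes_csv.csv",
        "sas_data": "data/sas_data",
        "temperature_data": "data/GlobalLandTemperaturesByCity.csv",
    }
    extract = Extract(spark, paths)
    extract.get_cities_demographics().show(5)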
3386958 | <reponame>jugoodma/reu-2018
import sys
import boto3
import re
from xml.dom.minidom import parseString
import csv
from settings import *
mturk_environment = environments[mturk_type]
# users can pass in a profile as system argument
profile_name = sys.argv[1] if len(sys.argv) >= 2 else None
session = boto3.Session(profile_name = profile_name)
client = session.client(service_name='mturk', region_name='us-east-1', endpoint_url=mturk_environment['endpoint'])
# print account balance
print("Your account balance is {}".format(client.get_account_balance()['AvailableBalance']))
yes = input("THIS WILL DELETE ALL ACTIVE HITS, ARE YOU SURE YOU WANT TO DO THIS?? (yes) ")
if yes == 'yes':
hit_list = client.list_hits(MaxResults = 100)
num_results = hit_list['NumResults']
while num_results > 0:
for hit in hit_list['HITs']:
try:
print("Deleting " + hit['HITId'])
client.update_expiration_for_hit(HITId = hit['HITId'], ExpireAt = 0)
client.delete_hit(HITId = hit['HITId'])
except:
pass
hit_list = client.list_hits(MaxResults = 100)
num_results = hit_list['NumResults']
print("Done.")
| StarcoderdataPython |
3222997 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import viewsets, filters
from edw.models.customer import CustomerModel
from edw.rest.serializers.customer import CustomerSerializer
from edw.rest.viewsets import remove_empty_params_from_request
class CustomerViewSet(viewsets.ReadOnlyModelViewSet):
"""
A simple ViewSet for listing or retrieving customer.
"""
queryset = CustomerModel.objects.all()
serializer_class = CustomerSerializer
filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
search_fields = ('first_name', 'last_name')
ordering_fields = ('first_name', )
@remove_empty_params_from_request()
def initialize_request(self, *args, **kwargs):
return super(CustomerViewSet, self).initialize_request(*args, **kwargs)
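# --- Illustrative sketch (editor addition): one way to expose CustomerViewSet
# through a DRF router, e.g. from a urls.py. The 'customers' prefix is an assumption.
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r'customers', CustomerViewSet)
urlpatterns = router.urls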
| StarcoderdataPython |
1778291 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A CloudSQL ACL Resource."""
class CloudSqlAccessControl(object):
"""CloudSQL ACL Resource."""
def __init__(self, instance_name, authorized_networks, ssl_enabled,
project_number=None):
"""Initialize.
Args:
instance_name (str): CloudSQL instance name
authorized_networks (str): Authorized networks for CloudSQL
instance
ssl_enabled (str): SSL enabled
project_number (int): the project number
"""
self.instance_name = instance_name
self.authorized_networks = authorized_networks
self.ssl_enabled = (ssl_enabled == 'True')
self.project_number = project_number
def __hash__(self):
"""Return hash of properties.
Returns:
hash: The hash of the class properties.
"""
return hash((self.instance_name, self.authorized_networks,
self.ssl_enabled, self.project_number))
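# --- Illustrative sketch (editor addition): constructing and de-duplicating the
# ACL resource defined above. All field values are made-up sample data.
if __name__ == '__main__':
    acl = CloudSqlAccessControl(instance_name='prod-sql-01',
                                authorized_networks='0.0.0.0/0',
                                ssl_enabled='False',
                                project_number=123456789)
    # __hash__ makes instances usable as set/dict members, e.g. for de-duplication.
    print(len({acl, acl}), acl.ssl_enabled)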
| StarcoderdataPython |
3232514 | <filename>pytermcanvas/__init__.py
from .canvas import *
| StarcoderdataPython |
22834 | <filename>tests/unit/test_databeardb.py<gh_stars>1-10
'''
A unit test for databearDB.py
Runs manually at this point...
'''
import unittest
from databear.databearDB import DataBearDB
#Tests
class testDataBearDB(unittest.TestCase):
def setUp(self):
'''
        Set up test fixtures (nothing needed yet).
'''
        pass


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
27952 | #!/usr/bin/env python
'''
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.
'''
import os,sys,datetime,multiprocessing
from os.path import abspath,dirname,realpath,join
import log,traceback
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
elif 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def check(args, argv):
log.logger.debug('started')
try:
log.logger.debug('command line:\n'+ ' '.join(argv))
# check python version
version=sys.version_info
if (version[0] >= 3) and (version[1] >= 7):
log.logger.debug('Python version=%d.%d.%d' % (version[0], version[1], version[2]))
else:
log.logger.error('Please use Python 3.7 or later. Your Python is version %d.%d.' % (version[0], version[1]))
exit(1)
# check PATH
for i in ['blastn', 'bedtools']:
if which(i) is None:
log.logger.error('%s not found in $PATH. Please check %s is installed and added to PATH.' % (i, i))
exit(1)
# check files
if args.c is not None:
if os.path.exists(args.c) is False:
log.logger.error('CRAM file (%s) was not found.' % args.c)
exit(1)
elif args.b is not None:
if os.path.exists(args.b) is False:
log.logger.error('BAM file (%s) was not found.' % args.b)
exit(1)
else:
log.logger.error('Please specify BAM or CRAM file (-b or -c option).')
exit(1)
if args.c is not None:
if args.fa is None:
log.logger.error('Reference genome (%s) was not specified.' % args.fa)
exit(1)
elif os.path.exists(args.fa) is False:
log.logger.error('Reference genome (%s) was not found.' % args.fa)
exit(1)
# check prerequisite modules
from Bio.Seq import Seq
import gzip
from pybedtools import BedTool
import matplotlib
import pysam
except:
log.logger.error('\n'+ traceback.format_exc())
exit(1)
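# --- Illustrative sketch (editor addition): the which() helper above mimics the
# shell 'which' command, so a quick standalone sanity check could look like this.
if __name__ == '__main__':
    for exe in ('blastn', 'bedtools'):
        print(exe, '->', which(exe))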
| StarcoderdataPython |
1678903 | <filename>src/datafinder/core/configuration/datastores/datastore.py
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements different concrete data store configurations.
"""
import base64
from copy import copy, deepcopy
import sys
from datafinder.core.configuration.gen import datastores
__version__ = "$Revision-Id:$"
_WIN32_PLATFORM = "win32"
_PATH_SEPARATOR = ";"
class DefaultDataStore(object):
""" Represents the default configuration for data stores. """
_xmlBindingClass = datastores.default
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
self._dataLocationUri = None
self._parameters = dict()
if persistedStore is None:
self._store = self._xmlBindingClass(name, storeType, iconName, url, isDefault, owner)
else:
self._store = persistedStore
def toPersistenceRepresentation(self):
""" Returns the persistence representation. """
self._store.__dict__.update(self.__dict__)
return self._store
def __getattr__(self, name):
""" Automatically redirects property calls to the generated class. """
return getattr(self._store, name)
def __deepcopy__(self, _):
""" The deep copy implementation. """
store = self.toPersistenceRepresentation()
store = deepcopy(store)
return self.__class__(persistedStore=store)
def __copy__(self):
""" The copy implementation. """
store = self.toPersistenceRepresentation()
store = copy(store)
return self.__class__(persistedStore=store)
def __cmp__(self, other):
""" Implements comparison. """
try:
return cmp(self.name, other.name)
except AttributeError:
return 1
def __hash__(self):
return hash(self.name)
@property
def dataLocationUri(self):
"""
Returns the data location URI.
This is a convenience interface to simplify usage.
"""
return self._dataLocationUri
@property
def parameters(self):
"""
Returns additional parameters required for storage access.
This is a convenience interface to simplify usage.
"""
return self._parameters
@property
def isMigrated(self):
"""
Returns flag indicating whether the data store is migrated or not.
This is a convenience interface to simplify usage.
"""
try:
return len(self.isMigratedTo) > 0
except AttributeError:
return False
def _getStorageRealisation(self):
"""
Returns storage realization type.
This is a convenience interface to simplify usage.
"""
try:
return self._store.storageRealisation
except AttributeError:
return None
def _setStorageRealisation(self, value):
"""
Returns storage realization type.
This is a convenience interface to simplify usage.
"""
self._store.storageRealisation = value
storageRealisation = property(_getStorageRealisation, _setStorageRealisation)
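# Illustrative sketch (editor addition): a DefaultDataStore round-trips between the
# wrapper above and its generated XML-binding object. Sample values are assumptions.
def _example_default_store_roundtrip():
    store = DefaultDataStore(name="archive", storeType="Default",
                             url="http://example.org/data", isDefault=True)
    persisted = store.toPersistenceRepresentation()
    return persisted.name, store.isMigrated  # ("archive", False)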
class FileDataStore(DefaultDataStore):
""" Restricts properties of a File data store configuration. """
_xmlBindingClass = datastores.file
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
self._password = self._store.password
self._dataLocationUri = self._determineDataLocationUri()
        self._parameters = {"username": self.username, "password": self.password}
def _determineDataLocationUri(self):
""" Determines the correct data location. """
if _PATH_SEPARATOR in self.dataLocation:
windowsLocation, unixLocation = self.dataLocation.split(_PATH_SEPARATOR)
if sys.platform == _WIN32_PLATFORM:
return windowsLocation
else:
return unixLocation
else:
return self.dataLocation
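    # Editor note (illustrative): `dataLocation` may hold either a single path or a
    # Windows/Unix pair separated by ';' (e.g. "C:\\data\\store;/srv/data/store");
    # the branch above then picks the half matching the current platform.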
def __getPassword(self):
""" Getter for the password. """
if self._password is None:
return None
else:
return base64.decodestring(self._password)
def __setPassword(self, value):
""" Setter for the password. """
if value is None:
self._password = None
else:
try:
self._password = base64.encodestring(value)
except Exception:
raise ValueError("Irregular password has been provided.")
password = property(__getPassword, __setPassword)
def toPersistenceRepresentation(self):
""" Overwrites default implementation. """
store = DefaultDataStore.toPersistenceRepresentation(self)
store.password = self._password
return store
class FtpDataStore(DefaultDataStore):
""" Restricts properties of a FTP data store configuration. """
_xmlBindingClass = datastores.ftp
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
self._password = self._store.password
self._dataLocationUri = self.dataLocation
        self._parameters = {"username": self.username, "password": self.password}
def __getPassword(self):
""" Getter for the password. """
if self._password is None:
return None
else:
return base64.decodestring(self._password)
def __setPassword(self, value):
""" Setter for the password. """
if value is None:
self._password = None
else:
try:
self._password = base64.encodestring(value)
except Exception:
raise ValueError("Irregular password has been provided.")
password = property(__getPassword, __setPassword)
def toPersistenceRepresentation(self):
""" Overwrites default implementation. """
store = DefaultDataStore.toPersistenceRepresentation(self)
store.password = self._password
return store
class OfflineDataStore(DefaultDataStore):
""" Restricts properties of an Offline data store configuration. """
_xmlBindingClass = datastores.offlinemedia
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
class GridFtpDataStore(DefaultDataStore):
""" Restricts properties of a GridFTP data store configuration. """
_xmlBindingClass = datastores.gridftp
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
self._tcpBufferSize = self._store.tcpBufferSize
self._parallelConnections = self._store.parallelConnections
def __setTcpbuffersize(self, tcpBufferSize):
""" Setter for TCP buffer size. """
try:
intValue = int(tcpBufferSize)
except (TypeError, ValueError):
raise ValueError("TCP buffer size has to be an integer value > 0.")
if intValue <= 0:
raise ValueError("TCP buffer size has to be an integer value > 0.")
self._tcpBufferSize = intValue
def __getTcpbuffersize(self):
""" Getter for TCP buffer size. """
return self._tcpBufferSize
tcpBufferSize = property(__getTcpbuffersize, __setTcpbuffersize)
def __setParallelconnections(self, parallelConnections):
""" Setter for parallel connections. """
try:
intValue = int(parallelConnections)
except (TypeError, ValueError):
raise ValueError("Parallel connections property has to be an integer value >= 0.")
if intValue < 0:
raise ValueError("Parallel connections property has to be an integer value >= 0.")
self._parallelConnections = intValue
def __getParallelconnections(self):
""" Getter for parallel connections. """
return self._parallelConnections
parallelConnections = property(__getParallelconnections, __setParallelconnections)
def toPersistenceRepresentation(self):
""" Overwrites default implementation. """
store = DefaultDataStore.toPersistenceRepresentation(self)
store.tcpBufferSize = self._tcpBufferSize
store.parallelConnections = self._parallelConnections
return store
class TsmDataStore(DefaultDataStore):
""" Restricts properties of a File data store configuration. """
_xmlBindingClass = datastores.tsm
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
self._password = self._store.password
self._dataLocationUri = "tsm://" + self.clientHostName
if self.archiveRootDirectory.startswith("/"):
self._dataLocationUri += self.archiveRootDirectory
else:
self._dataLocationUri += "/" + self.archiveRootDirectory
        self._parameters = {"username": self.username, "password": self.password, "serverNodeName": self.serverNodeName}
def __getPassword(self):
""" Getter for the password. """
if self._password is None:
return None
else:
return base64.decodestring(self._password)
def __setPassword(self, value):
""" Setter for the password. """
if value is None:
self._password = None
else:
try:
self._password = base64.encodestring(value)
except Exception:
raise ValueError("Irregular password has been provided.")
password = property(__getPassword, __setPassword)
def toPersistenceRepresentation(self):
""" Overwrites default implementation. """
store = DefaultDataStore.toPersistenceRepresentation(self)
store.password = self._password
return store
class WebdavDataStore(DefaultDataStore):
""" Restricts properties of a WebDAV data store configuration. """
_xmlBindingClass = datastores.webdav
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
        self._password = self._store.password
self._dataLocationUri = self.dataLocation
        self._parameters = {"username": self.username, "password": self.password}
def __getPassword(self):
""" Getter for the password. """
if self._password is None:
return None
else:
return base64.decodestring(self._password)
def __setPassword(self, value):
""" Setter for the password. """
if value is None:
self._password = None
else:
try:
self._password = base64.encodestring(value)
except Exception:
raise ValueError("Irregular password has been provided.")
password = property(__getPassword, __setPassword)
def toPersistenceRepresentation(self):
""" Overwrites default implementation. """
store = DefaultDataStore.toPersistenceRepresentation(self)
store.password = self._password
return store
class SubversionDataStore(WebdavDataStore):
""" Restricts properties of a Subversion data store configuration. """
_xmlBindingClass = datastores.svn
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
WebdavDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
class S3DataStore(DefaultDataStore):
""" Restricts properties of a S3 data store configuration """
_xmlBindingClass = datastores.s3
def __init__(self, name=None, storeType=None, iconName="dataStore", url=None, isDefault=False, owner=None, persistedStore=None):
""" Constructor. """
DefaultDataStore.__init__(self, name, storeType, iconName, url, isDefault, owner, persistedStore)
        self._password = self._store.password
self._dataLocationUri = "S3:" + self.dataLocation # datalocation = bucketname
        self._parameters = {"username": self.username, "password": self.password}
def __getPassword(self):
""" Getter for the password. """
if self._password is None:
return None
else:
return base64.decodestring(self._password)
def __setPassword(self, value):
""" Setter for the password. """
if value is None:
self._password = None
else:
try:
self._password = base64.encodestring(value)
except Exception:
raise ValueError("Irregular password has been provided.")
password = property(__getPassword, __setPassword)
def toPersistenceRepresentation(self):
""" Overwrites default implementation. """
store = DefaultDataStore.toPersistenceRepresentation(self)
store.password = self._password
return store
| StarcoderdataPython |
1725824 | #!/usr/bin/env python3
#
# Copyright 2018 Red Hat, Inc.
#
# Authors:
# <NAME> <<EMAIL>>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.
from collections import OrderedDict
from django.contrib.auth.models import User
from django.http import Http404
from django.template import loader
from mod import dispatch_module_hook
from .models import Project, ProjectResult, Message, MessageResult, Result
from .search import SearchEngine
from rest_framework import (permissions, serializers, viewsets, filters,
mixins, generics, renderers, status)
from rest_framework.decorators import detail_route, action
from rest_framework.fields import SerializerMethodField, CharField, JSONField, EmailField
from rest_framework.relations import HyperlinkedIdentityField
from rest_framework.response import Response
import rest_framework
from mbox import addr_db_to_rest, MboxMessage
from rest_framework.parsers import JSONParser, BaseParser
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import ValidationError
import re
import mod
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return # To not perform the csrf check previously happening
SEARCH_PARAM = 'q'
# patchew-specific permission classes
class PatchewPermission(permissions.BasePermission):
"""
Generic code to lookup for permissions based on message and project
objects. If the view has a "project" property, it should return an
api.models.Project, and has_permission will check that property too.
Subclasses can override the methods, or specify a set of groups that
are granted authorization independent of object permissions.
"""
allowed_groups = ()
def is_superuser(self, request):
return request.user and request.user.is_superuser
def has_project_permission(self, request, view, obj):
return obj.maintained_by(request.user)
def has_message_permission(self, request, view, obj):
return obj.project.maintained_by(request.user)
def has_group_permission(self, request, view):
for grp in request.user.groups.all():
if grp.name in self.allowed_groups:
return True
return False
def has_generic_permission(self, request, view):
return (request.method in permissions.SAFE_METHODS) or \
self.is_superuser(request) or \
self.has_group_permission(request, view) or \
self.has_result_group_permission(request, view)
def has_permission(self, request, view):
return self.has_generic_permission(request, view) or \
(hasattr(view, 'project') and view.project and \
self.has_project_permission(request, view, view.project))
def has_object_permission(self, request, view, obj):
return self.has_generic_permission(request, view) or \
(isinstance(obj, Message) and \
self.has_message_permission(request, view, obj)) or \
(isinstance(obj, Project) and \
self.has_project_permission(request, view, obj)) or \
(isinstance(obj, Result) and \
self.has_result_permission(request, view, obj))
def has_result_group_permission(self, request, view):
name = request.resolver_match.kwargs.get('name')
if name:
found = re.match("^[^.]*", name)
module = mod.get_module(found.group(0)) if found else None
for grp in request.user.groups.all():
if grp.name in module.allowed_groups:
return True
return False
def has_result_permission(self, request, view, result_obj):
return self.has_object_permission(request, view, result_obj.project)
class ImportPermission(PatchewPermission):
allowed_groups = ('importers',)
# pluggable field for plugin support
class PluginMethodField(SerializerMethodField):
"""
A read-only field that get its representation from calling a method on
the plugin class. The method called will be of the form
"get_{field_name}", and should take a single argument, which is the
object being serialized.
For example:
fields['extra_info'] = api.rest.PluginMethodField(obj=self)
def get_extra_info(self, obj):
return ... # Calculate some data to return.
"""
def __init__(self, obj=None, method_name=None, **kwargs):
self.obj = obj
super(PluginMethodField, self).__init__(method_name=method_name, **kwargs)
def to_representation(self, value):
method = getattr(self.obj, self.method_name)
request = self.context['request']
format = self.context.get('format', None)
return method(value, request, format)
# Users
# TODO: include list of projects maintained by the user, login
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('resource_uri', 'username')
class UsersViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('id')
serializer_class = UserSerializer
permission_classes = (PatchewPermission,)
# Projects
# TODO: include list of maintainers, connect plugins
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Project
fields = ('resource_uri', 'name', 'mailing_list', 'prefix_tags', 'url', 'git', \
'description', 'display_order', 'logo', 'parent_project', 'messages',
'results', 'series', 'properties')
messages = HyperlinkedIdentityField(view_name='messages-list', lookup_field='pk',
lookup_url_kwarg='projects_pk')
results = HyperlinkedIdentityField(view_name='results-list', lookup_field='pk',
lookup_url_kwarg='projects_pk')
series = HyperlinkedIdentityField(view_name='series-list', lookup_field='pk',
lookup_url_kwarg='projects_pk')
class ProjectsViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all().order_by('id')
serializer_class = ProjectSerializer
permission_classes = (PatchewPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, )
def get_queryset(self):
name = self.request.query_params.get('name', None)
if name is not None:
return Project.objects.filter(name=name).order_by('id')
return Project.objects.all().order_by('id')
@action(methods=['post'], detail=True, permission_classes=[ImportPermission])
def update_project_head(self, request, pk=None):
"""
        Update the project head; messages matching the given message_ids are marked as merged.
Data input format:
{
"old_head": "..",
"new_head": "..",
"message_ids": []
}
"""
project = self.get_object()
head = project.project_head
old_head = request.data['old_head']
message_ids = request.data['message_ids']
if head and head != old_head:
            return Response('Wrong old head', status=status.HTTP_409_CONFLICT)
ret = project.series_update(message_ids)
project.project_head = request.data['new_head']
return Response({"new_head": project.project_head, "count": ret})
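# Illustrative sketch (editor addition): a client-side call to the custom action
# above. Host, token, project id and message ids are made-up sample values.
#
#   import requests
#   requests.post("https://patchew.example.org/api/v1/projects/1/update_project_head/",
#                 json={"old_head": "abc123", "new_head": "def456",
#                       "message_ids": ["<20200101000000.1-1@example.com>"]},
#                 headers={"Authorization": "Token <importer-token>"})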
# Common classes for series and messages
class HyperlinkedMessageField(HyperlinkedIdentityField):
lookup_field = 'message_id'
def get_url(self, obj, view_name, request, format):
kwargs = {'projects_pk': obj.project_id, self.lookup_field: obj.message_id}
return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
class AddressSerializer(serializers.Serializer):
name = CharField(required=False)
address = EmailField()
def to_representation(self, obj):
return addr_db_to_rest(obj)
def create(self, validated_data):
try:
return [validated_data['name'], validated_data['address']]
except:
return [validated_data['address'], validated_data['address']]
class BaseMessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = ('resource_uri', 'message_id', 'subject', 'date', 'sender', 'recipients')
resource_uri = HyperlinkedMessageField(view_name='messages-detail')
recipients = AddressSerializer(many=True)
sender = AddressSerializer()
def create(self, validated_data):
validated_data['recipients'] = self.fields['recipients'].create(validated_data['recipients'])
validated_data['sender'] = self.fields['sender'].create(validated_data['sender'])
if 'project' in validated_data:
project = validated_data.pop('project')
return Message.objects.create(project=project, **validated_data)
return Message.objects.create(project=self.context['project'], **validated_data)
# a message_id is *not* unique, so we can only list
class BaseMessageViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = BaseMessageSerializer
queryset = Message.objects.all()
permission_classes = (ImportPermission,)
lookup_field = 'message_id'
lookup_value_regex = '[^/]+'
# a (project, message_id) tuple is unique, so we can always retrieve an object
class ProjectMessagesViewSetMixin(mixins.RetrieveModelMixin):
def get_queryset(self):
return self.queryset.filter(project=self.kwargs['projects_pk'])
@property
def project(self):
        if hasattr(self, '_project'):
            return self._project
        try:
            self._project = Project.objects.get(id=self.kwargs['projects_pk'])
        except Exception:
            self._project = None
        return self._project
def get_serializer_context(self):
if self.project is None:
return Http404
return {'project': self.project, 'request': self.request}
# Series
class ReplySerializer(BaseMessageSerializer):
class Meta:
model = Message
fields = BaseMessageSerializer.Meta.fields + ('in_reply_to', )
class PatchSerializer(BaseMessageSerializer):
class Meta:
model = Message
fields = BaseMessageSerializer.Meta.fields + ('stripped_subject',
'last_comment_date', 'patch_num')
class SeriesSerializer(BaseMessageSerializer):
class Meta:
model = Message
fields = ('resource_uri',) + BaseMessageSerializer.Meta.fields + (
'message', 'stripped_subject', 'last_comment_date', 'last_reply_date',
'is_complete', 'is_merged', 'num_patches', 'total_patches', 'results')
resource_uri = HyperlinkedMessageField(view_name='series-detail')
message = HyperlinkedMessageField(view_name='messages-detail')
results = HyperlinkedMessageField(view_name='results-list', lookup_field='series_message_id')
total_patches = SerializerMethodField()
def __init__(self, *args, **kwargs):
self.detailed = kwargs.pop('detailed', False)
super(SeriesSerializer, self).__init__(*args, **kwargs)
def get_fields(self):
fields = super(SeriesSerializer, self).get_fields()
request = self.context['request']
dispatch_module_hook("rest_series_fields_hook", request=request,
fields=fields, detailed=self.detailed)
return fields
def get_total_patches(self, obj):
return obj.get_total_patches()
class SeriesSerializerFull(SeriesSerializer):
class Meta:
model = Message
fields = SeriesSerializer.Meta.fields + ('patches', 'replies')
patches = PatchSerializer(many=True)
replies = ReplySerializer(many=True)
def __init__(self, *args, **kwargs):
if not 'detailed' in kwargs:
kwargs['detailed'] = True
super(SeriesSerializerFull, self).__init__(*args, **kwargs)
class PatchewSearchFilter(filters.BaseFilterBackend):
search_param = SEARCH_PARAM
search_title = 'Search'
search_description = 'A search term.'
template = 'rest_framework/filters/search.html'
def filter_queryset(self, request, queryset, view):
search = request.query_params.get(self.search_param) or ''
terms = [x.strip() for x in search.split(" ") if x]
se = SearchEngine()
query = se.search_series(queryset=queryset, *terms)
return query
def to_html(self, request, queryset, view):
if not getattr(view, 'search_fields', None):
return ''
term = request.query_params.get(self.search_param) or ''
context = {
'param': self.search_param,
'term': term
}
template = loader.get_template(self.template)
return template.render(context)
class SeriesViewSet(BaseMessageViewSet):
serializer_class = SeriesSerializer
queryset = Message.objects.filter(is_series_head=True).order_by('-last_reply_date')
filter_backends = (PatchewSearchFilter,)
search_fields = (SEARCH_PARAM,)
class ProjectSeriesViewSet(ProjectMessagesViewSetMixin,
SeriesViewSet, mixins.DestroyModelMixin):
authentication_classes = (CsrfExemptSessionAuthentication, )
def collect_patches(self, series):
if series.is_patch:
patches = [series]
else:
patches = Message.objects.filter(in_reply_to=series.message_id,
project=self.kwargs['projects_pk'],
is_patch=True).order_by('patch_num')
return patches
def collect_replies(self, parent, result):
replies = Message.objects.filter(in_reply_to=parent.message_id,
project=self.kwargs['projects_pk'],
is_patch=False).order_by('date')
for m in replies:
result.append(m)
for m in replies:
self.collect_replies(m, result)
return result
def get_serializer_class(self, *args, **kwargs):
if self.lookup_field in self.kwargs:
return SeriesSerializerFull
return SeriesSerializer
def get_object(self):
series = super(ProjectSeriesViewSet, self).get_object()
series.patches = self.collect_patches(series)
series.replies = self.collect_replies(series, [])
if not series.is_patch:
for i in series.patches:
self.collect_replies(i, series.replies)
return series
def perform_destroy(self, instance):
Message.objects.delete_subthread(instance)
# Messages
# TODO: add POST endpoint connected to email plugin?
class MessageSerializer(BaseMessageSerializer):
class Meta:
model = Message
fields = BaseMessageSerializer.Meta.fields + ('mbox', )
mbox = CharField()
def get_fields(self):
fields = super(MessageSerializer, self).get_fields()
try:
# When called from the CoreAPI schema generator, there is no context defined?
request = self.context['request']
except TypeError:
request = None
dispatch_module_hook("rest_message_fields_hook", request=request,
fields=fields)
return fields
class StaticTextRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'mbox'
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
response = renderer_context.get('response')
if response and response.exception:
return '%d %s' % (response.status_code, response.status_text.title())
else:
return data
class MessagePlainTextParser(BaseParser):
media_type = 'message/rfc822'
def parse(self, stream, media_type=None, parser_context=None):
data = stream.read().decode("utf-8")
return MboxMessage(data).get_json()
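# Illustrative sketch (editor addition): because the parser above accepts
# 'message/rfc822', a raw mbox payload can be POSTed directly; endpoint, token
# and file name here are assumptions.
#
#   import requests
#   with open("patch.mbox", "rb") as f:
#       requests.post("https://patchew.example.org/api/v1/messages/",
#                     data=f.read(),
#                     headers={"Content-Type": "message/rfc822",
#                              "Authorization": "Token <importer-token>"})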
class ProjectMessagesViewSet(ProjectMessagesViewSetMixin,
BaseMessageViewSet, mixins.CreateModelMixin):
serializer_class = MessageSerializer
parser_classes = (JSONParser, MessagePlainTextParser, )
@detail_route(renderer_classes=[StaticTextRenderer])
def mbox(self, request, *args, **kwargs):
message = self.get_object()
return Response(message.get_mbox())
@detail_route()
def replies(self, request, *args, **kwargs):
message = self.get_object()
replies = Message.objects.filter(in_reply_to=message.message_id,
project=self.kwargs['projects_pk']).order_by('date')
page = self.paginate_queryset(replies)
serializer = BaseMessageSerializer(page, many=True,
context=self.get_serializer_context())
return self.get_paginated_response(serializer.data)
class MessagesViewSet(BaseMessageViewSet):
serializer_class = MessageSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
parser_classes = (JSONParser, MessagePlainTextParser, )
authentication_classes = (CsrfExemptSessionAuthentication, )
def create(self, request, *args, **kwargs):
m = MboxMessage(request.data['mbox'])
projects = [p for p in Project.objects.all() if p.recognizes(m)]
grps = request.user.groups.all()
grps_name = [grp.name for grp in grps]
if 'importers' not in grps_name:
projects = (p for p in projects if p.maintained_by(self.request.user))
results = []
for project in projects:
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(project=project)
results.append(serializer.data)
# Fake paginator response. Note that there is no Location header.
return Response(OrderedDict([('count', len(results)),
('results', results)]),
status=status.HTTP_201_CREATED)
# Results
class HyperlinkedResultField(HyperlinkedIdentityField):
def get_url(self, result, view_name, request, format):
obj = result.obj
kwargs = {'name': result.name}
if isinstance(obj, Message):
kwargs['projects_pk'] = obj.project_id
kwargs['series_message_id'] = obj.message_id
else:
kwargs['projects_pk'] = obj.id
return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
class ResultSerializer(serializers.ModelSerializer):
class Meta:
model = Result
fields = ('resource_uri', 'name', 'status', 'last_update', 'data', 'log_url')
resource_uri = HyperlinkedResultField(view_name='results-detail')
log_url = SerializerMethodField(required=False)
data = JSONField(required=False)
def get_log_url(self, obj):
request = self.context['request']
return obj.get_log_url(request)
def validate(self, data):
found = re.match("^[^.]*", self.instance.name)
module = mod.get_module(found.group(0)) if found else None
is_valid = module.result_data_serializer_class(data=data['data']).is_valid(raise_exception=True)
if found is None and not is_valid:
raise ValidationError("Invalid")
return data
class ResultSerializerFull(ResultSerializer):
class Meta:
model = Result
fields = ResultSerializer.Meta.fields + ('log',)
# The database field is log_xz, so this is needed here
log = CharField(required=False)
class ResultsViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin, viewsets.GenericViewSet):
lookup_field = 'name'
lookup_value_regex = '[^/]+'
filter_backends = (filters.OrderingFilter,)
ordering_fields = ('name',)
ordering = ('name',)
authentication_classes = (CsrfExemptSessionAuthentication, )
permission_classes = (PatchewPermission, )
def get_serializer_class(self, *args, **kwargs):
if self.lookup_field in self.kwargs:
return ResultSerializerFull
return ResultSerializer
@property
def project(self):
        if hasattr(self, '_project'):
            return self._project
        try:
            self._project = Project.objects.get(id=self.kwargs['projects_pk'])
        except Exception:
            self._project = None
        return self._project
class ProjectResultsViewSet(ResultsViewSet):
def get_queryset(self):
return ProjectResult.objects.filter(project=self.kwargs['projects_pk'])
class SeriesResultsViewSet(ResultsViewSet):
def get_queryset(self):
return MessageResult.objects.filter(message__project=self.kwargs['projects_pk'],
message__message_id=self.kwargs['series_message_id'])
| StarcoderdataPython |
1616920 | <filename>part-2/5-context_managers/project-goal1.py<gh_stars>0
"""
Create a context manager that:
- input: file_name
- output: a lazy iterator that iterates over the data file and yields named tuples
"""
import csv
from collections import namedtuple
def get_dialect(fname):
with open(fname, 'r') as f:
sample = f.read(10)
dialect = csv.Sniffer().sniff(sample)
return dialect
class DataIterator:
def __init__(self, fname):
self._fname = fname
self._f = None
self._nt = None
self._data = None
def __iter__(self):
"""Iterator protocol"""
return self
def __next__(self):
if self._f.closed:
raise StopIteration
else:
return self._nt(*next(self._data))
def __enter__(self):
"""Context Manager protocol"""
self._f = open(self._fname, 'r')
self._data = csv.reader(self._f, get_dialect(self._fname))
headers = map(lambda s: s.lower(), next(self._data))
self._nt = namedtuple('Record', headers)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
print('exit')
self._f.close()
return False
with DataIterator('materials/cars.csv') as f:
print(next(f))
| StarcoderdataPython |
121127 | <filename>snapmerge/home/tests.py
from django.test import TestCase, RequestFactory
from django.test.client import Client
from django.db import models
from . import models
from . import views
# Create your tests here.
class TestProjectModel(TestCase):
def setUp(self):
# Every test needs access to the request factory.
self.factory = RequestFactory()
self.client = Client()
def test_project_create(self):
proj = models.Project.create_and_save(name="Class 10a",
picture="TODO",
description="The first Snap! Project of class 10a")
self.assertEquals(len(models.Project.objects.all()), 1)
self.assertEquals(proj.name, "Class 10a")
self.assertEquals(proj.picture, "TODO")
self.assertEquals(proj.description, "The first Snap! Project of class 10a")
def test_string_representation(self):
proj = models.Project.create_and_save(name="Class 10a",
picture="TODO",
description="The first Snap! Project of class 10a")
self.assertEquals(str(proj), "Class 10a")
class TestSnapFileModel(TestCase):
def test_file_create(self):
# Proj
proj = models.Project.create_and_save(name="Class 10a",
picture="TODO",
description="The first Snap! Project of class 10a")
file = models.SnapFile.create_and_save(project=proj,
file="filepath")
self.assertEquals(len(models.SnapFile.objects.all()), 1)
def test_ancestors(self):
# Proj
proj = models.Project.create_and_save(name="Class 10a",
picture="TODO",
description="The first Snap! Project of class 10a")
file = models.SnapFile.create_and_save(project=proj,
file="filepath")
self.assertEquals(len(models.SnapFile.objects.all()), 1)
file2 = models.SnapFile.create_and_save(project=proj,
file="filepath",
ancestors=[file])
self.assertEquals(len(models.SnapFile.objects.all()), 2)
| StarcoderdataPython |
113190 | '''
Created on 2020-07-11
@author: wf
'''
import os
from ptp.titleparser import TitleParser
from ptp.event import EventManager, Event
class WikiData(object):
'''
WikiData proceedings titles event source
'''
defaultEndpoint="https://query.wikidata.org/sparql"
def __init__(self, config=None):
'''
Constructor
Args:
config(StorageConfig): the storage configuration to use
'''
self.em=EventManager('wikidata',url='https://www.wikidata.org/wiki/Wikidata:Main_Page',title='Wikidata',config=config)
self.debug=self.em.config.debug
self.profile=self.em.config.profile
path=os.path.dirname(__file__)
self.sampledir=path+"/../sampledata/"
self.sampleFilePath=self.sampledir+"proceedings-wikidata.txt"
def cacheEvents(self,limit=1000000,batchSize=500):
'''
initialize me from my sample file
Args:
limit(int): the maximum number of events to cache
batchSize(int): the batchSize to use
'''
tp=TitleParser.getDefault(self.em.name)
tp.fromFile(self.sampleFilePath, "wikidata")
tc,errs,result=tp.parseAll()
if self.debug:
print(tc)
print("%d errs %d titles" % (len(errs),len(result)))
for title in result:
if 'acronym' in title.metadata():
if self.debug:
print(title.metadata())
if 'eventId' in title.info:
event=Event()
event.fromTitle(title,self.debug)
event.eventId=event.eventId.replace("http://www.wikidata.org/entity/","")
event.url="%s" % (title.info['eventId'])
self.em.add(event)
self.em.store(limit=limit,batchSize=batchSize)
def initEventManager(self):
''' init my event manager '''
if not self.em.isCached():
self.cacheEvents()
else:
self.em.fromStore()
self.em.extractCheckedAcronyms()
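# --- Illustrative sketch (editor addition): typical driver code for the class
# above; using the default StorageConfig is an assumption made for demonstration.
if __name__ == '__main__':
    wd = WikiData()
    wd.initEventManager()   # parses the sample file on first use, then reuses the cache
    print('cached events for', wd.em.name)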
| StarcoderdataPython |
3233634 | def num_special(mat):
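    # Editor note: a cell is "special" when it contains a 1 that is the only 1 in
    # both its row and its column; the function returns how many such cells exist.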
ones_positions = [[row, col] for row in range(len(mat)) for col in range(len(mat[0])) if mat[row][col] == 1]
cols = [pos[1] for pos in ones_positions]
special_cols = [col for col in cols if cols.count(col) == 1]
special_count = 0
for pos in ones_positions:
row = pos[0]
col = pos[1]
if col in special_cols and mat[row].count(1) == 1:
special_count += 1
return special_count
print(num_special([[1, 0, 0],
[0, 0, 1],
[1, 0, 0]]))
print(num_special([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
print(num_special([[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]]))
print(num_special([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1]]))
print(num_special([[0, 0, 1, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]]))
print(num_special(
[[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0]]))
| StarcoderdataPython |
3266925 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from . import config
from .http_client import HttpClient
from thrift.protocol.TCompactProtocol import TCompactProtocolAcceleratedFactory
from frugal.provider import FServiceProvider
from frugal.context import FContext
from .proto import LegyProtocolFactory
from .lib.Gen import f_LineService
class Connection(object):
def __init__(self, uri_path):
self.context = FContext()
self.transport = HttpClient(config.BASE_URL + uri_path)
self.protocol_factory = TCompactProtocolAcceleratedFactory()
self.wrapper_factory = LegyProtocolFactory(self.protocol_factory)
self.service_provider = FServiceProvider(self.transport, self.wrapper_factory)
self.client = self.ClientFactory()
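    # Editor note (illustrative): call() below resolves a method name on the
    # generated thrift client, e.g. Connection('/S4').call('getProfile'); the
    # '/S4' path and 'getProfile' name are assumptions for demonstration only.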
def call(self, rfunc: str, *args, **kws) -> callable:
assert isinstance(rfunc, str), 'Function name must be str not '+type(rfunc).__name__
rfr = getattr(self.client, rfunc, None)
if rfr:
return rfr(self.context, *args, **kws)
else:
            raise Exception(rfunc + ' does not exist')
def renew(self):
self.client = self.ClientFactory()
def ClientFactory(self):
return f_LineService.Client(self.service_provider)
def setHeaders(self, dict_key_val):
self.transport._headers = dict_key_val
def updateHeaders(self, dict_key_val):
self.transport._headers.update(dict_key_val)
def url(self, path='/'):
self.transport._url = config.BASE_URL + path | StarcoderdataPython |
3328123 | <reponame>sarkarghya/GSOC_org_analysis
import pickle as pk
import os
from datetime import date
from datetime import timedelta
def clean(stir):
return "".join(
[st for st in stir if not (st.isnumeric() or st.isspace() or st == ".")]
)
def defra(lisa):
n_lisa = []
for item in lisa:
n_lisa.extend(item.split("/"))
return n_lisa
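# Editor note (illustrative): clean() strips digits, whitespace and dots (so that
# "C++ 11".lower() compares equal to "c++"), while defra() flattens slash-separated
# tech strings such as "Python/C++" into individual items before matching.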
def lang_org(directory):
langs_raw = ["c++", "c", "javascript", "java", "python"] # add other languages
langs = sorted(langs_raw, key=len)
la_c = {}
for file in os.listdir(directory):
file_path = os.path.join(directory, file)
with open(file_path, "rb") as datfile:
dic = pk.load(datfile)
for item in defra(dic["tech"]):
for lang in langs:
ci = clean(item.lower())
if lang in ci and (len(lang) == len(ci)):
la_c.setdefault(lang, []).append(dic["name"])
x = dict(map(lambda x: (x[0].capitalize(), len(set(x[1]))), la_c.items()))
return {k: v for k, v in sorted(x.items(), key=lambda item: item[1])}
# return set(la_c) #to see orgs
if __name__ == "__main__":
direct = f"./code/orgs-{date.today() - timedelta(days = 2)}/" # edit date differance or type in full path
print(lang_org(direct))
| StarcoderdataPython |
139720 | #!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
from androguard.cli import androlyze_main
from androguard.core.androconf import *
from androguard.misc import *
import os
import sql
import sqlstorehash
LIST_NAME_METHODS = ["sendBroadcast", "onReceive", "startService", "onHandleIntent", "startActivity", "getIntent"]
LIST_HEADER = ["STT", "APK Name"] + LIST_NAME_METHODS + ["Component Type", "Component Name", "Exported Status", "getPermissions"]
def count_Method_APK(methodName, listMethods):
count=0
newlist=list()
for element in listMethods:
newlist.append(element.__repr__())
for l in newlist:
if methodName in l:
count+=1
return count
def attribute_Component(apk_Obj):
manifest_Obj=apk_Obj.get_android_manifest_xml()
application_Tag=manifest_Obj.findall("application")
latrr=list()
list_Component=list()
dem=0
for childs in application_Tag:
for child in childs:
keys=list()
keys=child.keys()
newdict=dict()
list_Component.append(child.tag)
for key in keys:
lsplit=key.split("}")
newdict[lsplit[-1]]=child.get(key)
latrr.append(newdict)
return latrr, list_Component
def get_Atrribute(listDict):
list_Name_Of_Component=list()
list_Exported_Of_Component=list()
for dictt in listDict:
list_Name_Of_Component.append(dictt.get('name'))
list_Exported_Of_Component.append(dictt.get('exported'))
return list_Name_Of_Component, list_Exported_Of_Component
def get_List_Contens(path, nameAPK):
    list_Count_Methods = [nameAPK]
    try:
        a, d, dx = AnalyzeAPK(path)
        listMethods = list(dx.get_methods())
for i in range(0,len(LIST_NAME_METHODS)):
list_Count_Methods.append(count_Method_APK(LIST_NAME_METHODS[i], listMethods))
atrrs, components=attribute_Component(a)
names, exports=get_Atrribute(atrrs)
list_Count_Methods.append(components)
list_Count_Methods.append(names)
list_Count_Methods.append(exports)
list_Count_Methods.append(a.get_permissions())
except:
for i in range(0,len(LIST_NAME_METHODS)):
list_Count_Methods.append("Failed!")
return list_Count_Methods
def get_Path_Files(pathFolder):
Fjoin=os.path.join
lapkname=os.listdir(pathFolder)
list_Of_Path_Files=[Fjoin(pathFolder,f) for f in os.listdir(pathFolder)]
return list_Of_Path_Files, lapkname
def map_List_Methods(pathFolder):
lspath, lsnameAPK=get_Path_Files(pathFolder)
newlist=list()
newlist.append(LIST_HEADER)
i=1
for (lp,ln) in zip(lspath,lsnameAPK):
#hash here
md5, sha1, sha256 = sqlstorehash.hashMd5Sha1Sha256(lp)
if(sql.CheckExist(ln,md5, sha1, sha256) == False):
ltemp=get_List_Contens(lp,ln)
ltemp.insert(0,i)
newlist.append(ltemp)
#sql here
sql.InsertApp(ltemp, md5, sha1, sha256)
print ("Completed " + str(round(i/float(len(lspath))*100))+"%.")
i=i+1
return newlist
def main():
#sql.CreateDB();
#sql.CreateTable();
#sql.DangerousPermission();
try:
fh = open("config","r")
fh.close()
except:
fh = open("config","w")
fh.write('1')
fh.close()
map_List_Methods("/root/Downloads/Test")
if __name__ == '__main__':
main() | StarcoderdataPython |
1662249 | import glob
import pandas as pd
import numpy as np
import os
filenames = glob.glob("*.csv")
filenames = [filename for filename in filenames if os.path.getsize(filename) > 10000]
#filenames = ["CreditRequirement.csv"]
timestamp_col = "Complete Timestamp" # column that indicates completion timestamp
case_id_col = "Case ID"
activity_col = "Activity"
def add_all_columns(group):
group = group.sort_values(timestamp_col, ascending=True, kind="mergesort")
group["event_nr"] = range(1,group.shape[0]+1)
group["unique_events"] = group[activity_col].nunique()
group["total_events"] = len(group[activity_col])
end_date = group[timestamp_col].iloc[-1]
tmp = end_date - group[timestamp_col]
tmp = tmp.fillna(0)
start_date = group[timestamp_col].iloc[0]
elapsed = group[timestamp_col] - start_date
elapsed = elapsed.fillna(0)
group["elapsed"] = elapsed.apply(lambda x: float(x / np.timedelta64(1, 'D')))
group["remtime"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'D'))) # D is for days
#group["case_length"] = group.shape[0]
return group
with open("log_summary.tsv", 'w') as fout:
fout.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (
"log", "total_cases", "unique_activities", "total_events","avg_unique_events_per_trace", "mean_case_length",
"std_case_length", "mean_case_duration","std_case_duration","mean_remtime","std_remtime"))
for filename in filenames:
print(filename)
# dtypes = {col:"str" for col in ["proctime", "elapsed", "label", "last"]} # prevent type coercion
data = pd.read_csv(filename, sep=";")
data[timestamp_col] = pd.to_datetime(data[timestamp_col])
data = data.groupby(case_id_col).apply(add_all_columns)
df0 = data.loc[data["event_nr"] == 1].copy()
df0["UER"] = df0["unique_events"] / df0["total_events"]
#print("Avg percentage of unique timestamps per trace: %.3f" %np.mean(df0["UTR"]))
#print("%s out of %s unique timestamps" %(len(data[timestamp_col].unique()),data[timestamp_col].count()))
global_unique_timestamps = len(data[timestamp_col].unique()) / data[timestamp_col].count()
#print("%s cases that reach length %d" %(df.shape[0],cutoff))
#print("In %s of them elapsed time is still 0" %len(df.loc[df["elapsed"]==0]))
#print("%s cases that reach length %d" %(df.shape[0],cutoff))
fout.write("%s, %s, %s, %s, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f\n"%(filename,
data[case_id_col].nunique(),
data[activity_col].nunique(),
data.shape[0],
np.mean(df0["UER"]),
np.mean(df0["total_events"]),
np.std(df0["total_events"]),
np.mean(df0["remtime"]),
np.std(df0["remtime"]),
np.mean(data["remtime"]),
np.std(data["remtime"])))
| StarcoderdataPython |
3362222 | # Import the socket library.
import os
import socket
import threading
import configparser
import protocol
from time import sleep
class ChatClient:
MAX_MSG_LENGTH = 10
RECEIVE_SIZE = 1024
RECEIVE_INTERVAL = 0.1
SEND_INTERVAL = 0.5
def __init__(self, server_address, port):
"""
Start a connection to the chat server.
:param server_address: IP of the server.
:param port: Port of the server application.
"""
self.server_address = server_address
self.port = int(port)
self.socket = None
# Queue for message to send to the server
self.send_queue = []
self.sending = False
self.sending_thread = None
# Thread that polls for new messages
self.polling_thread = None
# Indicates if the client should be polling
self.polling = False
def create_connection(self, socket_family=socket.AF_INET, socket_type=socket.SOCK_STREAM):
"""
Create the socket connection between this client en given server in the constructor.
:param socket_family: Protocol to be used by sockets, defaults to INET (IP).
:param socket_type: Packaging type to be used by sockets, defaults to STREAM.
:return: True if connection was successful, else false.
"""
try:
# Create a new socket.
self.socket = socket.socket(socket_family, socket_type)
# Connect to another application.
self.socket.connect((self.server_address, self.port))
return True
except:
print('Failed connecting to {}:{}'.format(self.server_address, self.port))
return False
def do_handshake(self, username):
"""
Do the handshake provided from our protocol
:param username: Name of the user to represent yourself at the server.
:return: Tuple. True if successful, False if failed. Followed by reason string.
"""
buffer = str.encode("{} {}{}".format(protocol.HELLO_FROM, username, protocol.MESSAGE_END))
        self.socket.sendall(buffer)  # sendall either sends the whole buffer or raises; it returns None
response = self.socket.recv(self.RECEIVE_SIZE)
decoded = response.decode()
return self._check_response(decoded)
def start_polling(self):
"""
Creates a thread to start polling form incoming messages. Only 1 can be active at a time
:return:
"""
if self.polling_thread is None:
self.polling_thread = threading.Thread(target=self._poll)
self.polling_thread.start()
else:
print('Polling already active!')
def stop_polling(self):
"""
Stops the polling thread if it is polling.
:return: None
"""
self.polling = False
def _poll(self):
"""
Poll for incoming messages.
:return:
"""
message = ''
self.polling = True
while self.polling:
received = self.socket.recv(self.RECEIVE_SIZE)
message += received.decode()
# For messages lager than the buffer, search for the message end.
if protocol.MESSAGE_END not in message:
continue
# Only report bad responses and deliveries to the user
good, reason = self._check_response(message)
if not good:
print('Bad response: ' + reason)
elif protocol.DELIVERY in message:
user, msg = message.replace(protocol.DELIVERY, '', 1).replace(' ', '', 1).split(' ', 1)
print('{}: {}'.format(user, msg))
elif protocol.WHO_OK in message:
users = message.replace(protocol.WHO_OK, '', 1).replace(' ', '', 1)
print('{}'.format(users))
message = ''
sleep(self.RECEIVE_INTERVAL)
def _check_response(self, message):
"""
Check a received message from the server.
:param message: The message to be checked.
:return: Tuple. True if successful, False if failed. Followed by reason string.
"""
        if message is None:
            return False, 'No response received'
        for good in protocol.GOOD_RESPONSE:
            if message.startswith(good):
                return True, good
        for bad in protocol.BAD_RESPONSE:
            if message.startswith(bad):
                return False, bad
        # Fall through: the server sent something we do not recognise.
        return False, 'Unrecognised response'
def get_users(self):
"""
Get current only users.
:return: A string listing all online users.
"""
buffer = '{}{}'.format(protocol.WHO, protocol.MESSAGE_END)
self.send(buffer)
def start_sending(self):
"""
:return:
"""
if self.sending_thread is None:
self.sending_thread = threading.Thread(target=self._sending)
self.sending_thread.start()
else:
print('Sending already active!')
def stop_sending(self):
"""
:return:
"""
self.sending = False
def _sending(self):
"""
:return:
"""
self.sending = True
while self.sending:
if self.send_queue:
message = self.send_queue.pop(0)
# Encode string to bytes and send
self.socket.sendall(str.encode(message))
sleep(self.SEND_INTERVAL)
def send(self, message):
"""
Add message to send queue
:return:
"""
self.send_queue.append(message)
def send_message(self, user, message):
"""
Send a message to another user.
:param user: The user to send the message to.
:param message: The message to send
:return: None
"""
buffer = "{} {} {}{}".format(protocol.SEND, user, message, protocol.MESSAGE_END)
self.send(buffer)
def stop(self):
"""
        Stop the client and release its resources (delegates to __del__).
:return:
"""
self.__del__()
def __del__(self):
"""
Cleanup after destroying this object. Close connection.
:return: None
"""
self.socket.close()
self.polling = False
if self.polling_thread:
self.polling_thread = None
os._exit(0)
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read('../../server_info.ini')
client1 = ChatClient(config['DEFAULT']['ip'], config['DEFAULT']['port'])
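    # Illustrative usage sketch (not part of the original file): the username and
    # recipient below are assumptions, and a running chat server is required.
    if client1.create_connection():
        ok, reason = client1.do_handshake('example_user')
        if ok:
            client1.start_polling()
            client1.start_sending()
            client1.get_users()
            client1.send_message('another_user', 'hello from the example client')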
| StarcoderdataPython |
4824115 | <gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPortfolioResult',
'AwaitableGetPortfolioResult',
'get_portfolio',
'get_portfolio_output',
]
@pulumi.output_type
class GetPortfolioResult:
def __init__(__self__, accept_language=None, description=None, display_name=None, id=None, portfolio_name=None, provider_name=None, tags=None):
if accept_language and not isinstance(accept_language, str):
raise TypeError("Expected argument 'accept_language' to be a str")
pulumi.set(__self__, "accept_language", accept_language)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if portfolio_name and not isinstance(portfolio_name, str):
raise TypeError("Expected argument 'portfolio_name' to be a str")
pulumi.set(__self__, "portfolio_name", portfolio_name)
if provider_name and not isinstance(provider_name, str):
raise TypeError("Expected argument 'provider_name' to be a str")
pulumi.set(__self__, "provider_name", provider_name)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="acceptLanguage")
def accept_language(self) -> Optional[str]:
return pulumi.get(self, "accept_language")
@property
@pulumi.getter
def description(self) -> Optional[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="portfolioName")
def portfolio_name(self) -> Optional[str]:
return pulumi.get(self, "portfolio_name")
@property
@pulumi.getter(name="providerName")
def provider_name(self) -> Optional[str]:
return pulumi.get(self, "provider_name")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.PortfolioTag']]:
return pulumi.get(self, "tags")
class AwaitableGetPortfolioResult(GetPortfolioResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPortfolioResult(
accept_language=self.accept_language,
description=self.description,
display_name=self.display_name,
id=self.id,
portfolio_name=self.portfolio_name,
provider_name=self.provider_name,
tags=self.tags)
def get_portfolio(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPortfolioResult:
"""
Resource Type definition for AWS::ServiceCatalog::Portfolio
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:servicecatalog:getPortfolio', __args__, opts=opts, typ=GetPortfolioResult).value
return AwaitableGetPortfolioResult(
accept_language=__ret__.accept_language,
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
portfolio_name=__ret__.portfolio_name,
provider_name=__ret__.provider_name,
tags=__ret__.tags)
@_utilities.lift_output_func(get_portfolio)
def get_portfolio_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPortfolioResult]:
"""
Resource Type definition for AWS::ServiceCatalog::Portfolio
"""
...
| StarcoderdataPython |
3354744 | from django.shortcuts import render
import csv
from django.http import HttpResponse
from foundation.models import TimeSeriesDatum
def download_csv_report_01_temperature_sensor_api(request):
# Create the HttpResponse object with the appropriate CSV header.
data = TimeSeriesDatum.objects.filter(
sensor__name ="Temperature",
sensor__insturment__user=request.user,
)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="temperature_data.csv"'
writer = csv.writer(response)
writer.writerow(['Sensor_id','time', 'value'])
for datum in data:
#header row
writer.writerow([datum.sensor.id, datum.time,datum.value])
return response
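# A possible URL wiring for the view above (illustrative only; the project's real
# route name and urls module are not shown in this file):
# from django.urls import path
# urlpatterns = [
#     path('reports/temperature.csv', download_csv_report_01_temperature_sensor_api),
# ]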
| StarcoderdataPython |
46955 | from django.db.models.signals import post_save
from django.dispatch import receiver
from company.models import Company
from company.tasks import deploy_new_company
@receiver(post_save, sender=Company)
def company_created(sender, instance, created, **kwargs):
if created:
deploy_new_company.delay(instance.id)
| StarcoderdataPython |
1781977 | import tkinter as tk
# sets root window object
window = tk.Tk()
# sets title
window.title("Manage My Life")
# sets default size of window
window.geometry("700x350")
# adds a widget to the GUI
greetingwidget = tk.Label(text="Greetings!")
greetingwidget.pack()
# blocks program, any code after here will not run. Consider using update().
window.mainloop()
| StarcoderdataPython |
109919 | <reponame>Ryukyo/google-scrape
import csv
import functools
import os
import random
import time
import re
from multiprocessing import dummy
import requests
import urllib3
import uvicorn
from bs4 import BeautifulSoup
from fastapi import FastAPI, Request
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic.main import BaseModel
from starlette.concurrency import run_in_threadpool
def read_file(file):
with open(file) as f:
return f.read().splitlines()
def parse(query, body):
query = query.replace(" ", "+")
URL = f"https://www.google.com/search?q={query}&num={body.results_per_page}&lang={body.language}®ion={body.region}"
headers = {"user-agent": random.choice(body.user_agents)}
session = requests.Session()
seen = set()
results = []
regexMail = re.compile(
"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z]{2,4})")
regexPostal = re.compile("(\d{4}\s[a-zA-Z]+)")
regexPhone = re.compile(
"([+(\d]{1})(([\d+() -./]){5,16})([+(\d]{1})")
    regexStreet = re.compile(
        "([A-ZÖÄÜßéúíóáýèùìòà. -]{1,}[0-9]{0,}[a-zäöüßéúíóáýèùìòà. -]{1,}) ([0-9 ]{1,}[-+/\\a-z0-9 ]{0,})", re.IGNORECASE)
for i in range(1, body.max_pages):
resp = session.get(
URL, headers=headers, verify=False, timeout=body.timeout,
)
if resp.status_code == 429:
print("response was 429, please wait a few minutes")
time.sleep(60)
continue
elif resp.status_code != 200:
print(f"ERROR PARSE QUERY '{query}' on page {i + 1} !")
break
soup = BeautifulSoup(resp.text, "html.parser")
for div in soup.find_all(class_="g"):
anchors = div.find(class_="yuRUbf").find_all("a")
if anchors:
link = anchors[0]["href"]
title = div.find(class_="LC20lb DKV0Md").getText()
snippet = (
div.find("div", class_="IsZvec")
.find("span", class_="aCOpRe")
.getText()
)
mail = regexMail.findall(snippet)
postal = regexPostal.findall(snippet)
phone = regexPhone.findall(snippet)
                street = regexStreet.findall(snippet)  # IGNORECASE is set at compile time; findall's 2nd argument is a start position, not a flag
item = {"title": title,
"link": link, "snippet": snippet, "mail": mail, "street": street, "postal": postal, "phone": phone}
if item["link"] not in seen:
seen.add(item["link"])
results.append(item)
nextLink = soup.find("a", {"aria-label": f"Page {i+1}"})
if nextLink is None:
print(f"END RESULTS FOR QUERY '{query}'")
return {"query": query, "results": results}
nextLinkUrl = nextLink["href"]
print(nextLinkUrl)
URL = f"https://www.google.com{nextLinkUrl}"
time.sleep(random.randint(10, 30))
return {"query": query, "results": results}
def execute_queries(body):
pool = dummy.Pool(body.threads)
func = functools.partial(parse, body=body)
out = pool.map(func, body.queries)
for query in out:
with open(f"out/{body.out_filename}", "a+", newline="") as csvfile:
writer = csv.writer(
csvfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
writer.writerow(
[query["query"],
f"{len(query['results'])} results"]
)
for item in query["results"]:
writer.writerow([item["title"], item["link"],
item["snippet"], item["mail"], item["street"], item["postal"], item["phone"]])
if not os.path.exists("out"):
os.mkdir("out")
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
app.mount("/out", StaticFiles(directory="out"), name="out")
templates = Jinja2Templates(directory="templates")
class Body(BaseModel):
threads: int
max_pages: int
results_per_page: int
language: str
region: str
timeout: int
queries_file: str
users_agents_file: str
out_filename: str
@property
def user_agents(self):
return read_file(self.users_agents_file)
@property
def queries(self):
return read_file(self.queries_file)
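# Example request body for POST /process/ (illustrative values; the two input
# files are assumptions and must exist next to the script):
# {
#   "threads": 2, "max_pages": 3, "results_per_page": 10,
#   "language": "en", "region": "us", "timeout": 10,
#   "queries_file": "queries.txt", "users_agents_file": "user_agents.txt",
#   "out_filename": "results.csv"
# }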
@app.get("/")
async def index(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
@app.post("/process/")
async def process(request: Request, body: Body):
print(request)
await run_in_threadpool(execute_queries, body)
return templates.TemplateResponse("index.html", {"request": request})
if __name__ == "__main__":
urllib3.disable_warnings()
app.add_middleware(TrustedHostMiddleware, allowed_hosts=["*"])
uvicorn.run(app, host="127.0.0.1", port=8000, http="h11", loop="uvloop")
| StarcoderdataPython |
1787326 | <filename>tests/utils/molname_test.py
import pytest
from exojax.utils.molname import s2e_stable
def test_s2estable():
EXOMOL_SIMPLE2EXACT= \
{\
"CO":"12C-16O",\
"OH":"16O-1H",\
"NH3":"14N-1H3",\
"NO":"14N-16O",
"FeH":"56Fe-1H",
"H2S":"1H2-32S",
"SiO":"28Si-16O",
"CH4":"12C-1H4",
"HCN":"1H-12C-14N",
"C2H2":"12C2-1H2",
"TiO":"48Ti-16O",
"CO2":"12C-16O2",
"CrH":"52Cr-1H",
"H2O":"1H2-16O",
"VO":"51V-16O",
"CN":"12C-14N",
"PN":"31P-14N",
}
check=True
for i in EXOMOL_SIMPLE2EXACT:
assert s2e_stable(i)==EXOMOL_SIMPLE2EXACT[i]
if __name__ == "__main__":
test_s2estable()
| StarcoderdataPython |
137183 | import FWCore.ParameterSet.Config as cms
from Validation.HGCalValidation.simhitValidation_cff import *
from Validation.HGCalValidation.digiValidation_cff import *
from Validation.HGCalValidation.rechitValidation_cff import *
from Validation.HGCalValidation.hgcalHitValidation_cfi import *
from Validation.HGCalValidation.HGCalValidator_cfi import hgcalValidator
hgcalValidatorSequence = cms.Sequence(hgcalValidator)
hgcalValidation = cms.Sequence(hgcalSimHitValidationEE
+ hgcalSimHitValidationHEF
+ hgcalSimHitValidationHEB
+ hgcalDigiValidationEE
+ hgcalDigiValidationHEF
+ hgcalDigiValidationHEB
+ hgcalRecHitValidationEE
+ hgcalRecHitValidationHEF
+ hgcalRecHitValidationHEB
+ hgcalHitValidationSequence
+ hgcalValidatorSequence)
| StarcoderdataPython |
1719333 | <filename>pyzmq/examples/bench/xmlrpc_server.py<gh_stars>0
from SimpleXMLRPCServer import SimpleXMLRPCServer
def echo(x):
return x
server = SimpleXMLRPCServer(('localhost',10002))
server.register_function(echo)
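# A matching client call for reference (illustrative, not part of this file);
# run it from a separate Python 2 process while the server below is blocking:
#   import xmlrpclib
#   print xmlrpclib.ServerProxy('http://localhost:10002').echo('hello')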
server.serve_forever() | StarcoderdataPython |
14403 | <filename>setup.py
"""
Copyright 2016 Disney Connected and Advanced Technologies
Licensed under the Apache License, Version 2.0 (the "Apache License")
with the following modification; you may not use this file except in
compliance with the Apache License and the following modification to it:
Section 6. Trademarks. is deleted and replaced with:
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor
and its affiliates, except as required to comply with Section 4(c) of
the License and to reproduce the content of the NOTICE file.
You may obtain a copy of the Apache License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the Apache License with the above modification is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the Apache License for the specific
language governing permissions and limitations under the Apache License.
"""
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2016, Disney Connected and Advanced Technologies"
__license__ = "Apache"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from distutils.errors import DistutilsError
from distutils.spawn import find_executable
from setuptools import setup, Command
from glob import glob
import os.path
# If we have a thrift compiler installed, let's use it to re-generate
# the .py files. If not, we'll use the pre-generated ones.
class gen_thrift(Command):
user_options=[]
def initialize_options(self):
self.root = None
self.thrift = None
def finalize_options(self):
self.root = os.path.abspath(os.path.dirname(__file__))
self.thrift = find_executable('thrift1')
if self.thrift is None:
self.thrift = find_executable('thrift')
def run(self):
if self.thrift is None:
raise DistutilsError(
'Apache Thrift binary not found. Please install Apache Thrift or use pre-generated Thrift classes.')
self.mkpath(os.path.join(self.root, 'blockchain', 'gen'))
for f in glob(os.path.join(self.root, 'thrift', '*.thrift')):
self.spawn([self.thrift, '-out', os.path.join(self.root, 'blockchain', 'gen'),
'-r', '--gen', 'py',
os.path.join(self.root, 'thrift', f)])
setup(name = 'Blockchain',
version = '0.0.2',
description = 'blockchain stuff',
author = 'Folks',
packages = ['blockchain'],
cmdclass = {
'gen_thrift': gen_thrift
}
)
| StarcoderdataPython |
1613131 | import copy
import itertools
import re
import threading
import html5lib
import requests
import urllib.parse
from bs4 import BeautifulSoup
from typing import List
try:
import basesite
except (ModuleNotFoundError, ImportError) as e:
from . import basesite
class DaocaorenshuwuSite(basesite.BaseSite):
def __init__(self):
self.site_info = basesite.SiteInfo(
type='文学',
statue='上线版本',
url='https://www.daocaorenshuwu.com',
name='稻草人书屋',
brief_name='稻草人',
version='1.1',
max_threading_number=10, # 每个chapter会进行多线程下载(get_chapter_content),所以总线程数量设置为10
)
super().__init__(self.site_info)
self.base_url = 'https://www.daocaorenshuwu.com'
self.encoding = 'utf-8'
self.search_url = 'https://www.daocaorenshuwu.com/plus/search.php?q=%s'
self.session = requests.session()
@basesite.print_in_out
def get_books(self, search_info: str) -> List[basesite.Book]:
url = self.search_url % urllib.parse.quote(search_info)
r = self.try_get_url(self.session, url, try_timeout=5)
soup = BeautifulSoup(r.content, 'html.parser')
book_soup_list = soup.select('tbody > tr')
search_book_results = []
for book_soup in book_soup_list:
td_soup_list = book_soup.select('td')
book_url = self.base_url + td_soup_list[0].select_one('a').attrs['href']
if book_url.find('search.html') != -1:
continue
book_name = td_soup_list[0].text
book_author = td_soup_list[1].text
book_brief = "无"
book = basesite.Book(site=self, url=book_url, name=book_name, author=book_author,
brief=book_brief)
search_book_results.append(book)
return search_book_results
@basesite.print_in_out
def get_chapters(self, book: basesite.Book) -> List[basesite.Chapter]:
r = self.try_get_url(self.session, book.url)
if r is None:
return []
soup = BeautifulSoup(r.content, 'html.parser')
chapter_soup_list = soup.select('div#all-chapter div.panel-body div.item a')
chapters = [basesite.Chapter(site=self,
url='https:' + chapter.attrs['href'],
title=chapter.text)
for chapter in chapter_soup_list]
return chapters
def get_chapter_content(self, chapter: basesite.Chapter) -> str:
class _InnerDown(threading.Thread):
def __init__(self, func, session_, url):
super().__init__()
self.func = func
self.session = copy.deepcopy(session_)
self.url = url
self.r = None
def run(self) -> None:
self.r = self.func(self.session, self.url)
self.session.close()
session = copy.deepcopy(self.session)
partial_url = chapter.url[:-5]
        # step 1: download the first and second pages to find out how many pages there are in total
tasks = [_InnerDown(self.try_get_url, session, chapter.url),
_InnerDown(self.try_get_url, session, partial_url + "_2.html")]
for task in tasks:
task.start()
for task in tasks:
task.join()
r1, r2 = tasks[0].r, tasks[1].r
if r1 is None:
session.close()
return f'\r\n{chapter.title}\r\n下载失败'
        soup1 = BeautifulSoup(r1.content, 'html5lib')  # the page markup is malformed, so the faster html.parser cannot be used
has_multipages = False
try:
if soup1.select('div.text-center')[0].select('button.btn-info')[2].text.find('下一页') >= 0:
has_multipages = True
except IndexError:
pass
if has_multipages:
if r2 is None:
session.close()
return f'\r\n{chapter.title}\r\n下载失败'
soup2 = BeautifulSoup(r2.content, 'html5lib')
page_info = soup2.select_one('div.book-type li.active').text
            pages = int(re.search(r'/(\d+)页', page_info).group(1))  # capture the total page count, e.g. "2/10页"
soup_list = [soup1, soup2]
else:
pages = 1
soup_list = [soup1]
        # step 2: download the remaining pages with multiple threads
url_list = ([f'{partial_url}_{i}.html' for i in range(3, pages + 1)])
tasks = [_InnerDown(self.try_get_url, session, url) for url in url_list]
for task in tasks:
task.start()
for task in tasks:
task.join()
session.close()
for task in tasks:
if task.r is None:
return f'\r\n{chapter.title}\r\n下载失败'
else:
soup_list.append(BeautifulSoup(task.r.content, 'html5lib'))
        # step 3: merge the downloaded page contents
content_list = []
for soup in soup_list:
content_soup = soup.select_one('div#cont-text')
for i in content_soup.select('script,style,[class]'):
i.decompose()
content_list.append(content_soup.text.strip())
# return f'\r\n{chapter.title}\r\n{"".join(content_list)}'
return "\r\n".join(content_list)
def save_chapter(self, chapter, filename):
content = self.get_chapter_content(chapter)
with open(filename, 'w', encoding=self.encoding) as f:
f.write(content)
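if __name__ == '__main__':
    # Illustrative sketch (not in the original module): exercises the public methods
    # defined above. Network access and the basesite package are assumed, and the
    # search string below is only an example.
    site = DaocaorenshuwuSite()
    books = site.get_books('三体')
    if books:
        chapters = site.get_chapters(books[0])
        if chapters:
            print(site.get_chapter_content(chapters[0])[:200])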
| StarcoderdataPython |
1638533 | <filename>cnsproject/__init__.py<gh_stars>0
from pathlib import Path
from . import (
utils,
network,
learning,
encoding,
decision,
plotting,
)
ROOT_DIR = Path(__file__).parents[0].parents[0]
| StarcoderdataPython |
3344745 | <reponame>ArianeDucellier/timelags
"""
This module contains different methods of stacking of seismic signal
"""
from obspy.core.stream import Stream
from obspy.signal.filter import envelope
import numpy as np
from scipy.signal import hilbert
def linstack(streams, normalize=True, method='RMS'):
"""
Compute the linear stack of a list of streams
Several streams -> returns a stack for each station and each channel
One stream -> returns a stack for each channel (and merge stations)
Input:
type streams = list of streams
streams = List of streams to stack
type normalize = boolean
normalize = Normalize traces by RMS amplitude before stacking
Output:
type stack = stream
stack = Stream with stacked traces for each channel (and each station)
"""
# If there are several streams in the list,
# return one stack for each station and each channel
if len(streams) > 1:
stack = streams[np.argmax([len(stream) for stream in streams])].copy()
# If there is only one stream in the list,
# return one stack for each channel and merge the stations
else:
channels = []
for tr in streams[0]:
if not(tr.stats.channel in channels):
channels.append(tr.stats.channel)
stack = Stream()
for i in range(0, len(channels)):
stack.append(streams[0][0].copy())
stack[-1].stats['channel'] = channels[i]
stack[-1].stats['station'] = 'all'
# Initialize trace to 0
for tr in stack:
tr.data = np.zeros(tr.stats.npts)
# Initialize number of traces stacked to 0
ntr = np.zeros((len(stack)))
# Stack traces
for i in range(0, len(streams)):
for k in range (0, len(stack)):
if len(streams) > 1:
matchtr = streams[i].select(station=stack[k].stats.station, \
channel=stack[k].stats.channel)
else:
matchtr = streams[i].select(channel=stack[k].stats.channel)
for j in range(0, len(matchtr)):
ntr[k] = ntr[k] + 1
# Normalize the data before stacking
if normalize:
if (method == 'RMS'):
norm = matchtr[j].data / \
np.sqrt(np.mean(np.square(matchtr[j].data)))
elif (method == 'Max'):
norm = matchtr[j].data / \
np.max(np.abs(matchtr[j].data))
else:
raise ValueError( \
'Method must be RMS or Max')
norm = np.nan_to_num(norm)
else:
norm = matchtr[j].data
stack[k].data = np.sum((norm, stack[k].data), axis=0)
# Divide by the number of traces stacked
for k in range (0, len(stack)):
stack[k].data = stack[k].data / ntr[k]
return stack
def powstack(streams, weight=2.0, normalize=True):
"""
Compute the power (Nth-root) stack of a list of streams
Several streams -> returns a stack for each station and each channel
One stream -> returns a stack for each channel (and merge stations)
Input:
type streams = list of streams
streams = List of streams to stack
type weight = float
weight = Power of the stack (usually integer greater than 1)
type normalize = boolean
normalize = Normalize traces by RMS amplitude before stacking
Output:
type stack = stream
stack = Stream with stacked traces for each channel (and each station)
"""
# If there are several streams in the list,
# return one stack for each station and each channel
if len(streams) > 1:
stack = streams[np.argmax([len(stream) for stream in streams])].copy()
# If there is only one stream in the list,
# return one stack for each channel and merge the stations
else:
channels = []
for tr in streams[0]:
if not(tr.stats.channel in channels):
channels.append(tr.stats.channel)
stack = Stream()
for i in range(0, len(channels)):
stack.append(streams[0][0].copy())
stack[-1].stats['channel'] = channels[i]
stack[-1].stats['station'] = 'all'
# Initialize trace to 0
for tr in stack:
tr.data = np.zeros(tr.stats.npts)
# Initialize number of traces stacked to 0
ntr = np.zeros((len(stack)))
# Stack traces
for i in range(0, len(streams)):
for k in range (0, len(stack)):
if len(streams) > 1:
matchtr = streams[i].select(station=stack[k].stats.station, \
channel=stack[k].stats.channel)
else:
matchtr = streams[i].select(channel=stack[k].stats.channel)
for j in range(0, len(matchtr)):
ntr[k] = ntr[k] + 1
# Normalize the data before stacking
if normalize:
norm = matchtr[j].data / \
np.sqrt(np.mean(np.square(matchtr[j].data)))
norm = np.nan_to_num(norm)
else:
norm = matchtr[j].data
stack[k].data = np.sum((np.power(np.abs(norm), 1.0 / weight) \
* np.sign(norm), stack[k].data), axis=0)
# Take the power of the stack and divide by the number of traces stacked
for k in range (0, len(stack)):
stack[k].data = np.sign(stack[k].data) * np.power(stack[k].data, \
weight) / ntr[k]
return stack
def PWstack(streams, weight=2, normalize=True):
"""
Compute the phase-weighted stack of a list of streams
Several streams -> returns a stack for each station and each channel
One stream -> returns a stack for each channel (and merge stations)
Input:
type streams = list of streams
streams = List of streams to stack
type weight = float
weight = Power of the stack (usually integer greater than 1)
type normalize = boolean
normalize = Normalize traces by RMS amplitude before stacking
Output:
type stack = stream
stack = Stream with stacked traces for each channel (and each station)
"""
# First get the linear stack which we will weight by the phase stack
Linstack = linstack(streams, normalize=normalize)
# Compute the instantaneous phase
instaphases = []
for stream in streams:
instaphase = stream.copy()
for tr in instaphase:
analytic = hilbert(tr.data)
env = envelope(tr.data)
tr.data = analytic / env
tr.data = np.nan_to_num(tr.data)
instaphases.append(instaphase)
# Compute the phase stack
Phasestack = linstack(instaphases, normalize=False)
# Compute the phase-weighted stack
for tr in Phasestack:
tr.data = Linstack.select(station=tr.stats.station, \
channel=tr.stats.channel)[0].data \
* np.power(np.abs(tr.data), weight)
return Phasestack
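if __name__ == '__main__':
    # Minimal illustrative check (not part of the original module): stack the
    # three-channel example stream that ships with ObsPy.
    from obspy import read
    example_stream = read()  # ObsPy's bundled example waveforms
    print(linstack([example_stream], normalize=True))
    print(PWstack([example_stream], weight=2, normalize=True))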
| StarcoderdataPython |
1729978 | #!/usr/bin/python
import os, json, errno, codecs
import ConfigParser
import types
import re
_config = ConfigParser.ConfigParser()
_config.read('testconfig.ini')
base_path = _config.get("fix", "root_path")
out_path = _config.get("fix", "out_path")
if out_path == None:
out_path = base_path
def goodPayloadSchema(ps):
isGood = True
if ps is not None and isinstance(ps, types.ListType):
for s in ps:
isGood &= len(s) > 1
return isGood
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else: raise
for fname in os.listdir(base_path):
changed = False
file_path = os.path.join(base_path, fname)
if os.path.isdir(file_path) or re.match("^\\.", fname, flags=re.IGNORECASE) is not None:
continue
with codecs.open(file_path,'r+', 'utf-8-sig') as f:
data = json.load(f)
data['doc_version'] = '0.21.0'
if data.has_key('submitter'):
changed = True
del data['submitter']
if data.has_key('submitter_type'):
changed = True
del data['submitter_type']
if data.has_key('identify'):
del data['identify']
changed = True
if not data.has_key('identity'):
changed = True
identity = {
'submitter_type':'agent',
'submitter':'Learning Registry',
'curator':'European School Net',
'owner':'European School Net'
# 'signer':''
}
data['identity'] = identity
if data.has_key('submission_TOS'):
changed = True
del data['submission_TOS']
if not data.has_key('TOS'):
changed = True
data['TOS'] = {
'submission_TOS':'http://www.learningregistry.org/tos/cc0/v0-5/'
}
if data.has_key('payload_schema') and not goodPayloadSchema(data['payload_schema']):
bad = data['payload_schema']
better = "".join(bad)
good = re.split("\\s+", better)
data['payload_schema'] = good
changed = True
if data.has_key('created_timestamp'):
changed = True
del data['created_timestamp']
if data.has_key('update_timestamp'):
changed = True
del data['update_timestamp']
if data.has_key('publishing_node'):
del data['publishing_node']
if changed == True:
data = json.dumps(data, sort_keys=True, indent=4)
mkdir_p(out_path)
out_file = os.path.join(out_path, fname)
# with open(out_file,'w' ) as f:
# f.write(data)
with codecs.open(out_file,'w', "utf-8-sig" ) as f:
# f.write(u'\ufeff')
f.write(data)
f.close()
print out_file
| StarcoderdataPython |
3341337 | from datetime import date, timedelta
from django.core.exceptions import ValidationError
from django.test import SimpleTestCase, TestCase
from people import validators
from people.constants import MAX_HUMAN_AGE
from people.factories import PersonFactory
from people.utils import get_todays_adult_dob
class ValidateFullNameTestCase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.person = PersonFactory.build()
def test_one_name(self):
with self.assertRaisesRegex(
ValidationError, validators.INVALID_FULL_NAME_ERROR
):
validators.validate_full_name(self.person.username)
def test_one_name_with_leading_space(self):
with self.assertRaisesRegex(
ValidationError, validators.INVALID_FULL_NAME_ERROR
):
validators.validate_full_name(f" {self.person.username}")
def test_one_name_with_trailing_space(self):
with self.assertRaisesRegex(
ValidationError, validators.INVALID_FULL_NAME_ERROR
):
validators.validate_full_name(f"{self.person.username} ")
class ValidateDateOfBirthTestCase(SimpleTestCase):
def test_date_in_future(self):
with self.assertRaisesRegex(ValidationError, validators.DOB_IN_FUTURE_ERROR):
tomorrow = date.today() + timedelta(days=1)
validators.validate_date_of_birth(tomorrow)
def test_date_in_distant_past(self):
with self.assertRaisesRegex(
ValidationError, validators.DOB_IN_DISTANT_PAST_ERROR
):
days_lived = 365.25 * (MAX_HUMAN_AGE + 1)
long_ago = date.today() - timedelta(days=round(days_lived))
validators.validate_date_of_birth(long_ago)
class ValidateAdultTestCase(SimpleTestCase):
def test_child_dob(self):
error_message = f"Date of birth must be before {get_todays_adult_dob()}"
with self.assertRaisesRegex(ValidationError, error_message):
child_dob = get_todays_adult_dob() + timedelta(days=1)
validators.validate_adult(child_dob)
class ValidateChildTestCase(SimpleTestCase):
def test_child_dob(self):
error_message = f"Date of birth must be after {get_todays_adult_dob()}"
with self.assertRaisesRegex(ValidationError, error_message):
adult_dob = get_todays_adult_dob() - timedelta(days=1)
validators.validate_child(adult_dob)
class ValidatePersonUsernameTestCase(TestCase):
def test_non_existent_username(self):
username = "does-not-exist"
error_message = validators.PERSON_DOES_NOT_EXIST_ERROR % dict(username=username)
with self.assertRaisesRegex(ValidationError, error_message):
validators.validate_person_username(username)
class ValidateUniqueCaseInsensitiveUsernameTestCase(TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.person = PersonFactory()
def test_swapcase(self):
username = self.person.username.swapcase()
with self.assertRaisesRegex(
ValidationError, validators.NON_UNIQUE_USERNAME_ERROR
):
validators.validate_unique_case_insensitive_username(username)
| StarcoderdataPython |
3396539 | """Main entrypoint into the application.
----
Copyright 2019 Data Driven Empathy LLC
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import os
import flask
import pg8000
import model
import telemetry
import util
def create_app(app, records_keep, reporter):
"""Create a new exemplar exploration application.
Args:
app: The flask.Flask application into which endpoints should be registered.
records_keep: The records to be served by this application.
        reporter: Optional telemetry.UsageReporter with which to report usage information. If None,
telemetry is not reported.
Return:
        The flask.Flask application after registering endpoints.
"""
def report_maybe(page, query):
"""Report telemetry if reporter is given.
Args:
page: String page name.
query: String query or empty string if not applicable.
"""
if reporter == None:
return
ip_address = flask.request.remote_addr
user_agent = flask.request.headers.get('User-Agent')
reporter.report_usage(ip_address, user_agent, page, query)
@app.route('/')
def home():
"""Render the homepage.
Returns:
String rendered app page.
"""
report_maybe('home', '')
return flask.render_template('app.html', page='app')
@app.route('/code')
def code():
"""Render the page about code.
Returns:
String rendered code page.
"""
report_maybe('code', '')
return flask.render_template('code.html', page='code')
@app.route('/data')
def data():
"""Render the page about data.
Returns:
String rendered data page.
"""
report_maybe('data', '')
return flask.render_template('data.html', page='data')
@app.route('/download')
def download():
"""Redirect to the download.
Returns:
Redirect to the sqlite download.
"""
report_maybe('download', '')
return flask.redirect('/static/zip/who_wrote_this_data.zip')
@app.route('/privacy')
def privacy():
"""Render the page about privacy.
Returns:
String rendered data page.
"""
report_maybe('privacy', '')
return flask.render_template('privacy.html', page='privacy')
@app.route('/terms')
def terms():
"""Render the page about terms.
Returns:
String rendered data page.
"""
report_maybe('terms', '')
return flask.render_template('terms.html', page='terms')
@app.route('/paper')
def paper():
"""Render the page about the paper.
Returns:
String rendered paper page.
"""
report_maybe('paper', '')
return flask.render_template('paper.html', page='paper')
@app.route('/prototypical.json')
def get_prototypical():
"""Query for the prototypical articles across all topics.
Returns:
JSON listing of prototypical records.
"""
report_maybe('prototypical', '')
records = records_keep.get_prototypical()
records_serial = list(sorted(
map(model.serialize_record_to_dict, records),
key=lambda x: x['source']
))
return json.dumps({'records': records_serial})
@app.route('/query.json')
def query():
"""Query for prototypical articles within a topic (using "search" url param).
Returns:
JSON listing of prototypical records for the given topic.
"""
query_string = flask.request.args.get('search')
keywords = util.get_words(query_string)
report_maybe('query', query_string)
records = records_keep.query(keywords)
records_serial = list(sorted(
map(model.serialize_record_to_dict, records),
key=lambda x: x['source']
))
return json.dumps({'records': records_serial})
return app
def create_connection_generator(db_url, username, password, db_name, db_port):
"""Create a new closure over the given parameters to generate postgres connections.
Args:
db_url: The string hostname of the database.
password: The string password of the database.
db_name: The database name.
db_port: The string or integer db port.
Returns:
Function which, taking no paramters, will return a new database connection.
"""
def connect():
"""Inner closure.
Returns:
New DB API v2 compliant connection.
"""
return pg8000.connect(
host=db_url,
user=username,
password=password,
port=int(db_port),
database=db_name,
ssl=True
)
return connect
def create_default_app():
"""Setup this application using defaults.
Returns:
The flask.Flask application.
"""
records_keep = model.load_keep_from_disk()
reporter = None
if 'TELEMETRY_DB_URL' in os.environ:
db_url = os.environ['TELEMETRY_DB_URL']
username = os.environ['TELEMETRY_DB_USERNAME']
password = <PASSWORD>['<PASSWORD>']
db_name = os.environ['TELEMETRY_DB_NAME']
db_port = os.environ['TELEMETRY_DB_PORT']
connection_generator = create_connection_generator(
db_url,
username,
password,
db_name,
db_port
)
connection = connection_generator()
connection.close()
reporter = telemetry.UsageReporter(connection_generator)
app = create_app(flask.Flask(__name__), records_keep, reporter)
return app
application = create_default_app()
if __name__ == '__main__':
application.run()
| StarcoderdataPython |
1717621 | <reponame>opennode/nodeconductor
from __future__ import unicode_literals
import logging
from smtplib import SMTPException
from celery import shared_task
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils import timezone
from waldur_core.structure.models import ProjectRole
from waldur_core.users import models
logger = logging.getLogger(__name__)
@shared_task(name='waldur_core.users.cancel_expired_invitations')
def cancel_expired_invitations(invitations=None):
"""
    Invitation lifetime must be specified in Waldur Core settings with the parameter
    "INVITATION_LIFETIME". Invitations created earlier than now minus that lifetime are marked as expired.
"""
expiration_date = timezone.now() - settings.WALDUR_CORE['INVITATION_LIFETIME']
if not invitations:
invitations = models.Invitation.objects.filter(state=models.Invitation.State.PENDING)
invitations = invitations.filter(created__lte=expiration_date)
invitations.update(state=models.Invitation.State.EXPIRED)
@shared_task(name='waldur_core.users.send_invitation')
def send_invitation(invitation_uuid, sender_name):
invitation = models.Invitation.objects.get(uuid=invitation_uuid)
if invitation.project_role is not None:
context = dict(type='project', name=invitation.project.name)
role_prefix = 'project' if invitation.project_role == ProjectRole.MANAGER else 'system'
context['role'] = '%s %s' % (role_prefix, invitation.get_project_role_display())
else:
context = dict(
type='organization',
name=invitation.customer.name,
role=invitation.get_customer_role_display()
)
context['sender'] = sender_name
context['link'] = invitation.link_template.format(uuid=invitation_uuid)
subject = render_to_string('users/invitation_subject.txt', context)
text_message = render_to_string('users/invitation_message.txt', context)
html_message = render_to_string('users/invitation_message.html', context)
logger.debug('About to send invitation to {email} to join {name} {type} as {role}'.format(
email=invitation.email, **context))
try:
send_mail(subject, text_message, settings.DEFAULT_FROM_EMAIL, [invitation.email], html_message=html_message)
except SMTPException as e:
invitation.error_message = str(e)
invitation.save(update_fields=['error_message'])
raise
| StarcoderdataPython |
3204080 | # Generated by Django 3.1.8 on 2021-05-13 07:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('business_register', '0096_auto_20210511_1204'),
]
operations = [
migrations.AlterField(
model_name='declaration',
name='nacp_declarant_id',
field=models.PositiveBigIntegerField(db_index=True, verbose_name='NACP id of the declarant'),
),
migrations.AlterField(
model_name='historicalpep',
name='nacp_id',
field=models.PositiveBigIntegerField(blank=True, db_index=True, help_text='id from the National agency on corruption prevention', null=True, verbose_name='id from NACP'),
),
migrations.AlterField(
model_name='pep',
name='nacp_id',
field=models.PositiveBigIntegerField(blank=True, help_text='id from the National agency on corruption prevention', null=True, unique=True, verbose_name='id from NACP'),
),
]
| StarcoderdataPython |
4835444 | # Until n becomes 1, repeatedly apply one of two operations: n - 1, or n / k. The n / k operation may be chosen only when n is divisible by k. Find the minimum number of operations needed to reach 1.
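# Worked example (added for illustration): n=25, k=3
#   25 -> 24 (one -1 step), 24 -> 8 (one /3 step), 8 -> 6 (two -1 steps),
#   6 -> 2 (one /3 step), 2 -> 1 (one -1 step)  => 6 operations in total.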
n,k = map(int,input().split())
result = 0
while True:
target = ( n // k ) * k
result += (n-target)
n = target
if n<k:
break
result += 1
n //= k
result += (n-1)
print(result)
| StarcoderdataPython |
3226950 | <reponame>petercunning/notebook
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # netCDF File Visualization Case Study
#
# I was asked by a colleague to visualize data contained within this [netCDF file](https://motherlode.ucar.edu/repository/entry/show/RAMADDA/Unidata/Staff/Julien+Chastang/netcdf-explore?entryid=c7239224-d3fe-45d8-b100-43ae043824c3) ([OPeNDAP link](https://motherlode.ucar.edu/repository/opendap/41f2b38a-4e70-4135-8ff8-dbf3d1dcbfc1/entry.das)) with Python. What follows is an exploration of how I achieved that objective. Because this exercise touches upon many technologies related to Unidata, it makes for an interesting case study. We will be meandering through,
#
# - netCDF
# - WMO GRIB metadata
# - Map projections
# - xray data analysis library
# - cartopy visualization library
# <markdowncell>
# # Crack Open the File
#
# To get our bearings let's see what there is inside our netCDF file. We will be using the [xray library](https://github.com/xray/xray) to dig inside our netCDF data. xray is similar to pandas, but for the [Common Data Model](http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM/). We could have just used the [netcdf4-python library](https://github.com/Unidata/netcdf4-python) but xray has output that is more nicely formatted. Let's first import xray and open the dataset.
# <codecell>
import xray
ds = xray.open_dataset('https://motherlode.ucar.edu/repository/opendap/41f2b38a-4e70-4135-8ff8-dbf3d1dcbfc1/entry.das',
decode_times=False)
print(ds)
# <markdowncell>
# # Dimensions, Coordinates, Data Variables
#
# As far as the dimensions and coordinates go, the most relevant and important coordinates variables are `x` and `y`. We can see the data variables, such as, temperature (`t`), mixing ratio (`mr`), and potential temperature (`th`), are mostly on a 1901 x 1801 grid. There is also the mysterious `nav` dimension and associated data variables which we will be examining later.
#
# Let's set a goal of visualizing **potential temperature** with the [Cartopy](http://scitools.org.uk/cartopy/) plotting package.
#
# The first step is to get more information concerning the variables we are interested in. For example, let's look at _potential temperature_ or `th`.
# <codecell>
print(ds['th'])
# <markdowncell>
# # potential temperature (`th`)
#
# Let's grab the data array for potential temperature (`th`).
# <codecell>
th = ds['th'].values[0][0]
print(th)
# <markdowncell>
# # To Visualize the Data, We have to Decrypt the Projection
#
# In order, to visualize the data that are contained within a two-dimensional array onto a map that represents a three-dimensional globe, we need to understand the projection of the data.
#
# We can make an educated guess these are contained in the data variables with the `nav` cooridinate variable.
#
# Specifically,
#
# - `grid_type`
# - `grid_type_code`
# - `x_dim`
# - `y_dim`
# - `Nx`
# - `Ny`
# - `La1`
# - `Lo1`
# - `LoV`
# - `Latin1`
# - `Latin2`
# - `Dx`
# - `Dy`
#
# **But what are these??**
# <headingcell level=1>
# For Grins, Let's Scrutinize the `grid_type_code`
# <codecell>
print(ds['grid_type_code'])
# <markdowncell>
# # Google to the Rescue
#
# A simple Google search of `GRIB-1 GDS data representation type` takes us to
# [A GUIDE TO THE CODE FORM FM 92-IX Ext. GRIB Edition 1 from 1994](http://www.wmo.int/pages/prog/www/WMOCodes/Guides/GRIB/GRIB1-Contents.html "GRIB") document. Therein one can find an explanation of the variables needed to understand the map projection. Let's review these variables.
# <codecell>
print(ds['grid_type_code'].values[0])
# <markdowncell>
# # What is `grid_type_code` of `5`?
#
# Let's look at [Table 6 ](http://www.wmo.int/pages/prog/www/WMOCodes/Guides/GRIB/GRIB1-Contents.html "GRIB Projection Definitions"). A `grid_type_code` of `5` corresponds to a projection of **Polar Stereographic**.
# <headingcell level=1>
# Next up `grid_type`
# <codecell>
grid_type = ds['grid_type'].values
print('The grid type is ', grid_type[0])
# <markdowncell>
# # Uh oh! Polar Stereographic or Lambert Conformal??
#
# _Note that this newest piece of information relating to a Lambert Conformal projection disagrees with the earlier projection information about a Polar Stereographic projection._ There is a **bug** in the metadata description of the projection.
# <markdowncell>
# # Moving on Anyway, next `Nx` and `Ny`
#
# According to the grib documentation `Nx` and `Ny` represent the number grid points along the x and y axes. Let's grab those.
# <codecell>
nx, ny = ds['Nx'].values[0], ds['Ny'].values[0]
print(nx, ny)
# <markdowncell>
# # `La1` and `Lo1`
#
# Next let's get `La1` and `Lo1` which are defined as the "first grid points" These are probably the latitude and longitude for one of the corners of the grid.
# <codecell>
la1, lo1 = ds['La1'].values[0], ds['Lo1'].values[0]
print(la1, lo1)
# <markdowncell>
# # `Latin1` and `Latin2`
#
# Next up are the rather mysteriously named `Latin1` and `Latin2` variables. When I first saw these identifiers, I thought they referred to a Unicode block, but in fact they relate to the secants of the projection cone. I do not know why they are called "Latin" and this name is confusing. **At any rate, we can feel comfortable that we are dealing with Lambert Conformal rather than Polar Stereographic.**
#
# 
#
# Credit: http://www.geo.hunter.cuny.edu/~jochen
# <codecell>
latin1, latin2 = ds['Latin1'].values[0], ds['Latin2'].values[0]
print(latin1, latin2)
# <markdowncell>
# # The Central Meridian for the Lambert Conformal Projection, `LoV`
#
# If we are defining a Lambert Conformal projection, we will require the central meridian that the GRIB documentation refers to as `LoV`.
# <codecell>
lov = ds['LoV'].values[0]
print(lov)
# <markdowncell>
# # `Dx` and `Dy`
#
# Finally, let's look at the grid increments. In particular, we need to find the units.
# <codecell>
print(ds['Dx'])
print(ds['Dy'])
# <markdowncell>
# # Units for `Dx` and `Dy`
#
# The units for the deltas are in meters.
# <codecell>
dx,dy = ds['Dx'].values[0],ds['Dy'].values[0]
print(dx,dy)
# <markdowncell>
# # Let's Review What We Have
#
# We now have all the information we need to understand the Lambert projection:
#
# - The secants of the Lambert Conformal projection (`Latin1`, `Latin2`)
# - The central meridian of the projection (`LoV`)
#
# Moreover, we have additional information that shows how the data grid relates to the projection:
#
# - The number of grid points in x and y (`Nx`, `Ny`)
# - The delta in meters between grid point (`Dx`, `Dy`)
# - The first latitude and longitude of the data (`first latitude`, `first longitude`).
# <markdowncell>
# # We are Ready for Visualization (almost)!
#
# Let's import **cartopy** and **matplotlib**.
# <codecell>
%matplotlib inline
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib as mpl
# <headingcell level=1>
# Define the Lambert Conformal Projection with Cartopy
# <codecell>
proj = ccrs.LambertConformal(central_longitude=lov,standard_parallels=(latin1,latin2))
# <markdowncell>
# # Lambert Conformal Grid Extents
#
# - To plot the data we need the `left`,`right`,`bottom`,`top` extents of the grid **expressed in Lambert Conformal
# coordinates**.
# - __**Key point**: The projection coordinate systems have flat topology and Euclidean distance.__
# <markdowncell>
# # Calculating the Extents
#
# Remember, we have:
#
# - The number of grid points in x and y (`Nx`, `Ny`)
# - The delta in meters between grid point (`Dx`, `Dy`)
# - The first latitude and longitude of the data (`first latitude`, `first longitude`).
#
# We have one of the corners in latitude and longitude, but we need to convert it LC coordinates and derive the other corner.
# <markdowncell>
# # Platte Carrée Projection
#
# The Platte Carrée Projection is a very simple X,Y/Cartesian projection. It is used a lot in Cartopy because it allows you to express coordinates in familiar Latitude and Longitudes. **Remember**: The projection coordinate systems have flat topology and Euclidean distance.
# <markdowncell>
# # Platte Carrée
#
# 
#
# Source: [Wikipedia Source](https://en.wikipedia.org/wiki/Equirectangular_projection)
# <headingcell level=1>
# Create the PlatteCarre Cartopy Projection
# <codecell>
pc = ccrs.PlateCarree()
# <markdowncell>
# # Convert Corner from Lat/Lon PlatteCarre to LC
#
# The `transform_point` method translates coordinates from one projection coordinate system to the other.
# <codecell>
left,bottom = proj.transform_point(lo1,la1,pc)
print(left,bottom)
# <markdowncell>
# # Derive Opposite Corner
#
# Derive the opposite corner from the number of points and the delta. **Again**, we can do this because the projection coordinate systems have flat topology and Euclidean distance.
# <codecell>
right,top = left + nx*dx,bottom + ny*dy
print(right,top)
# <markdowncell>
# # Plot It Up!
#
# We now have the extents, we are ready to plot.
# <codecell>
#Define the figure
fig = plt.figure(figsize=(12, 12))
# Define the extents and add the data
ax = plt.axes(projection=proj)
extents = (left, right, bottom, top)
ax.contourf(th, origin='lower', extent=extents, transform=proj)
# Add bells and whistles
ax.coastlines(resolution='50m', color='black', linewidth=2)
ax.add_feature(ccrs.cartopy.feature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lines', scale='50m',facecolor='none'))
ax.add_feature(ccrs.cartopy.feature.BORDERS, linewidth='1', edgecolor='black')
ax.gridlines()
plt.show()
# <markdowncell>
# # Exercises for the Reader
#
# - The extents are actually not perfect and snip the image. Why? Fix.
# - Add a colorbar and jazz up the plot.
# - Trick question: Can you label the axes with latitude and longitudes?
# - Try a different projection which should be fairly easy with Cartopy.
# <codecell>
th.shape
# <codecell>
th[0,0]
# <codecell>
| StarcoderdataPython |
4841173 | <filename>python/cugraph/dask/pagerank/__init__.py
from .pagerank import pagerank, get_chunksize
| StarcoderdataPython |
3236041 | <gh_stars>0
from baseplugin import BasePlugin, PluginPlotItem, SegmentConfig, PluginConfig, PlotConfig
from implotwidget import ImplotWidget
from implotwidget import ImplotWidgetCont
from lineplotwidget import LineplotWidget
from phdplotwidget import PHDPlotWidgetCont
import numpy as np
import time
IM_BITS = 10
PHD_BITS = 8
PAK_SIZE = 244
# An example of a simple plugin
class SkeletonPlugin(BasePlugin):
def __init__(self):
BasePlugin.__init__(self)
# New config: 10 bit x and y (1024x1024).
# 8 bit pulse height.
# One segment
# Overwrite config
self._config = PluginConfig()
# Segment configurations
self._config.segment_configs = [SegmentConfig(xbit = IM_BITS, ybit = IM_BITS, pbit = PHD_BITS, segment = 0)]
# Plot configurations
pc = PlotConfig(xbit = IM_BITS, ybit = IM_BITS, pbit = PHD_BITS, segment = 0)
print(pc.xbit)
self._config.plots = [PluginPlotItem(plot_config = pc, name = ImplotWidgetCont, row = 0, column = 0,
row_span = 1, column_span = 1, segment = 0),
PluginPlotItem(plot_config = pc, name = PHDPlotWidgetCont, row = 0, column = 1,
row_span = 1, column_span = 1, segment = 0),
PluginPlotItem(plot_config = pc, name = LineplotWidget, row = 1, column = 0,
row_span = 1, column_span = 2, segment = 0)]
# self._config.segments = 1
# self._config.xbits = [IM_BITS]
# self._config.ybits = [IM_BITS]
# self._config.pbits = [PHD_BITS]
# self._config.plots = [PluginPlotItem(name = ImplotWidget, row = 0, column = 0,
# row_span = 1, column_span = 1, segment = 0,
# xbit = 10. ybit = 10, pbit = 8)]
# For random number generation...
self._cen = (2**IM_BITS)//2
self._pcen = (2**PHD_BITS)//2
print("Skeleton plugin loaded...")
# Override _run function...
def _run(self):
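        # Demo loop: generate 244 synthetic Gaussian events per packet (roughly 100 packets/s),
        # populate the shared data object, and queue it for the plot widgets; the lock/flag
        # checks below implement pause and shutdown of this worker thread.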
while(True):
# This portion allows pausing and closing of the thread
if (self._lock.is_set()):
# Search for quit flag
if (self._flag):
# End the thread...
return
# while paused do small sleep to keep CPU usage lower
time.sleep(0.01)
continue
# Generate random gaussian data for x, y, p
x = np.random.normal(self._cen, 40, 244)
x = x.astype(np.uint16)
x[(x > 1023)] = 1023
y = np.random.normal(self._cen, 40, 244)
y = y.astype(np.uint16)
y[(y > 1023)] = 1023
p = np.random.normal(self._pcen, 40, 244)
p = p.astype(np.uint8)
#p[(p > 255)] = 255
#s = np.zeros(244, dtype = np.uint8)
s = 0
# Populate data object
self._data.x[:] = x[:]
self._data.y[:] = y[:]
self._data.p[:] = p[:]
self._data.segment = s
self._data.len = 244
# Queue up the data...
self._q.put(self._data)
time.sleep(0.01)
def load_plugin():
p = SkeletonPlugin()
return(p)
| StarcoderdataPython |
80864 | <filename>apptools/logger/agent/quality_agent_mailer.py
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
# Standard library imports.
import logging
import os
# Enthought library imports.
from traits.util.home_directory import get_home_directory
# Setup a logger for this module.
logger = logging.getLogger(__name__)
def create_email_message(
fromaddr,
toaddrs,
ccaddrs,
subject,
priority,
include_project=False,
stack_trace="",
comments="",
):
# format a message suitable to be sent to the Roundup bug tracker
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.mime.base import MIMEBase
message = MIMEMultipart()
message["Subject"] = "%s [priority=%s]" % (subject, priority)
message["To"] = ", ".join(toaddrs)
message["Cc"] = ", ".join(ccaddrs)
message["From"] = fromaddr
message.preamble = "You will not see this in a MIME-aware mail reader.\n"
message.epilogue = " " # To guarantee the message ends with a newline
# First section is simple ASCII data ...
m = []
m.append("Bug Report")
m.append("==============================")
m.append("")
if len(comments) > 0:
m.append("Comments:")
m.append("========")
m.append(comments)
m.append("")
if len(stack_trace) > 0:
m.append("Stack Trace:")
m.append("===========")
m.append(stack_trace)
m.append("")
msg = MIMEText("\n".join(m))
message.attach(msg)
# Include the log file ...
if True:
try:
log = os.path.join(get_home_directory(), "envisage.log")
f = open(log, "r")
entries = f.readlines()
f.close()
ctype = "application/octet-stream"
maintype, subtype = ctype.split("/", 1)
msg = MIMEBase(maintype, subtype)
msg = MIMEText("".join(entries))
msg.add_header(
"Content-Disposition", "attachment", filename="logfile.txt"
)
message.attach(msg)
except Exception:
logger.exception("Failed to include log file with message")
# Include the environment variables ...
if True:
"""
Transmit the user's environment settings as well. Main purpose is to
work out the user name to help with following up on bug reports and
in future we should probably send less data.
"""
try:
entries = []
for key, value in os.environ.items():
entries.append("%30s : %s\n" % (key, value))
ctype = "application/octet-stream"
maintype, subtype = ctype.split("/", 1)
msg = MIMEBase(maintype, subtype)
msg = MIMEText("".join(entries))
msg.add_header(
"Content-Disposition", "attachment", filename="environment.txt"
)
message.attach(msg)
except Exception:
logger.exception(
"Failed to include environment variables with message"
)
return message
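# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of building and sending a report.
# The addresses and SMTP host below are placeholders, not real defaults.
if __name__ == "__main__":
    import smtplib

    report = create_email_message(
        fromaddr="user@example.com",
        toaddrs=["bugs@example.com"],
        ccaddrs=[],
        subject="Crash on startup",
        priority="high",
        stack_trace="Traceback (most recent call last): ...",
        comments="Happens every time the project is loaded.",
    )
    # Send the assembled MIME message through a local SMTP server.
    with smtplib.SMTP("localhost") as server:
        server.send_message(report)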
| StarcoderdataPython |
1748771 | <gh_stars>10-100
from server.extensions import db
from sqlalchemy import Numeric
from sqlalchemy.dialects.postgresql import JSONB, UUID
from server.models.mixin import TimestampMixin
from sqlalchemy import (
UniqueConstraint,
)
class Portfolio(db.Model, TimestampMixin):
__tablename__ = "portfolio"
id = db.Column(db.Integer(), db.Sequence("portfolio_id_seq"), primary_key=True)
name = db.Column(db.String(50), nullable=False)
info = db.Column(db.Text())
user_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("users.id", ondelete="CASCADE"),
nullable=False,
)
stocks = db.relationship("Stock", secondary="portfolio_stocks", backref="portfolio")
holdings = db.relationship(
"Holding", cascade="all,delete", backref="portfolio", uselist=True
)
__table_args__ = (
UniqueConstraint("name", "user_id", name="uq_portfolio_name_user_id"),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def json_short(self):
return {
"id": self.id,
"name": self.name,
"holdings": [holding.json for holding in self.holdings],
"stocks": [stock.json_short for stock in self.stocks],
}
@property
def json(self):
return {
"id": self.id,
"name": self.name,
"holdings": [holding.json for holding in self.holdings],
"stocks": [stock.json for stock in self.stocks],
}
class Stock(db.Model, TimestampMixin):
__tablename__ = "stocks"
id = db.Column(db.Integer(), db.Sequence("stocks_id_seq"), primary_key=True)
ticker = db.Column(db.String(15))
short_name = db.Column(db.String(255))
latest_market_data = db.Column(JSONB)
company_info = db.Column(JSONB)
__table_args__ = (UniqueConstraint("ticker", name="uq_stocks_ticker"),)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def json(self):
return {
"id": self.id,
"ticker": self.ticker,
"short_name": self.short_name,
"company_info": self.company_info,
"latest_market_data": self.latest_market_data,
}
@property
def json_short(self):
return {
"id": self.id,
"ticker": self.ticker,
"short_name": self.short_name,
"latest_market_data": self.latest_market_data,
}
class Holding(
db.Model, TimestampMixin
): # all user holdings (which portfolio, which stock, at what price)
__tablename__ = "holdings"
id = db.Column(db.Integer(), db.Sequence("holdings_id_seq"), primary_key=True)
user_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("users.id", ondelete="CASCADE"),
nullable=False,
)
portfolio_id = db.Column(
db.Integer(), db.ForeignKey("portfolio.id", ondelete="CASCADE"), nullable=False
)
stock_id = db.Column(
db.Integer(), db.ForeignKey("stocks.id", ondelete="CASCADE"), nullable=False
)
shares = db.Column(Numeric(asdecimal=False), nullable=False)
price = db.Column(Numeric(asdecimal=False), nullable=False)
purchased_at = db.Column(db.DateTime(), nullable=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def json(self):
return {
"id": self.id,
"user_id": self.user_id,
"portfolio_id": self.portfolio_id,
"stock_id": self.stock_id,
"price": self.price,
"purchased_at": self.purchased_at,
"shares": self.shares,
}
class PortfolioStocks(db.Model):
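    """Association table for the many-to-many relationship between portfolios and stocks."""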
__tablename__ = "portfolio_stocks"
portfolio_id = db.Column(
db.Integer(),
db.ForeignKey("portfolio.id", ondelete="CASCADE"),
primary_key=True,
)
stock_id = db.Column(
db.Integer(), db.ForeignKey("stocks.id", ondelete="CASCADE"), primary_key=True
)
| StarcoderdataPython |
4811908 | <reponame>c11/earthengine-py-notebooks
import ee
from ee_plugin import Map
# Map an expression over a collection.
#
# Computes the mean NDVI and SAVI by mapping an expression over a collection
# and taking the mean. This intentionally exercises both variants of
# Image.expression.
# Filter the L7 collection to a single month.
collection = ee.ImageCollection('LANDSAT/LE07/C01/T1_TOA') \
.filterDate('2002-11-01', '2002-12-01')
# A function to compute NDVI.
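# NDVI = (NIR - Red) / (NIR + Red); for Landsat 7, B4 is the near-infrared band and B3 is the red band.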
def NDVI(image):
return image.expression('float(b("B4") - b("B3")) / (b("B4") + b("B3"))')
# A function to compute Soil Adjusted Vegetation Index.
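# SAVI = (1 + L) * (NIR - Red) / (NIR + Red + L), where L is a soil-brightness correction factor (0.2 here).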
def SAVI(image):
return image.expression(
'(1 + L) * float(nir - red)/ (nir + red + L)',
{
'nir': image.select('B4'),
'red': image.select('B3'),
'L': 0.2
})
# Shared visualization parameters.
vis = {
'min': 0,
'max': 1,
'palette': [
'FFFFFF', 'CE7E45', 'DF923D', 'F1B555', 'FCD163', '99B718',
'74A901', '66A000', '529400', '3E8601', '207401', '056201',
'004C00', '023B01', '012E01', '011D01', '011301'
]
}
Map.setCenter(-93.7848, 30.3252, 11)
# Map the functions over the collection, reduce to mean and display.
Map.addLayer(collection.map(NDVI).mean(), vis, 'Mean NDVI')
Map.addLayer(collection.map(SAVI).mean(), vis, 'Mean SAVI')
| StarcoderdataPython |
88249 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .microsoft_ciqs_models_gallery_person import MicrosoftCiqsModelsGalleryPerson
from .microsoft_ciqs_models_gallery_resource_provider import MicrosoftCiqsModelsGalleryResourceProvider
from .microsoft_ciqs_models_gallery_guide import MicrosoftCiqsModelsGalleryGuide
from .microsoft_ciqs_models_gallery_try_it_now import MicrosoftCiqsModelsGalleryTryItNow
from .microsoft_ciqs_models_gallery_content import MicrosoftCiqsModelsGalleryContent
from .microsoft_ciqs_models_gallery_estimated_cost import MicrosoftCiqsModelsGalleryEstimatedCost
from .microsoft_ciqs_models_gallery_link import MicrosoftCiqsModelsGalleryLink
from .microsoft_ciqs_models_gallery_regex_entry import MicrosoftCiqsModelsGalleryRegexEntry
from .microsoft_ciqs_models_error_info import MicrosoftCiqsModelsErrorInfo
from .microsoft_ciqs_models_gallery_parameter import MicrosoftCiqsModelsGalleryParameter
from .microsoft_ciqs_models_provisioning_steps_provisioning_step import MicrosoftCiqsModelsProvisioningStepsProvisioningStep
from .microsoft_ciqs_models_provisioning_steps_azure_resource_group import MicrosoftCiqsModelsProvisioningStepsAzureResourceGroup
from .microsoft_ciqs_models_provisioning_steps_azure_resource_list import MicrosoftCiqsModelsProvisioningStepsAzureResourceList
from .microsoft_ciqs_models_provisioning_steps_aad_application import MicrosoftCiqsModelsProvisioningStepsAadApplication
from .microsoft_ciqs_models_provisioning_steps_delete_azure_entities import MicrosoftCiqsModelsProvisioningStepsDeleteAzureEntities
from .microsoft_ciqs_models_gallery_summary import MicrosoftCiqsModelsGallerySummary
from .microsoft_ciqs_models_gallery_extended_property import MicrosoftCiqsModelsGalleryExtendedProperty
from .microsoft_ciqs_models_gallery_template import MicrosoftCiqsModelsGalleryTemplate
from .microsoft_ciqs_models_email_parameter import MicrosoftCiqsModelsEmailParameter
from .microsoft_ciqs_models_email_template import MicrosoftCiqsModelsEmailTemplate
from .microsoft_ciqs_models_gallery_offer_plan import MicrosoftCiqsModelsGalleryOfferPlan
from .microsoft_ciqs_models_deployment_deployment import MicrosoftCiqsModelsDeploymentDeployment
from .microsoft_ciqs_models_gallery_function_definition import MicrosoftCiqsModelsGalleryFunctionDefinition
from .microsoft_ciqs_models_deployment_create_deployment_request import MicrosoftCiqsModelsDeploymentCreateDeploymentRequest
from .microsoft_ciqs_models_deployment_provisioning_log import MicrosoftCiqsModelsDeploymentProvisioningLog
from .microsoft_ciqs_models_deployment_deployment_provisioning_step import MicrosoftCiqsModelsDeploymentDeploymentProvisioningStep
from .microsoft_ciqs_models_deployment_deployment_details import MicrosoftCiqsModelsDeploymentDeploymentDetails
from .system_nullable_system_net_http_status_code import SystemNullableSystemNetHttpStatusCode
from .microsoft_ciqs_models_deployment_deployment_provisioning_step_deployment_provisioning_step_hidden_properties import MicrosoftCiqsModelsDeploymentDeploymentProvisioningStepDeploymentProvisioningStepHiddenProperties
from .microsoft_ciqs_api_models_execute_provisioning_step_response import MicrosoftCiqsApiModelsExecuteProvisioningStepResponse
from .microsoft_ciqs_api_models_delete_deployment_result import MicrosoftCiqsApiModelsDeleteDeploymentResult
__all__ = [
'MicrosoftCiqsModelsGalleryPerson',
'MicrosoftCiqsModelsGalleryResourceProvider',
'MicrosoftCiqsModelsGalleryGuide',
'MicrosoftCiqsModelsGalleryTryItNow',
'MicrosoftCiqsModelsGalleryContent',
'MicrosoftCiqsModelsGalleryEstimatedCost',
'MicrosoftCiqsModelsGalleryLink',
'MicrosoftCiqsModelsGalleryRegexEntry',
'MicrosoftCiqsModelsErrorInfo',
'MicrosoftCiqsModelsGalleryParameter',
'MicrosoftCiqsModelsProvisioningStepsProvisioningStep',
'MicrosoftCiqsModelsProvisioningStepsAzureResourceGroup',
'MicrosoftCiqsModelsProvisioningStepsAzureResourceList',
'MicrosoftCiqsModelsProvisioningStepsAadApplication',
'MicrosoftCiqsModelsProvisioningStepsDeleteAzureEntities',
'MicrosoftCiqsModelsGallerySummary',
'MicrosoftCiqsModelsGalleryExtendedProperty',
'MicrosoftCiqsModelsGalleryTemplate',
'MicrosoftCiqsModelsEmailParameter',
'MicrosoftCiqsModelsEmailTemplate',
'MicrosoftCiqsModelsGalleryOfferPlan',
'MicrosoftCiqsModelsDeploymentDeployment',
'MicrosoftCiqsModelsGalleryFunctionDefinition',
'MicrosoftCiqsModelsDeploymentCreateDeploymentRequest',
'MicrosoftCiqsModelsDeploymentProvisioningLog',
'MicrosoftCiqsModelsDeploymentDeploymentProvisioningStep',
'MicrosoftCiqsModelsDeploymentDeploymentDetails',
'SystemNullableSystemNetHttpStatusCode',
'MicrosoftCiqsModelsDeploymentDeploymentProvisioningStepDeploymentProvisioningStepHiddenProperties',
'MicrosoftCiqsApiModelsExecuteProvisioningStepResponse',
'MicrosoftCiqsApiModelsDeleteDeploymentResult',
]
| StarcoderdataPython |
1780605 | # Module
import sys
import time
from threading import Thread
from . import updater
from .modules import audio_module, time_module, dropbox_module, wlan_module, internet_module,\
battery_module, load_module
def run():
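    # Emit an i3bar-style status protocol header: a version object, then the opening of
    # an endless JSON array of status lines (starting with an empty one).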
sys.stdout.write('{"version":1}\n')
sys.stdout.write('[\n')
sys.stdout.write('[]\n')
MODULES = [dropbox_module, audio_module, wlan_module,
# internet_module,
battery_module, load_module, time_module]
MODULE_NAMES = {module:'Module of ' + str(module) for module in MODULES}
UPDATER = updater.Updater(MODULE_NAMES.values())
for module in MODULES:
        thread = Thread(name=MODULE_NAMES[module], target=lambda m=module: m.run(UPDATER.updater_hook(MODULE_NAMES[m])), daemon=True)
thread.start()
while True:
time.sleep(1)
| StarcoderdataPython |
67264 | <filename>src/pymor/discretizers/builtin/grids/tria.py
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.core.cache import cached
from pymor.discretizers.builtin.grids.interfaces import AffineGridWithOrthogonalCenters
from pymor.discretizers.builtin.grids.referenceelements import triangle
class TriaGrid(AffineGridWithOrthogonalCenters):
r"""Basic implementation of a triangular grid on a rectangular domain.
The global face, edge and vertex indices are given as follows ::
6---------10----------7---------11----------8
| \ / | \ / |
| 22 10 18 | 23 11 19 |
| \ / | \ / |
3 14 11 6 4 15 12 7 5
| / \ | / \ |
| 14 2 26 | 15 3 27 |
| / \ | / \ |
3----------8----------4----------9----------5
| \ / | \ / |
| 20 8 16 | 21 9 17 |
| \ / | \ / |
0 12 9 4 1 13 10 5 2
| / \ | / \ |
| 12 0 24 | 13 1 25 |
| / \ | / \ |
0----------6----------1----------7----------2
Parameters
----------
num_intervals
Tuple `(n0, n1)` determining a grid with `n0` x `n1` codim-0 entities.
domain
Tuple `(ll, ur)` where `ll` defines the lower left and `ur` the upper right
corner of the domain.
identify_left_right
If `True`, the left and right boundaries are identified, i.e. the left-most
codim-0 entities become neighbors of the right-most codim-0 entities.
identify_bottom_top
If `True`, the bottom and top boundaries are identified, i.e. the bottom-most
codim-0 entities become neighbors of the top-most codim-0 entities.
"""
dim = 2
reference_element = triangle
def __init__(self, num_intervals=(2, 2), domain=([0, 0], [1, 1]),
identify_left_right=False, identify_bottom_top=False):
if identify_left_right:
assert num_intervals[0] > 1
if identify_bottom_top:
assert num_intervals[1] > 1
domain = np.array(domain)
self.__auto_init(locals())
self.x0_num_intervals = x0_num_intervals = num_intervals[0]
self.x1_num_intervals = x1_num_intervals = num_intervals[1]
self.x0_range = self.domain[:, 0]
self.x1_range = self.domain[:, 1]
self.x0_width = self.x0_range[1] - self.x0_range[0]
self.x1_width = self.x1_range[1] - self.x1_range[0]
self.x0_diameter = self.x0_width / x0_num_intervals
self.x1_diameter = self.x1_width / x1_num_intervals
n_elements = x0_num_intervals * x1_num_intervals * 4
# TOPOLOGY
n_outer_vertices = (x0_num_intervals + 1 - identify_left_right) * (x1_num_intervals + 1 - identify_bottom_top)
self.__sizes = (n_elements,
((x0_num_intervals + 1 - identify_left_right) * x1_num_intervals
+ (x1_num_intervals + 1 - identify_bottom_top) * x0_num_intervals
+ n_elements),
n_outer_vertices + int(n_elements / 4))
# calculate subentities -- codim-1
V_EDGE_H_INDICES = np.arange(x0_num_intervals + 1, dtype=np.int32)
if identify_left_right:
V_EDGE_H_INDICES[-1] = 0
V_EDGE_V_INDICES = np.arange(x1_num_intervals, dtype=np.int32) * (x0_num_intervals + 1 - identify_left_right)
V_EDGE_INDICES = V_EDGE_V_INDICES[:, np.newaxis] + V_EDGE_H_INDICES
num_v_edges = x1_num_intervals * (x0_num_intervals + 1 - identify_left_right)
H_EDGE_H_INDICES = np.arange(x0_num_intervals, dtype=np.int32)
H_EDGE_V_INDICES = np.arange(x1_num_intervals + 1, dtype=np.int32)
if identify_bottom_top:
H_EDGE_V_INDICES[-1] = 0
H_EDGE_V_INDICES *= x0_num_intervals
H_EDGE_INDICES = H_EDGE_V_INDICES[:, np.newaxis] + H_EDGE_H_INDICES + num_v_edges
num_h_edges = x0_num_intervals * (x1_num_intervals + 1 - identify_bottom_top)
D_EDGE_LL_INDICES = np.arange(n_elements / 4, dtype=np.int32) + (num_v_edges + num_h_edges)
D_EDGE_UR_INDICES = D_EDGE_LL_INDICES + int(n_elements / 4)
D_EDGE_UL_INDICES = D_EDGE_UR_INDICES + int(n_elements / 4)
D_EDGE_LR_INDICES = D_EDGE_UL_INDICES + int(n_elements / 4)
E0 = np.array([H_EDGE_INDICES[:-1, :].ravel(),
D_EDGE_LR_INDICES,
D_EDGE_LL_INDICES]).T
E1 = np.array([V_EDGE_INDICES[:, 1:].ravel(),
D_EDGE_UR_INDICES,
D_EDGE_LR_INDICES]).T
E2 = np.array([H_EDGE_INDICES[1:, :].ravel(),
D_EDGE_UL_INDICES,
D_EDGE_UR_INDICES]).T
E3 = np.array([V_EDGE_INDICES[:, :-1].ravel(),
D_EDGE_LL_INDICES,
D_EDGE_UL_INDICES]).T
codim1_subentities = np.vstack((E0, E1, E2, E3))
# calculate subentities -- codim-2
VERTEX_H_INDICES = np.arange(x0_num_intervals + 1, dtype=np.int32)
if identify_left_right:
VERTEX_H_INDICES[-1] = 0
VERTEX_V_INDICES = np.arange(x1_num_intervals + 1, dtype=np.int32)
if identify_bottom_top:
VERTEX_V_INDICES[-1] = 0
VERTEX_V_INDICES *= x0_num_intervals + 1 - identify_left_right
VERTEX_NUMERS = VERTEX_V_INDICES[:, np.newaxis] + VERTEX_H_INDICES
VERTEX_CENTER_NUMBERS = np.arange(x0_num_intervals * x1_num_intervals, dtype=np.int32) + n_outer_vertices
V0 = np.array([VERTEX_CENTER_NUMBERS,
VERTEX_NUMERS[:-1, :-1].ravel(),
VERTEX_NUMERS[:-1, 1:].ravel()]).T
V1 = np.array([VERTEX_CENTER_NUMBERS,
VERTEX_NUMERS[:-1, 1:].ravel(),
VERTEX_NUMERS[1:, 1:].ravel()]).T
V2 = np.array([VERTEX_CENTER_NUMBERS,
VERTEX_NUMERS[1:, 1:].ravel(),
VERTEX_NUMERS[1:, :-1].ravel()]).T
V3 = np.array([VERTEX_CENTER_NUMBERS,
VERTEX_NUMERS[1:, :-1].ravel(),
VERTEX_NUMERS[:-1, :-1].ravel()]).T
codim2_subentities = np.vstack((V0, V1, V2, V3))
self.__subentities = (codim1_subentities, codim2_subentities)
# GEOMETRY
# embeddings
x0_shifts = np.arange(x0_num_intervals) * self.x0_diameter + (self.x0_range[0] + 0.5 * self.x0_diameter)
x1_shifts = np.arange(x1_num_intervals) * self.x1_diameter + (self.x1_range[0] + 0.5 * self.x1_diameter)
B = np.tile(np.array(np.meshgrid(x0_shifts, x1_shifts)).reshape((2, -1)).T,
(4, 1))
ROT45 = np.array([[1./np.sqrt(2.), -1./np.sqrt(2.)],
[1./np.sqrt(2.), 1./np.sqrt(2.)]])
ROT135 = np.array([[-1./np.sqrt(2.), -1./np.sqrt(2.)],
[1./np.sqrt(2.), -1./np.sqrt(2.)]])
ROT225 = np.array([[-1./np.sqrt(2.), 1./np.sqrt(2.)],
[-1./np.sqrt(2.), -1./np.sqrt(2.)]])
ROT315 = np.array([[1./np.sqrt(2.), 1./np.sqrt(2.)],
[-1./np.sqrt(2.), 1./np.sqrt(2.)]])
SCAL = np.diag([self.x0_diameter / np.sqrt(2), self.x1_diameter / np.sqrt(2)])
A0 = np.tile(SCAL.dot(ROT225), (int(n_elements / 4), 1, 1))
A1 = np.tile(SCAL.dot(ROT315), (int(n_elements / 4), 1, 1))
A2 = np.tile(SCAL.dot(ROT45), (int(n_elements / 4), 1, 1))
A3 = np.tile(SCAL.dot(ROT135), (int(n_elements / 4), 1, 1))
A = np.vstack((A0, A1, A2, A3))
self.__embeddings = (A, B)
def __reduce__(self):
return (TriaGrid,
(self.num_intervals, self.domain, self.identify_left_right, self.identify_bottom_top))
def __str__(self):
return (f'Tria-Grid on domain '
f'[{self.x0_range[0]},{self.x0_range[1]}] x [{self.x1_range[0]},{self.x1_range[1]}]\n'
f'x0-intervals: {self.x0_num_intervals}, x1-intervals: {self.x1_num_intervals}\n'
f'elements: {self.size(0)}, edges: {self.size(1)}, vertices: {self.size(2)}')
def size(self, codim=0):
assert 0 <= codim <= 2, 'Invalid codimension'
return self.__sizes[codim]
def subentities(self, codim, subentity_codim):
assert 0 <= codim <= 2, 'Invalid codimension'
assert codim <= subentity_codim <= 2, 'Invalid subentity codimension'
if codim == 0:
if subentity_codim == 0:
return np.arange(self.size(0), dtype='int32')[:, np.newaxis]
else:
return self.__subentities[subentity_codim - 1]
else:
return super().subentities(codim, subentity_codim)
def embeddings(self, codim=0):
if codim == 0:
return self.__embeddings
else:
return super().embeddings(codim)
def bounding_box(self):
return np.array(self.domain)
@cached
def orthogonal_centers(self):
embeddings = self.embeddings(0)
ne4 = len(embeddings[0]) // 4
if self.x0_diameter > self.x1_diameter:
x0_fac = (self.x1_diameter / 2) ** 2 / (3 * (self.x0_diameter / 2) ** 2)
x1_fac = 1./3.
else:
x1_fac = (self.x0_diameter / 2) ** 2 / (3 * (self.x1_diameter / 2) ** 2)
x0_fac = 1./3.
C0 = embeddings[0][:ne4].dot(np.array([x1_fac, x1_fac])) + embeddings[1][:ne4]
C1 = embeddings[0][ne4:2*ne4].dot(np.array([x0_fac, x0_fac])) + embeddings[1][ne4:2*ne4]
C2 = embeddings[0][2*ne4:3*ne4].dot(np.array([x1_fac, x1_fac])) + embeddings[1][2*ne4:3*ne4]
C3 = embeddings[0][3*ne4:4*ne4].dot(np.array([x0_fac, x0_fac])) + embeddings[1][3*ne4:4*ne4]
return np.concatenate((C0, C1, C2, C3), axis=0)
def visualize(self, U, codim=2, **kwargs):
"""Visualize scalar data associated to the grid as a patch plot.
Parameters
----------
U
|NumPy array| of the data to visualize. If `U.dim == 2 and len(U) > 1`, the
data is visualized as a time series of plots. Alternatively, a tuple of
|Numpy arrays| can be provided, in which case a subplot is created for
each entry of the tuple. The lengths of all arrays have to agree.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
kwargs
See :func:`~pymor.discretizers.builtin.gui.qt.visualize_patch`
"""
from pymor.discretizers.builtin.gui.qt import visualize_patch
from pymor.vectorarrays.interface import VectorArray
from pymor.vectorarrays.numpy import NumpyVectorSpace, NumpyVectorArray
if isinstance(U, (np.ndarray, VectorArray)):
U = (U,)
assert all(isinstance(u, (np.ndarray, VectorArray)) for u in U)
U = tuple(NumpyVectorSpace.make_array(u) if isinstance(u, np.ndarray) else
u if isinstance(u, NumpyVectorArray) else
NumpyVectorSpace.make_array(u.to_numpy())
for u in U)
bounding_box = kwargs.pop('bounding_box', self.domain)
visualize_patch(self, U, codim=codim, bounding_box=bounding_box, **kwargs)
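# --- Usage sketch (not part of the original pyMOR file) ---
# Minimal construction example, assuming pyMOR and its dependencies are installed.
if __name__ == '__main__':
    g = TriaGrid(num_intervals=(2, 2))
    print(g)
    print('codim-0 entities:', g.size(0))
    print('orthogonal centers shape:', g.orthogonal_centers().shape)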
| StarcoderdataPython |
3399288 | <reponame>earrighi/Pangolab-1
class Ls350(object):
def __init__(self,inst):
self.inst = inst
def data_info(self,devicenumber):
self.data_keys = [str(devicenumber) + '_LS350_A',str(devicenumber) + '_LS350_B',str(devicenumber) + '_LS350_C',str(devicenumber) + '_LS350_D']
return(self.data_keys)
def read_info(self,devicenumber):
self.read_keys = [str(devicenumber) + '_LS350_A',str(devicenumber) + '_LS350_B',str(devicenumber) + '_LS350_C',str(devicenumber) + '_LS350_D']
return(self.read_keys)
def write_info(self,devicenumber):
self.write_keys = [str(devicenumber) + '_LS350_A',str(devicenumber) + '_LS350_C',str(devicenumber) + '_LS350_range']
return(self.write_keys)
def write_pattern(self,devicenumber,write_key):
if write_key == self.write_keys[0]:
return([('Setpoint T(K)','text',None)])
elif write_key == self.write_keys[1]:
return([('Setpoint T(K)','text',None)])
elif write_key == self.write_keys[2]:
return([('Output','choice',['A','C']),('Range','choice',['Off','1','2','3','4','5'])])
def floatHandling(self,text):
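        # Parse a user-entered number, accepting either '.' or ',' as the decimal separator;
        # returns the value as a string, or False if it cannot be parsed.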
try:
f = str(float(text))
except:
try:
f = str(float(text.replace(',','.')))
except:
f = False
return(f)
def Write(self,Key,L):
# print(Key)
# print(L)
if Key == self.write_keys[0]:
T = self.floatHandling(L[0])
if type(T) == type(''): #checking if it is a string?
self.inst.write('SETP ' + '1' + ',' + T)#default control loop is 1 for input A
#self.setp(Key,L)
elif Key == self.write_keys[1]:
T = self.floatHandling(L[0])
if type(T) == type(''):
self.inst.write('SETP ' + '2' + ',' + T)#default control loop is 2 for input B
elif Key == self.write_keys[2]:
rDict = {'Off':0,'1':1,'2':2,'3':3,'4':4,'5':5}
rDict2 = {'A':1,'C':2}
if L[0] in rDict2 and L[1] in rDict:
self.inst.write('RANGE ' + str(rDict2[L[0]])+','+str(rDict[L[1]]))
return()
def Read(self,Key):
ABinput = Key[-1]#A,B,C or D
T = float(self.inst.query('KRDG? '+ABinput))
return([(Key,T),])
| StarcoderdataPython |
3315290 | """
File Name: hello_world.py
Tells you what to eat
Usage Examples:
- "What type of food should I eat tonight"
"""
from celestai.classes.module import Module
from celestai.classes.task import ActiveTask
class SpeakPhrase(ActiveTask):
def __init__(self):
# Matches any statement with these words
super(SpeakPhrase, self).__init__(words=['eat', 'food', 'type'])
def action(self, text):
self.speak('celest')
# This is a bare-minimum module
class HelloWorld(Module):
def __init__(self):
tasks = [SpeakPhrase()]
super(HelloWorld, self).__init__('hello_world', tasks, priority=2)
| StarcoderdataPython |
1608853 | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name='x', parent_name='streamtube.starts', **kwargs
):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='data',
**kwargs
)
| StarcoderdataPython |
1704585 | <reponame>aswinchari/VAST<filename>scripts/generate_random_clusters.py
import vast.io_mesh as io
import vast.surface_tools as st
import numpy as np
import nibabel as nb
import csv
import argparse
import os
flatten = lambda l: [item for sublist in l for item in sublist]
#load in data
def main(args):
neighbours=st.get_neighbours(args.combined_surface)
lh_cluster=nb.load(args.left_clusters)
lh_cluster=np.asanyarray(lh_cluster.dataobj).ravel()
rh_cluster=nb.load(args.right_clusters)
rh_cluster=np.asanyarray(rh_cluster.dataobj).ravel()
lh_cortex=nb.freesurfer.io.read_label(os.path.join(args.subject_id,'label','lh.cortex.label'))
rh_cortex=nb.freesurfer.io.read_label(os.path.join(args.subject_id,'label','rh.cortex.label'))
lh_area=nb.freesurfer.io.read_morph_data(os.path.join(args.subject_id,'surf','lh.area'))
rh_area=nb.freesurfer.io.read_morph_data(os.path.join(args.subject_id,'surf','rh.area'))
#get cortical vertices
rh_cortex=rh_cortex+len(lh_cluster)
cortical_vertices = np.hstack((lh_cortex,rh_cortex))
#get clusters (add max left )
clusters= np.hstack((lh_cluster, rh_cluster+np.max(lh_cluster)))
cluster_indices=np.unique(np.round(clusters))
areas = np.hstack((lh_area,rh_area))
n_vertices=len(neighbours)
for cluster_index in cluster_indices:
n_clusters=100
cluster_area=np.sum(areas[clusters==cluster_index])
for cluster in np.arange(n_clusters):
if cluster==0:
# random_cluster_matrix[cluster,clusters==cluster_index]=1
random_cluster_lists=[list(np.where(clusters==cluster_index)[0])]
else:
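                # Grow a random surface patch from a random cortical seed vertex until its
                # summed area matches the area of the real cluster.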
seed_vertex = np.random.choice(cortical_vertices)
random_cluster_area=0
old_cluster=neighbours[seed_vertex]
while cluster_area > random_cluster_area:
new_cluster = st.f7(flatten(neighbours[old_cluster]))
random_cluster_area = np.sum(areas[new_cluster])
old_cluster=new_cluster
random_cluster_lists.append(new_cluster)
        with open(os.path.join(args.subject_id,'surf','random_clusters_{}.csv'.format(cluster_index)),'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(random_cluster_lists)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Write cluster files to outputs')
parser.add_argument('--subject-id', dest='subject_id', type=str, help='Subject id for loading files')
parser.add_argument('--combined-surface', dest='combined_surface', type=str, help='Combined mesh surface')
parser.add_argument('--left-clusters', dest='left_clusters', type=str, help='Left clusters mgh file')
parser.add_argument('--right-clusters', dest='right_clusters', type=str, help='Left clusters mgh file')
args = parser.parse_args()
main(args) | StarcoderdataPython |
1759465 | import errno
import socket
import time
from microio import *
__all__ = ('Stream', 'connect', 'listen', 'serve', 'spawn')
class Stream:
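    """Non-blocking socket wrapper for the microio scheduler.

    Each I/O method is a generator: it yields ``(socket, event_mask)`` pairs to wait
    for readiness, yields ``(socket, None)`` to unregister the socket, and the read
    methods deliver their result by raising ``Return``.
    """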
def __init__(self, sock, read_size=65536):
sock.setblocking(False)
self.sock = sock
self.buffer = b''
self.read_size = read_size
def close(self):
self.sock.close()
def read_bytes(self, n, partial=False):
try:
while len(self.buffer) < n:
err = yield self.sock, POLLREAD | POLLERROR
if err & POLLERROR:
raise IOError()
data = self.sock.recv(self.read_size)
if not data:
raise IOError('Connection closed')
self.buffer += data
if partial:
break
yield self.sock, None
except IOError:
yield self.sock, None
buffer = self.buffer[:n]
self.buffer = self.buffer[n:]
raise Return(buffer)
def read_until(self, pat, n=65536):
try:
while pat not in self.buffer and len(self.buffer) < n:
err = yield self.sock, POLLREAD | POLLERROR
if err & POLLERROR:
raise IOError()
data = self.sock.recv(self.read_size)
if not data:
raise IOError('Connection closed')
self.buffer += data
yield self.sock, None
except IOError:
yield self.sock, None
if pat not in self.buffer:
raise IOError('Buffer limit exceeded')
n = self.buffer.find(pat) + len(pat)
buffer = self.buffer[:n]
self.buffer = self.buffer[n:]
raise Return(buffer)
def write(self, data):
try:
while data:
err = yield self.sock, POLLWRITE | POLLERROR
if err & POLLERROR:
raise IOError()
sent = self.sock.send(data)
if not sent:
raise IOError('Connection closed')
data = data[sent:]
yield self.sock, None
except IOError:
yield self.sock, None
def connect(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
ret = sock.connect_ex(address)
if ret in (errno.EWOULDBLOCK, errno.EINPROGRESS, errno.EAGAIN):
err = yield sock, POLLWRITE | POLLERROR
yield sock, None
if err & POLLERROR:
raise IOError()
ret = sock.connect_ex(address)
if ret not in (0, errno.EISCONN):
raise IOError()
if ret == errno.ECONNREFUSED:
raise IOError()
raise Return(sock)
def listen(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.setblocking(False)
sock.listen(128)
return sock
def serve(sock, handler):
try:
while True:
err = yield sock, POLLREAD | POLLERROR
if err & POLLERROR:
raise IOError()
csock, addr = sock.accept()
stream = Stream(csock)
yield spawn(handler(stream, addr))
except IOError:
yield sock, None
def spawn(task):
def _spawn_genfunc():
yield task
return _spawn_genfunc
| StarcoderdataPython |
97512 | <reponame>SNU-Blockchain-2021-Fall-Group-H/aries-cloudagent-python
"""Test resolver routes."""
# pylint: disable=redefined-outer-name
import pytest
from asynctest import mock as async_mock
from pydid import DIDDocument
from ...admin.request_context import AdminRequestContext
from .. import routes as test_module
from ..base import (
DIDMethodNotSupported,
DIDNotFound,
ResolutionMetadata,
ResolutionResult,
ResolverError,
ResolverType,
)
from ..did_resolver import DIDResolver
from . import DOC
@pytest.fixture
def did_doc():
yield DIDDocument.deserialize(DOC)
@pytest.fixture
def resolution_result(did_doc):
metadata = ResolutionMetadata(
resolver_type=ResolverType.NATIVE,
resolver="mock_resolver",
retrieved_time="some time",
duration=10,
)
yield ResolutionResult(did_doc, metadata)
@pytest.fixture
def mock_response():
json_response = async_mock.MagicMock()
temp_value = test_module.web.json_response
test_module.web.json_response = json_response
yield json_response
test_module.web.json_response = temp_value
@pytest.fixture
def mock_resolver(resolution_result):
did_resolver = async_mock.MagicMock()
did_resolver.resolve = async_mock.CoroutineMock(return_value=did_doc)
did_resolver.resolve_with_metadata = async_mock.CoroutineMock(
return_value=resolution_result
)
yield did_resolver
@pytest.fixture
def mock_request(mock_resolver):
context = AdminRequestContext.test_context({DIDResolver: mock_resolver})
outbound_message_router = async_mock.CoroutineMock()
request_dict = {
"context": context,
"outbound_message_router": outbound_message_router,
}
request = async_mock.MagicMock(
match_info={
"did": "did:ethr:mainnet:0xb9c5714089478a327f09197987f16f9e5d936e8a",
},
query={},
json=async_mock.CoroutineMock(return_value={}),
__getitem__=lambda _, k: request_dict[k],
)
yield request
@pytest.mark.asyncio
async def test_resolver(mock_request, mock_response: async_mock.MagicMock, did_doc):
await test_module.resolve_did(mock_request)
mock_response.call_args[0][0] == did_doc.serialize()
# TODO: test http response codes
@pytest.mark.asyncio
@pytest.mark.parametrize(
"side_effect, error",
[
(DIDNotFound, test_module.web.HTTPNotFound),
(DIDMethodNotSupported, test_module.web.HTTPNotImplemented),
(ResolverError, test_module.web.HTTPInternalServerError),
],
)
async def test_resolver_not_found_error(
mock_resolver, mock_request, side_effect, error
):
mock_resolver.resolve_with_metadata = async_mock.CoroutineMock(
side_effect=side_effect()
)
with pytest.raises(error):
await test_module.resolve_did(mock_request)
@pytest.mark.asyncio
async def test_register():
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
@pytest.mark.asyncio
async def test_post_process_routes():
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
| StarcoderdataPython |
61639 | #!/usr/bin/env python
# coding=utf-8
'''
@Description:
@Author: Xuannan
@Date: 2019-12-15 22:25:14
@LastEditTime: 2020-03-03 16:16:55
@LastEditors: Xuannan
'''
from flask_restful import Resource,reqparse,fields,marshal,abort,inputs
from app.models import Admin,AdminLog,AdminRole, Crud
from app.apis.api_constant import *
from .common import get_admin,login_required,logout,permission_required
import uuid,datetime,json
from app.ext import cache
from flask import g,request
from app.utils import object_to_json,mysql_to_json
from app.apis.common.auth import Auth
from app.config import PAGINATE_NUM
from app.utils.api_doc import Apidoc
from app.api_docs.admin import admin_doc
api = Apidoc('系统-系统管理员')
# Single-record operations (lookup by id)
parse_id = reqparse.RequestParser()
parse_id.add_argument('id',type=str)
# Create
parse_register = reqparse.RequestParser()
parse_register.add_argument('username',type=str,required=True,help='请输入用户名')
parse_register.add_argument('password',type=str,required=True,help='请输入密码')
parse_register.add_argument('email',type=str,required=True,help='请输入邮箱地址')
parse_register.add_argument('name',type=str,required=True,help='请输入姓名')
parse_register.add_argument('phone',type=inputs.regex(r'1[35789]\d{9}'),required=True,help='手机号码错误')
# Role assignment
parse_role = parse_id.copy()
parse_role.add_argument('roles',type=str,required=True,help='请设置角色')
# Login
parse_login = reqparse.RequestParser()
parse_login.add_argument('username',type=str,required=True,help='请输入用户名!')
parse_login.add_argument('password',type=str,required=True,help='请输入密码!')
parse_login.add_argument('captcha')
parse_login.add_argument('image_code')
# Change password
parse_change_pwd = reqparse.RequestParser()
parse_change_pwd.add_argument('password',type=str,required=True,help='请输入原密码!')
parse_change_pwd.add_argument('new_password',type=str,required=True,help='请输入新密码!')
# Update basic profile information
parse_info = reqparse.RequestParser()
parse_info.add_argument('email',type=str,required=True,help='请输入邮箱地址')
parse_info.add_argument('name',type=str,required=True,help='请输入姓名')
parse_info.add_argument('phone',type=inputs.regex(r'1[35789]\d{9}'),required=True,help='手机号码错误')
# Pagination
parse_page = reqparse.RequestParser()
parse_page.add_argument('page',type=int,help='页码只能是数字')
parse_page.add_argument('paginate',type=int,help='每页数量只能是数字')
user_fields = {
'username':fields.String,
'name':fields.String,
'email':fields.String,
'phone':fields.String
}
sing_user_fields = {
'status':fields.Integer,
'msg':fields.String,
'data':fields.Nested(user_fields)
}
class AdminCurrent(Resource):
    # Current user information
@api.doc(api_doc=admin_doc.get_admin)
@login_required
def get(self):
'''
        Get the information of the currently logged-in user
'''
if g.admin:
data = {
'status':RET.OK,
'data':object_to_json(g.admin)
}
return data
abort(RET.BadRequest,msg='请勿非法操作')
    # Change password
@api.doc(api_doc=admin_doc.change_pwd)
@login_required
def put(self):
args = parse_change_pwd.parse_args()
password = args.get('password')
new_password = args.get('new_password')
admin = g.admin
if (not admin.check_pwd(password)) or admin.is_del != '0':
abort(RET.Unauthorized,msg='密码错误')
        admin.password = new_password
admin.last_editor = g.admin.username
if admin.updata():
logout()
return {
'status':RET.REENTRY,
'msg':'密码修改成功,请重新登录',
}
abort(RET.BadRequest,msg='修改密码失败')
    # Update user information
@api.doc(api_doc=admin_doc.change_info)
@login_required
def post(self):
'''
        Update user information
'''
args = parse_info.parse_args()
name = args.get('name')
email = args.get('email')
phone = args.get('phone')
admin = g.admin
admin.name = name
admin.email = email
admin.phone = phone
admin.last_editor = g.admin.username
if admin.updata():
return {
'status':RET.OK,
'msg':'修改成功',
}
abort(RET.BadRequest,msg='修改失败')
# Assign roles to an administrator
class AdminAuth(Resource):
@api.doc(api_doc=admin_doc.roles)
@login_required
@permission_required
def post(self):
args_role = parse_role.parse_args()
id = args_role.get('id')
roles = json.loads(args_role.get('roles'))
admin = get_admin(id)
        # Clear the existing roles
old_data = AdminRole.query.filter_by(admin_id = admin.id ).all()
if old_data :
Crud.clean_all(old_data)
        # If roles were supplied, create the new assignments
if roles:
admin_roles = [AdminRole(
admin_id = admin.id,
role_id =v
) for v in roles ]
Crud.add_all(admin_roles)
admin.last_editor = g.admin.username
admin.updata()
return {
'status':RET.OK,
'msg':'角色设置成功'
}
class AdminResource(Resource):
@api.doc(api_doc=admin_doc.admin_list)
@login_required
@permission_required
def get(self):
'''
        Get user information
'''
args_id = parse_id.parse_args()
id = args_id.get('id')
if id:
admin = get_admin(id)
data = {
'status':RET.OK,
'data':object_to_json(admin)
}
return data
args = parse_page.parse_args()
page = 1
paginate = PAGINATE_NUM
if args.get('page'):
page = int(args.get('page'))
if args.get('paginate'):
paginate = int(args.get('paginate'))
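        # Single query: collect each admin's roles via GROUP_CONCAT and the most recent
        # login record, page with LIMIT, and use SQL_CALC_FOUND_ROWS so the total row
        # count can be read back alongside the page.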
sql = '''
SELECT SQL_CALC_FOUND_ROWS a.id,a.username,a.name,a.phone,a.email,
GROUP_CONCAT(r.name SEPARATOR ',') as roles_name,
GROUP_CONCAT(ar.role_id SEPARATOR ',') as roles,
l.create_time as last_login,
l.ip as ip
FROM admin as a
left join admin_role as ar on a.id = ar.admin_id
left join role as r on r.id = ar.role_id
left join admin_log as l on l.username = a.username and l.create_time=(select max(create_time) from admin_log where username=a.username)
WHERE a.is_del = 0
GROUP BY a.id
LIMIT {0},{1};
'''.format((page-1)*paginate,paginate)
sql_data,count = Crud.auto_select(sql,count=True)
if not sql_data:
abort(RET.NotFound,msg='暂无数据')
fetchall_data = sql_data.fetchall()
data = {
'status':RET.OK,
'paginate':{
'page':page,
'per_page':paginate,
'total':count
},
'data':([mysql_to_json(dict(v)) for v in fetchall_data])
}
return data
@api.doc(api_doc=admin_doc.admin_add)
@login_required
@permission_required
def post(self):
'''
        Add a user
'''
args_register = parse_register.parse_args()
password = args_register.get('password')
username = args_register.get('username').lower()
name = args_register.get('name')
email = args_register.get('email')
phone = args_register.get('phone')
has_admin = Admin.query.filter_by(username = username,is_del='0').first()
if has_admin:
abort(RET.Forbidden,msg='管理员已存在')
admin = Admin()
admin.username = username
admin.password = password
admin.name = name
admin.email = email
admin.phone = phone
admin.last_editor = g.admin.username
if admin.add():
data = {
'status':RET.Created,
'msg':'新增管理员成功',
'data':admin
}
return marshal(data,sing_user_fields)
abort(RET.BadRequest,msg='新增失败')
    # Reset password
@api.doc(api_doc=admin_doc.reset_pwd)
@login_required
@permission_required
def put(self):
'''
        Reset password
'''
args_id = parse_id.parse_args()
id = args_id.get('id')
if not id:
abort(RET.BadRequest,msg='请勿非法操作')
admin = get_admin(id)
if admin.is_super == 1:
abort(RET.BadRequest,msg='重置失败,超级管理员的密码不能重置!!!')
admin.password = '<PASSWORD>'
if admin.updata():
            # Clear the user's login state
cache.delete(admin.id)
return {
'status':RET.OK,
'msg':'重置密码成功',
}
abort(RET.BadRequest,msg='重置密码失败')
@api.doc(api_doc=admin_doc.del_admin)
@login_required
@permission_required
def delete(self):
'''
        Delete a user
'''
args_id = parse_id.parse_args()
id = args_id.get('id')
if not id:
abort(RET.BadRequest,msg='请勿非法操作')
admin = get_admin(id)
if not admin:
abort(RET.BadRequest,msg='用户不存在!!!')
if admin.is_super == 1:
abort(RET.BadRequest,msg='删除失败,无法删除超级管理员!!!')
admin.last_editor = g.admin.username
result = admin.delete()
if result:
            # Clear the user's login state
cache.delete(admin.id)
return {
'status':RET.OK,
'msg':'删除成功'
}
abort(RET.BadRequest,msg='删除失败,请重试')
class AdminLogin(Resource):
@api.doc(api_doc=admin_doc.login)
def post(self):
'''
        Log in
'''
args_login = parse_login.parse_args()
password = args_login.get('password')
username = args_login.get('username').lower()
captcha = args_login.get('captcha')
text = cache.get('image_code_%s'%args_login.get('image_code'))
if not text:
abort(RET.Forbidden,msg='验证码错误')
if captcha.lower() != text.lower():
abort(RET.Forbidden,msg='验证码错误')
cache.delete('image_code_%s'%args_login.get('image_code'))
admin = Admin.query.filter_by(username = username,is_del='0').first()
if not admin:
abort(RET.BadRequest,msg='用户名或密码错误')
if not admin.check_pwd(password):
abort(RET.Unauthorized,msg='用户名或密码错误')
token = Auth.encode_auth_token(admin.id)
cache.set(admin.id,token,timeout=60*60*8)
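        # Keep the token server-side for 8 hours; logging out or resetting the password
        # deletes this cache entry, which invalidates the session.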
        # Record a login log entry
admin_log = AdminLog()
admin_log.username = admin.username
admin_log.ip = request.remote_addr
admin_log.add()
data = {
'status':RET.OK,
'msg':'登录成功',
'token':token
}
return data
@api.doc(api_doc=admin_doc.logout)
@login_required
def delete(self):
'''
        Log out
'''
# admin = g.admin
# cache.delete(admin.id)
logout()
abort(RET.BadRequest,msg='已退出',status=RET.REENTRY) | StarcoderdataPython |
133482 | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
import pychrono.sensor as sens
print ("Example: create a system and visualize it in realtime 3D");
# Change this path to asset path, if running from other working dir.
# It must point to the data folder, containing GUI assets (textures, fonts, meshes, etc.)
chrono.SetChronoDataPath("../../../data/")
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
sys = chrono.ChSystemNSC()
# Create a fixed rigid body
mbody1 = chrono.ChBody()
mbody1.SetBodyFixed(True)
mbody1.SetPos( chrono.ChVectorD(0,0,-0.2))
sys.Add(mbody1)
mboxasset = chrono.ChBoxShape()
mboxasset.GetBoxGeometry().Size = chrono.ChVectorD(0.2,0.5,0.1)
mbody1.AddVisualShape(mboxasset)
# Create a swinging rigid body
mbody2 = chrono.ChBody()
mbody2.SetBodyFixed(False)
sys.Add(mbody2)
mboxasset = chrono.ChBoxShape()
mboxasset.GetBoxGeometry().Size = chrono.ChVectorD(0.2,0.5,0.1)
mboxasset.SetTexture('../../../data/textures/concrete.jpg')
mbody2.AddVisualShape(mboxasset)
# Create a revolute constraint
mlink = chrono.ChLinkRevolute()
# the coordinate system of the constraint reference in abs. space:
mframe = chrono.ChFrameD(chrono.ChVectorD(0.1,0.5,0))
# initialize the constraint telling which part must be connected, and where:
mlink.Initialize(mbody1,mbody2, mframe)
sys.Add(mlink)
# Create an Irrlicht application to visualize the system
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('PyChrono example')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(0.6,0.6,0.8))
vis.AddTypicalLights()
# ---------------------------------------------------------------------
#
# Run the simulation
#
manager = sens.ChSensorManager(sys)
imu = sens.ChIMUSensor(mbody2, 2, chrono.ChFrameD(chrono.ChVectorD(0,0,0)))
imu.SetName("IMU")
SHIB = sens.SensorHostIMUBuffer()
fl = imu.FilterList()
print(type(fl))
FiA = sens.ChFilterIMUAccess()
print(type(FiA))
#imu->PushFilter(chrono_types::make_shared<ChFilterIMUAccess>())
imu.FilterList().append(FiA)
manager.AddSensor(imu)
#mybuf = rec_buf.GetData()
#print(type(mybuf))
#my_filacc = imu.FilterList()[0]
#print(type(my_filacc))
while vis.Run():
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
sys.DoStepDynamics(5e-3)
manager.Update()
#rec_buf = imu.GetMostRecentIMUBuffer()
fil0 = fl[0]
#print(type(fil0))
fil = imu.GetFilterIMU()
#print(type(fil))
SHbuf = fil.GetBuffer()
#print(type(SHbuf))
buf = SHbuf.GetData(0)
#print(type(buf))
print(buf.Yaw)
del SHbuf
#del buf
#print(type(rec_buf))
#mybuf = rec_buf.GetData()
#myacc = rec_buf.GetAcc0()
#print(type(mybuf))
#mybuf = rec_buf.GetData()
| StarcoderdataPython |
4830080 | '''
practice qusestion from chapter 1 Module 5 of IBM Digital Nation Courses
by <NAME>/<NAME>
'''
#testing variables
x = 25
print (x)
x = 30
print (x)
#end of the Program | StarcoderdataPython |
3286757 | <gh_stars>1-10
import aiocqhttp
import nonebot
import config
bot = nonebot.get_bot()
group_stat = {}
@nonebot.on_websocket_connect
async def _(event: aiocqhttp.Event):
await bot.send_private_msg(user_id = config.SUPERUSERS, message = '软白上线惹≥w≤')
@bot.on_message()
async def grouprepeat(event):
if event['message_type'] != 'group':
return
group_id = str(event['group_id'])
msg = event['message']
if not group_stat.get(group_id) or msg != group_stat.get(group_id)[0]:
group_stat[group_id] = [msg, 0]
group_stat[group_id][1] += 1
if group_stat[group_id][1] == config.REPEAT_TIME:
await bot.send(event, msg) | StarcoderdataPython |
1793959 | from django.contrib import admin
from .models import Newsletter
# Register your models here.
class NewsletterAdmin(admin.ModelAdmin):
list_display = ('first_name', 'email',)
admin.site.register(Newsletter, NewsletterAdmin) | StarcoderdataPython |
3218115 | <reponame>ligmitz/zulip<gh_stars>1-10
import json
import re
from typing import Callable, Iterator, List, Optional, Union
import scrapy
from scrapy.http import Request, Response
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.utils.url import url_has_any_extension
from twisted.python.failure import Failure
EXCLUDED_URLS = [
# Google calendar returns 404s on HEAD requests unconditionally
'https://calendar.google.com/calendar/embed?src=<EMAIL>',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.org/',
# Returns 404 to HEAD requests unconditionally
'https://www.git-tower.com/blog/command-line-cheat-sheet/',
'https://marketplace.visualstudio.com/items?itemName=rafaelmaiolla.remote-vscode',
# Requires authentication
'https://circleci.com/gh/zulip/zulip/tree/master',
'https://circleci.com/gh/zulip/zulip/16617',
'https://www.linkedin.com/company/zulip-project',
# Returns 403 errors to HEAD requests
'https://giphy.com',
'https://giphy.com/apps/giphycapture',
'https://www.udemy.com/course/the-complete-react-native-and-redux-course/',
]
VNU_IGNORE = [
# Real errors that should be fixed.
r'Duplicate ID “[^”]*”\.',
r'The first occurrence of ID “[^”]*” was here\.',
r'Attribute “markdown” not allowed on element “div” at this point\.',
r'No “p” element in scope but a “p” end tag seen\.',
r'Element “div” not allowed as child of element “ul” in this context\. '
+ r'\(Suppressing further errors from this subtree\.\)',
# Warnings that are probably less important.
r'The “type” attribute is unnecessary for JavaScript resources\.',
]
VNU_IGNORE_REGEX = re.compile(r'|'.join(VNU_IGNORE))
class BaseDocumentationSpider(scrapy.Spider):
name: Optional[str] = None
# Exclude domain address.
deny_domains: List[str] = []
start_urls: List[str] = []
deny: List[str] = []
file_extensions: List[str] = ['.' + ext for ext in IGNORED_EXTENSIONS]
tags = ('a', 'area', 'img')
attrs = ('href', 'src')
def _has_extension(self, url: str) -> bool:
return url_has_any_extension(url, self.file_extensions)
def _is_external_url(self, url: str) -> bool:
return url.startswith('http') or self._has_extension(url)
def check_existing(self, response: Response) -> None:
self.log(response)
def _is_external_link(self, url: str) -> bool:
if "zulip.readthedocs" in url or "zulip.com" in url or "zulip.org" in url:
# We want CI to check any links to Zulip sites.
return False
if (len(url) > 4 and url[:4] == "file") or ("localhost" in url):
# We also want CI to check any links to built documentation.
return False
if 'github.com/zulip' in url:
# We want to check these links but due to rate limiting from GitHub, these checks often
# fail in the CI. Thus, we should treat these as external links for now.
# TODO: Figure out how to test github.com/zulip links in CI.
return True
return True
def check_fragment(self, response: Response) -> None:
self.log(response)
xpath_template = "//*[@id='{fragment}' or @name='{fragment}']"
m = re.match(r".+\#(?P<fragment>.*)$", response.request.url) # Get fragment value.
if not m:
return
fragment = m.group('fragment')
# Check fragment existing on response page.
if not response.selector.xpath(xpath_template.format(fragment=fragment)):
self.logger.error(
"Fragment #%s is not found on page %s", fragment, response.request.url)
def _vnu_callback(self, url: str) -> Callable[[Response], None]:
def callback(response: Response) -> None:
vnu_out = json.loads(response.text)
for message in vnu_out['messages']:
if not VNU_IGNORE_REGEX.fullmatch(message['message']):
self.logger.error(
'"%s":%d.%d-%d.%d: %s: %s',
url,
message.get('firstLine', message['lastLine']),
message.get('firstColumn', message['lastColumn']),
message['lastLine'],
message['lastColumn'],
message['type'],
message['message'],
)
return callback
def _make_requests(self, url: str) -> Iterator[Request]:
# These URLs are for Zulip's webapp, which with recent changes
# can be accessible without login an account. While we do
# crawl documentation served by the webapp (E.g. /help/), we
# don't want to crawl the webapp itself, so we exclude these.
if url in ['http://localhost:9981/', 'http://localhost:9981'] or url.startswith('http://localhost:9981/#') or url.startswith('http://localhost:9981#'):
return
callback: Callable[[Response], Optional[Iterator[Request]]] = self.parse
dont_filter = False
method = 'GET'
if self._is_external_url(url):
callback = self.check_existing
method = 'HEAD'
elif '#' in url:
dont_filter = True
callback = self.check_fragment
if getattr(self, 'skip_external', False) and self._is_external_link(url):
return
yield Request(url, method=method, callback=callback, dont_filter=dont_filter,
errback=self.error_callback)
def start_requests(self) -> Iterator[Request]:
for url in self.start_urls:
yield from self._make_requests(url)
def parse(self, response: Response) -> Iterator[Request]:
self.log(response)
if getattr(self, 'validate_html', False):
yield Request(
'http://127.0.0.1:9988/?out=json',
method='POST',
headers={'Content-Type': response.headers['Content-Type']},
body=response.body,
callback=self._vnu_callback(response.url),
errback=self.error_callback,
)
for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
tags=self.tags, attrs=self.attrs, deny=self.deny,
canonicalize=False).extract_links(response):
yield from self._make_requests(link.url)
def retry_request_with_get(self, request: Request) -> Iterator[Request]:
request.method = 'GET'
request.dont_filter = True
yield request
def exclude_error(self, url: str) -> bool:
return url in EXCLUDED_URLS
def error_callback(self, failure: Failure) -> Optional[Union[Failure, Iterator[Request]]]:
if failure.check(HttpError):
response = failure.value.response
if self.exclude_error(response.url):
return None
if response.status == 405 and response.request.method == 'HEAD':
# Method 'HEAD' not allowed, repeat request with 'GET'
return self.retry_request_with_get(response.request)
self.logger.error("Please check link: %s", response.request.url)
return failure
| StarcoderdataPython |
1673652 | <reponame>DhellionFena/flask-CatsAPI<gh_stars>0
from sql_alchemy import db
class UserModel(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String(40), nullable=False)
sobrenome = db.Column(db.String(120), nullable=False)
email = db.Column(db.String(80), nullable=False)
senha = db.Column(db.String(40), nullable=False)
nascimento = db.Column(db.DateTime, nullable=False)
telefone = db.Column(db.String(15), nullable=False)
pets = db.relationship('PetModel', back_populates="user")
def __init__(self, nome, sobrenome, email, senha, nascimento, telefone):
self.nome = nome
self.sobrenome = sobrenome
self.email = email
        self.senha = senha  # TODO: still needs to be hashed before storing
self.nascimento = nascimento
self.telefone = telefone
def __repr__(self):
return f"User(nome = {nome})"
def json(self):
data = {
'id': self.id,
'nome': self.nome,
'sobrenome': self.sobrenome,
'email': self.email,
'nascimento': str(self.nascimento),
'telefone': self.telefone
}
return data
def save_user(self):
db.session.add(self)
db.session.commit()
def delete_user(self):
db.session.delete(self)
db.session.commit()
@classmethod
def find_by_email(cls, email):
user = cls.query.filter_by(email=email).first()
if user:
return user
return None | StarcoderdataPython |
38076 | import json
import os
import pymongo
'''
fileService.py
Author: <NAME>
'''
mongo_client = pymongo.MongoClient()
#db = {}
'''
initialize
Takes a 'unique_id'entifier and sets up a database in MongoDB
and ensures that that database has collections associated with
the various file types that are stored.
'''
def initialize(unique_id='default'):
#global db
#if unique_id is None:
# unique_id = 'default';
db = mongo_client[unique_id]
if(not 'ontology' in db.collection_names()):
db.create_collection('ontology')
if(not 'abstraction' in db.collection_names()):
db.create_collection('abstraction')
if(not 'commands' in db.collection_names()):
db.create_collection('commands')
if(not 'linkograph' in db.collection_names()):
db.create_collection('linkograph')
return db
'''
FileNotFound
Custom exception class for reporting a file not found exception.
Value should be the name of the file as a string.
'''
class FileNotFound(Exception):
def __init__(self, value):
self.value=value
def __str__(self):
return "File "+self.value+" not found!"
'''
FileTypeNotFound
Custom exception class for reporting a file type not found.
Value should be the name of the file type as a string.
'''
class FileTypeNotFound(Exception):
def __init__(self,value):
self.value=value
def __str__(self):
return "File type "+self.value+" not found!"
'''
FileTypeMismatch
Custom exception class for reporting a conflict in a type given
by the user and a type found by the type detection system.
Given and found should both be the file types as strings.
'''
class FileTypeMismatch(Exception):
def __init__(self,given,found):
self.given = given
self.found = found
def __str__(self):
return "Given "+self.given+", but found "+self.found
'''
loadFile
Looks for a fileName of fileType. Both arguments are strings.
Upon success returns the file, throws exceptions when either
the type or name is not found.
'''
def loadFile(fileName,fileType,unique_id='default'):
db=initialize(unique_id)
if (not fileType in db.collection_names()):
raise FileTypeNotFound(fileType)
if (None==db[fileType].find_one({'name':fileName})):
raise FileNotFound(fileName)
result = db[fileType].find_one({'name':fileName})
result.pop('_id',None) #Removes the MongoDB object id
return result #What will usually be returned is a dictionary
#of the type {'name':[...],'content':[....]}.
#The caller will be responsible for handling
#this format.
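# Illustrative call (hypothetical names, assuming an 'ontology' record was saved earlier):
#   loadFile('designSession', 'ontology', 'session42')
#   -> {'name': 'designSession', 'content': '{"actions": ["look", "move"]}'}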
'''
fileList
Returns a list of all of the file names for files of
type fileType. Argument is a string. Throws error when
fileType is not found.
'''
def fileList(fileType,unique_id='default'):
db=initialize(unique_id)
if (not fileType in db.collection_names()):
raise FileTypeNotFound(fileType)
results = []
for record in db[fileType].find():
if 'name' in record:
results.append(record['name'])
return results
'''
saveLinko
Helper function for saving a linkograph.
All arguments are strings. Throws an error if the commandsName
file cannot be found.
'''
def saveLinko(fileName,fileContent,commandsName,unique_id):
try:
db=initialize(unique_id)
loadFile(commandsName,'commands',unique_id)
toSave = {}
toSave['content'] = fileContent
toSave['commands'] = commandsName
toSave['name']=fileName
db['linkograph'].insert_one(toSave)
return "File " +fileName + " is saved as type linkograph"
except:
        raise FileNotFound(commandsName)
'''
saveFile
Takes a file and the content stored in it and saves it in the file store.
If the fileType is unknown or there is a mismatch, an exception is thrown.
If fileType isn't given, the system will try and detect the file type.
Stores it in the mongo database in the format of {'name':fileName,'content':
fileContent}, except in the case of a linkograph, in which case the commandsName is stored along with it under the key 'commands'.
'''
def saveFile(fileName,fileContent,fileType=None,commandsName=None,unique_id='default'):
db=initialize(unique_id)
if fileType==None:
fileType=detectFiletype(fileContent)
else:
if not fileType == detectFiletype(fileContent):
raise FileTypeMismatch(fileType,detectFiletype(fileContent))
if fileType == "Unknown file":
raise FileTypeNotFound(fileType)
if fileType == "linkograph":
if commandsName==None:
raise FileNotFound("commands file")
return saveLinko(fileName,fileContent,commandsName,unique_id)
if fileType in db.collection_names():
if not None==db[fileType].find_one({'name':fileName}):
if fileContent==db[fileType].find_one({'name':fileName})['content']:
return "We already have "+fileName
else:
fileName=fileName+"new"
return saveFile(fileName,fileContent,fileType,unique_id=unique_id)
else:
toSave = {}
toSave['name'] = fileName
toSave['content'] = fileContent
db[fileType].insert_one(toSave)
return "File "+fileName+" saved as type "+fileType
raise FileTypeNotFound(fileType)
'''
detectFiletype
Function which takes the contents of a file and tries to detect what sort
of file it is. Currently has support for detecting commands, abstraction
and ontology files.
'''
def detectFiletype(fileContent):
try:
file_parsed = json.loads(fileContent)
if (type(file_parsed) is list):
if(type(file_parsed[0]) is dict):
if("ts" in file_parsed[0] and "cmd" in file_parsed[0]):
return "commands"
else:
return "Unknown file"
if(type(file_parsed[0]) is list):
if(len(file_parsed[0])==0):
return "Unknown file"
for label in file_parsed[0]:
if not type(label) is str:
return "Unknown file"
for tupl in file_parsed[1:]:
if not type(tupl) is list:
return "Unknown file"
if not len(tupl)==3:
return "Unknown file"
return "linkograph"
return "Unknown file"
elif (type(file_parsed) is dict):
if(len(file_parsed.keys())==0):
return "Unknown file"
longest_entry = []
for key in file_parsed:
if not type(file_parsed[key]) is list:
return "Unknown file"
if len(file_parsed[key])>len(longest_entry):
longest_entry=file_parsed[key]
if len(longest_entry)==0:
return "Unknown file"
if type(longest_entry[0]) is str:
return "ontology"
if type(longest_entry[0]) is dict:
if "command" in longest_entry[0]:
return "abstraction"
return "Unknown file"
return "Unknown file"
except:
return "Unknown file"
#initialize()
| StarcoderdataPython |
168647 | import pygame
import time
import datetime
import os
import Colors
import SwitchState
import random
from photoslideshow import Photoslideshow
class Photobox():
def __init__(self, windowsize, photofolder, camera, switch, cheesepicfolder):
pygame.init()
self.windowsize = windowsize
self.photofolder = photofolder
self.screen = pygame.display.set_mode(
self.windowsize, pygame.FULLSCREEN)
self.clock = pygame.time.Clock()
self.camera = camera
self.switch = switch
self.countdowntime = 3
self.estimatedtriggertime = 2
self.lastphototaken = datetime.datetime.now()
self.cheesepicfolder = cheesepicfolder
pygame.mouse.set_visible(0)
def start(self):
slideshow = Photoslideshow(self)
self.slideshow = slideshow
while True:
# don't crash the program if an error happens
try:
self.clearscreen()
slideshow.shownextphoto()
self.handleevents()
except Exception as e:
                print(type(e))
                print(e.args)
                print(e)
pass
self.exit_if_needed()
def exit_if_needed(self):
events = pygame.event.get()
if len(events) == 0:
return
event = events[0]
if event.type == pygame.KEYDOWN:
key = event.key
if key == pygame.K_ESCAPE:
exit()
if key == pygame.K_RETURN:
self.lastphototaken = datetime.datetime.now()
self.takenewphoto()
self.slideshow.reset_timer()
def showphoto(self, path):
self.clearscreen()
photo = pygame.image.load(path)
photo = pygame.transform.scale(photo, self.windowsize)
self.screen.blit(photo, (0, 0))
self.updatescreen()
def updatescreen(self):
pygame.display.flip()
def clearscreen(self):
self.screen.fill((0, 0, 0))
def showtext(self, text, fontsize):
self.clearscreen()
font = pygame.font.Font(None, fontsize)
fontWidth = font.size(text)[0]
fontHeight = font.size(text)[1]
midX = self.windowsize[0] / 2 - fontWidth / 2
midY = self.windowsize[1] / 2 - fontHeight / 2
screentext = font.render(text, True, Colors.randomcolor_rgb())
self.screen.blit(screentext, (midX, midY))
self.updatescreen()
def handleevents(self):
switchstate = self.switch.get_switch_state()
if switchstate == SwitchState.SHUTDOWN:
os.system("sudo shutdown now -h")
exit(0)
if switchstate == SwitchState.EXIT:
exit(0)
if self.newPhotoAllowed():
if switchstate == SwitchState.TRIGGER:
self.lastphototaken = datetime.datetime.now()
self.takenewphoto()
self.slideshow.reset_timer()
def newPhotoAllowed(self):
secondsdelta = self.countdowntime + self.estimatedtriggertime + 5
nextphotoallowed = (self.lastphototaken +
datetime.timedelta(0, secondsdelta))
timenow = datetime.datetime.now()
if timenow > nextphotoallowed:
return True
return False
def takenewphoto(self):
self.showcountdown(self.countdowntime)
randomcheesefile = self.cheesepicfolder.getrandomphoto()
randomnumber = random.randint(0, 1021)
if randomnumber % 2 == 0:
self.showphoto(randomcheesefile)
else:
self.showtext("Cheese!", 300)
nextphotophath = self.photofolder.getnextfilename_fullpath()
photoTaken = self.camera.takephoto(nextphotophath)
if photoTaken:
self.showphoto(nextphotophath)
def showcountdown(self, upperbound):
for i in range(upperbound, 0, -1):
self.showtext(str(i), 400)
time.sleep(1)
| StarcoderdataPython |
83785 | import unittest
import dwmon
class CronTests(unittest.TestCase):
def test_parse_requirements(self):
cron_string = "CHECKHOURS0-9 CHECKMINUTES0-10 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS3600"
result = dwmon.parse_requirements(cron_string)
self.assertTrue(result["check_hours_lower"] == 0)
self.assertTrue(result["check_hours_upper"] == 9)
self.assertTrue(result["check_minutes_lower"] == 0)
self.assertTrue(result["check_minutes_upper"] == 10)
self.assertFalse(result["include_weekends"])
self.assertTrue(result["include_weekdays"])
self.assertTrue(result["min_num"] == 5)
self.assertTrue(result["max_num"] == 20)
self.assertTrue(result["lookback_seconds"] == 3600)
def test_parse_requirements_star(self):
cron_string = "CHECKHOURS0-9 CHECKMINUTES*/10 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS3600"
result = dwmon.parse_requirements(cron_string)
self.assertTrue(result["check_minutes_lower"] is None)
self.assertTrue(result["check_minutes_upper"] is None)
self.assertTrue(result["check_minutes_star"] == 10)
def test_switched_hours_range(self):
        # 9 to 5 is a reverse range, bad! (should be 9 - 17 probably)
cron_string = "CHECKHOURS9-5 CHECKMINUTES0-0 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS1000"
with self.assertRaisesRegexp(AssertionError, "bad hours relationship"):
dwmon.parse_requirements(cron_string)
def test_switched_minutes_range(self):
        # 9 to 5 is a reverse range, bad! (should be 9 - 17 probably)
cron_string = "CHECKHOURS0-5 CHECKMINUTES9-5 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS1000"
with self.assertRaisesRegexp(
AssertionError, "bad minutes relationship"):
dwmon.parse_requirements(cron_string)
def test_missing_hours(self):
cron_string = "CHECKMINUTES9-15 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS1000"
with self.assertRaisesRegexp(Exception, "missing CHECKHOURS"):
dwmon.parse_requirements(cron_string)
def test_switched_max_min(self):
cron_string = "CHECKHOURS0-5 CHECKMINUTES9-15 " \
"WEEKDAYS MINNUM500 MAXNUM20 LOOKBACKSECONDS1000"
with self.assertRaisesRegexp(AssertionError, "bad minnum/maxnum"):
dwmon.parse_requirements(cron_string)
def test_minutes_out_of_range(self):
cron_string = "CHECKHOURS0-5 CHECKMINUTES0-60 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS1000"
with self.assertRaisesRegexp(AssertionError, "out of range minutes"):
dwmon.parse_requirements(cron_string)
def test_time_pattern_252pm_saturday(self):
cron_string_1 = "CHECKHOURS12-18 CHECKMINUTES50-55 " \
"WEEKDAYS WEEKENDS MINNUM5 MAXNUM20 LOOKBACKSECONDS3600"
cron_string_2 = "CHECKHOURS12-18 CHECKMINUTES54-55 " \
"WEEKDAYS WEEKENDS MINNUM5 MAXNUM20 LOOKBACKSECONDS3600"
cron_string_3 = "CHECKHOURS12-18 CHECKMINUTES50-55 " \
"WEEKDAYS MINNUM5 MAXNUM20 LOOKBACKSECONDS3600"
epoch = 1455997930
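        # 1455997930 is Sat 20 Feb 2016 19:52:10 UTC, i.e. 2:52 PM on a Saturday in the
        # US Eastern timezone that these hour/minute windows appear to assume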
requirements_1 = dwmon.parse_requirements(cron_string_1)
requirements_2 = dwmon.parse_requirements(cron_string_2)
requirements_3 = dwmon.parse_requirements(cron_string_3)
self.assertTrue(dwmon.matches_time_pattern(requirements_1, epoch))
self.assertFalse(dwmon.matches_time_pattern(requirements_2, epoch))
self.assertFalse(dwmon.matches_time_pattern(requirements_3, epoch))
| StarcoderdataPython |
3312730 | # -*- coding: utf-8 -*-
'''
Microbial Bioinformatics Group in MML,SJTU
run_pssm.py: Script for generating PSSM profiles
<NAME> <<EMAIL>>
Usage:
$ python run_pssm.py -i <input sequence>
-db <blast database>
-e <evalue>
-n <num_iteration>
-o <ouput directory>
-threads <num_threads>
'''
import os
import argparse
from Bio import SeqIO
import tempfile
def pssm_command(query, db, evalue, n_iter, out_file, pssm_file, num_threads):
"""
Single PSI-BLAST command
@param query: single query protein sequence
@param db: database used for PSI-BLAST
@param evalue: BLAST evalue
@param n_iter: num. of PSI-BLAST iterations
@param out_file: BLAST output file
@param pssm_file: output .pssm file
@param num_threads: threads used for PSSM-BLAST
"""
print("Generating PSSM profile for sequence %s ..." % os.path.basename(query))
os.system(f'psiblast \
-query "{query}" \
-db {db} \
-num_iterations {n_iter} \
-evalue {evalue} \
-out "{out_file}" \
-out_ascii_pssm "{pssm_file}" \
-num_threads {num_threads} \
2>/dev/null')
def pssm(sequences, db, evalue, n_iter, outdir, num_threads):
"""
Obtain all .pssm files for query protein sequences
@param sequences,: query protein sequences
@param db: database used for PSI-BLAST
@param evalue: BLAST evalue
@param n_iter: num. of PSI-BLAST iterations
@param outdir: directory for output of PSI-BLAST
@param num_threads: threads used for PSI-BLAST
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
temp_dir = tempfile.TemporaryDirectory()
for record in SeqIO.parse(sequences, "fasta"):
query = os.path.join(temp_dir.name, "%s.fasta" % record.id)
SeqIO.write([record], query, "fasta")
output_file = os.path.join(temp_dir.name,"%s.out" % record.id)
pssm_file = os.path.join(outdir,"%s.pssm" % record.id)
pssm_command(query, db, evalue, n_iter, output_file, pssm_file, num_threads)
temp_dir.cleanup()
def main():
parser = argparse.ArgumentParser(description="Generate PSSM files")
parser.add_argument('-i', '--input_sequence', type=str, required=True)
parser.add_argument('-db', '--blast_database', type=str, required=True)
parser.add_argument('-e', '--evalue', type=float, default=1e-3)
parser.add_argument('-n', '--num_iterations', type=int, default=3)
parser.add_argument('-o', '--output_dir', type=str, default="pssm_files")
parser.add_argument('-threads', '--num_threads', type=int, default=1)
args = parser.parse_args()
pssm(args.input_sequence, args.blast_database, args.evalue, args.num_iterations, args.output_dir, args.num_threads)
if __name__ == "__main__":
main()
| StarcoderdataPython |
142864 | <gh_stars>0
import numpy
import matplotlib.pyplot as plt
import os
gw_in = numpy.array([[20, 1.4114],
[30, 1.2706],
[40, 1.2018],
[50, 1.1599],
[60, 1.1326]])
gw_out = numpy.array([[20, 1.2103],
[30, 1.1385],
[40, 1.1031],
[50, 1.0815],
[60, 1.0671]])
pbe_in = numpy.array([[20, 1.6458],
[30, 1.4297],
[40, 1.3225],
[50, 1.257],
[60, 1.2139]])
pbe_out = numpy.array([[20, 1.2852],
[30, 1.1892],
[40, 1.1413],
[50, 1.112],
[60, 1.0925]])
plt.style.use("science")
plt.figure(figsize=(6, 3.5))
plt.subplot(221)
plt.xlabel("$L$ ($\\rm{\\AA}$)")
plt.ylabel("$\\varepsilon_{\\rm{SL}}^{\parallel}$")
L_pbe = pbe_in[:, 0]
eps_pbe = pbe_in[:, 1]
L_gw = gw_in[:, 0]
eps_gw = gw_in[:, 1]
plt.plot(L_pbe, eps_pbe, "-o", label="PBE")
plt.plot(L_gw, eps_gw, "-o", label="G$_{0}$W$_{0}$")
plt.legend()
plt.subplot(222)
plt.xlabel("$L$ ($\\rm{\AA}$)")
plt.ylabel("$\\alpha_{\\rm{2D}}^{\parallel}/(4 \pi \\varepsilon_0)$ ($\\rm{\\AA}$)")
alpha_pbe = (eps_pbe - 1) * L_pbe / (4 * numpy.pi)
alpha_gw = (eps_gw - 1) * L_gw / (4 * numpy.pi)
plt.plot(L_pbe, alpha_pbe, "-o", label="PBE")
plt.plot(L_gw, alpha_gw, "-o", label="GW")
plt.ylim(0.5, 1.2)
plt.subplot(223)
plt.xlabel("$L$ ($\\rm{\AA}$)")
plt.ylabel("$\\varepsilon_{\\rm{SL}}^{\perp}$")
L_pbe = pbe_out[:, 0]
eps_pbe = pbe_out[:, 1]
L_gw = gw_out[:, 0]
eps_gw = gw_out[:, 1]
plt.plot(L_pbe, eps_pbe, "-o", label="PBE")
plt.plot(L_gw, eps_gw, "-o", label="GW")
plt.ylim(1.05, 1.35)
plt.subplot(224)
plt.xlabel("$L$ ($\\rm{\\AA}$)")
plt.ylabel("$\\alpha_{\\rm{2D}}^{\perp}/(4\\pi \\varepsilon_{0})$ ($\\rm{\\AA}$)")
# alpha_pbe = (1 - 1 / eps_pbe) * L_pbe / (4 * numpy.pi)
# alpha_gw = (1 - 1 / eps_gw) * L_gw / (4 * numpy.pi)
alpha_pbe = (eps_pbe - 1) * L_pbe / (4 * numpy.pi) / 2
alpha_gw = ( eps_gw - 1) * L_gw / (4 * numpy.pi) / 2
plt.plot(L_pbe, alpha_pbe, "-o", label="PBE")
plt.plot(L_gw, alpha_gw, "-o", label="GW")
plt.ylim(0.1, 0.25)
plt.tight_layout()
plt.savefig(os.path.join("../../tmp_img/",
"compare_alpha_zero.svg"))
| StarcoderdataPython |
148702 | <filename>08 Hash/hashTablePrac.py
class HashTable(object):
def __init__(self,size):
self.size = size
self.slots = [None] * self.size
self.data = [None] * self.size
def put(self,key,data):
hashValue = self.hashFunc(key,len(self.slots))
if self.slots[hashValue] == None:
self.slots[hashValue] = key
self.data[hashValue] = data
else:
if self.slots[hashValue] == key:
self.data[hashValue] = data
else:
                newHash = self.reHash(hashValue, len(self.slots))
while self.slots[newHash] != None and self.slots[newHash] != key:
newHash = self.reHash(newHash,len(self.slots))
if self.slots[newHash] == None:
self.slots[newHash] = key
self.data[newHash] = data
else:
self.data[newHash] = data
def get(self,key):
startHash = self.hashFunc(key,len(self.slots))
position = startHash
while self.slots[position] != None:
if self.slots[position] == key:
return self.data[position]
else:
position = self.reHash(position,len(self.slots))
if position == startHash:
return None
def hashFunc(self,key,size):
return key%size
def reHash(self,oldHash,size):
return (oldHash+1)%size
def __getitem__(self,key):
return self.get(key)
def __setitem__(self,key,data):
self.put(key,data)
h = HashTable(5)
h[1] = "harsh"
h[2] = "raj"
h[3] = "mahesh"
print(h[1])
print(h[2])
print(h[3])
h.put(3,"qwerty")
print(h[1])
print(h[2])
print(h[3]) | StarcoderdataPython |
187178 | from flask.json import jsonify
from model.Media import Media
from model.Poster import Poster
from model.Reply import Reply
from model.Thread import Thread
from shared import db
class ThreadPosts:
def get(self, thread_id):
session = db.session
thread = session.query(Thread).filter(Thread.id == thread_id).one()
thread.views += 1
session.add(thread)
session.commit()
return jsonify(self.retrieve(thread_id))
def delete(self, thread_id):
thread = db.session.query(Thread).filter(Thread.id == thread_id).one()
board_id = thread.board
db.session.delete(thread)
db.session.commit()
def retrieve(self, thread_id):
session = db.session
thread = session.query(Thread).filter(Thread.id == thread_id).one()
return self._to_json(thread.posts, thread)
def _to_json(self, posts, thread):
result = []
for index, post in enumerate(posts):
p_dict = dict()
p_dict["body"] = post.body
p_dict["datetime"] = post.datetime
p_dict["id"] = post.id
if index == 0:
p_dict["tags"] = thread.tags
session = db.session
poster = session.query(Poster).filter(Poster.id == post.poster).one()
p_dict["poster"] = poster.hex_string
p_dict["subject"] = post.subject
p_dict["media"] = post.media
if post.media:
p_dict["media_ext"] = session.query(Media).filter(Media.id == post.media).one().ext
else:
p_dict["media_ext"] = None
p_dict["spoiler"] = post.spoiler
p_dict["slip"] = poster.slip
p_dict["replies"] = []
replies = session.query(Reply).filter(Reply.reply_to == post.id).all()
for reply in replies:
p_dict["replies"].append(reply.reply_from)
result.append(p_dict)
return result
| StarcoderdataPython |
1609210 | <gh_stars>0
# Author: <NAME>
# Date: 2015
"""
Visualize the generated localization synthetic
data stored in h5 databases
"""
from __future__ import division
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import h5py
from common import *
def viz_textbb(text_im, charBB_list, wordBB, alpha=1.0):
"""
text_im : image containing text
charBB_list : list of 2x4xn_i bounding-box matrices
wordBB : 2x4xm matrix of word coordinates
"""
plt.close(1)
plt.figure(1)
plt.imshow(text_im)
H,W = text_im.shape[:2]
# plot the character-BB:
for i in range(len(charBB_list)):
bbs = charBB_list[i]
ni = bbs.shape[-1]
for j in range(ni):
bb = bbs[:,:,j]
bb = np.c_[bb,bb[:,0]]
plt.plot(bb[0,:], bb[1,:], 'r', alpha=alpha/2)
# plot the word-BB:
for i in range(wordBB.shape[-1]):
bb = wordBB[:,:,i]
bb = np.c_[bb,bb[:,0]]
plt.plot(bb[0,:], bb[1,:], 'g', alpha=alpha)
# visualize the indiv vertices:
vcol = ['r','g','b','k']
for j in range(4):
plt.scatter(bb[0,j],bb[1,j],color=vcol[j])
plt.gca().set_xlim([0,W-1])
plt.gca().set_ylim([H-1,0])
plt.show(block=False)
def main(db_fname):
db = h5py.File(db_fname, 'r')
dsets = sorted(db['data'].keys())
print ("total number of images : ", colorize(Color.RED, len(dsets), highlight=True))
for k in dsets:
rgb = db['data'][k][...]
charBB = db['data'][k].attrs['charBB']
wordBB = db['data'][k].attrs['wordBB']
txt = db['data'][k].attrs['txt']
viz_textbb(rgb, [charBB], wordBB)
print ("image name : ", colorize(Color.RED, k, bold=True))
print (" ** no. of chars : ", colorize(Color.YELLOW, charBB.shape[-1]))
print (" ** no. of words : ", colorize(Color.YELLOW, wordBB.shape[-1]))
print (" ** text : ", colorize(Color.GREEN, txt))
if 'q' in input("next? ('q' to exit) : "):
break
db.close()
if __name__=='__main__':
# main('results/SynthText.h5')
main('gen/dset.h5')
| StarcoderdataPython |
4814647 | #!/usr/bin/env python
"""
@file route_departOffset.py
@author <NAME>
@author <NAME>
@date 11.09.2009
@version $Id$
Applies a given offset to the given route's departure time
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import codecs
import optparse
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(os.path.join(tools))
from sumolib.output import parse
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
def intIfPossible(val):
if int(val) == val:
return int(val)
else:
return val
def get_options(args=None):
optParser = optparse.OptionParser()
optParser.add_option("-r", "--input-file", dest="infile",
help="the input route file (mandatory)")
optParser.add_option("-o", "--output-file", dest="outfile",
help="the output route file (mandatory)")
optParser.add_option("-d", "--depart-offset", dest="offset",
type="float", help="the depart offset to apply")
optParser.add_option("-i", "--depart-interval", dest="interval",
help="time intervals a,b,c,d where all vehicles departing in the interval [a,b[ are mapped to the interval [c,d[")
optParser.add_option("--modify-ids", dest="modify_ids", action="store_true",
default=False, help="whether ids should be modified as well")
optParser.add_option("--heterogeneous", dest="heterogeneous",
action="store_true", default=False, help="whether heterogeneous objects shall be parsed (i.e. vehicles with embeded and referenced routes)")
optParser.add_option("--depart-edges", dest="depart_edges",
help="only modify departure times of vehicles departing on the given edges")
optParser.add_option("--depart-edges.file", dest="depart_edges_file",
help="only modify departure times of vehicles departing on edges or lanes in the given selection file")
optParser.add_option("--arrival-edges", dest="arrival_edges",
help="only modify departure times of vehicles arriving on the given edges")
optParser.add_option("--arrival-edges.file", dest="arrival_edges_file",
help="only modify departure times of vehicles arriving on edges or lanes in the given selection file")
(options, args) = optParser.parse_args(args=args)
if options.infile is None or options.outfile is None:
optParser.print_help()
sys.exit()
if ((options.offset is None and options.interval is None) or
(options.offset is not None and options.interval is not None)):
print(
"Either one of the options --depart-offset or --depart-interval must be given")
sys.exit()
if options.offset is not None:
options.name_suffix = "_%s" % intIfPossible(options.offset)
else:
options.interval = tuple(map(float, options.interval.split(',')))
options.name_suffix = "_%s_%s_%s_%s" % options.interval
if options.depart_edges is not None:
options.depart_edges = options.depart_edges.split(',')
if options.depart_edges_file is not None:
if options.depart_edges is None:
options.depart_edges = []
for line in open(options.depart_edges_file):
line = line.strip()
if line.startswith("edge:"):
options.depart_edges.append(line[5:])
elif line.startswith("lane:"):
options.depart_edges.append(line[5:-2])
else:
options.depart_edges.append(line)
if options.arrival_edges is not None:
options.arrival_edges = options.arrival_edges.split(',')
if options.arrival_edges_file is not None:
if options.arrival_edges is None:
options.arrival_edges = []
for line in open(options.arrival_edges_file):
line = line.strip()
if line.startswith("edge:"):
options.arrival_edges.append(line[5:])
elif line.startswith("lane:"):
options.arrival_edges.append(line[5:-2])
else:
options.arrival_edges.append(line)
return options
def shiftInterval(val, interval):
val = float(val)
if interval[0] <= val < interval[1]:
val = (val - interval[0]) / (interval[1] - interval[0]
) * (interval[3] - interval[2]) + interval[2]
return str(intIfPossible(val))
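# Worked example: with interval (0, 100, 0, 50) a departure at 40 sits 40% of the way
# through [0, 100), so it maps to 40% of [0, 50): shiftInterval(40, (0, 100, 0, 50)) == '20'.
# A value outside [0, 100), e.g. 120, is returned unchanged as '120'.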
def main(options):
# cache stand-alone routes
routesDepart = {} # first edge for each route
routesArrival = {} # last edge for each route
with codecs.open(options.outfile, 'w', encoding='utf8') as out:
out.write("<routes>\n")
for route in parse(options.infile, "route"):
if route.hasAttribute('id') and route.id is not None:
routesDepart[route.id] = route.edges.split()[0]
routesArrival[route.id] = route.edges.split()[-1]
out.write(route.toXML(' '))
for obj in parse(options.infile, ['vehicle', 'trip', 'flow', 'vType'],
heterogeneous=options.heterogeneous, warn=False):
if obj.name == 'vType':
# copy
pass
else:
if options.modify_ids:
obj.id += options.name_suffix
# compute depart-edge filter
departEdge = None
if options.depart_edges is not None:
# determine the departEdge of the current vehicle
if obj.name == 'trip':
departEdge = obj.attr_from
elif obj.name == 'vehicle':
if obj.hasAttribute('route') and obj.route is not None:
departEdge = routesDepart[obj.route]
else:
# route child element
departEdge = obj.route[0].edges.split()[0]
elif obj.name == 'flow':
if obj.hasAttribute('attr_from') and obj.attr_from is not None:
departEdge = obj.attr_from
elif obj.hasAttribute('route') and obj.route is not None:
departEdge = routesDepart[obj.route]
else:
# route child element
departEdge = obj.route[0].edges.split()[0]
# compute arrival-edge filter
arrivalEdge = None
if options.arrival_edges is not None:
# determine the arrivalEdge of the current vehicle
if obj.name == 'trip':
arrivalEdge = obj.to
elif obj.name == 'vehicle':
if obj.hasAttribute('route') and obj.route is not None:
arrivalEdge = routesArrival[obj.route]
else:
# route child element
arrivalEdge = obj.route[0].edges.split()[-1]
elif obj.name == 'flow':
                        if obj.hasAttribute('to') and obj.to is not None:
arrivalEdge = obj.to
elif obj.hasAttribute('route') and obj.route is not None:
arrivalEdge = routesArrival[obj.route]
else:
# route child element
arrivalEdge = obj.route[0].edges.split()[-1]
# modify departure time
if ((departEdge is None or departEdge in options.depart_edges) and
(arrivalEdge is None or arrivalEdge in options.arrival_edges)):
if options.offset is not None:
# shift by offset
if obj.name in ['trip', 'vehicle']:
obj.depart = str(intIfPossible(
float(obj.depart) + options.offset))
else:
obj.begin = str(intIfPossible(
float(obj.begin) + options.offset))
obj.end = str(intIfPossible(
float(obj.end) + options.offset))
else:
# shift by interval
if obj.name in ['trip', 'vehicle']:
obj.depart = shiftInterval(
obj.depart, options.interval)
else:
obj.begin = shiftInterval(
obj.begin, options.interval)
obj.end = shiftInterval(obj.end, options.interval)
out.write(obj.toXML(' '))
out.write("</routes>\n")
if __name__ == "__main__":
main(get_options(sys.argv))
| StarcoderdataPython |
3208860 | <filename>train.py
import time
import os
import copy
import pdb
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import model
from data import guipang
from data import qiafan
from tensorboardX import SummaryWriter
from torch.utils.data import Dataset, DataLoader
assert torch.__version__.split('.')[1] == '4'
######################################
# config
cfg = {
'lr': 0.00001,
'momentum': 0.9,
'weight_decay': 0.000005,
'batch_size': 1,
'max_epoch': 100,
'checkpoint': 20,
'milestones': [30, 50],
'gamma': 0.1,
'bar_scor': 0.7,
'bar_iou': 0.7,
'dataset_guipang': '/repository/gong/qiafan/guipangdata/',
'dataset_qiafan': '/repository/gong/qiafan/dataset/',
'cuda_devices': '4',
'ckpt_root': '/repository/gong/qiafan/',
'depth': 50
}
os.environ['CUDA_VISIBLE_DEVICES'] = cfg['cuda_devices']
time_TrainStart = str(int(time.time()))
the_ckpt_root = cfg['ckpt_root']+time_TrainStart+'/'
os.mkdir(the_ckpt_root)
# tensorboardx
writer = SummaryWriter('runs/'+time_TrainStart+'_'+str(os.getpid()))
def main(args=None):
data_set = {
x: guipang(cfg=cfg['dataset_guipang'], part=x) for x in ['train', 'val']
}
# data_set = {
# x: qiafan(cfg=cfg['dataset_qiafan'], part=x) for x in ['train', 'val']
# }
data_loader = {
        x: DataLoader(data_set[x], batch_size=cfg['batch_size'],
num_workers=4, shuffle=True, pin_memory=False)
for x in ['train', 'val']
}
# Create the model
if cfg['depth'] == 18:
retinanet = model.resnet18(
            num_classes=data_set['train'].num_classes(), pretrained=True)
elif cfg['depth'] == 34:
retinanet = model.resnet34(
            num_classes=data_set['train'].num_classes(), pretrained=True)
elif cfg['depth'] == 50:
retinanet = model.resnet50(
            num_classes=data_set['train'].num_classes(), pretrained=True)
elif cfg['depth'] == 101:
retinanet = model.resnet101(
            num_classes=data_set['train'].num_classes(), pretrained=True)
elif cfg['depth'] == 152:
retinanet = model.resnet152(
            num_classes=data_set['train'].num_classes(), pretrained=True)
else:
raise ValueError(
'Unsupported model depth, must be one of 18, 34, 50, 101, 152')
use_gpu = True
if use_gpu:
retinanet = retinanet.cuda()
retinanet = torch.nn.DataParallel(retinanet).cuda()
optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=cfg['milestones'], gamma=cfg['gamma'])
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    best_map = 0.0
    for epoch in range(1, cfg['max_epoch']):
print('epoch: ', epoch)
for phrase in ['train', 'val']:
nowepochiter = (epoch-1)*len(data_loader[phrase])
if phrase == 'train':
scheduler.step()
retinanet.training = True
retinanet.train()
retinanet.module.freeze_bn()
else:
retinanet.training = False
retinanet.eval()
##################################################
epoch_ap = 0.0
##################################################
for i, (images, targets) in enumerate(data_loader[phrase]):
optimizer.zero_grad()
images = torch.cuda.FloatTensor(images.cuda())
                # assumes the guipang loader yields (image, VOC-style annotation dict)
                targets = torch.cuda.FloatTensor([list(map(float, targets['annot']['annotation']['object']['bndbox']['xmin'])),
                                                  list(map(float, targets['annot']['annotation']['object']['bndbox']['ymin'])),
                                                  list(map(float, targets['annot']['annotation']['object']['bndbox']['xmax'])),
                                                  list(map(float, targets['annot']['annotation']['object']['bndbox']['ymax']))])
##################################################
##################################################
##################################################
with torch.set_grad_enabled(phrase == 'train'):
if phrase == 'train':
classification_loss, regression_loss = retinanet([images, targets])
classification_loss = classification_loss.mean()
regression_loss = regression_loss.mean()
loss = classification_loss + regression_loss
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
optimizer.step()
writer.add_scalar('guipangtrain/loss',
loss.item(), i+nowepochiter)
else:
pass
# scores, labels, boxes = retinanet(data['img'].permute(2, 0, 1).cuda().float().unsqueeze(dim=0))
# epoch_ap = apcal_guipang(
# epoch_ap, outputs, targets, cfg['bar_scor'], cfg['bar_iou'])
if phrase == 'val':
epoch_map = epoch_ap/float(data_set['val'].__len__())
writer.add_scalar('guipangval/map', epoch_map, epoch)
if epoch_map > best_map:
best_map = epoch_map
                    best_model_wts = copy.deepcopy(retinanet.state_dict())
torch.save(best_model_wts, the_ckpt_root+'best.pkl')
            if epoch % cfg['checkpoint'] == 0:
                torch.save(copy.deepcopy(retinanet.state_dict()),
                           the_ckpt_root+'{}.pkl'.format(epoch))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1728437 | import datetime
import time
from json_model import fields
from json_model import libs
class Scholarship(libs.JsonModel):
amount = fields.Float(required=True)
currency = fields.String(default='USD')
months = fields.List(required=True)
class Student(libs.JsonModel):
name = fields.String(required=True)
surname = fields.String(required=True)
age = fields.Integer()
day_of_birth = fields.Datetime()
scholarship = fields.ForeignField()
created_at = fields.Timestamp()
if __name__ == '__main__':
scholarship = Scholarship()
scholarship.amount = 500.00
scholarship.months = [1, 2, 3, 4, 5, 8, 9, 10]
student = Student()
student.name = "Andrew"
student.surname = "Gardner"
student.age = 26
student.day_of_birth = datetime.datetime.strptime('Jun 1 1999 1:33PM', '%b %d %Y %I:%M%p')
student.scholarship = scholarship
student.created_at = int(time.time())
print(student.to_json())
student = Student(**{
'name': 'Andrew',
'surname': 'Gardner',
'age': 26,
'day_of_birth': datetime.datetime.strptime('Jun 1 1999 1:33PM', '%b %d %Y %I:%M%p'),
'scholarship': Scholarship(**{'amount': 500.00, 'months': [1, 2, 3, 4]}),
'created_at': int(time.time())
}
)
print(student.to_json()) | StarcoderdataPython |
77721 | <gh_stars>0
f_chr6 = open("/hwfssz1/ST_BIOCHEM/P18Z10200N0032/liyiyan/SV_Caller/HG002/chr6_reads.fastq")
liness = f_chr6.readlines()
reads_list = []
# every FASTQ record spans 4 lines; line 2 of each record (indices 1, 5, 9, ...) holds the read sequence
for i in range(1, len(liness), 4):
    reads_list.append(liness[i])
# shortest read; readlines() keeps the trailing newline, hence the -1 on the printed length
res = min(reads_list, key=len, default='')
print(len(res) - 1)
| StarcoderdataPython |