max_stars_repo_path (string, lengths 3-269) | max_stars_repo_name (string, lengths 4-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
setup.py | monoidic/intelmq-manager | 0 | 12793551 | <reponame>monoidic/intelmq-manager
""" Setup file for intelmq-manager
SPDX-FileCopyrightText: 2020 IntelMQ Team <<EMAIL>>
SPDX-License-Identifier: AGPL-3.0-or-later
"""
from setuptools import find_packages, setup
import pathlib
import shutil
from mako.lookup import TemplateLookup
from intelmq_manager.version import __version__
def render_page(pagename:str, **template_args) -> str:
template_dir = pathlib.Path('intelmq_manager/templates')
template_lookup = TemplateLookup(directories=[template_dir], default_filters=["h"], input_encoding='utf8')
template = template_lookup.get_template(f'{pagename}.mako')
return template.render(pagename=pagename, **template_args)
def buildhtml():
outputdir = pathlib.Path('html')
outputdir.mkdir(parents=True, exist_ok=True)
htmlfiles = ["configs", "management", "monitor", "check", "about", "index"]
for filename in htmlfiles:
print(f"Rendering {filename}.html")
html = render_page(filename)
outputdir.joinpath(f"{filename}.html").write_text(html)
staticfiles = ["css", "images", "js", "plugins", "less"]
for filename in staticfiles:
print(f"Copying {filename} recursively")
src = pathlib.Path('intelmq_manager/static') / filename
dst = outputdir / filename
if dst.exists():
shutil.rmtree(dst)
shutil.copytree(src, dst)
print('rendering dynvar.js')
rendered = render_page('dynvar', allowed_path='/opt/intelmq/var/lib/bots/', controller_cmd='intelmq')
outputdir.joinpath('js/dynvar.js').write_text(rendered)
# Before running setup, we build the html files in any case
buildhtml()
htmlsubdirs = [directory for directory in pathlib.Path('html').glob('**') if directory.is_dir()]
data_files = [(f'/usr/share/intelmq_manager/{directory}', [str(x) for x in directory.glob('*') if x.is_file()]) for directory in htmlsubdirs]
data_files = data_files + [('/usr/share/intelmq_manager/html', [str(x) for x in pathlib.Path('html').iterdir() if x.is_file()])]
data_files = data_files + [('/etc/intelmq', ['contrib/manager-apache.conf'])]
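# For illustration, each data_files entry pairs an install directory with the
# files placed there; a typical (hypothetical) entry looks like
#   ('/usr/share/intelmq_manager/html/js', ['html/js/dynvar.js', ...])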
setup(
name="intelmq-manager",
version=__version__,
python_requires='>=3.6',  # the f-strings used above require at least 3.6
packages=find_packages(),
install_requires=[
"intelmq-api",
],
include_package_data=True,
url='https://github.com/certtools/intelmq-manager/',
description=("IntelMQ Manager is a graphical interface to manage"
" configurations for the IntelMQ framework."),
data_files=data_files
)
| 1.984375 | 2 |
models/deepWalk.py | nicolas-racchi/hpc2020-graphML | 0 | 12793552 | <reponame>nicolas-racchi/hpc2020-graphML
import time
import pandas as pd
import numpy as np
import stellargraph as sg
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
from utils.visualization import get_TSNE
def sg_DeepWalk(v_sets, e_sets, v_sample, e_sample):
G = sg.StellarDiGraph(v_sets, e_sets)
#### Graph embedding with NODE2VEC and WORD2VEC
print("Running DeepWalk")
rw = sg.data.BiasedRandomWalk(G)
t0 = time.time()
walks = rw.run(
nodes=list(G.nodes()), # root nodes
length=10, # maximum length of a random walk
n=10, # number of random walks per root node
p=0.6, # defines the (unnormalised) probability, 1/p, of returning to the source node
q=1.7, # defines the (unnormalised) probability, 1/q, of moving away from the source node
)
t1 = time.time()
print("Number of random walks: {} in {:.2f} s".format(len(walks), (t1-t0)))
str_walks = [[str(n) for n in walk] for walk in walks]
model = Word2Vec(str_walks, size=128, window=5, min_count=0, sg=1, workers=8, iter=5)
# size: length of embedding vector
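# note: size=/iter= are the gensim < 4.0 keyword names; gensim >= 4.0 renamed
# them to vector_size=/epochs=, and model.wv.index2word (used below) became
# model.wv.index_to_key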
# The embedding vectors can be retrieved from model.wv using the node ID.
# model.wv["19231"].shape
# Retrieve node embeddings
node_ids = model.wv.index2word # list of node IDs
node_embeddings = (model.wv.vectors) # numpy.ndarray of shape (number of nodes, embedding dimensionality)
# Retrieve corresponding targets
# from training csv
# core_targets = core_target_sample.loc[[int(node_id) for node_id in node_ids if int(node_id) in list(core_target_sample.index)]].CaseID
# ext_targets = ext_target_sample.loc[[int(node_id) for node_id in node_ids if int(node_id) in list(ext_target_sample.index)]].CaseID
# from vertices' data
core_targets = v_sample.loc[[int(node_id) for node_id in node_ids]].CoreCaseGraphID
ext_targets = v_sample.loc[[int(node_id) for node_id in node_ids]].ExtendedCaseGraphID
t2 = time.time()
print(f"Deepwalk complete: {(t2-t0):.2f} s")
# Visualize embeddings with TSNE
embs_2d = get_TSNE(node_embeddings)
# Draw the embedding points, coloring them by the target label (CaseID)
alpha = 0.6
label_map = {l: i for i, l in enumerate(np.unique(ext_targets), start=10) if pd.notna(l)}
label_map[0] = 1
node_colours = [label_map[target] if pd.notna(target) else 0 for target in ext_targets]
plt.figure(figsize=(15, 15))
plt.axes().set(aspect="equal")
plt.scatter(
embs_2d[:, 0],
embs_2d[:, 1],
c=node_colours,
cmap="jet",
alpha=alpha,
)
plt.title("TSNE visualization of node embeddings w.r.t. Extended Case ID")
plt.show()
return node_ids, node_embeddings, core_targets, ext_targets
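# Illustrative call (hypothetical data): given vertex/edge DataFrames for the
# StellarDiGraph and a vertex sample carrying the CoreCaseGraphID and
# ExtendedCaseGraphID columns:
#   node_ids, embs, core_t, ext_t = sg_DeepWalk(v_sets, e_sets, v_sample, e_sample)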
| 2.65625 | 3 |
2017/10_Oct/11/04-isnumeric.py | z727354123/pyCharmTest | 0 | 12793553 | myStr = ''
print(myStr.isalnum()) # False - the empty string is not alphanumeric
myStr = 'abCC'
print(myStr.isalpha()) # True - uppercase letters are allowed
myStr = 'abc*'
print(myStr.isalpha()) # False - symbols are not allowed
myStr = 'abc1'
print(myStr.isalpha()) # False - isalpha() rejects digits
print(myStr.isalnum()) # True - isalnum() accepts digits
myStr = '123'
print(myStr.isnumeric()) # True - all-digit strings only
myStr = '123.123'
print(myStr.isnumeric()) # False
myStr = '0.1'
print(myStr.isnumeric()) # False - the decimal point is not numeric
print(myStr.isalnum()) # False - the decimal point is not alphanumeric
myStr = 'abc123.1'
print(myStr.isalnum()) # False - the decimal point is not alphanumeric
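# A further distinction, for completeness: isdecimal() is stricter than
# isdigit(), which is stricter than isnumeric(); e.g. for U+00BD:
myStr = '½'
print(myStr.isnumeric()) # True - numeric Unicode characters count
print(myStr.isdigit()) # False
print(myStr.isdecimal()) # False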
| 3.984375 | 4 |
tex/poster/make_comparative_figure.py | se4u/mvlsa | 12 | 12793554 | <reponame>se4u/mvlsa
from __future__ import division
conditions="Glove W2Vec(Skipgram) MVLSA(Glove+W2Vec) MVLSA(Wiki) MVLSA(Allviews) MVLSA(Allviews+Glove+W2Vec)".split()
viewperf=r"""
MEN & 70.4 & 73.9 & 76.0 & 71.4 & 71.2 & 75.8
RW & 28.1 & 32.9 & 37.2 & 29.0 & 41.7 & 40.5
SCWS & 54.1 & 65.6 & 60.7 & 61.8 & 67.3 & 66.4
SIMLEX & 33.7 & 36.7 & 41.1 & 34.5 & 42.4 & 43.9
WS & 58.6 & 70.8 & 67.4 & 68.0 & 70.8 & 70.1
MTURK & 61.7 & 65.1 & 59.8 & 59.1 & 59.7 & 62.9
WS-REL & 53.4 & 63.6 & 59.6 & 60.1 & 65.1 & 63.5
WS-SEM & 69.0 & 78.4 & 76.1 & 76.8 & 78.8 & 79.2
RG & 73.8 & 78.2 & 80.4 & 71.2 & 74.4 & 80.8
MC & 70.5 & 78.5 & 82.7 & 76.6 & 75.9 & 77.7
An-SYN & 61.8 & 59.8 & 51.0 & 42.7 & 60.0 & 64.3
An-SEM & 80.9 & 73.7 & 73.5 & 36.2 & 38.6 & 77.2
TOEFL & 83.8 & 81.2 & 86.2 & 78.8 & 87.5 & 88.8"""
viewperf=viewperf.split("\n")
colors="gbcykr"
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from pylab import savefig
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
xlab=[]
width=0.8
patch_height=0.7
ba=[]
pa=[]
for idx, row in enumerate(viewperf[1:]):
row=row.split("&")
dataset=row[0]
# The dataset is the xlabel
xlab.append(dataset.strip())
perf=[float(e) for e in row[1:]]
# Plot the last number as a bar
bar_artist,=plt.bar(idx+0.1, perf[-1],
width=width,
color=colors[-1],
edgecolor='black',
linewidth=0.4,
alpha=0.8)
bar_artist.set_label(conditions[-1].replace("(", " (").replace("Allviews", "All Views"))
ba.append(bar_artist)
# Plot the rest of them as boxes on the bar.
for j, p in enumerate(perf[0:-1]):
patch_artist=ax.add_patch(plt.Rectangle((idx+0.01*j, p-patch_height),
width,
patch_height,
facecolor=colors[j],
alpha=0.7,
label=conditions[j].replace("(", " (").replace("Allviews", "All Views"),
edgecolor='white',
linewidth=0.4,
))
pa.append(patch_artist)
ax.set_xlim(xmax=len(xlab))
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.set_ticks_position('both')
ax.set_xticklabels([""]+xlab, rotation=45, ha='left')
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.xaxis.set_ticks_position('bottom')
ax.set_title('Comparison between Word2Vec, Glove and MVLSA',
color='black')
ymin=25
ax.legend(handles=[ba[-1], pa[-3], pa[-1], pa[-2], pa[-5], pa[-4]],
loc=(.08, .74),
prop={'size':9},
shadow=True)
ax.set_ylabel("Correlation")
plt.axvline(x=9.95, color='black')
ax.set_ylim(ymin=ymin)
ax.set_yticklabels(["%.2f"%(e/100) for e in range(ymin-5,100,10)])
ax.text(13.2, ymin+37, 'Accuracy', fontsize=13, rotation=270, color='gray')
savefig("comparative_figure.pdf", bbox_inches='tight')
| 1.953125 | 2 |
dbaas/tsuru/admin/__init__.py | jaeko44/python_dbaas | 0 | 12793555 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from .. import models
from .bind import BindAdmin
admin.site.register(models.Bind, BindAdmin)
| 1.195313 | 1 |
ops.py | Forty-lock/Inpainting_AIHub | 0 | 12793556 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import utils
import math
class conv5x5(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, dilation=1):
super(conv5x5, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=stride,
padding=2*dilation, dilation=dilation, bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class conv3x3(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, dilation=1):
super(conv3x3, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, padding_mode='reflect', bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class conv1x1(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(conv1x1, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class conv_zeros(nn.Module):
def __init__(self, in_channels, out_channels):
super(conv_zeros, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
nn.init.constant_(self.conv.weight, 0)
def forward(self, x):
return self.conv(x)
class PAKA3x3(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, dilation=1):
super(PAKA3x3, self).__init__()
self.conv = PAKA2d(in_channels, out_channels, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class PAKA2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
super(PAKA2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
# store the remaining constructor arguments so that extra_repr() below can
# report them (it reads them from self.__dict__); without these lines
# extra_repr() would raise AttributeError/KeyError
self.in_channels = in_channels
self.out_channels = out_channels
self.padding = (padding, padding)
self.dilation = dilation
self.weight = torch.nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size ** 2, 1, 1))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.conv_c = nn.Sequential(conv1x1(in_channels, in_channels, stride),
nn.ReLU(True),
conv_zeros(in_channels, in_channels),
)
self.conv_d = nn.Sequential(conv3x3(in_channels, in_channels, stride, dilation=dilation),
nn.ReLU(True),
conv_zeros(in_channels, kernel_size ** 2),
)
self.unfold = nn.Unfold(kernel_size, padding=padding, stride=stride, dilation=dilation)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
def forward(self, x):
b, n, h, w = x.shape
return F.conv3d(self.unfold(x).view(b, n, self.kernel_size ** 2, h//self.stride, w//self.stride) * (1 + torch.tanh(self.conv_d(x).unsqueeze(1)+self.conv_c(x).unsqueeze(2))),
self.weight, self.bias).squeeze(2)
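# Illustrative shape check (not part of the original module): at stride 1 a
# PAKA layer preserves spatial size, e.g.
#   x = torch.randn(2, 8, 32, 32)
#   PAKA3x3(8, 16)(x).shape == torch.Size([2, 16, 32, 32])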
class downsample(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(downsample, self).__init__()
self.conv1 = conv3x3(in_channels, hidden_channels)
self.conv2 = conv3x3(hidden_channels, out_channels, stride=2)
def forward(self, x):
h = self.conv1(x)
h = F.elu(h)
h = self.conv2(h)
h = F.elu(h)
return h
class upsample(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsample, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels*4)
self.conv2 = conv3x3(out_channels, out_channels)
def forward(self, x):
h = self.conv1(x)
h = F.pixel_shuffle(h, 2)
h = F.elu(h)
h = self.conv2(h)
h = F.elu(h)
return h
| 2.46875 | 2 |
wiske/event.py | jthistle/wiskesynth | 0 | 12793557 |
from enum import Enum
class EventType(Enum):
NOTE_ON = 1
NOTE_OFF = 2
class Event:
def __init__(self, etype):
self.type = etype
class EventNoteOn(Event):
def __init__(self, midi_note, velocity):
super().__init__(EventType.NOTE_ON)
self.note = midi_note
self.velocity = velocity
class EventNoteOff(Event):
def __init__(self, midi_note):
super().__init__(EventType.NOTE_OFF)
self.note = midi_note
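# Example usage (illustrative):
#   e = EventNoteOn(60, 100) # middle C at velocity 100
#   if e.type == EventType.NOTE_ON:
#       ... # route the event to a synth voice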
| 3.046875 | 3 |
long-exp.py | nmiculinic/python-hello-world | 0 | 12793558 | <gh_stars>0
#!/usr/bin/python
import time
for i in range(2 * 3600):
print(f"hello world {i}", flush=True)
with open(f"artifacts-{i}.txt", "w") as f:
print("Hello mother, hello father, I'm here!", file=f)
time.sleep(1)
| 2.8125 | 3 |
tests/unit/test_lockfile.py | indhupriya/dvc | 1 | 12793559 | import pytest
from dvc.dvcfile import Lockfile, LockfileCorruptedError
from dvc.stage import PipelineStage
from dvc.utils.serialize import dump_yaml
def test_stage_dump_no_outs_deps(tmp_dir, dvc):
stage = PipelineStage(name="s1", repo=dvc, path="path", cmd="command")
lockfile = Lockfile(dvc, "path.lock")
lockfile.dump(stage)
assert lockfile.load() == {"s1": {"cmd": "command"}}
def test_stage_dump_when_already_exists(tmp_dir, dvc):
data = {"s1": {"cmd": "command", "deps": [], "outs": []}}
dump_yaml("path.lock", data)
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command2")
lockfile = Lockfile(dvc, "path.lock")
lockfile.dump(stage)
assert lockfile.load() == {
**data,
"s2": {"cmd": "command2"},
}
def test_stage_dump_with_deps_and_outs(tmp_dir, dvc):
data = {
"s1": {
"cmd": "command",
"deps": [{"md5": "1.txt", "path": "checksum"}],
"outs": [{"md5": "2.txt", "path": "checksum"}],
}
}
dump_yaml("path.lock", data)
lockfile = Lockfile(dvc, "path.lock")
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command2")
lockfile.dump(stage)
assert lockfile.load() == {
**data,
"s2": {"cmd": "command2"},
}
def test_stage_overwrites_if_already_exists(tmp_dir, dvc):
lockfile = Lockfile(dvc, "path.lock")
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command2")
lockfile.dump(stage)
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command3")
lockfile.dump(stage)
assert lockfile.load() == {
"s2": {"cmd": "command3"},
}
def test_load_when_lockfile_does_not_exist(tmp_dir, dvc):
assert {} == Lockfile(dvc, "pipelines.lock").load()
@pytest.mark.parametrize(
"corrupt_data",
[
{"s1": {"outs": []}},
{"s1": {}},
{
"s1": {
"cmd": "command",
"outs": [
{"md5": "checksum", "path": "path", "random": "value"}
],
}
},
{"s1": {"cmd": "command", "deps": [{"md5": "checksum"}]}},
],
)
def test_load_when_lockfile_is_corrupted(tmp_dir, dvc, corrupt_data):
dump_yaml("Dvcfile.lock", corrupt_data)
lockfile = Lockfile(dvc, "Dvcfile.lock")
with pytest.raises(LockfileCorruptedError) as exc_info:
lockfile.load()
assert "Dvcfile.lock" in str(exc_info.value)
| 2.171875 | 2 |
datapypes/pype.py | msmathers/datapypes | 1 | 12793560 | <filename>datapypes/pype.py
from set import Set
from model import Model
from source import Source
from store import Store
class InvalidPypeDataType(TypeError):
# raised by _store_method() below when passed neither a Set nor a Model
pass
class Pype(object):
@property
def set(self):
raise NotImplementedError()
@property
def source(self):
raise NotImplementedError()
@property
def store(self):
raise NotImplementedError()
class SourcePype(Pype):
def __init__(self, set, source):
self._set = set
self._source = source
# Actions
def one(self, query_model):
raise NotImplementedError()
def latest(self, query_model):
raise NotImplementedError()
def search(self, query_model):
raise NotImplementedError()
def stream(self, query_model):
raise NotImplementedError()
def all(self, query_model):
raise NotImplementedError()
# Source action invocation
def retrieve_model(self, result):
return self.set.model(**result)
def retrieve_set(self, results):
return self.set(*[self.retrieve_model(r) for r in results])
class StorePype(Pype):
def __init__(self, set, store):
self._set = set
self._store = store
# Actions
def save(self, data_type):
return self._store_method('save', data_type)
def update(self, data_type, query_model=None):
return self._store_method('update', data_type, query_model)
def delete(self, data_type, query_model=None):
return self._store_method('save', data_type, query_model)
# Define bindings in subclass
def save_model(self, data_model):
raise NotImplementedError()
def save_set(self, data_set):
raise NotImplementedError()
def update_model(self, data_model):
raise NotImplementedError()
def update_set(self, data_set, query_model):
raise NotImplementedError()
def delete_model(self, data_model):
raise NotImplementedError()
def delete_set(self, data_set, query_model):
raise NotImplementedError()
# Action invocation
def _store_method(self, method, data_type, query=None):
if isinstance(data_type, Set):
method += "_set"
elif isinstance(data_type, Model):
method += "_model"
else:
raise InvalidPypeDataType(data_type)
kwargs = {} if query is None else {'query_model': query}
getattr(self, method)(data_type, **kwargs)
return self.set
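# Illustrative wiring (hypothetical names): a concrete pype binds a Set to a
# Store as class attributes and fills in the *_set/*_model hooks, e.g.
#   class UserStorePype(StorePype):
#       set = UserSet
#       store = PostgresStore
#       def save_set(self, data_set): ...
# and is then registered via register_pypes(UserStorePype) (defined below).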
def register_pypes(*pype_classes):
for cls in pype_classes:
if hasattr(cls,'__bases__') and StorePype in cls.__bases__:
Store.__pypes__.setdefault(cls.store,{})[cls.set] = cls
Store.__pypes__.setdefault(cls.store,{})[cls.set.model] = cls
Set.__pypes__.setdefault(cls.set,{})[cls.store] = cls
Model.__pypes__.setdefault(cls.set.model,{})[cls.store] = cls
elif hasattr(cls,'__bases__') and SourcePype in cls.__bases__:
Source.__pypes__.setdefault(cls.source,{})[cls.set] = cls
Source.__pypes__.setdefault(cls.source,{})[cls.set.model] = cls
Set.__pypes__.setdefault(cls.set,{})[cls.source] = cls
Model.__pypes__.setdefault(cls.set.model,{})[cls.source] = cls
| 2.59375 | 3 |
xdd-7.0.0.rc-ramses3/contrib/buildbot_master_xdd.py | eunsungc/gt6-RAMSES_8_5 | 1 | 12793561 | <reponame>eunsungc/gt6-RAMSES_8_5
#!/usr/bin/python
#
# The buildbot settings for XDD. We assume the following build slaves are
# defined in the master.cfg:
#
# c['slaves'] = []
# c['slaves'].append(BuildSlave("pod9", "banana"))
# c['slaves'].append(BuildSlave("pod7", "banana"))
# c['slaves'].append(BuildSlave("pod10", "banana"))
# c['slaves'].append(BuildSlave("pod11", "banana"))
# c['slaves'].append(BuildSlave("spry02", "banana"))
# c['slaves'].append(BuildSlave("natureboy", "banana"))
#
# In order to enable these tests, add the
# following lines to the bottom of the default master.cfg
#
####### Import the configuration to build/test XDD
# import buildbot_master_xdd
# reload(buildbot_master_xdd)
# from buildbot_master_xdd import *
# buildbot_master_xdd.loadConfig(config=c)
#
# To retrieve the latest version of this file, run the following command:
#
# git archive --format=tar --prefix=xdd/ --remote=/ccs/proj/csc040/var/git/xdd.git master |tar xf - --strip=2 xdd/contrib/buildbot_master_xdd.py
#
#
# This uses the BuildmasterConfig object referenced in master.cfg
def loadConfig(config):
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes. Here we point the poller at the XDD repository.
from buildbot.changes.gitpoller import GitPoller
from buildbot.changes.filter import ChangeFilter
config['change_source'].append( GitPoller(
repourl = '<EMAIL>:ORNL/xdd.git',
workdir='gitpoller-workdir-xdd-master',
pollinterval=120,
branch='master',
project='xdd'))
xdd_filter = ChangeFilter(
project = 'xdd',
branch = 'testing')
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which slaves can execute them. Note that any particular build will
# only take place on one slave.
from buildbot.process.factory import BuildFactory, GNUAutoconf
from buildbot.steps.source import Git
from buildbot.steps.shell import ShellCommand, Configure, Compile, Test
xdd_factory = BuildFactory()
# Check out the source
xdd_factory.addStep(Git(repourl='<EMAIL>:ORNL/xdd.git', mode='copy', branch='master'))
# Generate the test configuration
xdd_factory.addStep(ShellCommand(command=['./contrib/buildbot_gen_test_config.sh'], name="configuring"))
# Compile the code
xdd_factory.addStep(Compile(description=["compiling"]))
# Install the code
xdd_factory.addStep(ShellCommand(command=['make', 'install'], name="make install"))
# Perform make check
xdd_factory.addStep(ShellCommand(command=['make', 'check'], name="make check", maxTime=600))
# Perform make test
xdd_factory.addStep(Test(description=["make test"], maxTime=600))
# Perform cleanup
xdd_factory.addStep(ShellCommand(command=['pkill', '-f', 'xdd', '||', 'echo ""'], name='process cleanup', maxTime=60))
# Add the XDD Build factory to each of the available builders described in the master.cfg
from buildbot.config import BuilderConfig
# config['builders'].append(BuilderConfig(name="xdd-rhel5-x86_64", slavenames=["pod7"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
# config['builders'].append(BuilderConfig(name="xdd-rhel6-x86_64", slavenames=["pod9"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"},category='xdd'))
# config['builders'].append(BuilderConfig(name="xdd-sles10-x86_64", slavenames=["pod10"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
config['builders'].append(BuilderConfig(name="xdd-sles11-x86_64", slavenames=["pod11"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
config['builders'].append(BuilderConfig(name="xdd-osx-10-8", slavenames=["natureboy"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
# config['builders'].append(BuilderConfig(name="xdd-rhel6-ppc64", slavenames=["spry02"], factory=xdd_factory, env={"XDDTEST_TIMEOUT": "900"}, category='xdd'))
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes. In this
# case, just kick off a 'runtests' build
# Configure the nightly testing so that every test lives in the same buildset
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.timed import Periodic,Nightly
build_nightly_xdd=Nightly(name="xdd-nightly1",
branch = "master",
properties={'owner' : ['<EMAIL>']},
builderNames=["xdd-sles11-x86_64", "xdd-osx-10-8"],
hour = 2,
minute = 3)
config['schedulers'].append(build_nightly_xdd)
# Configure each force build seperately so that they live in differing buildsets
from buildbot.schedulers.forcesched import ForceScheduler
# config['schedulers'].append(ForceScheduler(name="xdd-force1", builderNames=["xdd-rhel5-x86_64"]))
# config['schedulers'].append(ForceScheduler(name="xdd-force2", builderNames=["xdd-rhel6-x86_64"]))
# config['schedulers'].append(ForceScheduler(name="xdd-force3", builderNames=["xdd-sles10-x86_64"]))
config['schedulers'].append(ForceScheduler(name="xdd-force4", builderNames=["xdd-sles11-x86_64"]))
config['schedulers'].append(ForceScheduler(name="xdd-force6", builderNames=["xdd-osx-10-8"]))
# config['schedulers'].append(ForceScheduler(name="xdd-force7", builderNames=["xdd-rhel6-ppc64"]))
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
from buildbot.status.mail import MailNotifier
xddMN = MailNotifier(fromaddr="<EMAIL>",
extraRecipients=['<EMAIL>'],
categories='xdd',
buildSetSummary=True,
messageFormatter=xddSummaryMail)
config['status'].append(xddMN)
#
# Generate the BuildSetSummary mail format for XDD's nightly build
# and test information
#
from buildbot.status.builder import Results
from buildbot.status.results import FAILURE, SUCCESS, WARNINGS, Results
import urllib
def xddSummaryMail(mode, name, build, results, master_status):
"""Generate a buildbot mail message and return a tuple of the subject,
message text, and mail type."""
# Construct the mail subject
subject = ""
if results == SUCCESS:
subject = "[Buildbot] SUCCESS -- XDD Acceptance Test -- SUCCESS"
else:
subject = "[Buildbot] FAILURE -- XDD Acceptance Test -- FAILURE"
# Construct the mail body
body = ""
body += "Build Host: %s (%s)\n" % (build.getSlavename(), name)
body += "Build Result: %s\n" % Results[results]
body += "Build Status: %s\n" % master_status.getURLForThing(build)
#body += "Build Logs available at: %s\n" % urllib.quote(master_status.getBuildbotURL(), '/:')
#body += "Flagged Build: %s\n" % build.getSlavename()
if results != SUCCESS:
body += "Failed tests: %s\n" % build.getText()
body += "--\n\n"
return { 'subject' : subject, 'body' : body, 'type' : 'plain' }
| 1.992188 | 2 |
python/f-gradf.py | blazej-bucha/physical-geodesy-lecture-notes | 0 | 12793562 | # Import modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
# Computational domain
xmin = -1.0
xmax = 1.0
xn = 101 # number of sampling points of "f" on the interval "[xmin, xmax]"
ymin = xmin
ymax = xmax
yn = xn # number of sampling points of "f" on the interval "[ymin, ymax]"
xngrad = 10 # every "xngrad"-th sampling point along the "x" axis will be plotted
yngrad = xngrad # every "yngrad"-th sampling point along the "y" axis will be plotted
# Build the grid
x, y = np.meshgrid(np.linspace(xmin, xmax, xn), np.linspace(ymin, ymax, yn))
# Evaluate the function
f = np.sin(2.0 * x) + np.cos(2.0 * y)
# Derivatives of "f" with respect to "x" and "y"
fx = 2.0 * np.cos(2.0 * x)
fy = -2.0 * np.sin(2.0 * y)
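# Analytic check: for f(x, y) = sin(2x) + cos(2y) the gradient is
# grad f = (2*cos(2x), -2*sin(2y)), which is exactly what fx and fy implement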
# Plotting
fig, ax = plt.subplots(figsize=(12.0 / 2.54, 8.0 / 2.54))
im = ax.imshow(f, extent=(xmin, xmax, ymin, ymax), cmap="bwr",
vmin=-np.abs(f).max(), vmax=np.abs(f).max())
ax.quiver( x[::xngrad, ::xngrad], y[::yngrad, ::yngrad],
fx[::xngrad, ::xngrad], fy[::yngrad, ::yngrad])
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_xticks(np.linspace(xmin, xmax, 6))
ax.set_yticks(np.linspace(ymin, ymax, 6))
fig.colorbar(im)
plt.show()
fig.savefig("../latex/fig-f-gradf.pdf")
| 2.390625 | 2 |
GetIcons.py | MrJustPeachy/Font-Awesome-Icon-Scraper | 2 | 12793563 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
# url = 'https://fontawesome.com/cheatsheet/pro'
# req = requests.get(url)
# markup = req.text
# print(markup)
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
browser = webdriver.Chrome()
browser.get("https://fontawesome.com/cheatsheet/pro")
delay = 15 # seconds
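# Optional tweak (not in the original script): to scrape without opening a
# visible browser window, Chrome can be started headless, e.g.:
#   options = webdriver.ChromeOptions()
#   options.add_argument('--headless')
#   browser = webdriver.Chrome(options=options)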
icon_list = '<select>\n\t<option value="">No icon</option>\n'
def make_icon_format_string(font_awesome_icon):
return "\t<option>" + font_awesome_icon + '</option>'
# Please enter blacklist items in the following format
blacklist = ['far fa-reply', 'fal fa-reply', 'fas fa-reply', 'far fa-republican', 'fal fa-republican',
'fas fa-republican', 'fab fa-youtube-square', 'fas fa-angle-up',
'fas fa-hand-middle-finger', 'far fa-hand-middle-finger', 'fal fa-hand-middle-finger',
'fas fa-bong', 'fal fa-bong', 'far fa-bong', 'fas fa-cannabis', 'fal fa-cannabis', 'far fa-cannabis',
'fas fa-mosque', 'far fa-mosque', 'fal fa-mosque', 'fal fa-church', 'far fa-church', 'fas fa-church',
'far fa-clipboard', 'far fa-democrat', 'fas fa-democrat', 'fal fa-democrat']
blacklist = [make_icon_format_string(string) for string in blacklist]
try:
myElem = WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.ID, 'reply')))
soup = BeautifulSoup(browser.page_source, features='html.parser')
solid_icons = soup.find("section", {'id': 'solid'}).find_all('article')
solid_icon_values = ['\t<option>fas fa-' + x.attrs['id'] + '</option>' for x in solid_icons
if '\t<option>fas fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(solid_icon_values)
regular_icons = soup.find("section", {'id': 'regular'}).find_all('article')
regular_icon_values = ['\t<option>far fa-' + x.attrs['id'] + '</option>' for x in regular_icons
if '\t<option>far fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(regular_icon_values)
light_icons = soup.find("section", {'id': 'light'}).find_all('article')
light_icon_values = ['\t<option>fal fa-' + x.attrs['id'] + '</option>' for x in light_icons
if '\t<option>fal fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(light_icon_values)
brand_icons = soup.find("section", {'id': 'brands'}).find_all('article')
brand_icon_values = ['\t<option>fab fa-' + x.attrs['id'] + '</option>' for x in brand_icons
if '\t<option>fab fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(brand_icon_values)
except TimeoutException:
print('timeout exception')
icon_list += '\n</select>'
with open('fa-icons.txt', 'w+') as file:
file.write(icon_list)
| 2.921875 | 3 |
mimic/utils/exceptions.py | Jimmy2027/MoPoE-MIMIC | 1 | 12793564 | class NaNInLatent(Exception):
pass
class CudaOutOfMemory(Exception):
pass
| 1.265625 | 1 |
h1st/core/__init__.py | Shiti/h1st | 0 | 12793565 | <reponame>Shiti/h1st
from .dataclass import NodeInfo, GraphInfo
| 1.101563 | 1 |
ml_api/request_cloud_function.py | r-matsuzaka/mlops-example | 0 | 12793566 | import requests
result = requests.post(
"https://asia-northeast1-mlops-331003.cloudfunctions.net/function-1",
json={"msg": "Hello from cloud functions"},
)
print(result.json())
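# For robustness one might fail fast on HTTP errors before decoding, e.g.:
#   result.raise_for_status()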
| 2.46875 | 2 |
utils/start_server.py | FGAUnB-REQ-GM/2021.2-PousadaAnimal | 0 | 12793567 | <filename>utils/start_server.py
from os import system
# Database
system('python3 manage.py makemigrations users pets hosting services message payment host')
system('python3 manage.py migrate')
# Server
system('python3 manage.py runserver localhost:8000')
| 1.914063 | 2 |
lib/config.py | GraciousGpal/Colony-Server | 1 | 12793568 | <reponame>GraciousGpal/Colony-Server
from configparser import ConfigParser
config = ConfigParser()
config.read('config.ini')
def get_config():
"""
Loads the configuration file config.ini and returns a dictionary with keys and its values.
:return:
"""
sections = config.sections()
config_dict = {}
for key in sections:
config_dict[key] = dict(config[key])
return config_dict
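# Example: a config.ini containing
#   [server]
#   port = 8080
# yields {'server': {'port': '8080'}}; note that ConfigParser returns all
# values as strings.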
| 2.953125 | 3 |
tests/test_parser.py | alisonrclarke/raga-pose-estimation-1 | 1 | 12793569 | <reponame>alisonrclarke/raga-pose-estimation-1
import pandas as pd
from raga_pose_estimation.openpose_json_parser import OpenPoseJsonParser
from raga_pose_estimation.openpose_parts import (
OpenPoseParts,
OpenPosePartGroups,
)
def test_parser():
parser = OpenPoseJsonParser(
"example_files/example_3people/output_json/video_000000000093_keypoints.json"
) # choose one where the people are not already sorted
assert parser.get_person_count() == 3
# Check get_person_keypoints
person_keypoints = parser.get_person_keypoints(1)
assert type(person_keypoints) == pd.DataFrame
assert person_keypoints.shape == (len(OpenPoseParts), 3)
assert list(person_keypoints.columns) == parser.COLUMN_NAMES
# Check getting multiple people
all_keypoints = parser.get_multiple_keypoints([0, 1])
assert type(all_keypoints) == pd.DataFrame
assert all_keypoints.shape == (len(OpenPoseParts), 6)
assert list(all_keypoints.columns) == [
"x0",
"y0",
"confidence0",
"x1",
"y1",
"confidence1",
]
# Check that values in person_keypoints are the same as second set of
# columns in all_keypoints (apart from column names)
all_keypoints_person1 = all_keypoints.iloc[:, 3:6]
all_keypoints_person1.columns = person_keypoints.columns
assert all_keypoints_person1.equals(person_keypoints)
# Check getting only upper parts
upper_keypoints = parser.get_person_keypoints(
1, OpenPosePartGroups.UPPER_BODY_PARTS
)
assert type(upper_keypoints) == pd.DataFrame
assert upper_keypoints.shape == (
len(OpenPosePartGroups.UPPER_BODY_PARTS),
3,
)
assert OpenPoseParts.L_ANKLE not in upper_keypoints.index
# Test person ordering (0 is left-most, 1 is next)
sorted_person_keypoints = parser.sort_persons_by_x_position(all_keypoints)
assert (
sorted_person_keypoints.loc[OpenPoseParts.MID_HIP.value].iloc[0]
< sorted_person_keypoints.loc[OpenPoseParts.MID_HIP.value].iloc[3]
)
# Test passing in a confidence threshold (and make sure it replaces values with the same values)
sorted_person_keypoints2 = parser.get_multiple_keypoints(
[0, 1], None, 0.7, sorted_person_keypoints
)
assert sorted_person_keypoints.equals(sorted_person_keypoints2)
| 2.578125 | 3 |
news_outlet/settings.py | dmahon10/django-tiered-membership-web-app | 0 | 12793570 | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1']
ENVIRONMENT = os.environ.get('ENVIRONMENT', default='development')
DEBUG = int(os.environ.get('DEBUG', default=0))
USE_S3 = int(os.environ.get('USE_S3', default=1))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic', # whitenoise
'django.contrib.staticfiles',
'django.contrib.sites',
# Third party
'crispy_forms',
'allauth',
'allauth.account',
#'storages',
'ckeditor',
'ckeditor_uploader',
'debug_toolbar',
# Local
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
'articles.apps.ArticlesConfig',
'payments.apps.PaymentsConfig',
]
MIDDLEWARE = [
#'django.middleware.cache.UpdateCacheMiddleware', # caching
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware', # whitenoise
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware', # caching
]
ROOT_URLCONF = 'news_outlet.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'news_outlet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Django all-auth
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
# Crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Static files storage
if USE_S3:
# AWS settings
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = 'public-read'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# s3 static settings
AWS_LOCATION = 'static'
STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{AWS_LOCATION}/'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
else:
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# STATICFILES_FINDERS = [
# "django.contrib.staticfiles.finders.FileSystemFinder",
# "django.contrib.staticfiles.finders.AppDirectoriesFinder",
# ]
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# emails
if int(os.environ.get('EMAIL', default=0)):
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_USE_TLS = int(os.environ.get('EMAIL_USE_TLS'))
EMAIL_PORT = int(os.environ.get('EMAIL_PORT'))
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '<EMAIL>'
#production
if ENVIRONMENT == 'production':
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#ckEditor
X_FRAME_OPTIONS = 'SAMEORIGIN'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
'extraPlugins': 'codesnippet',
},
}
# Stripe
STRIPE_TEST_PUBLISHABLE_KEY=os.environ.get('STRIPE_TEST_PUBLISHABLE_KEY')
STRIPE_TEST_SECRET_KEY=os.environ.get('STRIPE_TEST_SECRET_KEY')
# Caching
# CACHE_MIDDLEWARE_ALIAS = 'default'
# CACHE_MIDDLEWARE_SECONDS = 604800
# CACHE_MIDDLEWARE_KEY_PREFIX = ''
# django-debug-toolbar
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [ip[:-1] + "1" for ip in ips]
# Heroku
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
| 1.84375 | 2 |
Misc Learning/HackerRank 30 Days of Code/14 - Scopes.py | hamil168/Learning-Data-Science | 0 | 12793571 | <reponame>hamil168/Learning-Data-Science<gh_stars>0
# -*- coding: utf-8 -*-
"""
Hacker Rank 30 Days of Code 14 - Scope
Created on Sun Jul 22 23:53:27 2018
@author: DRB4
Task:
complete Difference class
- class constructor that takes an array of integers and
saves it to an instance variable named elements
- computeDifference method that finds the maximum absolute different between
any 2 numbers in N and stores it in the maximumDifference instance variable
1 <= N <= 10
1 <= elements[i] <= 100, where 0 <= i <= N - 1
"""
### MY CODE ###
class Difference:
def __init__(self, a):
self.__elements = a
def computeDifference(self):
self.maximumDifference = 0
# Need the absolute difference of every unordered pair of elements.
# Note: the attribute is name-mangled, so inside the class it must be
# accessed as self.__elements (self.elements does not exist)
for i in range(len(self.__elements)):
# start j at i + 1 so each unordered pair is compared exactly once;
# the original range(len(...) - i) skipped pairs such as (2, 3) in a
# 4-element list and could miss the true maximum
for j in range(i + 1, len(self.__elements)):
diff = abs(self.__elements[i] - self.__elements[j])
if diff > self.maximumDifference:
self.maximumDifference = diff
return self.maximumDifference
pass
# End of Difference class
################# HR CODE ##################
_ = input()
a = [int(e) for e in input().split(' ')]
d = Difference(a)
d.computeDifference()
print(d.maximumDifference)
# TEST CASES
# input [1 2 5] output: 4 SUCCESS
# input [8 19 3 2 7] output: 17 SUCCESS
| 3.859375 | 4 |
dsatools/operators/_ecdf.py | diarmaidocualain/dsatools | 31 | 12793572 | import numpy as np
import scipy
from ._hist import take_bins, hist  # cdf_by_hist() below calls hist(); assumed to be defined in ._hist alongside take_bins
__all__ = ['ecdf']
__EPSILON__ = 1e-8
#--------------------------------------------------------------------
def ecdf(x,y=None):
'''
Empirical Cumulative Density Function (ECDF).
Parameters
-----------
* x,y: 1d ndarrays,
if y is None, than ecdf only by x will be taken.
Returns
--------
* if y is not None -> (bins,out_x, out_y);
* if y is None -> (bins,out_x).
Notes
-------
* Based on scipy implementation.
* If y is not None, ECDF will be constructed on the joint x and y.
* If y is None, only bins and cdf(x) (2 argument) will be returned.
* ECDF is calculated as:
bins = sort(concatenate(x,y)),
cdf_x = (number of elements of sort(x) <= each bin)/size(x),
cdf_y = (number of elements of sort(y) <= each bin)/size(y),
where:
* bins - bins for cdfs (if y is not None, joint bins).
'''
x = np.array(x)
x = np.sort(x)
ret2 =True
if (y is not None):
y = np.array(y)
y = np.sort(y)
else:
ret2 = False
y=np.array([])
bins = np.concatenate((x,y))
bins=np.sort(bins)
x_cdf = np.searchsorted(x,bins, 'right')
y_cdf = np.searchsorted(y,bins, 'right')
x_cdf = (x_cdf) / x.shape[0]
y_cdf = (y_cdf) / y.shape[0]
out = (bins,x_cdf)
if (ret2):
out= (bins,x_cdf,y_cdf)
return out
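# Illustrative usage: both ECDFs share one bin grid, so e.g. a
# Kolmogorov-Smirnov-style statistic is simply
#   bins, Fx, Fy = ecdf(x, y); D = np.max(np.abs(Fx - Fy))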
#--------------------------------------------------------------------
def hist2cdf(hist_x, normalize = True):
'''
The cumulative density function made by histogram.
Parameters:
* hist_x 1d histogram (ndarray).
Returns:
* cfd(hist_x) (Cumulative Density Function).
'''
hist_x = np.asarray(hist_x)
out = np.cumsum(hist_x)
if(normalize):
out /=np.max(out)
# TODO: out /=x.size # more simple!
return out
#--------------------------------------------------------------------
def cdf_by_hist(x,y=None,n_bins = None, bins = None, take_mean=False):
'''
Cumulative density function constructed by histogram.
Parameters:
* x,y: 1d ndarrays;
* n_bins: required number of uniformly distributed bins,
* work only if bins is None.
* bins: grid of prepared bins (can be non-uniform)
* take_mean: subtract the mean if True.
Returns:
* y is not None -> (out_x, out_y,bins)
* y is None -> (out_x,bins)
Notes:
* If bins is None and n_bins is None:
bins = np.sort(np.concatenate((x,y))).
This case gives the same result as ecdf!
* If bins is None and n_bins <= 0: n_bins = x.shape[0];
the case of a uniform bin grid (differs from ECDF).
* For tests: modes n_bins = 't10' and n_bins = 't5'
give uniform bins of size x.shape[0]//10 and x.shape[0]//5 respectively.
'''
#FIXME: the results are slightly different from ecdf
# TODO: the case xy is the same as for ecdf, but uniform bins may be more valid (see tests)
if(bins is None and n_bins is None):
bins = take_bins(x,y, n_bins='xy')
elif(n_bins == 't10' and bins is None):
bins = take_bins(x,y, n_bins=x.shape[0]//10)
elif(n_bins == 't5' and bins is None):
bins = take_bins(x,y, n_bins=x.shape[0]//5)
if(y is None):
bins, out_x = hist(x,y=None,n_bins = n_bins, bins = bins, take_mean=take_mean)
out_x = hist2cdf(out_x, normalize = True)
out = (bins, out_x )
else:
bins, out_x, out_y = hist(x,y=y,n_bins = n_bins, bins = bins, take_mean=take_mean)
out_x = hist2cdf(out_x, normalize = True)
out_y = hist2cdf(out_y, normalize = True)
out = (bins,out_x, out_y)
return out
| 2.921875 | 3 |
nevernoip/P1422.py | GalvinGao/2019-ProgrammingCourse | 0 | 12793573 |
def main(x):
if x <= 150:
return x * .4463
elif x <= 400:
return (x - 150) * .4663 + 150 * .4463
else:
return 250 * .4663 + 150 * .4463 + (x - 400) * .5663
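# Tiered tariff implemented above: the first 150 kWh cost 0.4463 each,
# kWh 151-400 cost 0.4663 each, and every kWh above 400 costs 0.5663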
print("{0:.2f}".format(main(int(input()))))
| 3.421875 | 3 |
Directory-observer/test/__init__.py | hiroki8080/MyPythonLibrary | 0 | 12793574 | <filename>Directory-observer/test/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import test_engine
__author__ = 'Shishou'
| 0.945313 | 1 |
server/apps/devicelocation/tests/test_device_location.py | iotile/iotile_cloud | 0 | 12793575 | import datetime
import json
import dateutil.parser
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from apps.physicaldevice.models import Device
from apps.streamfilter.models import *
from apps.utils.gid.convert import *
from apps.utils.test_util import TestMixin
from ..models import *
user_model = get_user_model()
class DeviceLocationTestCase(TestMixin, TestCase):
def setUp(self):
self.usersTestSetup()
self.orgTestSetup()
self.deviceTemplateTestSetup()
self.pd1 = Device.objects.create_device(project=self.p1, label='d1', template=self.dt1, created_by=self.u2)
self.pd2 = Device.objects.create_device(project=self.p2, label='d2', template=self.dt1, created_by=self.u3)
def tearDown(self):
DeviceLocation.objects.all().delete()
Device.objects.all().delete()
self.deviceTemplateTestTearDown()
self.orgTestTearDown()
self.userTestTearDown()
def testLocation(self):
location = DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=self.pd1.slug,
user=self.u2
)
self.assertIsNotNone(location)
self.assertEqual(location.target.id, self.pd1.id)
def testMemberPermissions(self):
"""
Test that people with no permissions cannot access
"""
map_url = reverse('devicelocation:map', kwargs={'slug': self.pd1.slug})
self.client.login(email='<EMAIL>', password='<PASSWORD>')
membership = self.p1.org.register_user(self.u3, role='m1')
membership.permissions['can_read_device_locations'] = False
membership.save()
resp = self.client.get(map_url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
membership.permissions['can_read_device_locations'] = True
membership.permissions['can_access_classic'] = False
membership.save()
resp = self.client.get(map_url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
self.client.logout()
| 2.203125 | 2 |
accounts/utils.py | shunnyjang/SM-ChooIT-DRF | 0 | 12793576 | <filename>accounts/utils.py
from random import choice
from accounts.models import Nickname, NicknameArchive
def get_nickname():
nickname = ""
count = 1
adj_list = Nickname.objects.filter(part='a').values_list('content')
adj = choice(adj_list)[0]
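# (values_list() without flat=True yields 1-tuples, hence the [0];
# values_list('content', flat=True) would return plain strings)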
noun_list = Nickname.objects.filter(part='n').values_list('content', 'emoji')
noun = choice(noun_list)
emoji = noun[1]
nickname = adj + noun[0]
try:
archive = NicknameArchive.objects.get(nickname=nickname)
count = archive.count
archive.count += 1
archive.save()
except NicknameArchive.DoesNotExist:
NicknameArchive.objects.create(nickname=nickname)
return emoji, nickname+str(count)
| 2.421875 | 2 |
_setup/management/commands/setup.py | marcoEDU/HackerspaceWebsiteTemplate | 9 | 12793577 | <reponame>marcoEDU/HackerspaceWebsiteTemplate<filename>_setup/management/commands/setup.py<gh_stars>1-10
from django.core.management.base import BaseCommand
from _setup.models import Setup
class Command(BaseCommand):
help = "start the setup"
def handle(self, *args, **options):
Setup()._menu()
| 1.617188 | 2 |
python/frost_rcmrd.py | vightel/FloodMapsWorkshop | 24 | 12793578 | #!/usr/bin/env python
#
# From <NAME>, <EMAIL>
# RCMRD Nairobi, Kenya
# Minor tweaks for MacOSX Pat Cappelaere - Vightel Corporation
#
# Here is the link where you can get the original hdfs and the resulting tif files
# http://172.16.17.32/frostmaps/
# http://172.16.17.32/frostmaps/
import time
import datetime
import glob,os, fnmatch
#import arcpy  # required by _getFrostFiles() and _mapping() below; left disabled in this MacOSX copy
#import smtplib
#from email.MIMEMultipart import MIMEMultipart
#from email.MIMEBase import MIMEBase
#from email.MIMEText import MIMEText
#from email.Utils import COMMASPACE, formatdate
#from email import Encoders
#import shutil
import config
one_day = datetime.timedelta(days=1)
#_today = datetime.date.today()- one_day
# PGC Debug
_today = datetime.date(2014,10,2)
_month = _today.month
_day = _today.day
_year = str(_today.year)
_yrDay = str(_today.timetuple()[7])
if len(_yrDay)==1:
_yrDay = "00" + _yrDay
elif len(_yrDay)==2:
_yrDay = "0" + _yrDay
else:
_yrDay=_yrDay
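# (equivalent one-liner: _yrDay = _yrDay.zfill(3) pads day-of-year to 3 digits)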
BASE_DIR = config.FROST_DIR
outPtDir = os.path.join(BASE_DIR, _year, _yrDay, 'output')
if not os.path.exists(outPtDir):
os.makedirs(outPtDir)
srcPath = os.path.join(BASE_DIR, _year)
if not os.path.exists(srcPath):
os.makedirs(srcPath)
resources = os.path.join(BASE_DIR, 'resources')
templateMXD = os.path.join(resources, 'Frost2.mxd') #"H:\\Frost\\_resources\\Frost2.mxd"
targetMXD = os.path.join(resources, 'Frost3.mxd') #"H:\\Frost\\_resources\\Frost3.mxd"
symbologyLayerFile = os.path.join(resources, 'LST2.lyr') #"H:\\Frost\\_resources\\LST2.lyr"
frostMapTitle = "Estimated Frost Occurrences on " + str(_today + one_day)
#ouputMapFileName = "H:\\Frost\\_workingDir\\maps\\Frost_" + str(_today + one_day)
ouputMapFileName = os.path.join(BASE_DIR, _year, _yrDay, "Frost_" + str(_today + one_day))
print (_today)
#......................................................................................................................................................................
def send_mail(send_from, send_to, subject, text, files=[], server="192.168.0.243"):
assert type(send_to)==list
assert type(files)==list
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for f in files:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
smtp = smtplib.SMTP(server)
smtp.set_debuglevel(1)
smtp.ehlo()
smtp.starttls()
#smtp.ehlo()
smtp.login('servir', 'servir2013')
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
#..............................................................................................................................
def _getFrostFiles(tifPath):
frostFiles =[]
try:
dirList=os.listdir(tifPath)
for fname in dirList:
if fnmatch.fnmatch(fname, '*.tif'):
#Process: Build Pyramids And Statistics for the TIF file
arcpy.BuildPyramidsandStatistics_management(srcPath + _yrDay + "\\output\\" + fname, "INCLUDE_SUBDIRECTORIES", "BUILD_PYRAMIDS", "CALCULATE_STATISTICS", "NONE")
#Process: Get Raster Properties and determine the maxmum cell value
#maxCellValue = arcpy.GetRasterProperties_management(srcPath + "\\" + fname, "MAXIMUM")
rst = arcpy.Raster(srcPath + _yrDay + "\\output\\" + fname)
maxCellValue = rst.maximum
if str(maxCellValue) == "0.0":
print str(maxCellValue) + "T"
else:
print str(maxCellValue) + "F"
frostFiles.append(fname)
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
return frostFiles
#print _getFrostFiles(srcPath)[0]
#.....................................................................................................................................................................
def _mapping(tmp_mxdPath, symbologyLayer, target_mxdPath, MapTitle, outPutFileName):
try:
mxd = arcpy.mapping.MapDocument(tmp_mxdPath) #("D:\\Modis_LST\\Frost\\Frost2.mxd")
df = arcpy.mapping.ListDataFrames(mxd, "Layers")[0]
#Add frost layers to the map document
print "Adding frost layers"
for tifFile in _getFrostFiles(srcPath + _yrDay + "\\output\\" ):
print tifFile
result = arcpy.MakeRasterLayer_management(srcPath + _yrDay + "\\output\\" + tifFile, tifFile + ".lyr")
print result.getOutput(0)
addLayer = result.getOutput(0)
#addLayer = arcpy.mapping.Layer(srcPath +"\\" + tifFile)
arcpy.mapping.AddLayer(df, addLayer, "BOTTOM")
#Apply Frost symbology to the layers
print "Applying symbology"
lryIndx = 0
for lyr in arcpy.mapping.ListLayers(mxd, "", df):
if lryIndx > 1:
arcpy.ApplySymbologyFromLayer_management(lyr,symbologyLayer)
lryIndx=lryIndx+1
#Add new Map title
print "Titling map"
for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
if elm.name == "map":
elm.text=MapTitle
print elm.text
if elm.name == "day":
elm.text="Map Reference no :- " + _yrDay
print elm.text
mxd.saveACopy(target_mxdPath) #("D:\\Modis_LST\\Frost\\Frost3.mxd")
del mxd
#Exprot to pdf and JPG
print "Exporting maps"
mappingMxd = arcpy.mapping.MapDocument(target_mxdPath)
arcpy.mapping.ExportToPDF(mappingMxd, outPutFileName + ".pdf")
arcpy.mapping.ExportToJPEG(mappingMxd, outPutFileName + ".jpg")
#Email the maps
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
#.......................................................................................................................................................................
def _getLSTFile(_time):
global _yrDay, _year
lstfname='MYD11_L2.A'
try:
if len(_yrDay) == 2:
_yrDay = "0" + _yrDay
print _yrDay
lstfname= os.path.join(_yrDay, "lst", lstfname +_year + _yrDay + "." + _time +".005.NRT.hdf")
print lstfname
except IOError as e:
print e
return lstfname
#.......................................................................................................................................................................
def _getGeolocationFile(_time):
global _yrDay, _year
lstfname='MYD03.A'
try:
if len(_yrDay) == 2:
_yrDay = "0" + _yrDay
print _yrDay
lstfname= os.path.join(_yrDay, "geo", lstfname +_year + _yrDay + "."+ _time +".005.NRT.hdf")
print lstfname
except IOError as e:
print e
return lstfname
#.......................................................................................................................................................................
def _getOutputFile(_time):
global _yrDay, _year
lstfname='Frost_'
try:
if len(_yrDay) == 2:
_yrDay = "0" + _yrDay
print _yrDay
lstfname= os.path.join(_yrDay, "output", lstfname +_year + _yrDay + "."+ _time +".tif")
print lstfname
except IOError as e:
print e
return lstfname
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
def _mrtSwath2Gird( inPutLST, OutPuTIF, inPutGeoloc):
try:
#cmd1='swath2grid -if=D:\\Modis_LST\\2014\\027\\lst\\MYD11_L2.A2013027.0030.005.NRT.hdf -of=D:\\Modis_LST\\2014\\027\\output\\output1.tif -gf=D:\\Modis_LST\\2014\\027\\geo\\MYD03.A2013027.0030.005.NRT.hdf -off=GEOTIFF_FMT -sds=LST -kk=NN -oproj=GEO -oprm="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" -oul="33.0 5.5" -olr="42.0 -5.5" -osst=LAT_LONG -osp=8'
#cmd='swath2grid -if='+ inPutLST + ' -of='+OutPuTIF+' -gf='+inPutGeoloc+' -off=GEOTIFF_FMT -sds=LST -kk=NN -oproj=GEO -oprm="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" -oul="33.0 5.5" -olr="42.0 -5.5" -osst=LAT_LONG -osp=8'
cmd='swath2grid -if='+ inPutLST + ' -of='+OutPuTIF+' -gf='+inPutGeoloc+' -off=GEOTIFF_FMT -sds=LST -kk=NN -oproj=GEO -oprm="0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0" -oul="14.5 15.5" -olr="51.5 -13.5" -osst=LAT_LONG -osp=8'
os.system(cmd)
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def _theMain(theTime):
try:
lstDir = srcPath
_lstFname = _getLSTFile(theTime)
_geoLocFname = _getGeolocationFile(theTime)
_outPuttif = _getOutputFile(theTime)
inLst = os.path.join(lstDir, _lstFname) #'D:\\Modis_LST\\2013\\027\\lst\\MYD11_L2.A2013027.0030.005.NRT.hdf'
outTif = os.path.join(lstDir, _outPuttif) #'D:\\Modis_LST\\2013\\027\\output\\output1.tif'
inGeoloc = os.path.join(lstDir, _geoLocFname) #'D:\\Modis_LST\\2013\\027\\geo\\MYD03.A2013027.0030.005.NRT.hdf'
if ( not os.path.isfile(inLst)) or ( not os.path.isfile(inGeoloc)):
print("Error: %s file not found" % inLst )
print("Or Error: %s file not found" % inGeoloc)
else:
_mrtSwath2Gird(inLst, outTif, inGeoloc)
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
_hr=0
while _hr < 24:
_min=0
hrStr=str(_hr)
if len(str(_hr)) == 1:
hrStr = "0" + str(_hr)
while _min < 60:
if len(str(_min)) == 1:
minStr = "0" + str(_min)
else:
minStr=str(_min)
_thhr = hrStr + minStr
_theMain(_thhr)
#print _thhr
_min=_min+5
_hr = _hr+1
#_mapping(templateMXD, symbologyLayerFile, targetMXD, frostMapTitle, ouputMapFileName)
#Send frost products to users
#filesToAttch = [ouputMapFileName +".pdf", ouputMapFileName +".jpg"]
#recp = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
#recp = ["<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>" ]
#recp2 = ["<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>"]
#send_mail(send_from, send_to, subject, text, files=[], server="192.168.0.243"):
#send_mail("<EMAIL>", recp, "Frost Map for " + str(_today + one_day), "Please find the attached Frost map for " + str(_today + one_day) + ". You can also find the same map on http://172.16.17.32/frostmaps/ This email was automatically send by Frost Monitoring System." , filesToAttch, "192.168.0.243:25")
#send_mail("<EMAIL>", recp2, "Frost Map for " + str(_today + one_day), "Please find the attached Frost map for " + str(_today + one_day) + ". You can also find the same map on http://172.16.17.32/frostmaps/ This email was automatically send by Frost Monitoring System." , filesToAttch, "192.168.0.243:25")
| 2.140625 | 2 |
Receive.py | jackSN8/Jack_Hydrogen_line_software | 0 | 12793579 | import operator
import math
import numpy as np
from rtlsdr import RtlSdr
import matplotlib.pyplot as plt
# Available sample rates
'''
3200000Hz
2800000Hz
2560000Hz
2400000Hz
2048000Hz
1920000Hz
1800000Hz
1400000Hz
1024000Hz
900001Hz
250000Hz
'''
# Receiver class. This needs receiving parameters and will receive data from the SDR
class Receiver:
def __init__(self, sample_rate, ppm, resolution, num_FFT, num_med):
self.sdr = RtlSdr()
# configure SDR
self.sdr.sample_rate = sample_rate
self.sdr.center_freq = 1420405000
# For some reason the SDR doesn't want to set the offset PPM to 0 so we avoid that
if ppm != 0:
self.sdr.freq_correction = ppm
self.sdr.gain = 'auto'
self.resolution = 2**resolution
self.num_FFT = num_FFT
self.num_med = num_med
    # Reads data from the SDR, processes it and returns (freqs, SNR_median)
def receive(self):
print(f'Receiving {self.num_FFT} bins of {self.resolution} samples each...')
data_PSD = self.sample()
# Observed frequency range
start_freq = self.sdr.center_freq - self.sdr.sample_rate/2
stop_freq = self.sdr.center_freq + self.sdr.sample_rate/2
freqs = np.linspace(start = start_freq, stop = stop_freq, num = self.resolution)
        # Samples a blank spectrum (with the SDR retuned 3 MHz away) to calibrate the spectrum with.
self.sdr.center_freq = self.sdr.center_freq + 3000000
blank_PSD = self.sample()
SNR_spectrum = self.estimate_SNR(data = data_PSD, blank = blank_PSD)
SNR_median = self.median(SNR_spectrum) if self.num_med != 0 else SNR_spectrum
# Close the SDR
self.sdr.close()
return freqs, SNR_median
    # Returns a tuple of PSD values averaged over "num_FFT" FFT frames
def sample(self):
counter = 0.0
PSD_summed = (0, )* self.resolution
while (counter < self.num_FFT):
samples = self.sdr.read_samples(self.resolution)
# Applies window to samples in time domain before performing FFT
window = np.hanning(self.resolution)
windowed_samples = samples * window
# Perform FFT and PSD-analysis
PSD = np.abs(np.fft.fft(windowed_samples)/self.sdr.sample_rate)**2
PSD_checked = self.check_for_zero(PSD)
PSD_log = 10*np.log10(PSD_checked)
PSD_summed = tuple(map(operator.add, PSD_summed, np.fft.fftshift(PSD_log)))
counter += 1.0
averaged_PSD = tuple(sample/counter for sample in PSD_summed)
return averaged_PSD
    # Estimates the SNR spectrum by subtracting the frequency-switched blank spectrum
def estimate_SNR(self, data, blank):
SNR = np.array(data)-np.array(blank)
        # Crude noise-floor estimate: mean of the first 10 bins
noise_floor = sum(SNR[0:10])/10
shifted_SNR = SNR-noise_floor
return shifted_SNR
    # Smoothing filter for RFI removal (note: despite its name, this applies a running mean over num_med bins)
def median(self, data):
for i in range(len(data)):
data[i] = np.mean(data[i:i+self.num_med])
return data
    # Checks if a sample was dropped (reads as 0.0) and replaces it with the
    # average of its two neighbours
    def check_for_zero(self, PSD):
        try:
            index = list(PSD).index(0.0)
            print('Dropped sample was recovered!')
            PSD[index] = (PSD[index+1]+PSD[index-1])/2
            return PSD
        except ValueError:
            return PSD
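
# Minimal usage sketch (not part of the original module; the sample rate,
# ppm offset, resolution exponent and averaging counts below are
# illustrative only, and running this requires an attached RTL-SDR):
if __name__ == '__main__':
    receiver = Receiver(sample_rate=2400000, ppm=0, resolution=10,
                        num_FFT=100, num_med=10)
    freqs, snr = receiver.receive()
    plt.plot(freqs, snr)
    plt.xlabel('Frequency (Hz)')
    plt.ylabel('Relative SNR')
    plt.show()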
| 3.015625 | 3 |
experiments/3.py | seyfullah/stockprediction | 0 | 12793580 | from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.learning import PostPre
source_layer = Input(n=100, traces=True)
target_layer = LIFNodes(n=1000, traces=True)
connection = Connection(
source=source_layer,
target=target_layer,
update_rule=PostPre,
nu=(1e-4, 1e-2))
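
# A minimal sketch (an assumption, not part of the original snippet) of
# wiring these pieces into a runnable BindsNET network; the layer names
# "X" and "Y" are illustrative.
from bindsnet.network import Network

network = Network()
network.add_layer(source_layer, name="X")
network.add_layer(target_layer, name="Y")
network.add_connection(connection, source="X", target="Y")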
| 2.046875 | 2 |
ext/candc/src/api/nlp/__init__.py | TeamSPoon/logicmoo_nlu | 6 | 12793581 | # C&C NLP tools
# Copyright (c) Universities of Edinburgh, Oxford and Sydney
# Copyright (c) <NAME>
#
# This software is covered by a non-commercial use licence.
# See LICENCE.txt for the full text of the licence.
#
# If LICENCE.txt is not included in this distribution
# please email <EMAIL> to obtain a copy.
from base import *
import config
import io
import model
import tagger
import ccg
def load(super, parser, load_model = True):
int_cfg = ccg.IntegrationConfig()
super_cfg = tagger.SuperConfig()
super_cfg.path.value = super
parser_cfg = ccg.ParserConfig()
parser_cfg.path.value = parser
return ccg.Integration(int_cfg, super_cfg, parser_cfg, Sentence())
def read(sent, s):
tokens = [tuple(x.split('|')) for x in s.split()]
sent.words = [t[0] for t in tokens]
sent.pos = [t[1] for t in tokens]
sent.msuper = [[t[2]] for t in tokens]
| 2.375 | 2 |
blog/models.py | minielectron/portfolio | 0 | 12793582 | from django.db import models
# Create your models here.
class Blog(models.Model):
"""
    Represents a blog post shown on the blog page.
"""
title = models.CharField(max_length=50)
description = models.CharField(max_length=150)
date = models.DateField()
def __str__(self):
return self.title | 2.703125 | 3 |
simple_client.py | stefmarais/haas-ngc-simulator | 0 | 12793583 | import telnetlib
import time
tn = telnetlib.Telnet('192.168.137.226', 5051)
#tn.write(b"Client")
time.sleep(1)
for i in range(5):
print("Now writing ?Q102")
tn.write(b"?Q102\n")
status = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status}")
print("Now writing ?Q104")
tn.write(b"?Q104\n")
status2 = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status2}")
print("Now writing ?Q200")
tn.write(b"?Q200\n")
status2 = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status2}")
print("Now writing ?Q500")
tn.write(b"?Q500\n")
status2 = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status2}")
tn.close()
| 2.84375 | 3 |
src/main.py | SantaSpeen/CLI-in-Python | 3 | 12793584 | <filename>src/main.py
import getpass
import logging
import os
import platform
from console import Console, ConsoleIO
# Init modules
cli = Console(prompt_in=">",
prompt_out="]:",
not_found="Command \"%s\" not found in alias.",
file=ConsoleIO,
debug=False)
logging.basicConfig(level=logging.NOTSET, format="%(asctime)s - %(name)-5s - %(levelname)-7s - %(message)s")
def cli_print():
""" How can I write text to the console? Read below! """
cli.log("cli.og")
cli.write("cli.write")
print(end="\n\n\n")
def logger_preview():
""" I use logging and want its output to be in the console! """
cli.logger_hook()
# All calls below will be implemented via Console
logging.debug("Debug log")
logging.warning('Warning log')
logging.error("Error log")
logging.info("Info log")
print(end="\n\n\n")
def builtins_preview():
""" I want print to be output like cli.log """
# Output below without hook
print("No builtins_hook here")
cli.builtins_hook()
# Output below from the hook
# After hook cli = console
print("builtins_hook here")
console.write("console.write")
console.log("console.log")
console['[] log']
console << "<< log"
ConsoleIO.write("\n\n") # Or console.get_IO.write("\n\n")
def cli_echo(argv: list):
""" Help message here """
message = f"argv: {argv}"
return message
def cli_error():
""" Print error message """
raise Exception("Test error message")
def cli_exit():
""" Kill process """
pid = os.getpid()
print(f"\r$ kill {pid}")
os.system(f"kill {pid}")
def cli_uname():
""" Print uname information """
uname = platform.uname()
user = getpass.getuser()
return f"{user}@{uname.node} -> {uname.system} {uname.release} ({uname.version})"
def cli_mode():
ConsoleIO.write("\rtype help\n")
cli.add("echo", cli_echo, argv=True)
cli.add("error", cli_error)
cli.add("exit", cli_exit)
cli.add("uname", cli_uname)
cli.run()
# Or you may use
# cli.run_while(lambda: <some code>)
if __name__ == '__main__':
cli_print()
logger_preview()
builtins_preview()
cli_mode()
| 3.78125 | 4 |
src/pypiserver_testing/_version.py | pypiserver/pypiserver-testing-common | 0 | 12793585 | """Define version constants."""
import re
__version__ = '1.0.0'
__version_info__ = tuple(re.split('[.-]', __version__))
| 1.96875 | 2 |
tests/__init__.py | mportesdev/handpick | 0 | 12793586 | <filename>tests/__init__.py
def is_even(n):
return n % 2 == 0
def is_positive(n):
return n > 0
# basic sequences (tuple, list, str, bytes, bytearray)
SEQUENCES = (
[
[
"hand",
],
b"pick",
(
42,
b"hand",
),
],
(
"3.14",
(1.414,),
[
"15",
bytearray(b"pick"),
],
),
)
# similar to above, modified to contain dictionaries
SEQS_DICTS = (
[
[
"hand",
],
b"pick",
{
42: b"hand",
},
],
(
"3.14",
(1.414,),
{
("15",): bytearray(b"pick"),
},
),
)
# similar to above, modified to contain set and frozenset
COLLECTIONS = (
[
{
"hand",
},
b"pick",
{
42: b"hand",
},
],
(
"3.14",
(frozenset({1.414}),),
{
("15",): bytearray(b"pick"),
},
),
)
| 3.234375 | 3 |
solr-admin-app/config.py | sumesh-aot/namex | 4 | 12793587 | <filename>solr-admin-app/config.py
import os
import dotenv
dotenv.load_dotenv(dotenv.find_dotenv(), override=True)
CONFIGURATION = {
'development': 'config.DevConfig',
'testing': 'config.TestConfig',
'production': 'config.Config',
'default': 'config.Config'
}
class Config(object):
SECRET_KEY = 'My Secret'
# Normal Keycloak parameters.
OIDC_CLIENT_SECRETS = os.getenv('SOLR_ADMIN_APP_OIDC_CLIENT_SECRETS', 'solr-admin-app/keycloak_client_secrets/secrets.json')
OIDC_SCOPES = ['openid', 'email', 'profile']
OIDC_VALID_ISSUERS = [os.getenv('SOLR_ADMIN_APP_OIDC_VALID_ISSUERS', 'http://localhost:8081/auth/realms/master')]
OVERWRITE_REDIRECT_URI = os.getenv('SOLR_ADMIN_APP_OVERWRITE_REDIRECT_URI', '')
print("OIDC" + OIDC_CLIENT_SECRETS)
# Undocumented Keycloak parameter: allows sending cookies without the secure flag, which we need for the local
# non-TLS HTTP server. Set this to non-"True" for local development, and use the default everywhere else.
OIDC_ID_TOKEN_COOKIE_SECURE = os.getenv('SOLR_ADMIN_APP_OIDC_ID_TOKEN_COOKIE_SECURE', 'True') == 'True'
# Turn this off to get rid of warning messages. In future versions of SQLAlchemy, False will be the default and
# this can be removed.
SQLALCHEMY_TRACK_MODIFICATIONS = False
# PostgreSQL Connection information.
DATABASE_USER = os.getenv('NAMES_ADMIN_DATABASE_USERNAME', '')
DATABASE_PASSWORD = os.getenv('NAMES_ADMIN_DATABASE_PASSWORD', '')
DATABASE_HOST = os.getenv('NAMES_ADMIN_DATABASE_HOST', '')
DATABASE_PORT = os.getenv('NAMES_ADMIN_DATABASE_PORT', '5432')
DATABASE_NAME = os.getenv('NAMES_ADMIN_DATABASE_NAME', '')
SQLALCHEMY_DATABASE_URI = 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=DATABASE_USER,
password=<PASSWORD>,
host=DATABASE_HOST,
port=int(DATABASE_PORT),
name=DATABASE_NAME)
SYNONYMS_DATABASE_USER = os.getenv('NAMES_ADMIN_SYNONYMS_DATABASE_USERNAME', '')
SYNONYMS_DATABASE_PASSWORD = os.getenv('NAMES_ADMIN_SYNONYMS_DATABASE_PASSWORD', '')
SYNONYMS_DATABASE_HOST = os.getenv('NAMES_ADMIN_SYNONYMS_DATABASE_HOST', '')
SYNONYMS_DATABASE_PORT = os.getenv('NAMES_ADMIN_SYNONYMS_DATABASE_PORT', '5432')
SYNONYMS_DATABASE_NAME = os.getenv('NAMES_ADMIN_SYNONYMS_DATABASE_NAME', 'synonyms')
SQLALCHEMY_BINDS = {
'synonyms': 'postgresql://{user}:{password}@{host}:{port}/{name}'.format(
user=SYNONYMS_DATABASE_USER,
password=<PASSWORD>,
host=SYNONYMS_DATABASE_HOST,
port=int(SYNONYMS_DATABASE_PORT),
name=SYNONYMS_DATABASE_NAME)
}
DEBUG = False
TESTING = False
class DevConfig(Config):
DEBUG = True
TESTING = True
# SQLALCHEMY_ECHO = True
class TestConfig(Config):
DEBUG = True
TESTING = True
| 2.1875 | 2 |
python/testData/inspections/PyDictDuplicateKeysInspection/test.py | teddywest32/intellij-community | 2 | 12793588 | <filename>python/testData/inspections/PyDictDuplicateKeysInspection/test.py<gh_stars>1-10
dict = {<warning descr="Dictionary contains duplicate keys key_1">key_1</warning> : 1, key_2: 2, <warning descr="Dictionary contains duplicate keys key_1">key_1</warning> : 3}
dict = {'key_1' : 1, <warning descr="Dictionary contains duplicate keys 'key_2'">'key_2'</warning>: 2, <warning descr="Dictionary contains duplicate keys 'key_2'">'key_2'</warning> : 3}
a = {}
{'key_1' : 1, 'key_2': 2}
import random
def foo():
return random.random()
{foo(): 1, foo():2}
# PY-2511
dict = dict([(<warning descr="Dictionary contains duplicate keys key">'key'</warning>, 666), (<warning descr="Dictionary contains duplicate keys key">'key'</warning>, 123)])
dict = dict(((<warning descr="Dictionary contains duplicate keys key">'key'</warning>, 666), (<warning descr="Dictionary contains duplicate keys key">'key'</warning>, 123)))
dict = dict(((<warning descr="Dictionary contains duplicate keys key">'key'</warning>, 666), ('k', 123)), <warning descr="Dictionary contains duplicate keys key">key</warning>=4)
dict([('key', 666), ('ky', 123)])
| 2.875 | 3 |
tests/test_node.py | jherland/browson | 0 | 12793589 | <filename>tests/test_node.py<gh_stars>0
import textwrap
from browson.node import Node
class TestNode_build:
def verify_scalar(self, n, expect_kind, expect_value, expect_name=""):
assert n.name == expect_name
assert n.kind is expect_kind
assert n.value == expect_value
assert n.is_leaf
def verify_collection(self, n, expect_kind, expect_value, expect_name=""):
assert n.name == expect_name
assert n.kind is expect_kind
assert n.value == expect_value
assert n.kind in {list, tuple, set, dict}
assert not n.is_leaf
assert len(n.children) == len(expect_value)
if n.kind is dict:
expect_children = [
Node(f"{expect_name}.{k}", type(v), v, parent=n, key=k)
for k, v in expect_value.items()
]
else:
expect_children = [
Node(f"{expect_name}[{i}]", type(c), c, parent=n)
for i, c in enumerate(expect_value)
]
assert n.children == expect_children
# singletons
def test_None(self):
n = Node.build(None)
self.verify_scalar(n, type(None), None)
def test_True(self):
n = Node.build(True)
self.verify_scalar(n, bool, True)
def test_False(self):
n = Node.build(False)
self.verify_scalar(n, bool, False)
# numbers
def test_zero(self):
n = Node.build(0)
self.verify_scalar(n, int, 0)
def test_positive_int(self):
n = Node.build(1234)
self.verify_scalar(n, int, 1234)
def test_negative_int(self):
n = Node.build(-5678)
self.verify_scalar(n, int, -5678)
def test_float_zero(self):
n = Node.build(0.0)
self.verify_scalar(n, float, 0.0)
def test_float_nonzero(self):
n = Node.build(1.234)
self.verify_scalar(n, float, 1.234)
def test_float_negative_inf(self):
n = Node.build(float("-inf"))
self.verify_scalar(n, float, float("-inf"))
def test_float_nan(self):
n = Node.build(float("nan"))
# NaN cannot be compared to itself
assert n.name == ""
assert n.kind is float
assert str(n.value) == "nan"
assert n.is_leaf
# strings
def test_empty_string(self):
n = Node.build("")
self.verify_scalar(n, str, "")
def test_short_string(self):
n = Node.build("foo")
self.verify_scalar(n, str, "foo")
# lists
def test_list_empty(self):
n = Node.build([])
self.verify_collection(n, list, [])
def test_list_single_item(self):
n = Node.build([123])
self.verify_collection(n, list, [123])
def test_list_of_singletons(self):
n = Node.build([None, True, False])
self.verify_collection(n, list, [None, True, False])
def test_list_of_ints(self):
n = Node.build([123, -456, 789])
self.verify_collection(n, list, [123, -456, 789])
# nested lists
def test_list_of_empty_list(self):
n = Node.build([[]])
assert n.name == ""
assert n.kind is list
assert n.value == [[]]
assert not n.is_leaf
assert len(n.children) == 1
n2 = n.children[0]
assert n2.name == "[0]"
assert n2.kind is list
assert n2.value == []
assert not n2.is_leaf
assert len(n2.children) == 0
self.verify_collection(n2, list, [], "[0]")
def test_list_of_list_of_list_of_one_string(self):
n = Node.build([[["foo"]]])
assert n.name == ""
assert n.kind is list
assert n.value == [[["foo"]]]
assert not n.is_leaf
assert len(n.children) == 1
n2 = n.children[0]
assert n2.name == "[0]"
assert n2.kind is list
assert n2.value == [["foo"]]
assert not n2.is_leaf
assert len(n2.children) == 1
n3 = n2.children[0]
self.verify_collection(n3, list, ["foo"], "[0][0]")
# tuples
def test_tuple_empty(self):
n = Node.build(())
self.verify_collection(n, tuple, ())
def test_tuple_single_item(self):
n = Node.build(("foo",))
self.verify_collection(n, tuple, ("foo",))
def test_tuple_heterogeneous(self):
n = Node.build((None, "foo", -321))
self.verify_collection(n, tuple, (None, "foo", -321))
# sets
def test_set_empty(self):
n = Node.build(set())
self.verify_collection(n, set, set())
def test_set_single_item(self):
n = Node.build({"foo"})
self.verify_collection(n, set, {"foo"})
def test_set_multiple(self):
n = Node.build({"foo", 456, "bar", 123})
self.verify_collection(n, set, {"foo", 456, "bar", 123})
# dicts
def test_dict_empty(self):
n = Node.build({})
self.verify_collection(n, dict, {})
def test_dict_single_item(self):
n = Node.build({"foo": 123})
self.verify_collection(n, dict, {"foo": 123})
def test_dict_multiple_items(self):
n = Node.build({"foo": 123, "bar": 456, "baz": 789})
self.verify_collection(n, dict, {"foo": 123, "bar": 456, "baz": 789})
class TestNode_dfwalk:
def test_leaf_node(self):
n = Node.build("foo")
assert list(n.dfwalk()) == [Node("", str, "foo")]
def test_simple_list(self):
n = Node.build(["foo", 123, True])
assert list(n.dfwalk()) == [
n,
Node("[0]", str, "foo", parent=n),
Node("[1]", int, 123, parent=n),
Node("[2]", bool, True, parent=n),
]
def test_simple_dict(self):
n = Node.build({"foo": 123, "bar": 456, "baz": 789})
assert list(n.dfwalk()) == [
n,
Node(".foo", int, 123, key="foo", parent=n),
Node(".bar", int, 456, key="bar", parent=n),
Node(".baz", int, 789, key="baz", parent=n),
]
def test_nested_dict(self):
n = Node.build({"foo": {"a": 1, "b": 2}, "bar": [3, 4], "baz": {5, 6}})
foo = Node(
".foo", dict, {"a": 1, "b": 2}, parent=n, key="foo", children=[]
)
foo_a = Node(".foo.a", int, 1, parent=foo, key="a")
foo_b = Node(".foo.b", int, 2, parent=foo, key="b")
foo.children.extend([foo_a, foo_b])
bar = Node(".bar", list, [3, 4], parent=n, key="bar", children=[])
bar_0 = Node(".bar[0]", int, 3, parent=bar)
bar_1 = Node(".bar[1]", int, 4, parent=bar)
bar.children.extend([bar_0, bar_1])
baz = Node(".baz", set, {5, 6}, parent=n, key="baz", children=[])
baz_0 = Node(".baz[0]", int, 5, parent=baz)
baz_1 = Node(".baz[1]", int, 6, parent=baz)
baz.children.extend([baz_0, baz_1])
assert list(n.dfwalk()) == [
n,
foo,
foo_a,
foo_b,
bar,
bar_0,
bar_1,
baz,
baz_0,
baz_1,
]
def test_str_visit_heterogeneous_structure(self):
n = Node.build(
{
"dict": {"key": 321, "other_key": None, "last_key": False},
"list": [1, 2, 3],
"tuple": (4, 5, 6),
"set": {7, 8, 9},
"nested": ([{"key": {"value"}}],),
}
)
def yield_str(node):
yield str(node)
assert "\n".join(n.dfwalk(yield_str)) == textwrap.dedent(
"""\
/dict/5
.dict/dict/3
.dict.key/int/*
.dict.other_key/NoneType/*
.dict.last_key/bool/*
.list/list/3
.list[0]/int/*
.list[1]/int/*
.list[2]/int/*
.tuple/tuple/3
.tuple[0]/int/*
.tuple[1]/int/*
.tuple[2]/int/*
.set/set/3
.set[0]/int/*
.set[1]/int/*
.set[2]/int/*
.nested/tuple/1
.nested[0]/list/1
.nested[0][0]/dict/1
.nested[0][0].key/set/1
.nested[0][0].key[0]/str/*"""
)
def test_node_ancestors():
n = Node.build({"foo": {"bar": {"baz": "xyzzy"}}})
assert list(n.ancestors()) == []
foo = n.children[0]
assert list(foo.ancestors()) == [n]
bar = foo.children[0]
assert list(bar.ancestors()) == [foo, n]
baz = bar.children[0]
assert list(baz.ancestors()) == [bar, foo, n]
| 2.515625 | 3 |
scripts/parse_kif.py | SakodaShintaro/Miacis | 10 | 12793590 | <gh_stars>1-10
#!/usr/bin/env python3
import glob
import codecs
import numpy as np
import matplotlib.pyplot as plt
import japanize_matplotlib
from natsort import natsorted
# If the engine is weak in the opening → the position worsens from the opening and the game is lost as-is
# If the engine is weak in the endgame → the opening/middlegame go well but the game is lost in the endgame
turns = list()
BIN_SIZE = 31
BIN_WIDTH = 2 / BIN_SIZE
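# Engine scores are scaled by 1/5000 further below, so they land roughly in
# [-1, 1]; (score + 1) // BIN_WIDTH then maps a score onto one of BIN_SIZE
# equal-width bins used for the reliability curve.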
result_points = [list() for _ in range(BIN_SIZE)]
PHASE_NUM = 3
result_points_each_phase = [[list() for _ in range(BIN_SIZE)] for i in range(PHASE_NUM)]
total_result_for_miacis = [0, 0, 0]
file_names = natsorted(glob.glob("./*.kif"))
for file_name in file_names:
f = codecs.open(file_name, 'r', 'shift_jis')
date = f.readline().strip()
startpos = f.readline().strip()
black = f.readline().strip()
white = f.readline().strip()
label = f.readline().strip()
is_miacis_black = "Miacis" in black
miacis_scores = list()
result = None
while True:
        # Read the line that records the move
line1 = f.readline().strip()
elements1 = line1.split()
        # Read the line that records the evaluation score
line2 = f.readline().strip()
elements2 = line2.split()
        # Get the move
turn = int(elements1[0])
move = elements1[1]
        # A capture written as 同* is recorded as "同 *", so the token gets split
if move == "同":
move += elements1[2]
if move == "投了":
# print(turn, move, end=" ")
turns.append(turn - 1)
            # A principal variation may be recorded, so re-read if this line is a comment
if line2[0:2] == "**":
line2 = f.readline().strip()
            # Interpret the result
            if "先手の勝ち" in line2:  # sente (first player) wins
                # print(f"Winner: {black}")
                result = 1
            elif "後手の勝ち" in line2:  # gote (second player) wins
                # print(f"Winner: {white}")
                result = -1
else:
print(line2)
assert False
break
elif move == "入玉宣言":
print(file_name)
print(turn, move, end=" ")
turns.append(turn - 1)
            # A principal variation may be recorded, so re-read if this line is a comment
if line2[0:2] == "**":
line2 = f.readline().strip()
            # Interpret the result
            if turn % 2 == 1:
                print(f"Winner: {black}")
                result = 1
            else:
                print(f"Winner: {white}")
                result = -1
break
elif move == "持将棋":
print(file_name, move)
turns.append(turn - 1)
            # A principal variation may be recorded, so re-read if this line is a comment
if line2[0:2] == "**":
line2 = f.readline().strip()
            # Interpret the result
result = 0
break
        # Get the evaluation score
score_index = elements2.index("評価値") + 1
score = elements2[score_index]
        # A mate score has the move count after a space, so the token is split
if "詰" in score:
score += elements2[score_index + 1]
# print(turn, move, score)
if (turn % 2 == 1 and is_miacis_black) or (turn % 2 == 0 and not is_miacis_black):
miacis_scores.append(float(score) / 5000)
result_for_miacis = result if is_miacis_black else -result
total_result_for_miacis[int(1 - result_for_miacis)] += 1
for i, score in enumerate(miacis_scores):
index = min(int((score + 1) // BIN_WIDTH), BIN_SIZE - 1)
result_points[index].append(result)
phase = min(i * PHASE_NUM // len(miacis_scores), PHASE_NUM - 1)
result_points_each_phase[phase][index].append(result)
print(f"対局数 {len(turns)}")
print(f"最小手数 {np.min(turns)}")
print(f"最大手数 {np.max(turns)}")
print(f"平均手数 {np.mean(turns)}")
print(f"標準偏差 {np.std(turns)}")
print("Miacisから見た勝敗")
print(f"{total_result_for_miacis[0]}勝 {total_result_for_miacis[1]}引き分け {total_result_for_miacis[2]}敗")
x = [-1 + BIN_WIDTH * (i + 0.5) for i in range(BIN_SIZE)]
y = list()
y_each_phase = [list() for _ in range(PHASE_NUM)]
for i in range(BIN_SIZE):
y.append(np.mean(result_points[i]))
for p in range(PHASE_NUM):
y_each_phase[p].append(np.mean(result_points_each_phase[p][i]))
plt.plot(x, y, marker=".", label="Miacis search result")
# for p in range(PHASE_NUM):
#     plt.plot(x, y_each_phase[p], marker=".", label=f"Miacis search result {p}")
plt.plot(x, x, linestyle="dashed", label="Theoretical")
plt.legend()
plt.xlabel("評価値(探索結果)")
plt.ylabel("平均報酬")
plt.savefig("evaluation_curve.png", bbox_inches="tight", pad_inches=0.05)
| 3.09375 | 3 |
staging/commands/dev/repos/push.py | cligraphy/cligraphy | 5 | 12793591 | #!/usr/bin/env python
# Copyright 2013 Netflix
"""Push all repos to stash
"""
from nflx_oc.commands.dev.repos import run_for_all_repos
def main():
run_for_all_repos('git push origin master')
| 1.5625 | 2 |
lace/integrity.py | bodylabs/lace | 2 | 12793592 | from __future__ import print_function
import numpy as np
def faces_with_repeated_vertices(f):
if f.shape[1] == 3:
return np.unique(np.concatenate([
np.where(f[:, 0] == f[:, 1])[0],
np.where(f[:, 0] == f[:, 2])[0],
np.where(f[:, 1] == f[:, 2])[0],
]))
else:
return np.unique(np.concatenate([
np.where(f[:, 0] == f[:, 1])[0],
np.where(f[:, 0] == f[:, 2])[0],
np.where(f[:, 0] == f[:, 3])[0],
np.where(f[:, 1] == f[:, 2])[0],
np.where(f[:, 1] == f[:, 3])[0],
np.where(f[:, 2] == f[:, 3])[0],
]))
def faces_with_out_of_range_vertices(f, v):
return np.unique(np.concatenate([
np.where(f < 0)[0],
np.where(f >= len(v))[0],
]))
def check_integrity(mesh):
errors = []
for f_index in faces_with_out_of_range_vertices(mesh.f, mesh.v):
errors.append(("f", f_index, "Vertex out of range"))
for f_index in faces_with_repeated_vertices(mesh.f):
errors.append(("f", f_index, "Repeated vertex"))
return errors
def print_integrity_errors(errors, mesh):
for attr, index, message in errors:
try:
data = getattr(mesh, attr)[index]
except (AttributeError, IndexError):
data = ''
print("{} {} {} {}".format(attr, index, message, data))
| 2.671875 | 3 |
src/lab3/infer_resnet50_loadtest.py | aws-samples/aws-inf1-gcr-workshop | 2 | 12793593 | <reponame>aws-samples/aws-inf1-gcr-workshop
import os
import time
import torch
import torch_neuron
import json
import numpy as np
from concurrent import futures
from urllib import request
from torchvision import models, transforms, datasets
## Create an image directory containing a small kitten
os.makedirs("./torch_neuron_test/images", exist_ok=True)
request.urlretrieve("https://raw.githubusercontent.com/awslabs/mxnet-model-server/master/docs/images/kitten_small.jpg",
"./torch_neuron_test/images/kitten_small.jpg")
## Fetch labels to output the top classifications
request.urlretrieve("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json","imagenet_class_index.json")
idx2label = []
with open("imagenet_class_index.json", "r") as read_file:
class_idx = json.load(read_file)
idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
## Import a sample image and normalize it into a tensor
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
eval_dataset = datasets.ImageFolder(
os.path.dirname("./torch_neuron_test/"),
transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
normalize,
])
)
image, _ = eval_dataset[0]
image = torch.tensor(image.numpy()[np.newaxis, ...])
# begin of infer once
## Load model
#model_neuron = torch.jit.load( 'resnet50_neuron.pt' )
## Predict
#results = model_neuron( image )
# Get the top 5 results
#top5_idx = results[0].sort()[1][-5:]
# Lookup and print the top 5 labels
#top5_labels = [idx2label[idx] for idx in top5_idx]
#print("Top 5 labels:\n {}".format(top5_labels) )
# end of infer once
USER_BATCH_SIZE = 50
NUM_LOOPS_PER_THREAD = 100
pred_list = [torch.jit.load( 'resnet50_neuron.pt' ) for _ in range(4)]
pred_list = [
pred_list[0], pred_list[0], pred_list[0], pred_list[0],
pred_list[1], pred_list[1], pred_list[1], pred_list[1],
pred_list[2], pred_list[2], pred_list[2], pred_list[2],
pred_list[3], pred_list[3], pred_list[3], pred_list[3],
]
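# Four compiled models, each shared by four threads: presumably one model
# instance per NeuronCore, replicated so the 16-worker thread pool below can
# keep every core busy.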
num_infer_per_thread = []
for i in range(len(pred_list)):
num_infer_per_thread.append(0)
def one_thread(pred, input_batch, index):
global num_infer_per_thread
for _ in range(NUM_LOOPS_PER_THREAD):
with torch.no_grad():
result = pred(input_batch)
num_infer_per_thread[index] += USER_BATCH_SIZE
# print("result",result)
def current_throughput():
global num_infer_per_thread
num_infer = 0
last_num_infer = num_infer
print("NUM THREADS: ", len(pred_list))
print("NUM_LOOPS_PER_THREAD: ", NUM_LOOPS_PER_THREAD)
print("USER_BATCH_SIZE: ", USER_BATCH_SIZE)
while num_infer < NUM_LOOPS_PER_THREAD * USER_BATCH_SIZE * len(pred_list):
num_infer = 0
for i in range(len(pred_list)):
num_infer = num_infer + num_infer_per_thread[i]
current_num_infer = num_infer
throughput = current_num_infer - last_num_infer
print('current throughput: {} images/sec'.format(throughput))
last_num_infer = current_num_infer
time.sleep(1.0)
# Run inference
#model_feed_dict={'input_1:0': img_arr3}
executor = futures.ThreadPoolExecutor(max_workers=16+1)
executor.submit(current_throughput)
for i,pred in enumerate(pred_list):
executor.submit(one_thread, pred, image, i)
| 2.5 | 2 |
services/cal/test/test_service.py | Ovakefali13/buerro | 2 | 12793594 | import unittest
import os
from icalendar import Calendar
import random
import string
from datetime import timedelta, datetime as dt
import pytz
from util import Singleton
from .. import CalService, CalRemote, iCloudCaldavRemote, Event
@Singleton
class CalMockRemote(CalRemote):
def create_calendar(self):
self.calendar = Calendar()
self.calendar.add("prodid", "-//My calendar product//mxm.dk//")
self.calendar.add("version", "2.0")
def __init__(self):
self.create_calendar()
def add_event(self, event: Event):
self.calendar.add_component(event)
def events(self):
events = self.calendar.subcomponents
return list(map(lambda e: Event(e), events))
def purge(self):
self.create_calendar()
def date_search(self, start, end=None):
events = self.events()
if end is None:
end = pytz.utc.localize(dt.max)
def _starts_between(e: Event, start, end):
return end > e["dtstart"].dt and e["dtstart"].dt > start
return list(filter(lambda e: _starts_between(e, start, end), events))
class TestCalService(unittest.TestCase):
@classmethod
def setUpClass(self):
if "DONOTMOCK" in os.environ:
purgable_calendar = os.getenv("CALDAV_PURGABLE_CALENDAR")
self.cal_service = CalService.instance(
iCloudCaldavRemote.instance(purgable_calendar)
)
else:
self.cal_service = CalService.instance(CalMockRemote.instance())
print("Mocking Remote...")
def setUp(self):
self.cal_service.purge()
def now(self):
return pytz.utc.localize(dt.now())
def test_cant_add_with_too_few_params(self):
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event = Event()
event.add("summary", summary)
self.assertRaises(Exception, self.cal_service.add_event, event)
def test_add_and_get_event(self):
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event = Event()
event.add("summary", summary)
event.add("dtstart", pytz.utc.localize(dt(2020, 2, 26, 18, 00)))
event.add("dtend", pytz.utc.localize(dt(2020, 2, 26, 19, 00)))
event.add("location", "My Hood")
event.set_reminder(timedelta(minutes=10))
self.cal_service.add_event(event)
all_events = self.cal_service.get_all_events()
self.assertTrue(len(all_events) > 0)
self.assertIsInstance(all_events[0], Event)
self.assertTrue(any(e["summary"] == summary for e in all_events))
def test_get_events_between(self):
event = Event()
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event.add("summary", summary)
event.add("dtstart", self.now() + timedelta(minutes=2))
event.add("dtend", self.now() + timedelta(minutes=12))
self.cal_service.add_event(event)
start = self.now()
end = self.now() + timedelta(minutes=15)
all_events = self.cal_service.get_events_between(start, end)
self.assertTrue(len(all_events) > 0)
self.assertIsInstance(all_events[0], Event)
self.assertTrue(any(e["summary"] == summary for e in all_events))
def test_get_next_events(self):
event = Event()
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event.add("summary", summary)
event.add("dtstart", self.now() + timedelta(minutes=1))
event.add("dtend", self.now() + timedelta(minutes=10))
self.cal_service.add_event(event)
event2 = Event()
summary2 = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event2.add("summary", summary2)
event2.add("dtstart", self.now() + timedelta(minutes=2))
event2.add("dtend", self.now() + timedelta(minutes=10))
self.cal_service.add_event(event2)
next_events = self.cal_service.get_next_events()
self.assertIsInstance(next_events[0], Event)
self.assertEqual(next_events[0]["summary"], summary)
self.assertEqual(next_events[1]["summary"], summary2)
def test_get_max_available_time_between(self):
def _chop_dt(date: dt):
return date.replace(microsecond=0)
start_time = self.now()
end_time = self.now() + timedelta(hours=4)
with self.subTest("no events today"):
max_time, before, after = self.cal_service.get_max_available_time_between(
start_time, end_time
)
self.assertEqual(max_time, end_time - start_time)
self.assertEqual(before, start_time)
self.assertEqual(after, end_time)
event1 = Event()
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event1.add("summary", summary)
event1.add("dtstart", start_time + timedelta(minutes=15))
event1.add("dtend", start_time + timedelta(minutes=30))
self.cal_service.add_event(event1)
        # event2 starts 30 minutes after event1 ends
event2 = Event()
summary2 = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event2_start_time = event1.get_end() + timedelta(minutes=30)
event2.add("summary", summary2)
event2.add("dtstart", event2_start_time)
event2.add("dtend", event2_start_time + timedelta(minutes=15))
self.cal_service.add_event(event2)
with self.subTest(msg="rest of the day is empty"):
max_time, before, after = self.cal_service.get_max_available_time_between(
start_time, end_time
)
self.assertGreater(max_time, timedelta(minutes=30))
self.assertEqual(_chop_dt(before), _chop_dt(event2.get_end()))
self.assertEqual(after, end_time)
with self.subTest(msg="rest of the day with events of shorter delta"):
# each of which are 15 minutes apart
next_event_start_time = event2.get_end() + timedelta(minutes=15)
while next_event_start_time < end_time:
next_ev_summary = "".join(
random.choices(string.ascii_uppercase + string.digits, k=6)
)
next_event = Event()
next_event.add("summary", next_event)
next_event.add("dtstart", next_event_start_time)
next_event.add("dtend", next_event_start_time + timedelta(minutes=15))
self.cal_service.add_event(next_event)
next_event_start_time = next_event.get_end() + timedelta(minutes=15)
max_time, before, after = self.cal_service.get_max_available_time_between(
start_time, end_time
)
self.assertEqual(timedelta(minutes=30), max_time)
self.assertEqual(_chop_dt(before), _chop_dt(event1.get_end()))
self.assertEqual(_chop_dt(after), _chop_dt(event2.get_start()))
| 2.546875 | 3 |
catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/manifolds/tests/__init__.py | johnson880319/Software | 0 | 12793595 | # coding=utf-8
import itertools
from contracts.utils import raise_wrapped
from nose.tools import nottest
from geometry import MatrixLieGroup, RandomManifold, all_manifolds, logger
from .checks_generation import *
def list_manifolds():
return all_manifolds
@nottest
def get_test_points(M, num_random=2):
interesting = M.interesting_points()
if isinstance(M, RandomManifold):
for i in range(num_random): # @UnusedVariable
interesting.append(M.sample_uniform())
if len(interesting) == 0:
logger.warning('No test points for %s and not random.' % M)
return interesting
def list_manifold_point():
""" Yields all possible (M, point, i, num) tests we have """
for M in list_manifolds():
interesting = get_test_points(M)
num_examples = len(interesting)
for i in range(num_examples):
point = interesting[i]
try:
M.belongs(point)
except Exception as e:
msg = 'M %s does not contain %s: %s' % (M, point, e)
raise_wrapped(Exception, e, msg)
yield M, point, i, num_examples
def list_mgroup():
""" Yields all possible (M, point, i, num) tests we have """
for M in list_manifolds():
if not isinstance(M, MatrixLieGroup):
continue
yield M
def list_mgroup_point():
""" Yields all possible (M, point, i, num) tests we have """
for M in list_mgroup():
interesting = get_test_points(M)
num_examples = len(interesting)
for i in range(num_examples):
point = interesting[i]
try:
M.belongs(point)
except Exception as e:
msg = 'M %s does not contain %s: %s' % (M, point, e)
raise_wrapped(Exception, e, msg)
yield M, point, i, num_examples
def list_manifold_points():
""" Yields all possible (M, point1, point2, i, num) tests we have """
for M in list_manifolds():
interesting = get_test_points(M)
num_examples = len(interesting) * len(interesting)
k = 0
for p1, p2 in itertools.product(interesting, interesting):
yield M, p1, p2, k, num_examples
k += 1
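
# Each fancy_test_decorator call below builds a parametrized-test decorator:
# `lister` enumerates the cases, `arguments` unpacks a case into the test's
# positional arguments, and `attributes` attaches nose attributes so runs can
# be filtered by manifold/point.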
for_all_manifolds = fancy_test_decorator(lister=lambda: all_manifolds,
arguments=lambda M: (M,),
attributes=lambda M: dict(manifolds=1, manifold=str(M)))
def _args0(x):
(M, p, i, n) = x
return M, p
def _attrs0(x):
(M, p, i, n) = x
return dict(manifolds=1,
manifold=M,
point=p)
for_all_manifold_point = fancy_test_decorator(lister=list_manifold_point,
arguments=_args0,
attributes=_attrs0)
def _args1(x):
(M, p, i, n) = x
return M, p
def _attrs1(x):
(M, p, i, n) = x
return dict(manifolds=1,
matrixgroups=1,
manifold=M, point=p)
for_all_mgroup_point = fancy_test_decorator(lister=list_mgroup_point,
arguments=_args1,
attributes=_attrs1)
for_all_mgroup = fancy_test_decorator(lister=list_mgroup,
arguments=lambda M: (M,),
attributes=lambda M: dict(manifolds=1, matrixgroups=1,
manifold=M))
def _args(x):
(M, p1, p2, k, n) = x
return M, p1, p2
def _attrs(x):
(M, p1, p2, k, n) = x
return dict(type='manifolds', manifold=M, point1=p1, point2=p2)
for_all_manifold_pairs = fancy_test_decorator(lister=list_manifold_points,
arguments=_args,
attributes=_attrs)
| 2.65625 | 3 |
Graph/Solutions_Four.py | daniel-zeiler/potential-happiness | 0 | 12793596 | import collections
import heapq
from typing import List
def find_town_judge(n: int, trust: List[List[int]]) -> int:
trusts = {i + 1: 0 for i in range(n)}
outgoing = {i + 1 for i in range(n)}
for origin, destination in trust:
if origin in outgoing:
outgoing.remove(origin)
trusts[destination] += 1
if len(outgoing) == 1 and trusts[list(outgoing)[0]] == n - 1:
return list(outgoing)[0]
return -1
def all_paths_source_to_target(graph):
result = []
def traverse(node_id, path, visited):
if node_id in visited:
return 1
visited.add(node_id)
if node_id == len(graph) - 1:
result.append(path)
else:
if any([traverse(adjacent, path + [adjacent], visited | {node_id}) for adjacent in graph[node_id]]):
return 1
return 0
if traverse(0, [0], set()) == 1:
return []
return result
def minimum_vertices_reach_all_nodes(n: int, edges: List[List[int]]) -> List[int]:
in_degree = {i: 0 for i in range(n)}
for origin, destination in edges:
in_degree[destination] += 1
return list(filter(lambda x: in_degree[x] == 0, in_degree.keys()))
def keys_and_rooms(rooms: List[List[int]]) -> bool:
visited = {0}
queue = collections.deque([0])
while queue:
node_id = queue.popleft()
for adjacent in rooms[node_id]:
if adjacent not in visited:
visited.add(adjacent)
queue.append(adjacent)
return len(rooms) == len(visited)
def number_of_provinces(is_connected):
parents = [i for i in range(len(is_connected))]
rank = [1 for _ in range(len(is_connected))]
def find(node_id):
if parents[node_id] != node_id:
parents[node_id] = find(parents[node_id])
return parents[node_id]
def union(node_a, node_b):
parent_a = find(node_a)
parent_b = find(node_b)
if parent_a == parent_b:
return
rank_a = rank[parent_a]
rank_b = rank[parent_b]
if rank_a > rank_b:
parents[parent_b] = parent_a
rank[parent_a] += 1
else:
parents[parent_a] = parent_b
rank[parent_b] += 1
for x, row in enumerate(is_connected):
for y, value in enumerate(row):
if y > x and value == 1:
union(x, y)
for x in range(len(is_connected)):
find(x)
return len(set(parents))
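
# Union-find with union by rank and path compression: any edge whose two
# endpoints already share a root closes a cycle; the last such edge found is
# returned as the redundant connection.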
def redundant_connections(edges):
parents = [i for i in range(len(edges) + 1)]
rank = [1 for _ in range(len(edges) + 1)]
def find(node_id):
if parents[node_id] != node_id:
parents[node_id] = find(parents[node_id])
return parents[node_id]
def union(node_a, node_b):
parent_a = find(node_a)
parent_b = find(node_b)
if parent_a == parent_b:
return True
rank_a = rank[parent_a]
rank_b = rank[parent_b]
if rank_a > rank_b:
rank[parent_a] += 1
parents[node_b] = parent_a
else:
parents[node_a] = parent_b
rank[parent_b] += 1
return False
result = []
for origin, destination in edges:
if union(origin, destination):
result = [origin, destination]
return result
def maximal_network_rank(n, roads):
def get_graph():
graph = collections.defaultdict(set)
for origin, destination in roads:
graph[origin].add(destination)
graph[destination].add(origin)
return graph
graph = get_graph()
max_rank = 0
for x in range(n):
for y in range(x + 1, n):
rank = len(graph[x]) + len(graph[y])
if x in graph[y]:
rank -= 1
max_rank = max(max_rank, rank)
return max_rank
def find_eventual_safe_nodes(graph):
safe = set()
unsafe = set()
def traverse(node_id, visited):
if node_id in visited:
unsafe.add(node_id)
return False
for adjacent in graph[node_id]:
if adjacent in unsafe:
unsafe.add(node_id)
return False
if adjacent not in safe and not traverse(adjacent, visited | {node_id}):
unsafe.add(node_id)
return False
safe.add(node_id)
return True
for node_id in range(len(graph)):
if node_id not in safe and node_id not in unsafe:
traverse(node_id, set())
    return sorted(safe)
def is_graph_bipartite(graph):
colors = collections.defaultdict(bool)
def traverse(node_id, color):
colors[node_id] = color
for adjacent in graph[node_id]:
if adjacent in colors and colors[adjacent] == color:
return False
if adjacent not in colors and not traverse(adjacent, not color):
return False
return True
for node_id in range(len(graph)):
if node_id not in colors:
if not traverse(node_id, True):
return False
return True
def flower_planting_no_adjacent(n, paths):
flowers = collections.defaultdict(int)
flower_colors = {1, 2, 3, 4}
def get_graph():
graph = collections.defaultdict(list)
for origin, destination in paths:
graph[origin].append(destination)
graph[destination].append(origin)
return graph
graph = get_graph()
def get_color(node_id):
colors = set()
for adjacent in graph[node_id]:
if adjacent in flowers:
colors.add(flowers[adjacent])
return list(flower_colors.difference(colors))[0]
def traverse(node_id):
flowers[node_id] = get_color(node_id)
for adjacent in graph[node_id]:
if adjacent not in flowers:
traverse(adjacent)
for node_id in range(1, n + 1):
if node_id not in flowers:
traverse(node_id)
result = [None for _ in range(n)]
for key, value in flowers.items():
result[key - 1] = value
return result
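
# Dijkstra over the weighted digraph: nodes are settled in order of shortest
# arrival time; once all n nodes are settled, the last settled time is the
# network delay.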
def network_delay_time(times, n, k):
queue = [[0, k]]
visited = set()
def get_graph():
graph = collections.defaultdict(list)
for origin, destination, weight in times:
graph[origin].append([weight, destination])
return graph
graph = get_graph()
while queue:
total_time, node_id = heapq.heappop(queue)
visited.add(node_id)
if len(visited) == n:
return total_time
for adjacent_weight, adjacent_node in graph[node_id]:
if adjacent_node not in visited:
heapq.heappush(queue, [total_time + adjacent_weight, adjacent_node])
return -1
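
# Kahn's algorithm (topological sort): repeatedly emit courses whose
# in-degree has dropped to zero; a complete order exists iff the prerequisite
# graph is acyclic.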
def course_schedule_two(num_courses, prerequisites):
def get_graph():
graph = collections.defaultdict(list)
in_degree = {x: 0 for x in range(num_courses)}
for destination, origin in prerequisites:
graph[origin].append(destination)
in_degree[destination] += 1
return graph, in_degree
graph, in_degree = get_graph()
queue = collections.deque(list(filter(lambda x: in_degree[x] == 0, in_degree.keys())))
result = []
while queue:
node_id = queue.popleft()
result.append(node_id)
for adjacent in graph[node_id]:
in_degree[adjacent] -= 1
if in_degree[adjacent] == 0:
queue.append(adjacent)
if len(result) == num_courses:
return result
return []
def calcEquation(equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
def get_graph():
graph = collections.defaultdict(list)
for [origin, destination], value in zip(equations, values):
graph[origin].append([value, destination])
graph[destination].append([1 / value, origin])
return graph
graph = get_graph()
def traverse(node_id, target_node, temp_result, visited):
if node_id == target_node:
return temp_result
for weight, adjacent in graph[node_id]:
if adjacent not in visited:
result = traverse(adjacent, target_node, temp_result * weight, visited | {node_id})
if result != -1:
return result
return -1
result = []
for node_id, target_id in queries:
if node_id not in graph or target_id not in graph:
result.append(float(-1))
else:
result.append(traverse(node_id, target_id, 1, set()))
return result
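
# BFS that alternates between "standing at a stop" and "riding a bus" layers;
# the counted total only increases when boarding a bus, so the first time the
# target stop is dequeued the minimum number of buses has been found.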
def numBusesToDestination(routes: List[List[int]], source: int, target: int) -> int:
def get_graph():
bus_graph = collections.defaultdict(list)
stop_graph = collections.defaultdict(list)
for i, stops in enumerate(routes):
for stop in stops:
bus_graph[i + 1].append(stop)
stop_graph[stop].append(i + 1)
return bus_graph, stop_graph
bus_graph, stop_graph = get_graph()
bus_visited, stop_visited = set(), set()
queue = collections.deque([[0, source, 0]])
while queue:
total, location_id, turn = queue.popleft()
if turn == 0:
if location_id == target:
return total
for adjacent in stop_graph[location_id]:
if adjacent not in bus_visited:
bus_visited.add(adjacent)
queue.append([total + 1, adjacent, 1])
else:
for adjacent in bus_graph[location_id]:
if adjacent not in stop_visited:
stop_visited.add(adjacent)
queue.append([total, adjacent, 0])
return -1
def kSimilarity(s1: str, s2: str) -> int:
visited = set()
def get_neighbors(input_string):
neighbors = []
for x in range(len(input_string)):
for y in range(x + 1, len(input_string)):
temp_string = list(input_string)
temp_string[x], temp_string[y] = temp_string[y], temp_string[x]
neighbors.append(''.join(temp_string))
return neighbors
queue = collections.deque([[0, s1]])
visited.add(s1)
while queue:
value, input_string = queue.popleft()
if input_string == s2:
return value
for neighbor in get_neighbors(input_string):
if neighbor not in visited:
visited.add(neighbor)
queue.append([value + 1, neighbor])
return -1
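
# BFS over wildcard buckets: each word is indexed under patterns like "h*t",
# so words one letter apart share a bucket; the depth at which endWord is
# dequeued is the length of the shortest transformation sequence.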
def ladderLength(beginWord: str, endWord: str, wordList: List[str]) -> int:
def get_graph():
graph = collections.defaultdict(list)
for word in wordList + [beginWord]:
for i, letter in enumerate(word):
graph[word[:i] + '*' + word[i + 1:]].append(word)
return graph
if endWord not in wordList:
return -1
graph = get_graph()
visited = {beginWord}
queue = collections.deque([[1, beginWord]])
while queue:
distance, word = queue.popleft()
if word == endWord:
            return distance  # beginWord is already counted as step 1
for i, letter in enumerate(word):
transform = word[:i] + '*' + word[i + 1:]
for word in graph[transform]:
if word not in visited:
visited.add(word)
queue.append([distance + 1, word])
return -1
| 3.359375 | 3 |
Basic Algorithms/Basic Algorithms/heap_introduction_2_solution.py | michal0janczyk/udacity_data_structures_and_algorithms_nanodegree | 1 | 12793597 | <reponame>michal0janczyk/udacity_data_structures_and_algorithms_nanodegree
class Heap:
def __init__(self, initial_size=10):
self.cbt = [None for _ in range(initial_size)] # initialize arrays
self.next_index = 0 # denotes next index where new element should go
def _down_heapify(self):
parent_index = 0
while parent_index < self.next_index:
left_child_index = 2 * parent_index + 1
right_child_index = 2 * parent_index + 2
parent = self.cbt[parent_index]
left_child = None
right_child = None
min_element = parent
# check if left child exists
if left_child_index < self.next_index:
left_child = self.cbt[left_child_index]
# check if right child exists
if right_child_index < self.next_index:
right_child = self.cbt[right_child_index]
# compare with left child
if left_child is not None:
min_element = min(parent, left_child)
# compare with right child
if right_child is not None:
min_element = min(right_child, min_element)
# check if parent is rightly placed
if min_element == parent:
return
            if min_element == left_child:
                self.cbt[left_child_index] = parent
                self.cbt[parent_index] = min_element
                parent_index = left_child_index
            elif min_element == right_child:
                self.cbt[right_child_index] = parent
                self.cbt[parent_index] = min_element
                parent_index = right_child_index
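
    # The node at index i keeps its children at 2*i + 1 and 2*i + 2 in the
    # backing array; that is the index arithmetic _down_heapify walks while
    # sifting the new root toward the leaves.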
def size(self):
return self.next_index
def remove(self):
"""
Remove and return the element at the top of the heap
"""
if self.size() == 0:
return None
self.next_index -= 1
to_remove = self.cbt[0]
last_element = self.cbt[self.next_index]
# place last element of the cbt at the root
self.cbt[0] = last_element
        # we do not actually delete the element; the next `insert` operation will overwrite it
self.cbt[self.next_index] = to_remove
self._down_heapify()
return to_remove | 3.921875 | 4 |
src/apps/about/models/katalog.py | rko619619/Skidon | 0 | 12793598 | <reponame>rko619619/Skidon
from django.db import models as m
class Katalog(m.Model):
title = m.TextField(unique=True)
content = m.TextField(unique=True)
media = m.URLField(unique=True)
adress = m.TextField(null=True, blank=True)
class Meta:
verbose_name_plural = "katalog"
ordering = ["id", "title", "content", "media", "adress"]
def __repr__(self):
return f"Zavedeniya # {self.pk}: '{self.title}'"
def __str__(self):
return f"{self.pk}: '{self.title}'"
| 2.171875 | 2 |
printer/mean_printer.py | kuanhsunchen/Suspension | 1 | 12793599 | <reponame>kuanhsunchen/Suspension
import matplotlib
matplotlib.use('Agg')
import matplotlib.patches as mpatches
import random
import math
import sys
import numpy as np
import matplotlib.pyplot as plt
import itertools
from matplotlib import rcParams
from matplotlib.backends.backend_pdf import PdfPages
from scipy.stats.mstats import gmean
x1 = []
y1 = []
x2 = []
y2 = []
x3 = []
y3 = []
x4 = []
y4 = []
x5 = []
y5 = []
x6 = []
y6 = []
x7 = []
y7 = []
x8 = []
y8 = []
x9 = []
y9 = []
x10 = []
y10 = []
x11 = []
y11 = []
x12 = []
y12 = []
x13 = []
y13 = []
x14 = []
y14 = []
x15 = []
y15 = []
x16 = []
y16 = []
x17 = []
y17 = []
resTotal1 = []
resTotal2 = []
resTotal3 = []
resTotal4 = []
resTotal5 = []
resTotal6 = []
resTotal7 = []
resTotal8 = []
resTotal9 = []
resTotal10 = []
resTotal11 = []
resTotal12 = []
resTotal13 = []
resTotal14 = []
resTotal15 = []
resTotal16 = []
resTotal17 = []
def init():
global x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17
global y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13, y14, y15, y16, y17
global resTotal1, resTotal2, resTotal3, resTotal4, resTotal5, resTotal6, resTotal7, resTotal8, resTotal9, resTotal10, resTotal11, resTotal12, resTotal13, resTotal14, resTotal15, resTotal16, resTotal17
x1 = []
y1 = []
x2 = []
y2 = []
x3 = []
y3 = []
x4 = []
y4 = []
x5 = []
y5 = []
x6 = []
y6 = []
x7 = []
y7 = []
x8 = []
y8 = []
x9 = []
y9 = []
x10 = []
y10 = []
x11 = []
y11 = []
x12 = []
y12 = []
x13 = []
y13 = []
x14 = []
y14 = []
x15 = []
y15 = []
x16 = []
y16 = []
x17 = []
y17 = []
resTotal1 = []
resTotal2 = []
resTotal3 = []
resTotal4 = []
resTotal5 = []
resTotal6 = []
resTotal7 = []
resTotal8 = []
resTotal9 = []
resTotal10 = []
resTotal11 = []
resTotal12 = []
resTotal13 = []
resTotal14 = []
resTotal15 = []
resTotal16 = []
resTotal17 = []
def fileInput(var1, group, s):
fileidx = 0
utililist = []
flag = 0
while fileidx < group:
tmpUtil = []
f1 = open(var1+".txt", 'r')
count = -1
flag = 0
tmpRes1 = []
tmpRes2 = []
tmpRes3 = []
tmpRes4 = []
tmpRes5 = []
tmpRes6 = []
tmpRes7 = []
tmpRes8 = []
tmpRes9 = []
tmpRes10 = []
tmpRes11 = []
tmpRes12 = []
tmpRes13 = []
tmpRes14 = []
tmpRes15 = []
tmpRes16 = []
tmpRes17 = []
for line in f1:
if count == -1:
#filename to get utilization:
filename = line.split('_')
#print filename
tmpUtil.append(int(filename[1]))
#Content to get Arithmetic mean and Gmean
if 0 <count < s*2:
if count%2==1:
strline = line.replace('[','')
strline = strline.replace(']','')
strline = strline.replace('\n','')
strline = strline.split(',')
#prechecking
#strline[x] x = 0-16
#[ILPcarry, ILPblock, ILPjit, Inflation, ILPbaseline, Combo, TDA, TDAcarry, TDAblock, TDAjit, TDAjitblock, TDAmix, CTbaseline, CTcarry, CTblock, CTjit, CTmix]
#ILPcarry
tmpRes1.append(int(strline[0]))
#ILPblock
tmpRes2.append(int(strline[1]))
#ILPjit
tmpRes3.append(int(strline[2]))
#Inflation
tmpRes4.append(int(strline[3]))
#ILPbaseline
tmpRes5.append(int(strline[4]))
#Combo
tmpRes6.append(int(strline[5]))
#TDAbaseline
tmpRes7.append(int(strline[6]))
#TDAcarry
tmpRes8.append(int(strline[7]))
#TDAblock
tmpRes9.append(int(strline[8]))
#TDAjit
tmpRes10.append(int(strline[9]))
#TDAjitblock
tmpRes11.append(int(strline[10]))
#TDAmix
tmpRes12.append(int(strline[11]))
#CTbaseline
tmpRes13.append(int(strline[12]))
#CTbarry
tmpRes14.append(int(strline[13]))
#CTblock
tmpRes15.append(int(strline[14]))
#CTjit
tmpRes16.append(int(strline[15]))
#CTmix
tmpRes17.append(int(strline[16]))
if count == s*2+1:
'''
#print 'Gmean:'+line
strline = line.replace('[','')
strline = strline.replace(']','')
strline = strline.replace('\n','')
strline = strline.split(',')
print strline
#strline[x] x = 0-16
y1.append(float(strline[0]))
'''
count = -1
continue
count += 1
f1.close()
resTotal1.append(tmpRes1)
resTotal2.append(tmpRes2)
resTotal3.append(tmpRes3)
resTotal4.append(tmpRes4)
resTotal5.append(tmpRes5)
resTotal6.append(tmpRes6)
resTotal7.append(tmpRes7)
resTotal8.append(tmpRes8)
resTotal9.append(tmpRes9)
resTotal10.append(tmpRes10)
resTotal11.append(tmpRes11)
resTotal12.append(tmpRes12)
resTotal13.append(tmpRes13)
resTotal14.append(tmpRes14)
resTotal15.append(tmpRes15)
resTotal16.append(tmpRes16)
resTotal17.append(tmpRes17)
utililist.append(tmpUtil)
fileidx += 1
return utililist
#print resTotal6
def getResPerUtili(res, numinSets, num):  # regroup results per utilization level (8 bins for 10/20 tasks, 7 for 30, 6 for 40)
utililist = []
if num == 40:
readyres = [[] for i in range(6)]
elif num == 30:
readyres = [[] for i in range(7)]
else:
readyres = [[] for i in range(8)]
count = 0
for ind, i in enumerate(res): #each file
#print ""
#print i
#print len(i)
tmp = []
icount = 0
for j in i: #every numinSets input for each utilization
tmp.append(j)
count+=1
#print icount
if count > numinSets-1:
readyres[icount]=readyres[icount]+tmp
tmp = []
count = 0
if num == 40:
icount = (icount+1)%6
elif num == 30:
icount = (icount+1)%7
else:
icount = (icount+1)%8
icount = 0
count = 0
for i in readyres:
utililist.append(i)
return utililist
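
# Ameanratio/Gmeanratio: arithmetic and geometric mean of each result taken
# relative to a baseline, clamped to at most 1; zero results (and, for the
# geometric mean, negative ones) are handled so they cannot inflate the ratio.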
def Ameanratio(results, baseline):
res = []
if baseline ==0:
return 1
for i in results:
if i == 0:
res.append(1)
elif baseline >= i :
res.append(float(i)/float(baseline))
else:
res.append(1)
if len(results) == 0:
return 1
return np.mean(res)
def Gmeanratio(results, baseline):
res = []
if baseline == 0:
return 1
for i in results:
if i == 0:
res.append(1)
elif i < 0:
continue
elif baseline >= i :
if i/baseline <= 1:
res.append(float(i)/float(baseline))
else:
res.append(1)
else:
res.append(1)
if len(results) == 0:
return 1
return gmean(res)
# wayofMean(np.mean, 10, 'Amean', 'S', 100, 0)
# Now assume all the results are for Limited-preemptive scheduling so # of arguments is 6.
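# Arguments, as read from the call sites (not documented upstream):
#   way: aggregation callable (np.mean, Ameanratio, Gmeanratio, ...)
#   num: tasks per set (10/20/30/40); atitle: label used in titles/file names
#   typ: suspension-length type tag embedded in the input file name
#   s: number of task sets per utilization level
#   MST: input variant - 0 plain, 1 first-fit, 2 best-fit, 3 worst-fit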
def wayofMean(way, num, atitle, typ, s, MST, btype = 'N', mode = 'REP'):
init()
typ.replace("'", '')
if MST == 3:
target = 'worst/Results-tasks'+repr(num)+'_stype'+typ+'_btype'+btype
elif MST == 2:
target = 'best/Results-tasks'+repr(num)+'_stype'+typ+'_btype'+btype
elif MST == 1:
target = 'outputM_completed/Results-tasks'+repr(num)+'_stype'+typ+'_btype'+btype
else:
target = 'output_completed/Results-tasks'+repr(num)+'_stype'+typ+'_btype'+btype
utili = fileInput(target, g, s)
for i in utili[0]:
x1.append(i)
x2.append(i)
x3.append(i)
x4.append(i)
x5.append(i)
x6.append(i)
x7.append(i)
x8.append(i)
x9.append(i)
x10.append(i)
x11.append(i)
x12.append(i)
x13.append(i)
x14.append(i)
x15.append(i)
x16.append(i)
x17.append(i)
if MST == 1:
fileName = 'First-M'+atitle+'-tasks'+repr(num)+'_stype_'+repr(typ)+'_btype'+btype
elif MST == 2: #best
fileName = 'Best-M'+atitle+'-tasks'+repr(num)+'_stype_'+repr(typ)+'_btype'+btype
elif MST == 3: #worst
fileName = 'Worst-M'+atitle+'-tasks'+repr(num)+'_stype_'+repr(typ)+'_btype'+btype
else:
fileName = atitle+'-tasks'+repr(num)+'_stype_'+repr(typ)+'_btype'+btype
print fileName
Mbaseline = 0
for i in getResPerUtili(resTotal4,s, num): #when g = 6 Inflation
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y4.append(way(i, num))
else:
y4.append(way(i, 0))
else:
y4.append(way(i))
Mbaseline = max(y4)
tmpy4 = []
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
for i in getResPerUtili(resTotal4,s, num):
tmpy4.append(np.mean(i))
Mbaseline = max(tmpy4)
for i in getResPerUtili(resTotal1,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y1.append(way(i, num))
else:
y1.append(way(i, Mbaseline ))
else:
y1.append(way(i))
for i in getResPerUtili(resTotal2,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y2.append(way(i, num))
else:
y2.append(way(i, Mbaseline ))
else:
y2.append(way(i))
for i in getResPerUtili(resTotal3,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y3.append(way(i, num))
else:
y3.append(way(i, Mbaseline ))
else:
y3.append(way(i))
for i in getResPerUtili(resTotal5,s, num): #when g = 6 ILPbaseline
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y5.append(way(i, num))
else:
y5.append(way(i, Mbaseline ))
else:
y5.append(way(i))
for i in getResPerUtili(resTotal6,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y6.append(way(i, num))
else:
y6.append(way(i, Mbaseline ))
else:
y6.append(way(i))
for i in getResPerUtili(resTotal7,s, num): #when g = 6 TDAbaseline
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y7.append(way(i, num))
else:
y7.append(way(i, 0))
else:
y7.append(way(i))
Mbaseline = max(y7)
tmpy7 = []
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
for i in getResPerUtili(resTotal7,s, num):
tmpy7.append(np.mean(i))
if Mbaseline == 0:
Mbaseline = max(tmpy7)
for i in getResPerUtili(resTotal8,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y8.append(way(i, num))
else:
y8.append(way(i, Mbaseline ))
else:
y8.append(way(i))
for i in getResPerUtili(resTotal9,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y9.append(way(i, num))
else:
y9.append(way(i, Mbaseline ))
else:
y9.append(way(i))
for i in getResPerUtili(resTotal10,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y10.append(way(i, num))
else:
y10.append(way(i, Mbaseline ))
else:
y10.append(way(i))
for i in getResPerUtili(resTotal11,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y11.append(way(i, num))
else:
y11.append(way(i, Mbaseline ))
else:
y11.append(way(i))
for i in getResPerUtili(resTotal12,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y12.append(way(i, num))
else:
y12.append(way(i, Mbaseline ))
else:
y12.append(way(i))
for i in getResPerUtili(resTotal13,s, num): #when g = 6 CTbaseline
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y13.append(way(i, num))
else:
y13.append(way(i, 0))
else:
y13.append(way(i))
Mbaseline = max(y13)
tmpy13 = []
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
for i in getResPerUtili(resTotal13,s, num):
tmpy13.append(np.mean(i))
if Mbaseline == 0:
Mbaseline = max(tmpy13)
for i in getResPerUtili(resTotal14,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
#print i, num
#print way(i, num)
y14.append(way(i, num))
else:
y14.append(way(i, Mbaseline ))
else:
y14.append(way(i))
for i in getResPerUtili(resTotal15,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y15.append(way(i, num))
else:
y15.append(way(i, Mbaseline ))
else:
y15.append(way(i))
for i in getResPerUtili(resTotal16,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y16.append(way(i, num))
else:
y16.append(way(i, Mbaseline ))
else:
y16.append(way(i))
for i in getResPerUtili(resTotal17,s, num): #when g = 6
if atitle == 'Ameanratio' or atitle == 'Gmeanratio':
if MST == 0:
y17.append(way(i, num))
else:
y17.append(way(i, Mbaseline ))
else:
y17.append(way(i))
# plot in pdf
pp = PdfPages(folder + fileName + '.pdf')
if btype != 'N':
atitle = "Limited-"+atitle
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')'
if MST == 1:
if mode == 'ILP':
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')'
else:
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')-First-Fit'
elif MST == 2:
if mode == 'ILP':
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')'
else:
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')-Best-Fit'
elif MST == 3:
if mode == 'ILP':
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')'
else:
title = atitle+'-'+repr(num)+'Tasks-e('+typ+')-b('+btype+')-Worst-Fit'
plt.title(title, fontsize=20)
plt.grid(True)
#plt.ylabel('Geometric Mean', fontsize=20)
#plt.xlabel('Approaches($U^*$)', fontsize=20)
ax = plt.subplot()
ax.tick_params(axis='both', which='major',labelsize=16)
#way of means
if atitle == 'Amean':
ax.set_ylabel("Arithmetic Mean", size=20)
elif atitle == 'Gmean':
ax.set_ylabel("Geometric Mean", size=20)
elif atitle == 'Ameanratio':
ax.set_ylabel("Normalized Arithmetic Mean", size=20)
elif atitle == 'Gmeanratio':
ax.set_ylabel("Normalized Geometric Mean", size=20)
ax.set_xlabel("Utilization (%)", size=20)
marker = itertools.cycle(('D', 'd', 'o', 's', 'v'))
try:
if MST == 0:
if mode == 'REP':
if num < 30:
ax.plot( x4, y4, '-', marker = marker.next(), label='ILP-Inflation', linewidth=2.0)
ax.plot( x6, y6, '-', marker = marker.next(), label='ILP-Combo', linewidth=2.0)
ax.plot( x7, y7, '-', marker = marker.next(), label='PST-FF-TDA(Baseline)', linewidth=2.0)
ax.plot( x12, y12, '-', marker = marker.next(), label='PST-FF-TDA(Mixed)', linewidth=2.0)
ax.plot( x13, y13, '-', marker = marker.next(), label='PST-FF-CT(Baseline)', linewidth=2.0)
ax.plot( x17, y17, '-', marker = marker.next(), label='PST-FF-CT(Mixed)', linewidth=2.0)
elif mode == 'ILP':
if num < 30:
ax.plot( x1, y1, '-', marker = marker.next(), label='ILP-Carry', linewidth=2.0)
#ax.plot( x2, y2, '-', marker = marker.next(), label='ILP-Block', linewidth=2.0)
ax.plot( x3, y3, '-', marker = marker.next(), label='ILP-Jit', linewidth=2.0)
ax.plot( x4, y4, '-', marker = marker.next(), label='ILP-Inflation', linewidth=2.0)
ax.plot( x5, y5, '-', marker = marker.next(), label='ILP-Baseline', linewidth=2.0)
ax.plot( x6, y6, '-', marker = marker.next(), label='ILP-Combo', linewidth=2.0)
elif mode == 'TDA':
ax.plot( x7, y7, '-', marker = marker.next(), label='PST-FF-TDA(Baseline)', linewidth=2.0)
ax.plot( x8, y8, '-', marker = marker.next(), label='PST-FF-TDA(Carry)', linewidth=2.0)
#ax.plot( x9, y9, '-', marker = marker.next(), label='PST-FF-TDA(Block)', linewidth=2.0)
ax.plot( x10, y10, '-', marker = marker.next(), label='PST-FF-TDA(Jit)', linewidth=2.0)
#ax.plot( x11, y11, '-', marker = marker.next(), label='PST-FF-TDA(Jitblock)', linewidth=2.0)
ax.plot( x12, y12, '-', marker = marker.next(), label='PST-FF-TDA(Mixed)', linewidth=2.0)
elif mode == 'CT':
ax.plot( x13, y13, '-', marker = marker.next(), label='PST-FF-CT(Baseline)', linewidth=2.0)
ax.plot( x14, y14, '-', marker = marker.next(), label='PST-FF-CT(Carry)', linewidth=2.0)
#ax.plot( x15, y15, '-', marker = marker.next(), label='PST-FF-CT(Block)', linewidth=2.0)
ax.plot( x16, y16, '-', marker = marker.next(), label='PST-FF-CT(Jit)', linewidth=2.0)
ax.plot( x17, y17, '-', marker = marker.next(), label='PST-FF-CT(Mixed)', linewidth=2.0)
else:
if mode == 'REP':
if num < 30:
ax.plot( x4, y4, '-', marker = marker.next(), label='ILP-Inflation', linewidth=2.0)
ax.plot( x6, y6, '-', marker = marker.next(), label='ILP-Combo', linewidth=2.0)
ax.plot( x7, y7, '-', marker = marker.next(), label='PST-BF-TDA(Baseline)', linewidth=2.0)
ax.plot( x12, y12, '-', marker = marker.next(), label='PST-BF-TDA(Mixed)', linewidth=2.0)
ax.plot( x13, y13, '-', marker = marker.next(), label='PST-BF-CT(Baseline)', linewidth=2.0)
ax.plot( x17, y17, '-', marker = marker.next(), label='PST-BF-CT(Mixed)', linewidth=2.0)
elif mode == 'ILP':
if num < 30:
ax.plot( x1, y1, '-', marker = marker.next(), label='ILP-Carry', linewidth=2.0)
#ax.plot( x2, y2, '-', marker = marker.next(), label='ILP-Block', linewidth=2.0)
ax.plot( x3, y3, '-', marker = marker.next(), label='ILP-Jit', linewidth=2.0)
ax.plot( x4, y4, '-', marker = marker.next(), label='ILP-Inflation', linewidth=2.0)
ax.plot( x5, y5, '-', marker = marker.next(), label='ILP-Baseline', linewidth=2.0)
ax.plot( x6, y6, '-', marker = marker.next(), label='ILP-Combo', linewidth=2.0)
elif mode == 'TDA':
ax.plot( x7, y7, '-', marker = marker.next(), label='PST-BF-TDA(Baseline)', linewidth=2.0)
ax.plot( x8, y8, '-', marker = marker.next(), label='PST-BF-TDA(Carry)', linewidth=2.0)
#ax.plot( x9, y9, '-', marker = marker.next(), label='PST-FF-TDA(Block)', linewidth=2.0)
ax.plot( x10, y10, '-', marker = marker.next(), label='PST-BF-TDA(Jit)', linewidth=2.0)
#ax.plot( x11, y11, '-', marker = marker.next(), label='PST-FF-TDA(Jitblock)', linewidth=2.0)
ax.plot( x12, y12, '-', marker = marker.next(), label='PST-BF-TDA(Mixed)', linewidth=2.0)
elif mode == 'CT':
ax.plot( x13, y13, '-', marker = marker.next(), label='PST-BF-CT(Baseline)', linewidth=2.0)
ax.plot( x14, y14, '-', marker = marker.next(), label='PST-BF-CT(Carry)', linewidth=2.0)
#ax.plot( x15, y15, '-', marker = marker.next(), label='PST-FF-CT(Block)', linewidth=2.0)
ax.plot( x16, y16, '-', marker = marker.next(), label='PST-BF-CT(Jit)', linewidth=2.0)
ax.plot( x17, y17, '-', marker = marker.next(), label='PST-BF-CT(Mixed)', linewidth=2.0)
except ValueError:
print "ValueError"
#ax.vlines(0.5, 0, 1, transform=ax.transAxes )
#ax.text(0.35, 0.04, "$U^*=60\%$", transform=ax.transAxes, size=16 )
#ax.text(0.85, 0.04, "$U^*=70\%$", transform=ax.transAxes, size=16 )
ax.legend(loc=0, prop={'size':14})
figure = plt.gcf()
figure.set_size_inches([10, 6])
pp.savefig()
plt.clf()
plt.show()
pp.close()
folder = 'plots/'
g = 1
def main():
args = sys.argv
    if len(args) != 2:
        print "Usage: python mean_printer.py [REP/ILP/TDA/CT]"
        return -1
mode = args[1]
#after this, 6 sets of methods are prepared
'''
wayofMean(np.mean, 10, 'Amean', 'S', 100, 0)
wayofMean(gmean, 10, 'Gmean', 'S', 100, 0)
wayofMean(np.mean, 10, 'Amean', 'M', 100, 0)
wayofMean(gmean, 10, 'Gmean', 'M', 100, 0)
wayofMean(np.mean, 10, 'Amean', 'L', 100, 0)
wayofMean(gmean, 10, 'Gmean', 'L', 100, 0)
wayofMean(np.mean, 20, 'Amean', 'S', 100, 0)
wayofMean(gmean, 20, 'Gmean', 'S', 100, 0)
wayofMean(np.mean, 20, 'Amean', 'M', 100, 0)
wayofMean(gmean, 20, 'Gmean', 'M', 100, 0)
wayofMean(np.mean, 20, 'Amean', 'L', 100, 0)
wayofMean(gmean, 20, 'Gmean', 'L', 100, 0)
wayofMean(np.mean, 30, 'Amean', 'S', 100, 0)
wayofMean(gmean, 30, 'Gmean', 'S', 100, 0)
wayofMean(np.mean, 30, 'Amean', 'M', 100, 0)
wayofMean(gmean, 30, 'Gmean', 'M', 100, 0)
wayofMean(np.mean, 30, 'Amean', 'L', 100, 0)
wayofMean(gmean, 30, 'Gmean', 'L', 100, 0)
wayofMean(np.mean, 40, 'Amean', 'S', 100, 0)
wayofMean(gmean, 40, 'Gmean', 'S', 100, 0)
wayofMean(np.mean, 40, 'Amean', 'M', 100, 0)
wayofMean(gmean, 40, 'Gmean', 'M', 100, 0)
wayofMean(np.mean, 40, 'Amean', 'L', 100, 0)
wayofMean(gmean, 40, 'Gmean', 'L', 100, 0)
#ratio
wayofMean(Ameanratio, 10, 'Ameanratio', 'S', 100, 0)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 100, 0)
wayofMean(Ameanratio, 10, 'Ameanratio', 'M', 100, 0)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'M', 100, 0)
wayofMean(Ameanratio, 10, 'Ameanratio', 'L', 100, 0)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 100, 0)
wayofMean(Ameanratio, 20, 'Ameanratio', 'S', 100, 0)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 100, 0)
wayofMean(Ameanratio, 20, 'Ameanratio', 'M', 100, 0)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'M', 100, 0)
wayofMean(Ameanratio, 20, 'Ameanratio', 'L', 100, 0)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 0)
wayofMean(Ameanratio, 30, 'Ameanratio', 'S', 100, 0)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 100, 0)
wayofMean(Ameanratio, 30, 'Ameanratio', 'M', 100, 0)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'M', 100, 0)
wayofMean(Ameanratio, 30, 'Ameanratio', 'L', 100, 0)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 100, 0)
wayofMean(Ameanratio, 40, 'Ameanratio', 'S', 100, 0)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 100, 0)
wayofMean(Ameanratio, 40, 'Ameanratio', 'M', 100, 0)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'M', 100, 0)
wayofMean(Ameanratio, 40, 'Ameanratio', 'L', 100, 0)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 100, 0)
#MST
wayofMean(np.mean, 10, 'Amean', 'S', 100, 1)
wayofMean(gmean, 10, 'Gmean', 'S', 100, 1)
wayofMean(np.mean, 10, 'Amean', 'M', 100, 1)
wayofMean(gmean, 10, 'Gmean', 'M', 100, 1)
wayofMean(np.mean, 10, 'Amean', 'L', 100, 1)
wayofMean(gmean, 10, 'Gmean', 'L', 100, 1)
wayofMean(np.mean, 20, 'Amean', 'S', 100, 1)
wayofMean(gmean, 20, 'Gmean', 'S', 100, 1)
wayofMean(np.mean, 20, 'Amean', 'M', 100, 1)
wayofMean(gmean, 20, 'Gmean', 'M', 100, 1)
wayofMean(np.mean, 20, 'Amean', 'L', 100, 1)
wayofMean(gmean, 20, 'Gmean', 'L', 100, 1)
wayofMean(np.mean, 30, 'Amean', 'S', 100, 1)
wayofMean(gmean, 30, 'Gmean', 'S', 100, 1)
wayofMean(np.mean, 30, 'Amean', 'M', 100, 1)
wayofMean(gmean, 30, 'Gmean', 'M', 100, 1)
wayofMean(np.mean, 30, 'Amean', 'L', 100, 1)
wayofMean(gmean, 30, 'Gmean', 'L', 100, 1)
wayofMean(np.mean, 40, 'Amean', 'S', 100, 1)
wayofMean(gmean, 40, 'Gmean', 'S', 100, 1)
wayofMean(np.mean, 40, 'Amean', 'M', 100, 1)
wayofMean(gmean, 40, 'Gmean', 'M', 100, 1)
wayofMean(np.mean, 40, 'Amean', 'L', 100, 1)
wayofMean(gmean, 40, 'Gmean', 'L', 100, 1)
#ratio
#wayofMean(Ameanratio, 10, 'Ameanratio', 'S', 100, 1)
#wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 100, 1)
#wayofMean(Ameanratio, 10, 'Ameanratio', 'M', 100, 1)
#wayofMean(Gmeanratio, 10, 'Gmeanratio', 'M', 100, 1)
#wayofMean(Ameanratio, 10, 'Ameanratio', 'L', 100, 1)
#wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 100, 1)
#wayofMean(Ameanratio, 20, 'Ameanratio', 'S', 100, 1)
#wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 100, 1)
#wayofMean(Ameanratio, 20, 'Ameanratio', 'M', 100, 1)
#wayofMean(Gmeanratio, 20, 'Gmeanratio', 'M', 100, 1)
#wayofMean(Ameanratio, 20, 'Ameanratio', 'L', 100, 1)
#wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 1)
#wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 2)
#wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 3)
#wayofMean(Ameanratio, 30, 'Ameanratio', 'S', 100, 1)
#wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 100, 1)
#wayofMean(Ameanratio, 30, 'Ameanratio', 'M', 100, 1)
#wayofMean(Gmeanratio, 30, 'Gmeanratio', 'M', 100, 1)
#wayofMean(Ameanratio, 30, 'Ameanratio', 'L', 100, 1)
#wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 100, 1)
#wayofMean(Ameanratio, 40, 'Ameanratio', 'S', 100, 1)
#wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 100, 1)
#wayofMean(Ameanratio, 40, 'Ameanratio', 'M', 100, 1)
#wayofMean(Gmeanratio, 40, 'Gmeanratio', 'M', 100, 1)
#wayofMean(Ameanratio, 40, 'Ameanratio', 'L', 100, 1)
#wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 100, 1)
'''
#wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 2, 'L', mode)
#Limited-preemptive
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'M', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'M', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'M', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 100, 2, 'L', mode)
#
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'M', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'M', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'M', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 100, 2, 'L', mode)
#
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'M', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'M', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'M', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 100, 2, 'L', mode)
#
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'M', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'M', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'M', 100, 2, 'L', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 100, 2, 'S', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 100, 2, 'M', mode)
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 100, 2, 'L', mode)
'''
#Limited-preemptive
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 10, 1, 'S')
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'S', 10, 1, 'L')
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 10, 1, 'S')
wayofMean(Gmeanratio, 10, 'Gmeanratio', 'L', 10, 1, 'L')
#
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 10, 1, 'S')
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'S', 10, 1, 'L')
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 10, 1, 'S')
wayofMean(Gmeanratio, 20, 'Gmeanratio', 'L', 10, 1, 'L')
#
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 10, 1, 'S')
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'S', 10, 1, 'L')
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 10, 1, 'S')
wayofMean(Gmeanratio, 30, 'Gmeanratio', 'L', 10, 1, 'L')
#
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 10, 1, 'S')
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'S', 10, 1, 'L')
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 10, 1, 'S')
wayofMean(Gmeanratio, 40, 'Gmeanratio', 'L', 10, 1, 'L')
'''
if __name__ == "__main__":
main()
| 1.765625 | 2 |
yt_dlp/WS_Extractor/arte.py | evolution-ant/local-youtube-dl | 0 | 12793600 | # encoding: utf-8
import re
from ..extractor.arte import ArteTVBaseIE
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
qualities,
try_get,
unified_strdate,
)
def _extract_from_json_url(self, json_url, video_id, lang, title=None):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
vsr = try_get(player_info, lambda x: x['VSR'], dict)
if not vsr:
error = None
if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error':
error = try_get(
player_info, lambda x: x['custom_msg']['msg'], compat_str)
if not error:
            error = 'Video %s is not available' % (player_info.get('VID') or video_id)
raise ExtractorError(error, expected=True)
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
title = (player_info.get('VTI') or title or player_info['VID']).strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle
info_dict = {
'id': player_info['VID'],
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
LANGS = {
'fr': 'F',
'de': 'A',
'en': 'E[ANG]',
'es': 'E[ESP]',
}
langcode = LANGS.get(lang, lang)
formats = []
    temp = {format_id: format_dict for format_id, format_dict in list(vsr.items()) if (dict(format_dict).get('versionShortLibelle') or '').lower() == lang}
if temp:
vsr = temp
for format_id, format_dict in list(vsr.items()):
f = dict(format_dict)
versionCode = f.get('versionCode')
l = re.escape(langcode)
# Language preference from most to least priority
# Reference: section 5.6.3 of
# http://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-05.pdf
PREFERENCES = (
# original version in requested language, without subtitles
r'VO{0}$'.format(l),
# original version in requested language, with partial subtitles in requested language
r'VO{0}-ST{0}$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO{0}-STM{0}$'.format(l),
# non-original (dubbed) version in requested language, without subtitles
r'V{0}$'.format(l),
# non-original (dubbed) version in requested language, with subtitles partial subtitles in requested language
r'V{0}-ST{0}$'.format(l),
# non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'V{0}-STM{0}$'.format(l),
# original version in requested language, with partial subtitles in different language
r'VO{0}-ST(?!{0}).+?$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in different language
r'VO{0}-STM(?!{0}).+?$'.format(l),
# original version in different language, with partial subtitles in requested language
r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l),
# original version in different language, without subtitles
r'VO(?:(?!{0}))?$'.format(l),
# original version in different language, with partial subtitles in different language
r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in different language
r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l),
)
for pref, p in enumerate(PREFERENCES):
            if versionCode and re.match(p, versionCode):
lang_pref = len(PREFERENCES) - pref
break
else:
lang_pref = -1
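        # Illustrative example of the ranking above (assumed values): for
        # lang='fr' (langcode 'F'), versionCode 'VOF' matches the first
        # pattern and gets the highest lang_pref, 'VF-STF' matches a later
        # pattern and ranks lower, and a code matching nothing (e.g. None)
        # keeps lang_pref = -1 and sorts last.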
format = {
'format_id': format_id,
'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
'language_preference': lang_pref,
'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'tbr': int_or_none(f.get('bitrate')),
'quality': qfunc(f.get('quality')),
}
if f.get('mediaType') == 'rtmp':
format['url'] = f['streamer']
format['play_path'] = 'mp4:' + f['url']
format['ext'] = 'flv'
else:
format['url'] = f['url']
formats.append(format)
self._check_formats(formats, video_id)
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
ArteTVBaseIE._extract_from_json_url = _extract_from_json_url | 2.046875 | 2 |
mlapp/MLAPP_CODE/MLAPP-C4-Code/GaussInterpDemo.py | xishansnow/MLAPP | 0 | 12793601 | <filename>mlapp/MLAPP_CODE/MLAPP-C4-Code/GaussInterpDemo.py<gh_stars>0
"""根据已有观察值,对函数进行插值处理"""
import numpy as np
from functools import reduce
from scipy.sparse import spdiags
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(1)  # set the random seed for reproducibility
D = 150  # total number of data points (observed and unobserved)
n_obs = 10  # number of observed sample points
xs = np.linspace(0, 1, D)  # support of the function
perm = np.random.permutation(D)  # shuffle the indices
obs_index = perm[range(10)]  # indices of the observed values
hid_index = np.array(list(set(perm)-set(obs_index)))  # indices of the hidden (unobserved) values
x_obs = np.random.randn(n_obs)[:, np.newaxis]  # draw n_obs observed values
data = np.array([[-1]*D, [2]*D, [-1]*D])
diags = np.array([0, 1, 2])
all_matrix = spdiags(data, diags, D, D).toarray()
L = (1/2)*all_matrix[0:D-2]
print(L)
# the prior precision value lambda only affects the variance
lambdas = [30, 0.01]
lambda_index = 0
L = lambdas[lambda_index]*L
L1 = L[:, hid_index]
L2 = L[:, obs_index]
laml1 = np.dot(L1.T, L1)
laml2 = np.dot(L1.T, L2)
postdist_sigma = np.linalg.inv(laml1)
postdist_mu = reduce(np.dot,(-np.linalg.inv(laml1), laml2, x_obs))
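# Note on the two lines above (a sketch of the derivation, following MLAPP Ch. 4):
# with prior precision Lambda = L^T L and the variables split into hidden (1)
# and observed (2) blocks, conditioning a zero-mean Gaussian in information
# form gives
#     p(x1 | x2) = N(mu_{1|2}, Lambda_{11}^{-1}),
#     mu_{1|2}   = -Lambda_{11}^{-1} Lambda_{12} x2 = -(L1^T L1)^{-1} (L1^T L2) x_obs,
# which is exactly what postdist_sigma and postdist_mu compute.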
### plotting
plt.figure()
plt.style.use('ggplot')
plt.plot(xs[hid_index], postdist_mu, linewidth=2)
plt.plot(xs[obs_index], x_obs, 'ro', markersize=12)
plt.title(r'$\lambda$={}'.format(lambdas[lambda_index]))
xbar = np.zeros(D)
xbar[hid_index] = postdist_mu.flatten()
xbar[obs_index] = x_obs.flatten()
sigma = np.zeros(D)
sigma[hid_index] = (np.diag(postdist_sigma))**0.5
sigma[obs_index] = 0
# plot the standard-error band of the marginal posterior distribution
plt.figure()
plt.style.use('ggplot')
f1 = xbar + 2*sigma
f2 = xbar - 2*sigma
plt.fill_between(xs, f2, f1, color=(0.8,0.8,0.8))
plt.plot(xs[hid_index], postdist_mu, linewidth=2)
plt.plot(xs[obs_index], x_obs, 'ro', markersize=12)
#plt.ylim([-5,5])
plt.title(r'$\lambda$={}'.format(lambdas[lambda_index]))
for i in range(3):
fs = np.zeros(D)
# for j, single_index in enumerate(hid_index):
fs[hid_index] = stats.multivariate_normal.rvs(postdist_mu.flatten(), postdist_sigma, 1)
fs[obs_index] = x_obs.flatten()
plt.plot(xs, fs,'k-',linewidth=1)
plt.show()
| 2.0625 | 2 |
eval_covid20cases_timm-regnetx_002_CoarseDropout.py | BrunoKrinski/segtool | 0 | 12793602 | import os
ls=["python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_0_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_1_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_2_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_3_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_4_CoarseDropout.yml",
]
for l in ls:
os.system(l) | 1.554688 | 2 |
predict.py | cswin/CADA | 5 | 12793603 | import argparse
import numpy as np
from packaging import version
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
from PIL import Image
import matplotlib.pyplot as plt
import cv2
from skimage.transform import rotate
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils import data
from models.unet import UNet
from dataset.refuge import REFUGE
NUM_CLASSES = 3
NUM_STEPS = 512 # Number of images in the validation set.
RESTORE_FROM = '/home/charlietran/CADA_Tutorial/Model_Weights/Trial1/UNet1000_v18_weightedclass.pth'
SAVE_PATH = '/home/charlietran/CADA_Tutorial/result/Trial1/'
MODEL = 'Unet'
BATCH_SIZE = 1
is_polar = False #If need to transfer the image and labels to polar coordinates: MICCAI version is False
ROI_size = 700 #ROI size
from evaluation.evaluation_segmentation import *
print(RESTORE_FROM)
palette = [
    255, 255, 255,  # index 0: white (background)
    128, 128, 128,  # index 1: gray
    0, 0, 0,        # index 2: black
    0, 0, 0         # index 3: black
]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Unet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice Unet.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
parser.add_argument("--is_polar", type=bool, default=False,
help="If proceed images in polar coordinate. MICCAI version is false")
parser.add_argument("--ROI_size", type=int, default=460,
help="Size of ROI.")
parser.add_argument('--t', type=int, default=3, help='t for Recurrent step of R2U_Net or R2AttU_Net')
return parser.parse_args()
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
gpu0 = args.gpu
if not os.path.exists(args.save):
os.makedirs(args.save)
model = UNet(3, n_classes=args.num_classes)
saved_state_dict = torch.load(args.restore_from)
model.load_state_dict(saved_state_dict)
model.cuda(gpu0)
    model.train()  # BN/dropout stay in train mode here; model.eval() would make inference deterministic
testloader = data.DataLoader(REFUGE(False, domain='REFUGE_TEST', is_transform=True),
batch_size=args.batch_size, shuffle=False, pin_memory=True)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(ROI_size, ROI_size), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(ROI_size, ROI_size), mode='bilinear')
for index, batch in enumerate(testloader):
if index % 100 == 0:
print('%d processd' % index)
image, label, _, _, name = batch
if args.model == 'Unet':
_,_,_,_, output2 = model(Variable(image, volatile=True).cuda(gpu0))
output = interp(output2).cpu().data.numpy()
for idx, one_name in enumerate(name):
pred = output[idx]
pred = pred.transpose(1,2,0)
pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8)
output_col = colorize_mask(pred)
print(output_col.size)
one_name = one_name.split('/')[-1]
output_col = output_col.convert('L')
output_col.save('%s/%s.bmp' % (args.save, one_name))
if __name__ == '__main__':
main()
results_folder = SAVE_PATH
gt_folder = '/DATA/charlie/AWC/CADA_Tutorial_Image/Target_Test/mask/'
output_path = results_folder
export_table = True
evaluate_segmentation_results(results_folder, gt_folder, output_path, export_table)
| 2.234375 | 2 |
paper2tmb/tests/test_manipulator.py | sotetsuk/paper2img | 1 | 12793604 | import os
import unittest
import subprocess
from paper2tmb.manipulator import Manipulator
class TestManipulator(unittest.TestCase):
def test_init(self):
with Manipulator('test.pdf') as m:
self.assertTrue(os.path.isdir(m.dirname))
def test_pdf2png(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png()
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_pdf2png_trim(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="100x100")
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_pdf2png_density(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(density="20")
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_pdf2png_both_trim_density(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="300x300", density="10")
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_stack(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png()
m.stack(4, 2)
self.assertTrue(os.path.exists(os.path.join(m.dirname, "stack_row_0.png")))
self.assertTrue(os.path.exists(os.path.join(m.dirname, "stack_row_1.png")))
self.assertTrue(os.path.exists(os.path.join(m.dirname, "stack.png")))
self.assertTrue(m._last == os.path.join(m.dirname, "stack.png"))
def test_stack(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="100x60")
m.stack(6, 2)
m.resize("x400")
self.assertTrue(os.path.exists(os.path.join(m.dirname, "resize_x400.png")))
self.assertTrue(m._last == os.path.join(m.dirname, "resize_x400.png"))
def test_top(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="400x240", density="300x300")
m.top("60%")
self.assertTrue(os.path.exists(os.path.join(m.dirname, "top_60%-0.png")))
self.assertTrue(m._last == os.path.join(m.dirname, "top_60%-0.png"))
def test_out(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
target = "paper2tmb/tests/testdata/out.pdf"
m.out(target)
self.assertTrue(os.path.exists(target))
subprocess.call(["rm", target])
| 2.6875 | 3 |
py-simspark/effectors.py | edison-moreland/py-simspark | 2 | 12793605 | <gh_stars>1-10
# TODO(MESSAGES) Turn into actual classes the parse_preceptors can return
def message_factory(effector_string):
"""Makes messages easy to define"""
def message(**kwargs):
return effector_string.format(**kwargs)
return message
create = message_factory("(scene {filename})")
hinge_joint = message_factory("({name} {ax1})")
universal_joint = message_factory("({name} {ax1} {ax2})")
synchronize = message_factory("(syn)")
init = message_factory("(init (unum {playernumber}) (teamname {teamname}))")
beam = message_factory("(beam {x} {y} {rot})")
say = message_factory("(say {message})")
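# Usage sketch (argument values are illustrative, not from the SimSpark spec):
if __name__ == "__main__":
    print(init(playernumber=2, teamname="PyTeam"))  # (init (unum 2) (teamname PyTeam))
    print(beam(x=-1.0, y=0.5, rot=90))              # (beam -1.0 0.5 90)
    print(hinge_joint(name="he1", ax1=30.5))        # (he1 30.5)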
| 2.46875 | 2 |
analysis/example_utils.py | liuzh91/DEVELOP | 73 | 12793606 | from rdkit import Chem
def mol_with_atom_index(mol):
atoms = mol.GetNumAtoms()
tmp_mol = Chem.Mol(mol)
for idx in range(atoms):
tmp_mol.GetAtomWithIdx(idx).SetProp('molAtomMapNumber', str(tmp_mol.GetAtomWithIdx(idx).GetIdx()))
return tmp_mol
def unique_mols(sequence):
seen = set()
return [x for x in sequence if not (tuple(x) in seen or seen.add(tuple(x)))]
| 2.65625 | 3 |
external/frozendict.py | MPvHarmelen/MarkdownCiteCompletions | 0 | 12793607 | # The frozendict is originally available under the following license:
#
# Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy

try:  # Python 3.3+
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping

_iteritems = getattr(dict, 'iteritems', dict.items)  # py2-3 compatibility


class frozendict(Mapping):
"""
An immutable wrapper around dictionaries that implements the complete
:py:class:`collections.Mapping` interface.
It can be used as a drop-in replacement for dictionaries where immutability
is desired.
"""
dict_cls = dict
def __init__(self, *args, **kwargs):
self._dict = self.dict_cls(*args, **kwargs)
self._hash = None
def __getitem__(self, key):
item = self._dict[key]
if isinstance(item, dict):
item = self._dict[key] = frozendict(**item)
elif isinstance(item, list):
item = self._dict[key] = tuple(item)
elif isinstance(item, set):
item = self._dict[key] = frozenset(item)
elif hasattr(item, '__dict__') or hasattr(item, '__slots__'):
return copy.copy(item)
return item
def __contains__(self, key):
return key in self._dict
def copy(self, **add_or_replace):
return self.__class__(self, **add_or_replace)
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self._dict)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
result.__dict__.update(dict((
(k, copy.deepcopy(v, memo)) for k, v in self.__dict__.items())))
return result
def __hash__(self):
if self._hash is None:
h = 0
for key, value in _iteritems(self._dict):
h ^= hash((key, value))
self._hash = h
return self._hash
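# Usage sketch (illustrative):
#
# >>> fd = frozendict(a=1, b=2)
# >>> fd['a']
# 1
# >>> fd.copy(b=20)['b']            # "mutation" returns a new frozendict
# 20
# >>> hash(frozendict(a=1)) == hash(frozendict(a=1))
# True
# >>> frozendict(a={'x': 1})['a']   # nested dicts are frozen lazily on access
# <frozendict {'x': 1}>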
| 1.96875 | 2 |
stream_alert_cli/manage_lambda/rollback.py | opsbay/streamalert | 0 | 12793608 | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from stream_alert_cli.logger import LOGGER_CLI
import boto3
from botocore.exceptions import ClientError
def _rollback_production(lambda_client, function_name):
"""Rollback the production alias for the given function name."""
version = lambda_client.get_alias(
FunctionName=function_name, Name='production')['FunctionVersion']
if version == '$LATEST':
# This won't happen with Terraform, but the alias could have been manually changed.
LOGGER_CLI.error('%s:production is pointing to $LATEST instead of a published version',
function_name)
return
current_version = int(version)
if current_version == 1:
LOGGER_CLI.warn('%s:production is already at version 1', function_name)
return
LOGGER_CLI.info('Rolling back %s:production from version %d => %d',
function_name, current_version, current_version - 1)
try:
lambda_client.update_alias(
FunctionName=function_name, Name='production', FunctionVersion=str(current_version - 1))
except ClientError:
LOGGER_CLI.exception('version not updated')
def rollback(options, config):
"""Rollback the current production Lambda version(s) by 1.
Args:
options: Argparse parsed options
config (dict): Parsed configuration from conf/
"""
rollback_all = 'all' in options.processor
prefix = config['global']['account']['prefix']
clusters = sorted(options.clusters or config.clusters())
client = boto3.client('lambda')
if rollback_all or 'alert' in options.processor:
_rollback_production(client, '{}_streamalert_alert_processor'.format(prefix))
if rollback_all or 'alert_merger' in options.processor:
_rollback_production(client, '{}_streamalert_alert_merger'.format(prefix))
if rollback_all or 'apps' in options.processor:
for cluster in clusters:
apps_config = config['clusters'][cluster]['modules'].get('stream_alert_apps', {})
for lambda_name in sorted(apps_config):
_rollback_production(client, lambda_name)
if rollback_all or 'athena' in options.processor:
_rollback_production(client, '{}_streamalert_athena_partition_refresh'.format(prefix))
if rollback_all or 'rule' in options.processor:
for cluster in clusters:
_rollback_production(client, '{}_{}_streamalert_rule_processor'.format(prefix, cluster))
if rollback_all or 'threat_intel_downloader' in options.processor:
_rollback_production(client, '{}_streamalert_threat_intel_downloader'.format(prefix))
| 1.820313 | 2 |
gtc-model-using-SCIP.py | mgorav/linear-programing | 1 | 12793609 | from ortools.linear_solver import pywraplp
def main():
solver = pywraplp.Solver.CreateSolver('SCIP')
infinity = solver.infinity()
# wrenches
wrenches = solver.IntVar(0.0, infinity, 'wrenches')
# pliers
pliers = solver.IntVar(0.0, infinity, 'pliers')
print('Number of variables =', solver.NumVariables())
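    # The constraints below encode the classic GTC wrench/pliers LP
    # (coefficients follow the code in this script):
    #   maximize    0.13*w + 0.10*p            (profit)
    #   subject to  1.5*w + 1.0*p <= 27000     (steel)
    #               1.0*w + 1.0*p <= 21000     (molding machine)
    #               0.3*w + 0.5*p <=  9000     (assembly machine)
    #               w <= 15000, p <= 16000     (demand limits)
    #               w, p >= 0 and integer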
# constraints
# steel
solver.Add(1.5 * wrenches + pliers <= 27000)
# molding
solver.Add(1.0 * wrenches + pliers <= 21000)
# assembly
solver.Add(0.3 * wrenches + 0.5 * pliers <= 9000)
# demand1
solver.Add(wrenches <= 15000)
# demand2
solver.Add(pliers <= 16000)
print('Number of constraints =', solver.NumConstraints())
# objective function
solver.Maximize(0.13 * wrenches + 0.10 * pliers)
status = solver.Solve()
if status == pywraplp.Solver.OPTIMAL:
print('Solution:')
print('Objective value =', solver.Objective().Value())
print('Wrenches =', wrenches.solution_value())
print('Pliers =', pliers.solution_value())
print('Slack steel', (27000 - (1.5 * wrenches.solution_value() + pliers.solution_value())))
print('Slack molding', (21000 - (1.0 * wrenches.solution_value() + pliers.solution_value())))
print('Slack assembly',(9000 -(0.3 * wrenches.solution_value() + 0.5 * pliers.solution_value())))
print('Slack demand1',(15000 - wrenches.solution_value()))
print('Slack demand2',(16000 - pliers.solution_value()))
else:
print('The problem does not have an optimal solution.')
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Problem solved in %d iterations' % solver.iterations())
print('Problem solved in %d branch-and-bound nodes' % solver.nodes())
if __name__ == '__main__':
main() | 2.703125 | 3 |
urls.py | j-ollivier/sonov-main | 0 | 12793610 | from django.urls import path, include
from . import views
urlpatterns = [
path('accounts/', include('registration.backends.simple.urls')),
path('', views.FrontPage, name='FrontPage'),
path('tags', views.TagList, name='TagList'),
path('clips', views.ClipList, name='ClipList'),
path('playlist/<str:tag_title>', views.Playlist, name='Playlist'),
path('subscribe', views.Subscribe, name='Subscribe'),
path('upload', views.UploadSon, name='UploadSon'),
path('soundcloud_iframe/<str:soundcloud_id>', views.SoundcloudIframe),
path('youtube_iframe/<str:youtube_id>', views.YoutubeIframe),
path('vimeo_iframe/<str:vimeo_id>', views.VimeoIframe),
] | 1.71875 | 2 |
Data_Science/chatbotPreprocessing.py | BasilcM/Short_URL | 0 | 12793611 | # -*- coding: utf-8 -*-
import os
import json
import nltk
import gensim
import numpy as np
from gensim import corpora, models, similarities
import pickle
os.chdir(r"D:\semicolon\Deep Learning\chatbot")  # raw string so the backslashes are not treated as escapes
model = gensim.models.Word2Vec.load('word2vec.bin')
path2 = "corpus"
file = open(path2 + '/conversation.json')
data = json.load(file)
cor = data["conversations"]
x=[]
y=[]
path2="corpus";
for i in range(len(cor)):
for j in range(len(cor[i])):
if j<len(cor[i])-1:
x.append(cor[i][j]);
y.append(cor[i][j+1]);
tok_x=[]
tok_y=[]
for i in range(len(x)):
tok_x.append(nltk.word_tokenize(x[i].lower()))
tok_y.append(nltk.word_tokenize(y[i].lower()))
sentend=np.ones((300L,),dtype=np.float32)
vec_x=[]
for sent in tok_x:
sentvec = [model[w] for w in sent if w in model.vocab]
vec_x.append(sentvec)
vec_y=[]
for sent in tok_y:
sentvec = [model[w] for w in sent if w in model.vocab]
vec_y.append(sentvec)
for tok_sent in vec_x:
tok_sent[14:]=[]
tok_sent.append(sentend)
for tok_sent in vec_x:
if len(tok_sent)<15:
for i in range(15-len(tok_sent)):
tok_sent.append(sentend)
for tok_sent in vec_y:
tok_sent[14:]=[]
tok_sent.append(sentend)
for tok_sent in vec_y:
if len(tok_sent)<15:
for i in range(15-len(tok_sent)):
tok_sent.append(sentend)
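# At this point every entry of vec_x / vec_y is a fixed 15 x 300 sequence:
# sentences longer than 14 tokens are truncated and a trailing `sentend`
# (all-ones) marker is appended, shorter ones are padded with `sentend`
# vectors -- the fixed shape a downstream seq2seq model would consume.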
with open('conversation.pickle', 'wb') as f:  # pickle files must be opened in binary mode on Windows
pickle.dump([vec_x,vec_y],f)
| 2.5 | 2 |
itg-tests/es-it/TraceContainers.py | Hemankita/refarch-kc | 0 | 12793612 | '''
Trace container events to validate, events are published
'''
import sys,os
import time,json
import signal,asyncio
from confluent_kafka import KafkaError, Consumer, Producer
try:
KAFKA_BROKERS = os.environ['KAFKA_BROKERS']
except KeyError:
print("The KAFKA_BROKERS environment variable needs to be set.")
    sys.exit(1)
try:
KAFKA_APIKEY = os.environ['KAFKA_APIKEY']
except KeyError:
print("The KAFKA_APIKEY environment variable not set... assume local deployment")
TOPIC_NAME = "containers"
def parseArguments():
    # NB_EVENTS / EVT_TYPE are read by __main__, so they must be module globals
    global NB_EVENTS, EVT_TYPE
    if len(sys.argv) != 3:
        print("Set the number of container IDs to send and the event type")
        sys.exit(1)
    NB_EVENTS = int(sys.argv[1])
    EVT_TYPE = sys.argv[2]
    print("The arguments are: ", str(sys.argv))
def delivery_report(err, msg):
""" Called once for each message produced to indicate delivery result.
Triggered by poll() or flush(). """
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
def prepareProducer():
producer_options = {
'bootstrap.servers': KAFKA_BROKERS,
'security.protocol': 'SASL_SSL',
'ssl.ca.location': '/etc/ssl/certs',
'sasl.mechanisms': 'PLAIN',
'sasl.username': 'token',
        'sasl.password': KAFKA_APIKEY,  # the API key serves as the SASL password (username is 'token')
'api.version.request': True,
'broker.version.fallback': '0.10.2.1',
'log.connection.close' : False,
'client.id': 'kafka-python-container-test-producer',
}
return Producer( producer_options)
def sendContainerEvent(producer,eventType,idx):
for i in range(0,idx,1):
cid = "c_" + str(i)
data = {"timestamp": int(time.time()),
"type": eventType,
"version":"1",
"containerID": cid,
"payload": {"containerID": cid,
"type": "Reefer",
"status": "atDock",
"city": "Oakland",
"brand": "brand-reefer",
"capacity": 100}}
dataStr = json.dumps(data)
producer.produce(TOPIC_NAME,dataStr.encode('utf-8'), callback=delivery_report)
producer.flush()
if __name__ == '__main__':
parseArguments()
producer=prepareProducer()
sendContainerEvent(producer,EVT_TYPE,NB_EVENTS) | 2.328125 | 2 |
scrapers/competitor_prices/models.py | vlandham/social_shopper | 0 | 12793613 | <filename>scrapers/competitor_prices/models.py
from sqlalchemy import create_engine, Column, Integer, String, Numeric
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import URL
import settings
DeclarativeBase = declarative_base()
def create_competitor_prices_table(engine):
DeclarativeBase.metadata.create_all(engine)
def db_connect():
"""
Performs database connection using settings from settings.py.
Returns sqlalchemy engine instance.
"""
return create_engine(URL(**settings.DATABASE))
class CompetitorPrices(DeclarativeBase):
"""
sqlalchemy competitor_prices model
"""
__tablename__ = "competitor_prices"
product_id = Column(Integer, primary_key = True)
product_name = Column('product_name', String, nullable = True)
brand = Column('brand', String, nullable = True)
price_high = Column('price_high', Numeric(10, 2), nullable = True)
price_low = Column('price_low', Numeric(10, 2), nullable = True)
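# Typical wiring (a sketch; assumes settings.DATABASE holds SQLAlchemy URL
# kwargs such as drivername, host, database, username, password):
#
#   engine = db_connect()
#   create_competitor_prices_table(engine)  # creates the table if missing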
| 2.890625 | 3 |
winery/dead_seg.py | H-B-P/DURKON | 0 | 12793614 | import pandas as pd
import numpy as np
import math
import util
def gimme_pseudo_winsors(inputDf, col, pw=0.05):
return util.round_to_sf(inputDf[col].quantile(pw),3), util.round_to_sf(inputDf[col].quantile(1-pw),3)
def gimme_starting_affect(inputDf, col, segs):
x = inputDf[col]
x1 = float(segs[0])
x2 = float(segs[1])
affectedness = pd.Series([0]*len(inputDf))
affectedness.loc[(x<x1)] = 1
affectedness.loc[(x>=x1) & (x<x2)] = (x2 - x)/(x2 - x1)
return sum(affectedness)
def gimme_normie_affect(inputDf, col, segs, posn):
x = inputDf[col]
x1 = float(segs[posn-1])
x2 = float(segs[posn])
x3 = float(segs[posn+1])
affectedness = pd.Series([0]*len(inputDf))
affectedness.loc[(x>=x1) & (x<x2)] = (x - x1)/(x2 - x1)
affectedness.loc[(x>=x2) & (x<x3)] = (x3 - x)/(x3 - x2)
return sum(affectedness)
def gimme_ending_affect(inputDf, col, segs):
x = inputDf[col]
x1 = float(segs[-2])
x2 = float(segs[-1])
affectedness = pd.Series([0]*len(inputDf))
affectedness.loc[(x>=x2)] = 1
affectedness.loc[(x>=x1) & (x<x2)] = (x - x1)/(x2 - x1)
return sum(affectedness)
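# The three *_affect helpers above integrate piecewise-linear "tent" basis
# functions over the data: the starting one is 1 below the first knot and
# ramps to 0 at the second, interior ones ramp up then down across three
# knots, and the ending one mirrors the start. The knot search in __main__
# then places each knot so every basis function covers roughly
# len(inputDf) / targetLen points (goodAmt).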
def gimme_sa_optimizing_func(inputDf, col, segsSoFar):
def sa_optimizing_func(x):
return gimme_starting_affect(inputDf, col, segsSoFar+[x])
return sa_optimizing_func
def gimme_na_optimizing_func(inputDf, col, segsSoFar):
def na_optimizing_func(x):
return gimme_normie_affect(inputDf, col, segsSoFar+[x], len(segsSoFar)-1)
return na_optimizing_func
def gimme_pa_optimizing_func(inputDf, col, segsSoFar, end):
def pa_optimizing_func(x):
return gimme_normie_affect(inputDf, col, segsSoFar+[x]+[end], len(segsSoFar))
return pa_optimizing_func
if __name__ == "__main__":
dyct = {"x":list(range(100))}
df=pd.DataFrame(dyct)
start, end = gimme_pseudo_winsors(df, "x")
print(start, end)
targetLen=5
goodAmt=float(len(df))/targetLen
segs = [start]
print(segs)
if targetLen>2:
optFunc = gimme_sa_optimizing_func(df, "x", segs)
next = util.target_input_with_output(optFunc, goodAmt, start, end)
segs.append(util.round_to_sf(next,3))
print(segs)
for i in range(targetLen-3):
optFunc = gimme_na_optimizing_func(df, "x", segs)
next = util.target_input_with_output(optFunc, goodAmt, start, end)
segs.append(util.round_to_sf(next,3))
print(segs)
segs.append(end)
print(segs)
print([gimme_starting_affect(df, "x", segs), gimme_normie_affect(df, "x", segs, 1), gimme_normie_affect(df, "x", segs, 2), gimme_normie_affect(df, "x", segs, 3), gimme_ending_affect(df, "x", segs)])
| 2.65625 | 3 |
dsf_utils/evaluation.py | ltsaprounis/dsf-ts-forecasting | 18 | 12793615 | <reponame>ltsaprounis/dsf-ts-forecasting
"""Evaluation Functions"""
import pandas as pd
import numpy as np
from sktime.forecasting.model_evaluation import evaluate
from sktime.forecasting.model_selection import (
CutoffSplitter,
SlidingWindowSplitter,
ExpandingWindowSplitter,
SingleWindowSplitter,
)
from typing import Union
from IPython.display import display
from copy import deepcopy
def evaluate_forecasters_on_cutoffs(
time_series: pd.Series,
cutoffs: list,
forecasters_dict: dict,
metrics_dict: dict,
fh: np.array = np.arange(3) + 1,
window_length: int = 5 * 52,
) -> pd.DataFrame:
_df_list = []
for cutoff in cutoffs:
_ts = time_series.copy()
for fcaster_name, forecaster in forecasters_dict.items():
for metric_name, metric in metrics_dict.items():
_forecaster = deepcopy(forecaster)
cv = CutoffSplitter(
cutoffs=np.array([cutoff]),
fh=fh,
window_length=window_length,
)
_df = evaluate(
forecaster=_forecaster,
y=_ts,
cv=cv,
strategy="refit",
return_data=True,
scoring=metric,
)
_df["Forecaster"] = fcaster_name
_df["Metric"] = metric_name
_df = _df.rename(columns={f"test_{metric.name}": "Score"})
_df_list.append(_df)
return pd.concat(_df_list)
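# Usage sketch (names are illustrative; any sktime forecaster/metric works):
#
#   from sktime.forecasting.naive import NaiveForecaster
#   from sktime.performance_metrics.forecasting import MeanAbsoluteScaledError
#
#   results = evaluate_forecasters_on_cutoffs(
#       time_series=y,  # a pd.Series with a period-like index
#       cutoffs=[y.index[-10]],
#       forecasters_dict={"naive": NaiveForecaster()},
#       metrics_dict={"MASE": MeanAbsoluteScaledError()},
#   )
#   display_results(results)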
def evaluate_forecasters(
time_series: pd.Series,
cv: Union[
CutoffSplitter,
SlidingWindowSplitter,
ExpandingWindowSplitter,
SingleWindowSplitter,
],
forecasters_dict: dict,
metrics_dict: dict,
) -> pd.DataFrame:
_df_list = []
_ts = time_series.copy()
for fcaster_name, forecaster in forecasters_dict.items():
for metric_name, metric in metrics_dict.items():
_forecaster = deepcopy(forecaster)
cv = deepcopy(cv)
_df = evaluate(
forecaster=_forecaster,
y=_ts,
cv=cv,
strategy="refit",
return_data=True,
scoring=metric,
)
_df["Forecaster"] = fcaster_name
_df["Metric"] = metric_name
_df = _df.rename(columns={f"test_{metric.name}": "Score"})
_df_list.append(_df)
return pd.concat(_df_list)
def display_results(df, axis=0):
results = df.groupby(["Forecaster", "Metric"], as_index=False)["Score"].mean()
results = results.pivot(index="Forecaster", columns="Metric", values="Score")
def highlight_min(s, props=""):
return np.where(s == np.nanmin(s.values), props, "")
results = results.applymap("{:,.2f}".format).style.apply(
highlight_min, props="color:white;background-color:purple", axis=axis
)
display(results)
def evaluate_panel_forecaster_on_cutoffs(
panel_df: pd.DataFrame,
cutoffs: list,
forecaster,
metric,
fh: np.array = np.arange(3) + 1,
window_length: int = 5 * 52,
freq="W-SUN",
ts_id_col="REGION",
target="ILITOTAL",
) -> pd.DataFrame:
_panel_df = panel_df.copy()
_panel_df = _panel_df.sort_values(by=[ts_id_col]).sort_index()
ts_list = list(panel_df[ts_id_col].unique())
results = pd.DataFrame()
for cutoff in cutoffs:
_forecaster = deepcopy(forecaster)
cutoff = pd.Period(cutoff, freq=freq) + 1
train_df = _panel_df[
(_panel_df.index <= cutoff) & (_panel_df.index > cutoff - window_length)
].sort_index()
min_test_date = cutoff + int(np.min(fh))
max_test_date = cutoff + int(np.max(fh))
test_df = _panel_df[
(_panel_df.index >= min_test_date) & (_panel_df.index <= max_test_date)
]
# if forecaster doesn't need fh in fit fh will be ignored.
_forecaster.fit(train_df, fh=fh)
pred_df = _forecaster.predict(fh=fh)
# loop over regions to get region level metrics and y_preds
for ts in ts_list:
_pred = pred_df[pred_df[ts_id_col] == ts]["y_pred"]
_test = test_df[test_df[ts_id_col] == ts][target]
_train = train_df[train_df[ts_id_col] == ts][target].sort_index()
score = metric(y_true=_test, y_pred=_pred, y_train=_train)
results = results.append(
{
ts_id_col: ts,
"cutoff": cutoff,
"Metric": metric.name,
"Score": score,
"y_test": _test,
"y_pred": _pred,
},
ignore_index=True,
)
return results
| 2.625 | 3 |
app/core/tests/test_models.py | georgecretu26/recipe-app-api | 0 | 12793616 | <reponame>georgecretu26/recipe-app-api
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating new user with an email is successful"""
email = "<EMAIL>"
password = "<PASSWORD>"
user = get_user_model().objects.create_user(
email=email,
password=password,
)
self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""test the email for a new user is normilized"""
email = '<EMAIL>'
user = get_user_model().objects.create_user(
email=email,
)
self.assertEqual(user.email, email.lower())
def test_new_user_email_invalid(self):
"""test if the email is invalid"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'asdqw12')
def test_create_new_super_user(self):
"""Test create new super user"""
user = get_user_model().objects.create_superuser(
'<EMAIL>',
'test123',
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 2.953125 | 3 |
project/20-custom-training-loops.py | marknhenry/tf_starter_kit | 0 | 12793617 | <reponame>marknhenry/tf_starter_kit
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.datasets import mnist
import tensorflow_datasets as tfds
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, ReLU
from utils import style
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # change to 2
os.system('clear')
print(style.YELLOW + f'Tensorflow version: {tf.__version__}\n')
print(style.GREEN, end='')
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
return tf.cast(image, tf.float32)/255.0, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 32
# Setting up training dataset
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)
# Setting up test dataset (built from ds_test, not ds_train)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)
# Building the Model
model = keras.Sequential(
[
Input((28, 28, 1)),
Conv2D(32, 3, activation='relu'),
Flatten(),
Dense(10, activation='softmax'),
]
)
print(style.GREEN, end='')
num_epochs = 5
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=False)  # the model already ends in softmax
optimizer = keras.optimizers.Adam(learning_rate=3e-4)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
for epoch in range(num_epochs):
print(f'\nStart of Training Epoch {epoch}')
for batch_idx, (x_batch, y_batch) in enumerate(ds_train):
with tf.GradientTape() as tape:
y_pred = model(x_batch, training=True)
loss = loss_fn(y_batch, y_pred)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
acc_metric.update_state(y_batch, y_pred)
train_acc = acc_metric.result()
print(f'Accuracy over epoch {train_acc}')
acc_metric.reset_states()
for batch_idx, (x_batch, y_batch) in enumerate(ds_test):
y_pred = model(x_batch, training=False)
acc_metric.update_state(y_batch, y_pred)
train_acc = acc_metric.result()
print(f'Accuracy over test set: {train_acc}')
acc_metric.reset_states()
| 2.609375 | 3 |
13_multiprocessing/05_remote_server.py | varshashivhare/Mastering-Python | 30 | 12793618 | <filename>13_multiprocessing/05_remote_server.py
constants = __import__('05_remote_processor')
import multiprocessing
from multiprocessing import managers
queue = multiprocessing.Queue()
manager = managers.BaseManager(address=('', constants.port),
authkey=constants.password)
manager.register('queue', callable=lambda: queue)
manager.register('primes', callable=constants.primes)
server = manager.get_server()
server.serve_forever()
| 2.09375 | 2 |
catkin_ws_assignments/src/week2/src/Scripts/surname.py | ritvik506/Robotics-Automation-QSTP-2021 | 0 | 12793619 | <gh_stars>0
#!/usr/bin/env python2
import rospy
from std_msgs.msg import String
rospy.init_node("surname")
pub = rospy.Publisher("surname", String, queue_size=10)  # queue_size avoids the rospy deprecation warning
rate=rospy.Rate(3)
surname="Puranik"
while not rospy.is_shutdown():
pub.publish(surname)
rate.sleep() | 2.359375 | 2 |
threedod/benchmark_scripts/utils/box_utils.py | Levintsky/ARKitScenes | 237 | 12793620 | # TODO: Explain 8 corners logic at the top and use it consistently
# Add comments of explanation
import numpy as np
import scipy.spatial
from .rotation import rotate_points_along_z
def get_size(box):
"""
Args:
box: 8x3
Returns:
size: [dx, dy, dz]
"""
distance = scipy.spatial.distance.cdist(box[0:1, :], box[1:5, :])
l = distance[0, 2]
w = distance[0, 0]
h = distance[0, 3]
return [l, w, h]
def get_heading_angle(box):
"""
Args:
box: (8, 3)
Returns:
heading_angle: float
"""
a = box[0, 0] - box[1, 0]
b = box[0, 1] - box[1, 1]
heading_angle = np.arctan2(a, b)
return heading_angle
def compute_box_3d(size, center, rotmat):
"""Compute corners of a single box from rotation matrix
Args:
size: list of float [dx, dy, dz]
center: np.array [x, y, z]
rotmat: np.array (3, 3)
Returns:
corners: (8, 3)
"""
l, h, w = [i / 2 for i in size]
center = np.reshape(center, (-1, 3))
center = center.reshape(3)
x_corners = [l, l, -l, -l, l, l, -l, -l]
y_corners = [h, -h, -h, h, h, -h, -h, h]
z_corners = [w, w, w, w, -w, -w, -w, -w]
corners_3d = np.dot(
np.transpose(rotmat), np.vstack([x_corners, y_corners, z_corners])
)
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
def corners_to_boxes(corners3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
corners: (N, 8, 3), vertex order shown in figure above
Returns:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading]
with (x, y, z) is the box center
(dx, dy, dz) as the box size
and heading as the clockwise rotation angle
"""
boxes3d = np.zeros((corners3d.shape[0], 7))
for i in range(corners3d.shape[0]):
boxes3d[i, :3] = np.mean(corners3d[i, :, :], axis=0)
boxes3d[i, 3:6] = get_size(corners3d[i, :, :])
boxes3d[i, 6] = get_heading_angle(corners3d[i, :, :])
return boxes3d
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading],
(x, y, z) is the box center
Returns:
corners: (N, 8, 3)
"""
template = np.array([[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
[-1, 1, -1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, 1]]
) / 2.
# corners3d: of shape (N, 3, 8)
corners3d = np.tile(boxes3d[:, None, 3:6], (1, 8, 1)) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.reshape(-1, 8, 3), boxes3d[:, 6]).reshape(
-1, 8, 3
)
corners3d += boxes3d[:, None, 0:3]
return corners3d
def points_in_boxes(points, boxes):
"""
Args:
pc: np.array (n, 3+d)
boxes: np.array (m, 8, 3)
Returns:
mask: np.array (n, m) of type bool
"""
if len(boxes) == 0:
        return np.zeros([points.shape[0], 1], dtype=bool)  # np.bool was removed in NumPy >= 1.24
points = points[:, :3] # get xyz
# u = p6 - p5
u = boxes[:, 6, :] - boxes[:, 5, :] # (m, 3)
# v = p6 - p7
v = boxes[:, 6, :] - boxes[:, 7, :] # (m, 3)
# w = p6 - p2
w = boxes[:, 6, :] - boxes[:, 2, :] # (m, 3)
# ux, vx, wx
ux = np.matmul(points, u.T) # (n, m)
vx = np.matmul(points, v.T)
wx = np.matmul(points, w.T)
# up6, up5, vp6, vp7, wp6, wp2
up6 = np.sum(u * boxes[:, 6, :], axis=1)
up5 = np.sum(u * boxes[:, 5, :], axis=1)
vp6 = np.sum(v * boxes[:, 6, :], axis=1)
vp7 = np.sum(v * boxes[:, 7, :], axis=1)
wp6 = np.sum(w * boxes[:, 6, :], axis=1)
wp2 = np.sum(w * boxes[:, 2, :], axis=1)
    mask_u = np.logical_and(ux <= up6, ux >= up5)  # (n, m)
mask_v = np.logical_and(vx <= vp6, vx >= vp7)
mask_w = np.logical_and(wx <= wp6, wx >= wp2)
    mask = mask_u & mask_v & mask_w  # (n, m)
return mask
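def _example_points_in_boxes():
    """Illustrative helper (not in the original module): shows how
    boxes_to_corners_3d and points_in_boxes compose. The numbers are made up.
    """
    boxes7 = np.array([[0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0]])  # one axis-aligned box
    corners = boxes_to_corners_3d(boxes7)                     # (1, 8, 3)
    pts = np.array([[0.0, 0.0, 0.0], [5.0, 5.0, 5.0]])        # inside, outside
    return points_in_boxes(pts, corners)                      # -> [[True], [False]]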
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
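# Illustrative check (not in the original module): the shoelace area of the
# unit square is 1.0.
#   poly_area(np.array([0., 1., 1., 0.]), np.array([0., 0., 1., 1.]))  # -> 1.0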
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return (outputList)
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = scipy.spatial.ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def box3d_iou(corners1, corners2):
    ''' Compute 3D bounding box IoU.
    Input:
        corners1: numpy array (8,3), vertical extent taken from column 2 (z)
        corners2: numpy array (8,3), vertical extent taken from column 2 (z)
    Output:
        iou: 3D bounding box IoU (the bird's eye view IoU, iou_2d, is
             computed internally but not returned)
    '''
# corner points are in counter clockwise order
rect1 = [(corners1[i,0], corners1[i,1]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,1]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[:,2].max(), corners2[:,2].max())
ymin = max(corners1[:,2].min(), corners2[:,2].min())
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou | 3.8125 | 4 |
merc/features/rfc1459/motd.py | merc-devel/merc | 4 | 12793621 | from merc import config
from merc import feature
from merc import message
class MotdFeature(feature.Feature):
NAME = __name__
CONFIG_SECTION = 'motd'
install = MotdFeature.install
@MotdFeature.register_config_checker
def check_config(section):
return config.validate(section, str)
class MotdReply(message.Reply):
NAME = "372"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, line, *args):
self.line = line
def as_reply_params(self):
return [self.line]
class MotdStart(message.Reply):
NAME = "375"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, reason, *args):
self.reason = reason
def as_reply_params(self):
return [self.reason]
class EndOfMotd(message.Reply):
NAME = "376"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, reason="End of /MOTD command", *args):
self.reason = reason
def as_reply_params(self):
return [self.reason]
@MotdFeature.register_user_command
class Motd(message.Command):
NAME = "MOTD"
MIN_ARITY = 0
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
motd = app.features.get_config_section(__name__)
user.send_reply(MotdStart(
"- {} Message of the Day".format(app.server.name)))
for line in motd.splitlines():
user.send_reply(MotdReply("- " + line))
user.send_reply(EndOfMotd())
@MotdFeature.hook("user.welcome")
def send_motd_on_welcome(app, user):
user.on_message(app, user.hostmask, Motd())
| 2.3125 | 2 |
app.py | jessicagtz/Project-2-Chicago-Communities | 0 | 12793622 | <reponame>jessicagtz/Project-2-Chicago-Communities
import datetime as dt
import numpy as np
import pandas as pd
from flask import (
Flask,
render_template,
jsonify,
request,
redirect)
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///chi_db.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Communities = Base.classes.comm_names
Neighborhoods = Base.classes.neighborhoods
Twitter = Base.classes.twitter
Population = Base.classes.population
Race = Base.classes.race
Crime = Base.classes.crime
# Create our session (link) from Python to the DB
session = Session(engine)
#################################################
# Flask Routes
#################################################
@app.route("/")
def index():
#render the index template
return render_template("index.html")
@app.route("/dash/<ID>")
def dash(ID):
# query for the community name based off of ID selected
names = session.query(Communities).filter(Communities.id == ID)
all_names = []
for name in names:
comm_dict = {}
comm_dict["name"] = name.community
all_names.append(comm_dict)
name = jsonify(all_names)
# query for community twitter handle based off of ID
twitters = session.query(Twitter).filter(Twitter.id == ID)
handles = []
for handle in twitters:
handle_dict = {}
handle_dict["handle"] = handle.twitter_handle
handles.append(handle_dict)
twitter_handle = jsonify(handles)
# query the population data for chart based off of ID
results = session.query(Population).filter(Population.id == ID)
totals = session.query(Population).filter(Population.id == 78)
pop = []
for population in results:
pop_dict = {}
pop_dict["ID"] = population.id
pop_dict["_1930"] = population._1930
pop_dict["_1940"] = population._1940
pop_dict["_1950"] = population._1950
pop_dict["_1960"] = population._1960
pop_dict["_1970"] = population._1970
pop_dict["_1980"] = population._1980
pop_dict["_1990"] = population._1990
pop_dict["_2000"] = population._2000
pop_dict["_2010"] = population._2010
pop_dict["_2015"] = population._2015
for total in totals:
pop_dict["all_1930"] = total._1930
pop_dict["all_1940"] = total._1940
pop_dict["all_1950"] = total._1950
pop_dict["all_1960"] = total._1960
pop_dict["all_1970"] = total._1970
pop_dict["all_1980"] = total._1980
pop_dict["all_1990"] = total._1990
pop_dict["all_2000"] = total._2000
pop_dict["all_2010"] = total._2010
pop_dict["all_2015"] = total._2015
pop.append(pop_dict)
population_data = jsonify(pop)
# query the race data for chart based off of ID
demographics = session.query(Race).filter(Race.id == ID)
race = []
for demo in demographics:
demo_dict = {}
demo_dict["ID"] = demo.id
demo_dict["asian2015"] = demo.asian2015
demo_dict["black2015"] = demo.black2015
demo_dict["hispanic2015"] = demo.hispanic2015
demo_dict["other2015"] = demo.other2015
demo_dict["white2015"] = demo.white2015
race.append(demo_dict)
race_data = jsonify(race)
# query the neighborhood data for chart based off of ID
neighborhoods = session.query(Neighborhoods).filter(Neighborhoods.ID ==ID)
all_neighborhoods = []
for hood in neighborhoods:
hood_dict = {}
hood_dict["ID"] = hood.ID
hood_dict["Neighborhoods"] = hood.neighborhoods
all_neighborhoods.append(hood_dict)
neighborhood_data = jsonify(all_neighborhoods)
#query the crime data for chart based off of ID
crimes = session.query(Crime).filter(Crime.id == ID)
crime_data = []
for crime in crimes:
crime_dict = {}
crime_dict["ID"] = crime.id
crime_dict["battery"] = crime.battery
crime_dict["deceptive_practice"] = crime.deceptive_practice
crime_dict["homicide"] = crime.homicide
crime_dict["narcotics"] = crime.narcotics
crime_dict["non_criminal"] = crime.non_criminal
crime_dict["sexual"] = crime.sexual
crime_dict["theft"] = crime.theft
crime_data.append(crime_dict)
crimes2017 = jsonify(crime_data)
# render the template
return render_template("dashboard.html", comm_dict=comm_dict, handle_dict=handle_dict, pop_dict=pop_dict, demo_dict=demo_dict, hood_dict=hood_dict, crime_dict=crime_dict)
@app.route("/twitter/<ID>")
def twitter(ID):
twitters = session.query(Twitter).filter(Twitter.id == ID)
handles = []
for handle in twitters:
handle_dict = {}
handle_dict["handle"] = handle.twitter_handle
handles.append(handle_dict)
twitter_handle = jsonify(handles)
return twitter_handle
@app.route("/names/<ID>")
def names(ID):
results = session.query(Communities).filter(Communities.id == ID)
all_communities = []
for comm in results:
comm_dict = {}
comm_dict["ID"] = comm.id
comm_dict["Name"] = comm.community
all_communities.append(comm_dict)
return jsonify(all_communities)
@app.route("/hoods/<ID>")
def hoods(ID):
results = session.query(Neighborhoods).filter(Neighborhoods.ID ==ID)
all_neighborhoods = []
for hood in results:
hood_dict = {}
hood_dict["ID"] = hood.ID
hood_dict["Neighborhoods"] = hood.neighborhoods
all_neighborhoods.append(hood_dict)
return jsonify(all_neighborhoods)
@app.route("/pop/<ID>")
def pop(ID):
results = session.query(Population).filter(Population.id == ID)
totals = session.query(Population).filter(Population.id == 78)
pop = []
for population in results:
pop_dict = {}
pop_dict["ID"] = population.id
pop_dict["1930"] = population._1930
pop_dict["1940"] = population._1940
pop_dict["1950"] = population._1950
pop_dict["1960"] = population._1960
pop_dict["1970"] = population._1970
pop_dict["1980"] = population._1980
pop_dict["1990"] = population._1990
pop_dict["2000"] = population._2000
pop_dict["2010"] = population._2010
pop_dict["2015"] = population._2015
for total in totals:
pop_dict["all_1930"] = total._1930
pop_dict["all_1940"] = total._1940
pop_dict["all_1950"] = total._1950
pop_dict["all_1960"] = total._1960
pop_dict["all_1970"] = total._1970
pop_dict["all_1980"] = total._1980
pop_dict["all_1990"] = total._1990
pop_dict["all_2000"] = total._2000
pop_dict["all_2010"] = total._2010
pop_dict["all_2015"] = total._2015
pop.append(pop_dict)
return jsonify(pop)
@app.route("/race/<ID>")
def race(ID):
demographics = session.query(Race).filter(Race.id == ID)
race = []
for demo in demographics:
demo_dict = {}
demo_dict["ID"] = demo.id
demo_dict["asian2015"] = demo.asian2015
demo_dict["black2015"] = demo.black2015
demo_dict["hispanic2015"] = demo.hispanic2015
demo_dict["other2015"] = demo.other2015
demo_dict["white2015"] = demo.white2015
race.append(demo_dict)
return jsonify(race)
@app.route("/crime/<ID>")
def crime(ID):
crimes = session.query(Crime).filter(Crime.id == ID)
crime_data = []
for crime in crimes:
crime_dict = {}
crime_dict["ID"] = crime.id
crime_dict["battery"] = crime.battery
crime_dict["deceptive_practice"] = crime.deceptive_practice
crime_dict["homicide"] = crime.homicide
crime_dict["narcotics"] = crime.narcotics
crime_dict["non_criminal"] = crime.non_criminal
crime_dict["sexual"] = crime.sexual
crime_dict["theft"] = crime.theft
crime_data.append(crime_dict)
return jsonify(crime_data)
@app.route("/crime")
def crimes():
return render_template("crime.html")
@app.route("/about")
def about():
return("Chicago Community Project: "
"<NAME>, <NAME>, <NAME>, <NAME>, <NAME>")
if __name__ == '__main__':
app.run(debug=True)
| 2.96875 | 3 |
src/launch/scenario_simulator_launch/launch/autoware_auto_perception.launch.py | ruvus/auto | 19 | 12793623 | # Copyright 2021 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_index_python import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
import os
def generate_launch_description():
"""
Launch perception nodes.
* euclidean_cluster
* off_map_obstacles_filter
* ray_ground_classifier
"""
autoware_auto_launch_pkg_prefix = get_package_share_directory(
'autoware_auto_launch')
euclidean_cluster_param_file = os.path.join(
autoware_auto_launch_pkg_prefix, 'param/euclidean_cluster.param.yaml')
off_map_obstacles_filter_param_file = os.path.join(
autoware_auto_launch_pkg_prefix, 'param/off_map_obstacles_filter.param.yaml')
ray_ground_classifier_param_file = os.path.join(
autoware_auto_launch_pkg_prefix, 'param/ray_ground_classifier.param.yaml')
# Arguments
with_obstacles_param = DeclareLaunchArgument(
'with_obstacles',
default_value='True',
description='Enable obstacle detection'
)
euclidean_cluster_param = DeclareLaunchArgument(
'euclidean_cluster_param_file',
default_value=euclidean_cluster_param_file,
description='Path to config file for Euclidean Clustering'
)
off_map_obstacles_filter_param = DeclareLaunchArgument(
'off_map_obstacles_filter_param_file',
default_value=off_map_obstacles_filter_param_file,
description='Path to parameter file for off-map obstacle filter'
)
ray_ground_classifier_param = DeclareLaunchArgument(
'ray_ground_classifier_param_file',
default_value=ray_ground_classifier_param_file,
description='Path to config file for Ray Ground Classifier'
)
# Nodes
euclidean_clustering = Node(
package='euclidean_cluster_nodes',
executable='euclidean_cluster_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('euclidean_cluster_param_file')],
remappings=[
("points_in", "points_nonground")
]
)
off_map_obstacles_filter = Node(
package='off_map_obstacles_filter_nodes',
name='off_map_obstacles_filter_node',
namespace='perception',
executable='off_map_obstacles_filter_nodes_exe',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('off_map_obstacles_filter_param_file')],
output='screen',
remappings=[
('bounding_boxes_in', 'lidar_bounding_boxes'),
('bounding_boxes_out', 'lidar_bounding_boxes_filtered'),
('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
]
)
ray_ground_classifier = Node(
package='ray_ground_classifier_nodes',
executable='ray_ground_classifier_cloud_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('ray_ground_classifier_param_file')],
remappings=[("points_in", "/lidars/points_fused")]
)
return LaunchDescription([
euclidean_cluster_param,
ray_ground_classifier_param,
with_obstacles_param,
off_map_obstacles_filter_param,
euclidean_clustering,
ray_ground_classifier,
off_map_obstacles_filter,
])
| 1.90625 | 2 |
peon/src/lint/principles/definition/no_public_methods_without_a_contract_interface.py | roch1990/peon | 32 | 12793624 | """Nothing to do here...."""
| 1.070313 | 1 |
AlphaPose/Alphapose.py | Nadern96/Realtime-Action-Recognition | 0 | 12793625 | <reponame>Nadern96/Realtime-Action-Recognition
import os
import json
path = r"../data/source_images3"
# f= open("../data_proc/raw_skeletons/skeletons_info.txt", 'w+')
count = 0
couldRename = 0
Classes = {'clap':1,
'hit':2,
'jump':3,
'kick':4,
'punch':5,
'push':6,
'run':7,
'shake':8,
'sit':9,
'situp':10,
'stand':11,
'turn':12,
'walk':13,
'wave':14,
}
imagecount = 1
for subdir, dirs, files in os.walk(path,topdown=True):
dirs.sort()
for dir in dirs:
try:
FullList = []
if str(dir).endswith('.json'):
continue
count += 1
print("proccessing file#" + str(count))
print(dir)
pathtofile = os.path.join(subdir,dir)
command = "python3 /home/mina_atef0/Desktop/AlphaPose/demo.py --indir {} --outdir {} --detbatch 4 ".format(pathtofile,subdir)
os.system(command)
with open('data_proc/alphapose-results.json') as f:
items = json.load(f)
for item in items:
itemList = []
class_name = dir.split('_')[0]
itemList.append(Classes[class_name])
itemList.append(count)
itemList.append(int(item['image_id'][:5]) + 1)
itemList.append(class_name)
itemList.append(dir + '/' + item['image_id'])
itemList = itemList + item['keypoints']
FullList.append(itemList)
FullList = sorted(FullList, key= lambda x: x[2])
with open('../data_proc/raw_skeletons/skeletons_info/'+dir + '.txt', 'w+') as outfile:
json.dump(FullList, outfile)
        except Exception:
            # skip directories whose AlphaPose run or JSON parsing failed
            pass
print("couldn't rename " +str(couldRename) )
print("made " +str(count) ) | 2.421875 | 2 |
src/fal/cli/fal_runner.py | emekdahl/fal | 360 | 12793626 | <gh_stars>100-1000
import argparse
from pathlib import Path
from typing import Any, Dict, List
import os
from dbt.config.profile import DEFAULT_PROFILES_DIR
from fal.run_scripts import raise_for_run_results_failures, run_scripts
from fal.fal_script import FalScript
from faldbt.project import DbtModel, FalDbt, FalGeneralException
def create_fal_dbt(args: argparse.Namespace):
real_project_dir = os.path.realpath(os.path.normpath(args.project_dir))
real_profiles_dir = None
env_profiles_dir = os.getenv("DBT_PROFILES_DIR")
if args.profiles_dir is not None:
real_profiles_dir = os.path.realpath(os.path.normpath(args.profiles_dir))
elif env_profiles_dir:
real_profiles_dir = os.path.realpath(os.path.normpath(env_profiles_dir))
else:
real_profiles_dir = DEFAULT_PROFILES_DIR
if hasattr(args, "state") and args.state is not None:
real_state = Path(os.path.realpath(os.path.normpath(args.state)))
else:
real_state = None
return FalDbt(
real_project_dir,
real_profiles_dir,
args.select,
args.exclude,
args.selector,
args.keyword,
args.threads,
real_state,
args.target,
)
def fal_run(args: argparse.Namespace):
"Runs the fal run command in a subprocess"
selector_flags = args.select or args.exclude or args.selector
if args.all and selector_flags:
raise FalGeneralException(
"Cannot pass --all flag alongside selection flags (--select/--models, --exclude, --selector)"
)
faldbt = create_fal_dbt(args)
models = _get_filtered_models(faldbt, args.all, selector_flags, args.before)
scripts = _select_scripts(args, models, faldbt)
if args.before:
if not _scripts_flag(args):
# run globals when no --script is passed
_run_global_scripts(faldbt, args.before)
results = run_scripts(scripts, faldbt)
raise_for_run_results_failures(scripts, results)
else:
results = run_scripts(scripts, faldbt)
raise_for_run_results_failures(scripts, results)
if not _scripts_flag(args):
# run globals when no --script is passed
_run_global_scripts(faldbt, args.before)
def _scripts_flag(args: argparse.Namespace) -> bool:
return bool(args.scripts)
def _select_scripts(
args: argparse.Namespace, models: List[DbtModel], faldbt: FalDbt
) -> List[FalScript]:
scripts = []
scripts_flag = _scripts_flag(args)
for model in models:
model_scripts = model.get_scripts(args.keyword, bool(args.before))
for path in model_scripts:
if not scripts_flag:
# run all scripts when no --script is passed
scripts.append(FalScript(faldbt, model, path))
elif path in args.scripts:
# if --script selector is there only run selected scripts
scripts.append(FalScript(faldbt, model, path))
return scripts
def _run_global_scripts(faldbt: FalDbt, is_before: bool):
global_scripts = list(
map(
lambda path: FalScript(faldbt, None, path),
faldbt._global_script_paths["before" if is_before else "after"],
)
)
results = run_scripts(global_scripts, faldbt)
raise_for_run_results_failures(global_scripts, results)
def _get_models_with_keyword(faldbt: FalDbt) -> List[DbtModel]:
return list(
filter(lambda model: faldbt.keyword in model.meta, faldbt.list_models())
)
def _get_filtered_models(faldbt: FalDbt, all, selected, before) -> List[DbtModel]:
selected_ids = _models_ids(faldbt._compile_task._flattened_nodes)
filtered_models: List[DbtModel] = []
if (
not all
and not selected
and not before
and faldbt._run_results.nativeRunResult is None
):
from faldbt.parse import FalParseError
raise FalParseError(
"Cannot define models to run without selection flags or dbt run_results artifact or --before flag"
)
models = _get_models_with_keyword(faldbt)
for node in models:
if selected:
if node.unique_id in selected_ids:
filtered_models.append(node)
elif before:
if node.get_scripts(faldbt.keyword, before) != []:
filtered_models.append(node)
elif all:
filtered_models.append(node)
elif node.status != "skipped":
filtered_models.append(node)
return filtered_models
def _models_ids(models):
return list(map(lambda r: r.unique_id, models))
| 2.078125 | 2 |
Compute resonances/calc_cxroots.py | zmoitier/Asymptotic_metacavity | 0 | 12793627 | """ Compute resonances using the cxroots library (contour integration techniques)
Authors: <NAME>, <NAME>
Karlsruhe Institute of Technology, Germany
University of California, Merced
Last modified: 20/04/2021
"""
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
from cxroots import AnnulusSector, Circle
from scipy.special import h1vp, hankel1, iv, ivp
## Entries ##
ε = float(argv[1])  # for example -1.1 (must parse as a float)
η = np.sqrt(-ε)
print(f"η = {η}")
c = η + 1 / η
## Internal functions ##
def rootsAnnSec(m, rMin, rMax, aMin, aMax):
f0 = lambda k: ivp(m, η * k) * hankel1(m, k) / η + iv(m, η * k) * h1vp(m, k)
f1 = (
lambda k: ivp(m, η * k, 2) * hankel1(m, k)
+ c * ivp(m, η * k) * h1vp(m, k)
+ iv(m, η * k) * h1vp(m, k, 2)
)
A = AnnulusSector(center=0.0, radii=(rMin, rMax), phiRange=(aMin, aMax))
z = A.roots(f0, df=f1)
return z.roots
def writeFile(myFile, m, z):
if np.size(z, 0):
for i in range(np.size(z, 0)):
myFile.write(f"{m} {z[i].real} {z[i].imag}\n")
def calcInt():
plaTrue = ε > -1.0
if plaTrue:
Int = open(f"eps_{ε}_int", "w")
Pla = open(f"eps_{ε}_pla", "w")
else:
Int = open(f"eps_{ε}_int", "w")
for m in range(65):
print(f"m = {m}")
f0 = lambda k: ivp(m, η * k) * hankel1(m, k) / η + iv(m, η * k) * h1vp(m, k)
f1 = (
lambda k: ivp(m, η * k, 2) * hankel1(m, k)
+ c * ivp(m, η * k) * h1vp(m, k)
+ iv(m, η * k) * h1vp(m, k, 2)
)
t = np.linspace(0.2, 65.0, num=1024)
k = 1j * t
rf = np.real(f0(k))
ind = np.where(rf[1:] * rf[:-1] < 0.0)[0]
roots = np.zeros(np.shape(ind), dtype=complex)
for a, i in enumerate(ind):
C = Circle(center=1j * (t[i] + t[i + 1]) / 2.0, radius=(t[i + 1] - t[i]))
z = C.roots(f0, df=f1)
roots[a] = z.roots[0]
if plaTrue:
if m:
writeFile(Int, m, roots[1:])
writeFile(Pla, m, roots[[0]])
else:
writeFile(Int, m, roots)
else:
writeFile(Int, m, roots)
if plaTrue:
Int.close()
Pla.close()
else:
Int.close()
calcInt()
def calcResPla():
if ε < -1.0:
Pla = open(f"eps_{ε}_pla", "w")
angle = -np.pi / 4.0
for m in range(1, 65):
r = max(0.1, 0.9 * np.sqrt(1.0 - η ** (-2)) * m - 1.0)
R = max(2.0, 1.1 * np.sqrt(1.0 - η ** (-2)) * m + 1.0)
a = min(angle, -1e-3)
z = rootsAnnSec(m, r, R, a, 1e-3)
writeFile(Pla, m, z)
angle = np.angle(z[0])
Pla.close()
calcResPla()
def calcResOut():
Out = open(f"eps_{ε}_out", "w")
rMin = 0.2
rMax = 5.0
aMin = -np.pi + 0.01
aMax = 0.0
for m in range(33, 65):
print(f"m = {m}")
z = rootsAnnSec(m, rMin, rMax, aMin, aMax)
writeFile(Out, m, z)
if m > 3:
zMod = np.abs(z)
zArg = np.angle(z)
rMin = max(0.2, np.amin(zMod) * 0.75)
rMax = max(rMax, np.amax(zMod) + 3.0)
aMin = min(aMin, (-np.pi + np.amin(zArg)) / 2.0)
aMax = np.amax(zArg) / 2.0
Out.close()
calcResOut()
def calc_cx_pla():
with open(f"eps_{ε}_pla", "w") as file:
rMin, rMax = 0.1, 0.5
aMin = -np.pi / 4
for m in range(1, 65):
z = rootsAnnSec(m, rMin, rMax, aMin, 1e-3)[0]
file.write(f"{m} {z.real} {z.imag}\n")
rMin = abs(z)
rMax = abs(z) * (m + 1) / m + 1
aMin = min(2.5 * np.angle(z), -1e-3)
print(m, rMin, rMax, aMin)
calc_cx_pla()
def rewriteSave():
Int = np.loadtxt(f"eps_{ε}_int")
Pla = np.loadtxt(f"eps_{ε}_pla")
Out = np.loadtxt(f"eps_{ε}_out")
ind = np.argsort(Out[:, 1])[::-1]
out2 = Out[ind]
rep = out2[:, 1] > -1e-3
np.savez(f"eps_{ε}.npz", inner=Int, plasmon=Pla, outer=out2[rep])
rewriteSave()
def rewriteSave_pla():
Pla = np.loadtxt(f"eps_{ε}_pla")
np.savez(f"eps_{ε}.npz", plasmon=Pla)
# rewriteSave_pla()
| 2.828125 | 3 |
wolk/interfaces/OutboundMessageFactory.py | iperformance/WolkConnect-Python | 0 | 12793628 | # Copyright 2018 WolkAbout Technology s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
"""
OutboundMessageFactory Module.
"""
class OutboundMessageFactory(ABC):
"""Serialize messages to be sent to WolkAbout IoT Platform."""
@abstractmethod
def make_from_sensor_reading(self, reading):
"""
Serialize a sensor reading to be sent to WolkAbout IoT Platform.
:param reading: Reading to be serialized
:type reading: wolk.models.SensorReading.SensorReading
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_alarm(self, alarm):
"""
Serialize an alarm event to be sent to WolkAbout IoT Platform.
:param alarm: Alarm to be serialized
:type alarm: wolk.models.Alarm.Alarm
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_actuator_status(self, actuator):
"""
Serialize an actuator status to be sent to WolkAbout IoT Platform.
:param actuator: Actuator status to be serialized
:type actuator: wolk.models.ActuatorStatus.ActuatorStatus
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_firmware_status(self, firmware_status):
"""
Report the current status of the firmware update process.
:param firmware_status: Current status of the firmware update process
:type firmware_status: wolk.models.FirmwareStatus.FirmwareStatus
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_chunk_request(self, file_name, chunk_index, chunk_size):
"""
Request a chunk of the firmware file from WolkAbout IoT Platform.
:param file_name: Name of the file that contains the requested chunk
:type file_name: str
:param chunk_index: Index of the requested chunk
:type chunk_index: int
:param chunk_size: Size of the requested chunk
:type chunk_size: int
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_firmware_version(self, version):
"""
Report the current firmware version to WolkAbout IoT Platform.
:param version: Current firmware version
:type version: str
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_keep_alive_message(self):
"""
Make a ping message to be sent to WolkAbout IoT Platform.
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
@abstractmethod
def make_from_configuration(self, configuration):
"""
Serialize device's configuration to be sent to WolkAbout IoT Platform.
:param configuration: Device's current configuration
:type configuration: dict
:returns: message
:rtype: wolk.models.OutboundMessage.OutboundMessage
"""
pass
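# Illustrative only (not part of the original file): a concrete factory would
# subclass OutboundMessageFactory and implement every abstract method above.
# The OutboundMessage constructor and topic layout below are assumptions.
#
#   class JsonOutboundMessageFactory(OutboundMessageFactory):
#       def make_from_sensor_reading(self, reading):
#           payload = json.dumps({"data": reading.value})
#           return OutboundMessage("readings/" + reading.reference, payload)
#       ...  # remaining abstract methods implemented the same way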
| 2.140625 | 2 |
Ilya and Bank Account.py | mdhasan8/Problem_Solving | 0 | 12793629 | <reponame>mdhasan8/Problem_Solving<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 20:23:31 2021
@author: Easin
"""
in1 = int(input())
list1 = []
if in1 >= 0:
    # A non-negative balance is already maximal: delete nothing.
    print(in1)
else:
    # Option 1: delete the last digit: abs(in1) // 10.
    x = abs(in1)//10
    list1.append(-x)
    # Option 2: delete the digit before last: keep the leading digits
    # (abs(in1) // 100) and re-append the final digit (abs(in1) % 10).
    y = abs(in1) % 10
    z = abs(in1)//100
    m = str(z)+str(y)
    list1.append(-int(m))
    print(max(list1)) | 3.328125 | 3 |
Chapter 2/wall_time.py | indrag49/Computational-Stat-Mech | 19 | 12793630 | from sympy import oo
def wall_time(pos, vel, radius):
    # Time until a disk of given radius at `pos` with velocity `vel`
    # hits the wall at 0 or at 1.
    if vel > 0.0:
        return (1.0 - radius - pos) / vel
    elif vel < 0.0:
        return (pos - radius) / abs(vel)
    return float(oo)
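# A quick illustrative check (not in the original snippet): a disk of radius
# 0.1 at position 0.5 moving right at velocity +1.0 reaches the wall at
# 1.0 - radius after (1.0 - 0.1 - 0.5) / 1.0 = 0.4 time units; with zero
# velocity the collision time is infinite.
#   wall_time(0.5, 1.0, 0.1)  # -> 0.4
#   wall_time(0.5, 0.0, 0.1)  # -> oo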
| 2.890625 | 3 |
users/migrations/0002_auto_20150708_1621.py | moshthepitt/answers | 6 | 12793631 | <filename>users/migrations/0002_auto_20150708_1621.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
('name', models.CharField(max_length=300, verbose_name='Group Name')),
],
options={
'ordering': ['name'],
'verbose_name': 'User Group',
'verbose_name_plural': 'User Groups',
},
),
migrations.AlterModelOptions(
name='userprofile',
options={'ordering': ['user__first_name', 'created_on'], 'verbose_name': 'User Profile', 'verbose_name_plural': 'User Profiles'},
),
migrations.AddField(
model_name='usergroup',
name='manager',
field=models.ForeignKey(default=None, blank=True, to='users.UserProfile', null=True, verbose_name='Group Manager'),
),
migrations.AddField(
model_name='usergroup',
name='parent',
field=models.ForeignKey(default=None, blank=True, to='users.UserGroup', null=True, verbose_name='Parent Group'),
),
migrations.AddField(
model_name='userprofile',
name='group',
field=models.ManyToManyField(default=None, to='users.UserGroup', blank=True),
),
]
| 1.796875 | 2 |
swagger_marshmallow_codegen/tests/dst/00default.py | dotness/swagger-marshmallow-codegen | 0 | 12793632 | <reponame>dotness/swagger-marshmallow-codegen<filename>swagger_marshmallow_codegen/tests/dst/00default.py
from marshmallow import (
Schema,
fields
)
import datetime
from collections import OrderedDict
class X(Schema):
string = fields.String(missing=lambda: 'default')
integer = fields.Integer(missing=lambda: 10)
boolean = fields.Boolean(missing=lambda: True)
datetime = fields.DateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1))
object = fields.Nested('XObject', missing=lambda: OrderedDict([('name', 'foo'), ('age', 20)]))
array = fields.List(fields.Integer(), missing=lambda: [1, 2, 3])
class XObject(Schema):
name = fields.String(missing=lambda: 'foo')
age = fields.Integer(missing=lambda: 20)
| 2.21875 | 2 |
app/modules/typos.py | Centaurus-dj/Calculonv | 0 | 12793633 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
    import modules.typo_colors as c
except Exception as e:
    print(e)
##############################################################################
####
#### CLASS OF TYPOS USED FOR WRITING
####
##############################################################################
class typo:
    def __init__(self, warning=False):
        self.type = self
        self.warn = warning
        if self.warn:  ## We print this warning if self.warn is True
            print("You initialised a typo class; note that it only creates spacing in the vertical alignment")
            print("If you don't want this warning to appear again, initialise with typo(warning=False)")
            print(" ")
    def vspace(self, text=None, times=33):      ## Prints a blank line, letting the dev
        print(" ")                              ## add vertical space between text.
        if text is not None:                    ## Optionally prints some text
            print(text)                         ## to convey information.
        print(" ")
    def hashsep(self, text=None, times=33):     ## Prints a divider made of hashes,
        times = int(times)                      ## with blank lines between the divider
        print(" ")                              ## and the text before and after it.
        print(times*"#")                        ## Optional text can be passed
        if text is not None:                    ## to print information inside it.
            print(text)
        print(times*"#")
        print(" ")
    def barsep(self, text=None, times=33):      ## Prints a divider made of bars,
        times = int(times)                      ## with blank lines between the divider
        print(" ")                              ## and the text before and after it.
        print(times*"/")                        ## Optional text can be passed
        if text is not None:                    ## to print information inside it.
            print(text)
        print(times*"/")
        print(" ")
### Functions for printing text
def printg(self, text="Text Sample"): ## It's still in development
try:
print(c.color.bold + ' Hello World ! ' + c.color.end) ##Normally, it prints the text in bold
except Exception as e: ## It's actually not working
self.ErrorPrecisedPrint(e)
def ErrorPrint(self):
print(" ") ## It's executed if any error occurs
print("We're sorry but an error occured.... Please retry")
print("If this error persists or if you encounter another error")
print("please contact us at <EMAIL>")
print(" ")
def ErrorPrecisedPrint(self, error):
print(" ") ## It's executed if any error occurs
print("We're sorry but this error occured:") ## and if we want more informations
print(error)
print("If this error persists or if you encounter another error")
print("please contact us at <EMAIL>")
print(" ")
| 3.90625 | 4 |
2020/04_2/solution.py | budavariam/advent_of_code | 0 | 12793634 | """ Advent of code 2020 day 4/2 """
import logging
import math
from os import path
import re
record_splitter = re.compile(' |\n')
# Field info:
# byr (Birth Year) - four digits; at least 1920 and at most 2002.
# iyr (Issue Year) - four digits; at least 2010 and at most 2020.
# eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
# hgt (Height) - a number followed by either cm or in:
# If cm, the number must be at least 150 and at most 193.
# If in, the number must be at least 59 and at most 76.
# hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
# ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
# pid (Passport ID) - a nine-digit number, including leading zeroes.
# cid (Country ID) - ignored, missing or not.
def height_validator(x):
match = re.match(r'^(\d+)(cm|in)$', x)
if match is not None:
value = match.group(1)
unit = match.group(2)
if unit == "cm":
return 150 <= int(value) <= 193
elif unit == "in":
return 59 <= int(value) <= 76
expected_fields = [
{"key": 'byr', "validator": lambda x:
re.match(r'^\d{4}$', x) is not None and (1920 <= int(x) <= 2002)}, # (Birth Year)
{"key": 'iyr', "validator": lambda x: \
re.match(r'^\d{4}$', x) is not None and (2010 <= int(x) <= 2020)}, # (Issue Year)
{"key": 'eyr', "validator": lambda x: \
re.match(r'^\d{4}$', x) is not None and (2020 <= int(x) <= 2030)}, # (Expiration Year)
{"key": 'hgt', "validator": height_validator}, # (Height)
{"key": 'hcl', "validator": lambda x: \
re.match(r'^#[a-f0-9]{6}$', x) is not None}, # (Hair Color)
{"key": 'ecl', "validator": lambda x: \
re.match(r'^amb|blu|brn|gry|grn|hzl|oth$', x) is not None}, # (Eye Color)
{"key": 'pid', "validator": lambda x: \
re.match(r'^\d{9}$', x) is not None}, # (Passport ID)
# {"key": 'cid', "validator": lambda x: \
# True}, # (Country ID),
]
class PassportProcessor(object):
def __init__(self, records):
self.records = records
def validate_field(self, record, field):
result = field["key"] in record and field["validator"](record[field["key"]])
# print(result, record)
return result
def solve(self):
result = 0
for record in self.records:
result += 1 if all([self.validate_field(record, field) for field in expected_fields]) else 0
return result
def solution(data):
""" Solution to the problem """
# split records by empty lines, split fields by ":"-s, create a list of dictionaries from the records.
lines = [{key: value for [key, value] in [field.split(
":") for field in record_splitter.split(record)]} for record in data.split("\n\n")]
solver = PassportProcessor(lines)
return solver.solve()
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
| 3.34375 | 3 |
src/storage-preview/azext_storage_preview/_help.py | mboersma/azure-cli-extensions | 1 | 12793635 | <reponame>mboersma/azure-cli-extensions
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
# pylint: disable=line-too-long, too-many-lines
helps['storage account create'] = """
type: command
short-summary: Create a storage account.
long-summary: >
The SKU of the storage account defaults to 'Standard_RAGRS'.
examples:
- name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
"""
helps['storage account update'] = """
type: command
short-summary: Update the properties of a storage account.
"""
helps['storage blob service-properties'] = """
type: group
short-summary: Manage storage blob service properties.
"""
helps['storage blob service-properties update'] = """
type: command
short-summary: Update storage blob service properties.
"""
helps['storage account management-policy'] = """
type: group
short-summary: Manage storage account management policies.
"""
helps['storage account management-policy create'] = """
type: command
short-summary: Creates the data policy rules associated with the specified storage account.
"""
helps['storage account management-policy update'] = """
type: command
short-summary: Updates the data policy rules associated with the specified storage account.
"""
helps['storage azcopy'] = """
type: group
short-summary: |
[EXPERIMENTAL] Manage storage operations utilizing AzCopy.
long-summary: |
Open issues here: https://github.com/Azure/azure-storage-azcopy
"""
helps['storage azcopy blob'] = """
type: group
short-summary: Manage object storage for unstructured data (blobs) using AzCopy.
"""
helps['storage azcopy blob upload'] = """
type: command
short-summary: Upload blobs to a storage blob container using AzCopy.
examples:
- name: Upload a single blob to a container.
text: az storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/file" -d NewBlob
- name: Upload a directory to a container.
text: az storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory" --recursive
- name: Upload the contents of a directory to a container.
text: az storage azcopy blob upload -c MyContainer --account-name MyStorageAccount -s "path/to/directory/*" --recursive
"""
helps['storage azcopy blob download'] = """
type: command
short-summary: Download blobs from a storage blob container using AzCopy.
examples:
- name: Download a single blob from a container.
text: az storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s "path/to/blob" -d "path/to/file"
- name: Download a virtual directory from a container.
text: az storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s "path/to/virtual_directory" -d "download/path" --recursive
- name: Download the contents of a container onto a local file system.
text: az storage azcopy blob download -c MyContainer --account-name MyStorageAccount -s * -d "download/path" --recursive
"""
helps['storage azcopy blob delete'] = """
type: command
short-summary: Delete blobs from a storage blob container using AzCopy.
examples:
- name: Delete a single blob from a container.
text: az storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t TargetBlob
- name: Delete all blobs from a container.
text: az storage azcopy blob delete -c MyContainer --account-name MyStorageAccount --recursive
- name: Delete all blobs in a virtual directory.
text: az storage azcopy blob delete -c MyContainer --account-name MyStorageAccount -t "path/to/virtual_directory" --recursive
"""
helps['storage azcopy run-command'] = """
type: command
short-summary: Run a command directly using the AzCopy CLI. Please use SAS tokens for authentication.
"""
| 1.601563 | 2 |
gencode/python/udmi/schema/reflect_config.py | johnrandolph/udmi | 1 | 12793636 | <filename>gencode/python/udmi/schema/reflect_config.py<gh_stars>1-10
"""Generated class for reflect_config.json"""
class SetupReflectorConfig:
"""Generated schema class"""
def __init__(self):
self.last_state = None
self.deployed_at = None
@staticmethod
def from_dict(source):
if not source:
return None
result = SetupReflectorConfig()
result.last_state = source.get('last_state')
result.deployed_at = source.get('deployed_at')
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = SetupReflectorConfig.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.last_state:
result['last_state'] = self.last_state # 5
if self.deployed_at:
result['deployed_at'] = self.deployed_at # 5
return result
class ReflectorConfig:
"""Generated schema class"""
def __init__(self):
self.timestamp = None
self.version = None
self.setup = None
@staticmethod
def from_dict(source):
if not source:
return None
result = ReflectorConfig()
result.timestamp = source.get('timestamp')
result.version = source.get('version')
result.setup = SetupReflectorConfig.from_dict(source.get('setup'))
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = ReflectorConfig.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.timestamp:
result['timestamp'] = self.timestamp # 5
if self.version:
result['version'] = self.version # 5
if self.setup:
result['setup'] = self.setup.to_dict() # 4
return result
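# Illustrative round trip (not part of the generated file; values are made up):
#   cfg = ReflectorConfig.from_dict({'timestamp': 't0', 'version': '1',
#                                    'setup': {'last_state': 'ok'}})
#   cfg.to_dict()
#   # -> {'timestamp': 't0', 'version': '1', 'setup': {'last_state': 'ok'}}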
| 2.015625 | 2 |
lib/scRNA/clonotype_split.py | shengqh/ngsperl | 6 | 12793637 | <filename>lib/scRNA/clonotype_split.py
import argparse
import logging
import os
import os.path
import sys
import re
import json
import pandas as pd
from collections import OrderedDict
def initialize_logger(logfile, args):
logger = logging.getLogger('clonotype_split')
loglevel = logging.INFO
logger.setLevel(loglevel)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel(loglevel)
handler.setFormatter(formatter)
logger.addHandler(handler)
# create error file handler and set level to error
handler = logging.FileHandler(logfile, "w")
handler.setLevel(loglevel)
handler.setFormatter(formatter)
logger.addHandler(handler)
return(logger)
def check_file(filename, parser):
    if not os.path.isfile(filename):
        print("error: file does not exist: " + filename)
parser.print_help()
sys.exit(1)
def split(json_file, cell_hashtag_file, hashtag_sample_file, output_folder, logger):
logger.info("reading %s" % cell_hashtag_file)
cells=pd.read_csv(cell_hashtag_file)
cells=cells.loc[cells['HTO.global'] == 'Singlet']
barcode_dict = dict(zip(cells.iloc[:, 0], cells.HTO))
#print(barcode_dict)
logger.info("reading %s" % hashtag_sample_file)
samples=pd.read_table(hashtag_sample_file, header=None)
samples_dict = dict(zip(samples.iloc[:, 1], samples.iloc[:, 0]))
print(samples_dict)
logger.info("reading %s" % json_file)
json_data = []
with open(json_file, "rt") as fin:
data = json.load(fin)
for record in data:
json_data.append(record)
for sample_name in samples_dict.values():
sample_folder = os.path.join(output_folder, sample_name)
if not os.path.isdir(sample_folder):
os.mkdir(sample_folder)
sample_file = os.path.join(sample_folder, "all_contig_annotations.json")
sample_data = [record for record in json_data if record['barcode'] in barcode_dict and samples_dict[barcode_dict[record['barcode']]] == sample_name]
logger.info("writing %s" % sample_file)
with open(sample_file, "wt") as fout:
json.dump(sample_data, fout, indent=4)
logger.info("done")
def main():
parser = argparse.ArgumentParser(description="merge clonotype data",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
DEBUG = False
NOT_DEBUG = not DEBUG
parser.add_argument('-i', '--input', action='store', nargs='?', help="Input clone type json file", required=NOT_DEBUG)
parser.add_argument('-c', '--cell_hashtag', action='store', nargs='?', help="Input cell hashtag file", required=NOT_DEBUG)
parser.add_argument('-s', '--hashtag_sample', action='store', nargs='?', help="Input hashtag sample file", required=NOT_DEBUG)
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output folder")
if not DEBUG and len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if DEBUG:
args.input="/data/cqs/alexander_gelbard_data/AG_5126_10X/VDJ/5126-AG-4/all_contig_annotations.json"
args.cell_hashtag="/scratch/cqs/alexander_gelbard_projects/20201202_5126_scRNA_split/split_samples/result/COVID/COVID.HTO.csv"
args.hashtag_sample="/scratch/cqs/alexander_gelbard_projects/20201202_5126_scRNA_split/split_bam/result/fileList_3_COVID.txt"
args.output="/scratch/cqs/alexander_gelbard_projects/20201202_5126_scRNA_split/clonotype_split/result/"
check_file(args.input, parser)
check_file(args.cell_hashtag, parser)
check_file(args.hashtag_sample, parser)
logger = initialize_logger(os.path.join(args.output, "clonotype_split.log"), args)
split(args.input, args.cell_hashtag, args.hashtag_sample, args.output, logger)
if __name__ == "__main__":
main()
| 2.40625 | 2 |
week 12/w12_group.py | belarminobrunoz/BYUI-CSE-110 | 0 | 12793638 |
with open("week 12/books_and_chapters.txt") as scripture_list:
largest_book = 0
largest_book_name = ""
chosen_b_section = ""
user_choice = input("""
What volume of scripture do you want to look at?
1. Old Testament
2. New Testament
3. Book of Mormon
4. Doctrine and Covenants
5. Pearl of Great Price
""")
if user_choice == "1":
chosen_b_section = "Old Testament"
elif user_choice == "2":
chosen_b_section = "New Testament"
elif user_choice == "3":
chosen_b_section = "Book of Mormon"
elif user_choice == "4":
chosen_b_section = "Doctrine and Covenants"
elif user_choice == "5":
chosen_b_section = "Pearl of Great Price"
for line in scripture_list:
#Genesis:50:Old Testament
clean_list = line.strip()
book = clean_list.split(":")
book_name = book[0]
book_chapters = int(book[1])
book_section = book[2]
if book_section == chosen_b_section:
print(f"Scripture: {book_section}, Book: {book_name}, Chapters: {book_chapters}")
if book_chapters > largest_book:
largest_book = book_chapters
largest_book_name = book_name
print("*"*20)
print(f"The largest book is {largest_book_name} with {largest_book} chapters")
| 3.859375 | 4 |
smpl/exec.py | robertblackwell/smpl | 0 | 12793639 | import os
import sys
import subprocess
from typing import Union, TextIO, List, AnyStr
import smpl.log_module as logger
#
# This module executes commands, manages output from those commands and provides a dry-run capability.
#
# The primary function is
#
# def run(cmd, where)
#
# dry-run and output options are controlled by:
#
# def configure(arg_dry_run, arg_reporting_option)
#
# both arguments can be provided as kw-args and have defaults: no dry-run and report everything
# configure() should be called before any calls to run()
#
# Output options are:
#    REPORTING_OPT_STDOUT_STDERR : simple pass through stdout and stderr
#    REPORTING_OPT_STDOUT_ONLY : simple pass through stdout and show any stderr output only on a failure
#    REPORTING_OPT_STDERR_ONLY : show stderr only on a failure and does not show any stdout
#    REPORTING_OPT_NEITHER : shows no output either from stdout or stderr
#    REPORTING_OPT_STDERR_STDOUT_PROGRESS : shows stderr only on a failure and prints an X for each line
#           of stdout - does this in realtime while the command is executing
#
REPORTING_OPT_STDOUT_STDERR = 1
REPORTING_OPT_STDOUT_ONLY = 2
REPORTING_OPT_STDERR_ONLY = 3
REPORTING_OPT_STDERR_STDOUT_PROGRESS = 5
REPORTING_OPT_NEITHER = 4
class Options:
def __init__(self):
self.reporting_option = REPORTING_OPT_STDOUT_ONLY
self.dry_run = False
options: Options = Options()
def configure(arg_reporting_option = REPORTING_OPT_STDOUT_STDERR, arg_dry_run: bool = False) -> None:
options.reporting_option = arg_reporting_option
options.dry_run = arg_dry_run
logger.debugln("dry_run: {} reporting: {}".format(options.dry_run, options.reporting_option))
def exec_cmd(cmd, where: Union[str, None]) -> None:
""" Does the hard work of executing commands, optionally in the given directory
with the reporting global reporting option.
On failure of the command it quits the program
"""
logger.debugln(" cmd: {} where: {} dry_run: {}".format(",".join(cmd), where, options.dry_run))
if options.dry_run:
return
if where is None:
where = os.getcwd()
try:
stderr_output = "unassigned"
if options.reporting_option == REPORTING_OPT_STDOUT_STDERR:
result = subprocess.run(cmd, cwd = where)
retcode = result.returncode
elif options.reporting_option == REPORTING_OPT_STDOUT_ONLY:
result = subprocess.run(cmd, cwd = where, stderr=subprocess.PIPE)
retcode = result.returncode
stderr_output = result.stderr
elif options.reporting_option == REPORTING_OPT_STDERR_ONLY:
result = subprocess.run(cmd, cwd = where, stdout=subprocess.PIPE)
retcode = result.returncode
stderr_output = result.stderr
elif options.reporting_option == REPORTING_OPT_STDERR_STDOUT_PROGRESS:
count = 0
result = subprocess.Popen(cmd, cwd = where, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while result.poll() is None:
if count == 0:
sys.stdout.write("\n")
stdoutline = result.stdout.readline()
sys.stdout.write("X")
count = (count + 1) % 50
            flush = result.stdout.read()  # drain any remaining buffered output
            sys.stdout.write("\n")
            result.stdout.close()
retcode = result.returncode
stderr_output = result.stderr
else:
result = subprocess.run(cmd, cwd = where, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
retcode = result.returncode
stderr_output = result.stderr
if retcode > 0:
sys.stderr.write("ERROR cmd: {} return code {}\n".format(", ".join(cmd), retcode))
sys.stderr.write("stderr {}\n".format(stderr_output))
raise RuntimeError("bad return code")
except Exception as exception:
sys.stderr.write("Cmd was {}\n".format(", ".join(cmd)))
sys.stderr.write(
"An error occurred while running command [{}] error type: {}\n".format(", ".join(cmd), type(exception).__name__))
sys.stderr.write("Details: \n{}\n".format(str(exception)))
quit()
def run(cmd: List[str], where: Union[str, None] = None) -> None:
logger.debugln(" cmd: {} where: {}".format(",".join(cmd), where))
if not isinstance(cmd, list):
raise ValueError("cmd must be a list")
# exec_cmd handles failure of the command
exec_cmd(cmd, where)
if __name__ == '__main__':
logger.init(logger.LOG_LEVEL_WARN)
logger.set_stdout_logfile()
configure(arg_dry_run=False, arg_reporting_option=REPORTING_OPT_STDOUT_ONLY)
run(["wget", "http://whiteacorn.com"], None)
run(["tree", "/home/robert/Projects/smpl"])
configure(arg_dry_run=False, arg_reporting_option=REPORTING_OPT_STDERR_STDOUT_PROGRESS)
run(["tree", "/home/robert/Projects/smpl"])
configure(arg_dry_run=False, arg_reporting_option=REPORTING_OPT_STDOUT_ONLY)
run(["tree", "/xhome/robert/Projects/smpl"])
| 2.609375 | 3 |
src/backend/connector/server.py | JDaniloC/Electronpy | 0 | 12793640 | <reponame>JDaniloC/Electronpy
from .handler import connect_websocket, spawn, _javascript_call
from bottle.ext import websocket as bottle_websocket
import bottle
def start(port = 4949, block = True, quiet = True):
def run_server():
return bottle.run(
port = port,
quiet = quiet,
host = "0.0.0.0",
app = bottle.default_app(),
server = bottle_websocket.GeventWebSocketServer,
)
if block:
run_server()
else:
spawn(run_server)
bottle.route(
path = '/',
callback = connect_websocket,
apply = (bottle_websocket.websocket,))
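# Illustrative usage (not part of this file): serve the websocket endpoint on
# the default port without blocking the caller.
#   start(port=4949, block=False)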
| 2.328125 | 2 |
lightbus/utilities/io.py | gcollard/lightbus | 178 | 12793641 | import logging
logger = logging.getLogger(__name__)
def make_file_safe_api_name(api_name):
"""Make an api name safe for use in a file name"""
return "".join([c for c in api_name if c.isalpha() or c.isdigit() or c in (".", "_", "-")])
| 2.90625 | 3 |
dependencies/svgwrite/tests/test_clock_val_parser.py | charlesmchen/typefacet | 21 | 12793642 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman --<<EMAIL>>
# Purpose: test clock_val_parser
# Created: 03.11.2010
# Copyright (C) 2010, <NAME>
# License: GPLv3
import sys
import unittest
PYTHON3 = sys.version_info[0] > 2
if PYTHON3:
import svgwrite.data.pyparsing_py3 as pp
else:
import svgwrite.data.pyparsing_py2 as pp
from svgwrite.data.svgparser import _build_clock_val_parser
from svgwrite.data.svgparser import _build_wall_clock_val_parser
class TestClockValParser(unittest.TestCase):
clock_val_parser = _build_clock_val_parser()
def is_valid(self, value):
try:
self.clock_val_parser.parseString(value, parseAll=True)
return True
except pp.ParseException:
return False
def test_full_clock_values(self):
self.assertTrue(self.is_valid("02:30:03"))
self.assertTrue(self.is_valid("01:00:00"))
self.assertTrue(self.is_valid("50:00:10.25"))
def test_partial_clock_values(self):
self.assertTrue(self.is_valid("02:33"))
self.assertTrue(self.is_valid("00:10.5"))
def test_time_count_values(self):
self.assertTrue(self.is_valid("3.2h"))
self.assertTrue(self.is_valid("45min"))
self.assertTrue(self.is_valid("30s"))
self.assertTrue(self.is_valid("5ms"))
self.assertTrue(self.is_valid("12.467"))
class TestWallClockValParser(unittest.TestCase):
wallclock_parser = _build_wall_clock_val_parser()
def is_valid(self, value):
try:
self.wallclock_parser.parseString(value, parseAll=True)
return True
except pp.ParseException:
return False
def test_date_plus_hhmm(self):
# Complete date plus hours and minutes:
# YYYY-MM-DDThh:mmTZD (e.g. 1997-07-16T19:20+01:00)
self.assertTrue(self.is_valid("1997-07-16T19:20+01:00"))
def test_date_plus_hhmmss(self):
# Complete date plus hours, minutes and seconds:
# YYYY-MM-DDThh:mm:ssTZD (e.g. 1997-07-16T19:20:30+01:00)
self.assertTrue(self.is_valid("1997-07-16T19:20:30+01:00"))
def test_date_plus_hhmmss_frac(self):
# Complete date plus hours, minutes, seconds and a decimal fraction of a second
# YYYY-MM-DDThh:mm:ss.sTZD (e.g. 1997-07-16T19:20:30.45+01:00)
self.assertTrue(self.is_valid("1997-07-16T19:20:30.45+01:00"))
if __name__=='__main__':
    unittest.main()
| 2.625 | 3 |
manager.py | zhangmingkai4315/Flask-Web-App | 0 | 12793643 | <reponame>zhangmingkai4315/Flask-Web-App
#!/usr/bin/env python
import os
from app import create_app,db
from app.models import User,Role
from flask.ext.script import Manager,Shell
from flask.ext.migrate import Migrate,MigrateCommand
app=create_app(os.getenv('FLASK_CONFIG') or 'default')
manager=Manager(app)
migrate=Migrate(app,db)
manager.add_command('db',MigrateCommand)
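# Typical invocations once MigrateCommand is registered (standard Flask-Migrate
# subcommands; shown here only as a usage sketch):
#   python manager.py db init
#   python manager.py db migrate -m "initial schema"
#   python manager.py db upgrade
#   python manager.py runserver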
if __name__=='__main__':
manager.run()
| 2.0625 | 2 |
ssguan/ignitor/etl/service.py | samuelbaizg/ssguan | 1 | 12793644 | # -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from ssguan.ignitor.base import context
from ssguan.ignitor.base.error import NoFoundError
from ssguan.ignitor.etl.model import IncrExtract, IncrExtractLog
from ssguan.ignitor.utility import kind, parallel
__lock = parallel.create_lock()
def get_extract_timespan(ie_name, code_path, first_time=None, start_delta=IncrExtract.DEFAULT_START_DELTA, end_delta=IncrExtract.DEFAULT_END_DELTA):
"""
Get extract timespan.
    :param ie_name|str: the increment extract job name.
:param code_path|str: the increment job code path.
:param first_time|datetime: it will be converted to utc time to save.
:param start_delta|float: the delta to compute extract start time.
:param end_delta|float: the delta to compute extract end time.
:return tuple(datetime,datetime): return (start_time,end_time)
"""
query = IncrExtract.all()
query.filter("ie_name =", ie_name)
__lock.acquire()
try:
incrextr = query.get()
if incrextr is None:
start_delta = IncrExtract.DEFAULT_START_DELTA if start_delta is None else float(start_delta)
end_delta = IncrExtract.DEFAULT_END_DELTA if end_delta is None else float(end_delta)
first_time = (kind.utcnow() - datetime.timedelta(seconds=end_delta)) if first_time is None else first_time
first_time = kind.local_to_utc(first_time)
first_time = kind.datetime_floor(first_time)
            # first_time is already floored above; a fresh record starts with
            # last_time == first_time (the start_delta offset is applied later,
            # when start_time is computed from last_time).
            last_time = first_time
incrextr = IncrExtract(ie_name=ie_name, code_path=code_path, first_time=first_time, start_delta=start_delta, end_delta=end_delta, last_time=last_time)
incrextr = incrextr.create(context.get_user_id())
start_time = incrextr.last_time - datetime.timedelta(seconds=incrextr.start_delta)
end_time = kind.utcnow() - datetime.timedelta(seconds=incrextr.end_delta)
end_time = kind.datetime_floor(end_time)
return (start_time, end_time)
finally:
__lock.release()
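# Illustrative flow (job name and code path below are invented for the sketch):
#   start, end = get_extract_timespan('orders_incr', 'etl.jobs.orders')
#   rows = extract_rows_updated_between(start, end)  # hypothetical extractor call
#   update_last_extr_time('orders_incr', end)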
def update_last_extr_time(ie_name, last_extr_time):
"""
Update last extract time
    :param ie_name|str: the extractor name
:param last_extr_time|datetime: the last extract time
"""
query = IncrExtract.all()
query.filter("ie_name =", ie_name)
incrextr = query.get()
if incrextr is None:
raise NoFoundError('Extractor', ie_name)
log = IncrExtractLog(ie_id=incrextr.key(), ie_name=ie_name, extr_time=last_extr_time)
log.create(context.get_user_id())
query.set("last_time set", last_extr_time)
query.update(context.get_user_id())
return True
| 1.804688 | 2 |
tests/_async/test_client_with_auto_confirm_enabled.py | zodman/gotrue-py | 13 | 12793645 | <filename>tests/_async/test_client_with_auto_confirm_enabled.py
from typing import AsyncIterable, Optional
import pytest
from faker import Faker
from gotrue import AsyncGoTrueClient
from gotrue.exceptions import APIError
from gotrue.types import Session, User, UserAttributes
GOTRUE_URL = "http://localhost:9998"
TEST_TWILIO = False
@pytest.fixture(name="client")
async def create_client() -> AsyncIterable[AsyncGoTrueClient]:
async with AsyncGoTrueClient(
url=GOTRUE_URL,
auto_refresh_token=False,
persist_session=True,
) as client:
yield client
@pytest.fixture(name="client_with_session")
async def create_client_with_session() -> AsyncIterable[AsyncGoTrueClient]:
async with AsyncGoTrueClient(
url=GOTRUE_URL,
auto_refresh_token=False,
persist_session=False,
) as client:
yield client
@pytest.fixture(name="new_client")
async def create_new_client() -> AsyncIterable[AsyncGoTrueClient]:
async with AsyncGoTrueClient(
url=GOTRUE_URL,
auto_refresh_token=False,
persist_session=False,
) as client:
yield client
fake = Faker()
email = f"client_ac_enabled_{fake.email().lower()}"
set_session_email = f"client_ac_session_{fake.email().lower()}"
refresh_token_email = f"client_refresh_token_signin_{fake.email().lower()}"
password = <PASSWORD>()
access_token: Optional[str] = None
@pytest.mark.asyncio
async def test_sign_up(client: AsyncGoTrueClient):
try:
response = await client.sign_up(
email=email,
password=password,
data={"status": "alpha"},
)
assert isinstance(response, Session)
global access_token
access_token = response.access_token
assert response.access_token
assert response.refresh_token
assert response.expires_in
assert response.expires_at
assert response.user
assert response.user.id
assert response.user.email == email
assert response.user.email_confirmed_at
assert response.user.last_sign_in_at
assert response.user.created_at
assert response.user.updated_at
assert response.user.app_metadata
assert response.user.app_metadata.get("provider") == "email"
assert response.user.user_metadata
assert response.user.user_metadata.get("status") == "alpha"
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_set_session_should_return_no_error(
client_with_session: AsyncGoTrueClient,
):
try:
response = await client_with_session.sign_up(
email=set_session_email,
password=password,
)
assert isinstance(response, Session)
assert response.refresh_token
await client_with_session.set_session(refresh_token=response.refresh_token)
data = {"hello": "world"}
response = await client_with_session.update(
attributes=UserAttributes(data=data)
)
assert response.user_metadata == data
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_up.__name__])
async def test_sign_up_the_same_user_twice_should_throw_an_error(
client: AsyncGoTrueClient,
):
expected_error_message = "User already registered"
try:
await client.sign_up(
email=email,
password=password,
)
assert False
except APIError as e:
assert expected_error_message in e.msg
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_up.__name__])
async def test_set_auth_should_set_the_auth_headers_on_a_new_client(
new_client: AsyncGoTrueClient,
):
try:
assert access_token
await new_client.set_auth(access_token=access_token)
assert new_client.current_session
assert new_client.current_session.access_token == access_token
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(
on=[test_set_auth_should_set_the_auth_headers_on_a_new_client.__name__]
)
async def test_set_auth_should_set_the_auth_headers_on_a_new_client_and_recover(
new_client: AsyncGoTrueClient,
):
try:
assert access_token
await new_client.init_recover()
await new_client.set_auth(access_token=access_token)
assert new_client.current_session
assert new_client.current_session.access_token == access_token
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_up.__name__])
async def test_sign_in(client: AsyncGoTrueClient):
try:
response = await client.sign_in(email=email, password=password)
assert isinstance(response, Session)
assert response.access_token
assert response.refresh_token
assert response.expires_in
assert response.expires_at
assert response.user
assert response.user.id
assert response.user.email == email
assert response.user.email_confirmed_at
assert response.user.last_sign_in_at
assert response.user.created_at
assert response.user.updated_at
assert response.user.app_metadata
assert response.user.app_metadata.get("provider") == "email"
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_sign_in_with_refresh_token(client_with_session: AsyncGoTrueClient):
try:
response = await client_with_session.sign_up(
email=refresh_token_email,
password=password,
)
assert isinstance(response, Session)
assert response.refresh_token
response2 = await client_with_session.sign_in(
refresh_token=response.refresh_token
)
assert isinstance(response2, Session)
assert response2.access_token
assert response2.refresh_token
assert response2.expires_in
assert response2.expires_at
assert response2.user
assert response2.user.id
assert response2.user.email == refresh_token_email
assert response2.user.email_confirmed_at
assert response2.user.last_sign_in_at
assert response2.user.created_at
assert response2.user.updated_at
assert response2.user.app_metadata
assert response2.user.app_metadata.get("provider") == "email"
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_in.__name__])
async def test_get_user(client: AsyncGoTrueClient):
try:
await client.init_recover()
response = client.user()
assert isinstance(response, User)
assert response.id
assert response.email == email
assert response.email_confirmed_at
assert response.last_sign_in_at
assert response.created_at
assert response.updated_at
assert response.app_metadata
provider = response.app_metadata.get("provider")
assert provider == "email"
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_in.__name__])
async def test_get_session(client: AsyncGoTrueClient):
try:
await client.init_recover()
response = client.session()
assert isinstance(response, Session)
assert response.access_token
assert response.refresh_token
assert response.expires_in
assert response.expires_at
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_in.__name__])
async def test_update_user(client: AsyncGoTrueClient):
try:
await client.init_recover()
response = await client.update(
attributes=UserAttributes(data={"hello": "world"})
)
assert isinstance(response, User)
assert response.id
assert response.email == email
assert response.email_confirmed_at
assert response.last_sign_in_at
assert response.created_at
assert response.updated_at
assert response.user_metadata
assert response.user_metadata.get("hello") == "world"
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_update_user.__name__])
async def test_get_user_after_update(client: AsyncGoTrueClient):
try:
await client.init_recover()
response = client.user()
assert isinstance(response, User)
assert response.id
assert response.email == email
assert response.email_confirmed_at
assert response.last_sign_in_at
assert response.created_at
assert response.updated_at
assert response.user_metadata
assert response.user_metadata.get("hello") == "world"
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_get_user_after_update.__name__])
async def test_sign_out(client: AsyncGoTrueClient):
try:
await client.init_recover()
await client.sign_out()
response = client.session()
assert response is None
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_out.__name__])
async def test_get_user_after_sign_out(client: AsyncGoTrueClient):
try:
await client.init_recover()
response = client.user()
assert not response
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_out.__name__])
async def test_get_update_user_after_sign_out(client: AsyncGoTrueClient):
expected_error_message = "Not logged in."
try:
await client.init_recover()
await client.update(attributes=UserAttributes(data={"hello": "world"}))
assert False
except ValueError as e:
assert str(e) == expected_error_message
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_get_user_after_sign_out.__name__])
async def test_sign_in_with_the_wrong_password(client: AsyncGoTrueClient):
try:
await client.sign_in(email=email, password=password + "2")
assert False
except APIError:
assert True
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_sign_up_with_password_none(client: AsyncGoTrueClient):
expected_error_message = "Password must be defined, can't be None."
try:
await client.sign_up(email=email)
assert False
except ValueError as e:
assert str(e) == expected_error_message
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_sign_up_with_email_and_phone_none(client: AsyncGoTrueClient):
expected_error_message = "Email or phone must be defined, both can't be None."
try:
await client.sign_up(password=password)
assert False
except ValueError as e:
assert str(e) == expected_error_message
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_sign_in_with_all_nones(client: AsyncGoTrueClient):
expected_error_message = (
"Email, phone, refresh_token, or provider must be defined, "
"all can't be None."
)
try:
await client.sign_in()
assert False
except ValueError as e:
assert str(e) == expected_error_message
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_sign_in_with_magic_link(client: AsyncGoTrueClient):
try:
response = await client.sign_in(email=email)
assert response is None
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_sign_up.__name__])
async def test_get_session_from_url(client: AsyncGoTrueClient):
try:
assert access_token
dummy_url = (
"https://localhost"
f"?access_token={access_token}"
"&refresh_token=refresh_token"
"&token_type=bearer"
"&expires_in=3600"
"&type=recovery"
)
response = await client.get_session_from_url(url=dummy_url, store_session=True)
assert isinstance(response, Session)
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
async def test_get_session_from_url_errors(client: AsyncGoTrueClient):
try:
dummy_url = "https://localhost"
error_description = fake.email()
try:
await client.get_session_from_url(
url=dummy_url + f"?error_description={error_description}"
)
assert False
except APIError as e:
assert e.code == 400
assert e.msg == error_description
try:
await client.get_session_from_url(url=dummy_url)
assert False
except APIError as e:
assert e.code == 400
assert e.msg == "No access_token detected."
dummy_url += "?access_token=access_token"
try:
await client.get_session_from_url(url=dummy_url)
assert False
except APIError as e:
assert e.code == 400
assert e.msg == "No refresh_token detected."
dummy_url += "&refresh_token=refresh_token"
try:
await client.get_session_from_url(url=dummy_url)
assert False
except APIError as e:
assert e.code == 400
assert e.msg == "No token_type detected."
dummy_url += "&token_type=bearer"
try:
await client.get_session_from_url(url=dummy_url)
assert False
except APIError as e:
assert e.code == 400
assert e.msg == "No expires_in detected."
dummy_url += "&expires_in=str"
try:
await client.get_session_from_url(url=dummy_url)
assert False
except APIError as e:
assert e.code == 400
assert e.msg == "Invalid expires_in."
except Exception as e:
assert False, str(e)
@pytest.mark.asyncio
@pytest.mark.depends(on=[test_get_update_user_after_sign_out.__name__])
async def test_refresh_session(client: AsyncGoTrueClient):
try:
response = await client.sign_in(email=email, password=password)
assert isinstance(response, Session)
assert response.refresh_token
response = await client.set_session(refresh_token=response.refresh_token)
assert isinstance(response, Session)
response = await client.refresh_session()
assert isinstance(response, Session)
await client.sign_out()
try:
await client.refresh_session()
assert False
except ValueError as e:
assert str(e) == "Not logged in."
except Exception as e:
assert False, str(e)
| 2.03125 | 2 |
CodeChef/COMPETE/ZCO Practice Contest - ZCOPRAC/Covering - ZCO15003.py | IshanManchanda/competitive-python | 6 | 12793646 | <gh_stars>1-10
# https://www.codechef.com/ZCOPRAC/problems/ZCO15003
def main():
from sys import stdin, stdout
rl = stdin.readline
n = int(rl())
a = [[int(x) for x in rl().split()] for _ in range(n)]
a.sort()
i = s = 0
while i < n:
end = a[i][1]
while i < n and end >= a[i][0]:
i += 1
end = min(end, a[i][1]) if i < n else end
s += 1
stdout.write(str(s))
main()
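# Worked example of the greedy sweep above: intervals (2,3), (1,4), (5,6) sort
# to [(1,4), (2,3), (5,6)]; the inner loop shrinks the shared end to 3 and one
# point covers the first two intervals, (5,6) needs a second point -> prints 2.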
| 2.640625 | 3 |
controllers/accounting_controllers.py | rbaylon/ngi | 0 | 12793647 | from models import ChapterPayments
from baseapp import db
class ChapterPaymentsController:
def __init__(self):
pass
def add(self, payment):
existing = False
payments = ChapterPayments.query.filter_by(received_date=payment['received_date']).all()
for existing_payment in payments:
if existing_payment.received_from == payment['received_from'] \
and existing_payment.received_amount == payment['received_amount'] \
and existing_payment.payment_type == payment['payment_type']:
existing = True
break
if not existing:
new_payment = ChapterPayments()
new_payment.received_from = payment['received_from']
new_payment.received_date = payment['received_date']
new_payment.received_amount = payment['received_amount']
new_payment.payment_type = payment['payment_type']
new_payment.cpc = payment['cpc']
new_payment.chapter = payment['chapter']
db.session.add(new_payment)
db.session.commit()
return True
return False
def edit(self, payment):
existing_payment = ChapterPayments.query.filter_by(id=payment['id']).first()
if existing_payment:
existing_payment.received_from = payment['received_from']
existing_payment.received_date = payment['received_date']
existing_payment.received_amount = payment['received_amount']
existing_payment.payment_type = payment['payment_type']
existing_payment.cpc = payment['cpc']
existing_payment.chapter = payment['chapter']
db.session.commit()
return True
return False
def delete(self, payment):
existing_payment = ChapterPayments.query.filter_by(id=payment['id']).first()
if existing_payment:
db.session.delete(existing_payment)
db.session.commit()
return True
        return False
| 2.46875 | 2 |
src/fhir_types/FHIR_DataRequirement.py | anthem-ai/fhir-types | 2 | 12793648 | from typing import Any, List, Literal, TypedDict
from .FHIR_canonical import FHIR_canonical
from .FHIR_code import FHIR_code
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_DataRequirement_CodeFilter import FHIR_DataRequirement_CodeFilter
from .FHIR_DataRequirement_DateFilter import FHIR_DataRequirement_DateFilter
from .FHIR_DataRequirement_Sort import FHIR_DataRequirement_Sort
from .FHIR_Element import FHIR_Element
from .FHIR_positiveInt import FHIR_positiveInt
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# Describes a required data item for evaluation in terms of the type of data, and optional code or date-based filters of the data.
FHIR_DataRequirement = TypedDict(
"FHIR_DataRequirement",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# The type of the required data, specified as the type name of a resource. For profiles, this value is set to the type of the base resource of the profile.
"type": FHIR_code,
# Extensions for type
"_type": FHIR_Element,
# The profile of the required data, specified as the uri of the profile definition.
"profile": List[FHIR_canonical],
# The intended subjects of the data requirement. If this element is not provided, a Patient subject is assumed.
"subjectCodeableConcept": FHIR_CodeableConcept,
# The intended subjects of the data requirement. If this element is not provided, a Patient subject is assumed.
"subjectReference": FHIR_Reference,
# Indicates that specific elements of the type are referenced by the knowledge module and must be supported by the consumer in order to obtain an effective evaluation. This does not mean that a value is required for this element, only that the consuming system must understand the element and be able to provide values for it if they are available. The value of mustSupport SHALL be a FHIRPath resolveable on the type of the DataRequirement. The path SHALL consist only of identifiers, constant indexers, and .resolve() (see the [Simple FHIRPath Profile](fhirpath.html#simple) for full details).
"mustSupport": List[FHIR_string],
# Extensions for mustSupport
"_mustSupport": List[FHIR_Element],
# Code filters specify additional constraints on the data, specifying the value set of interest for a particular element of the data. Each code filter defines an additional constraint on the data, i.e. code filters are AND'ed, not OR'ed.
"codeFilter": List[FHIR_DataRequirement_CodeFilter],
# Date filters specify additional constraints on the data in terms of the applicable date range for specific elements. Each date filter specifies an additional constraint on the data, i.e. date filters are AND'ed, not OR'ed.
"dateFilter": List[FHIR_DataRequirement_DateFilter],
# Specifies a maximum number of results that are required (uses the _count search parameter).
"limit": FHIR_positiveInt,
# Extensions for limit
"_limit": FHIR_Element,
# Specifies the order of the results to be returned.
"sort": List[FHIR_DataRequirement_Sort],
},
total=False,
)
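# Minimal illustrative value (field choice is ours; total=False makes every key
# optional, so partial dicts type-check):
#   req: FHIR_DataRequirement = {"type": "Observation", "limit": 10}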
| 1.65625 | 2 |
gateways/cms_gateway.py | project-lolquiz/the-backend | 0 | 12793649 | import requests
import json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
LOLQUIZ_CMS_URL = 'https://lolquiz-cms.herokuapp.com/questions?_sort=id&_limit={}&_start={}'
HTTP_STATUS_ERROR_CODES = [408, 502, 503, 504]
TOTAL_QUESTIONS = 100
INIT_OFFSET = 0
def get_questions(url_template=LOLQUIZ_CMS_URL):
s = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=HTTP_STATUS_ERROR_CODES)
s.mount('https://', HTTPAdapter(max_retries=retries))
offset = INIT_OFFSET
all_questions = []
while True:
        url = url_template.format(TOTAL_QUESTIONS, offset)  # honour the caller-supplied template
response = s.get(url)
if not has_content(response):
break
all_questions.append(json.loads(response.content))
offset += TOTAL_QUESTIONS
return [set_question(question)
for outer_questions in all_questions
for question in outer_questions if valid_question(question)]
def has_content(response):
return len(json.loads(response.content)) > 0
def valid_question(question):
return 'id' in question \
and 'title' in question \
and 'game_type' in question and question['game_type'] is not None \
and 'game_modes' in question and len(question['game_modes']) > 0 \
and 'options' in question
def set_question(question):
return {'id': question['id'],
'title': question['title'],
'game_type': question['game_type'],
'options': question['options'],
'game_modes': question['game_modes']}
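# Illustrative usage (assumes network access to the CMS):
#   questions = get_questions()  # pages through the CMS, 100 questions per request
#   for q in questions[:3]:
#       print(q['id'], q['title'])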
| 2.953125 | 3 |
hitblow/hitblow_solo_manual.py | HayatoNatural/NEDO-Hit-Blow-teamF | 1 | 12793650 | <reponame>HayatoNatural/NEDO-Hit-Blow-teamF
# coding : UTF-8
"""
File Name: hitblow_solo_manual.py
Description: Hit&Blowの手動一人対戦モード
Created on october 13,2021
Created by <NAME>, <NAME>, <NAME>
"""
import random
import argparse
import time
from PIL import Image
import streamlit as st
import pygame
st.set_page_config(layout="wide")
col1,col2 =st.columns([4,1])
col4,space,col6 =st.columns([7,1,4])
def initialize_streamlit() -> None:
"""クラスを定義する前にweb上で画面を出しておく
状態量として, 試合数, 経験値, レベル, 連勝数を定義し, 初期化しておく(マジックコマンド的な)
: rtype : None
: return : なし
"""
col1.title("Welcome to Hit&Blow Game!16進数5桁の秘密の数字を当てよう!")
col1.subheader("対戦すると経験値がもらえるよ. 経験値は当てた回数や連勝数に応じて増えるぞ!")
col1.subheader("経験値が貯まるとレベルアップだ!いずれはキャラが進化するかも‥?")
if 'game_count' not in st.session_state:
st.session_state.game_count = 1
if 'exp' not in st.session_state:
st.session_state.exp = 0
if 'level' not in st.session_state:
st.session_state.level = 1
if 'win_in_a_row' not in st.session_state:
st.session_state.win_in_a_row = 1
if 'turn_count' not in st.session_state:
        st.session_state.turn_count = 0
    if 'history' not in st.session_state:
        st.session_state.history = {}
name = col4.selectbox("キャラクターを選んでね",["ジャック","クリス","フローラ","ドロシー"])
st.session_state.chara_name = name
pic_url1 = "picture/"+name+"-1.jpg"
pic_url2 = "picture/"+name+"-2.jpg"
if st.session_state.level < 20:
image = Image.open(pic_url1)
col4.image(image)
else:
image = Image.open(pic_url2)
col4.image(image)
col6.subheader("{}の現在のレベル : {}".format(st.session_state.chara_name,st.session_state.level))
col6.write("対戦回数 : {}".format(st.session_state.game_count-1))
class Playgame_solo_manual:
"""16進数5桁のHit&Blow 自動一人対戦の数当てモード
:param int digits : 数の桁数
:param set Tuple_16 : 数に使う16進数の数字の集合
:param str ans : comの答え(自分が当てる数字)
:param List[dict] my_history : 自分が相手の数当をした時の履歴
:param str num : こちらが予想した相手の数字
:param int hit : 数字のhit数
:param int blow : 数字のblow数
:param int volume:音量(0~1で変更)
"""
def __init__(self,ans=None,num=None) -> None:
self.digits = 5
self.Tuple_16 = ("0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f")
self.hit = None
self.blow = None
self.volume = 0.3
if ans is not None:
self.ans = ans
else:
self.ans = self._define_answer()
if 'ans' not in st.session_state:
st.session_state.ans= self.ans
self.num = num
def _define_answer(self) -> str:
"""自分が当てる答えをつくる
: rtype : str
: return : ans
"""
ans_list = random.sample(self.Tuple_16, self.digits)
ans = "".join(ans_list)
return ans
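    # Note: random.sample draws without replacement, so the secret never repeats
    # a digit (e.g. "3f08a" is possible, "3f03a" is not).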
def _play_song(self,num:int, title):
"""待機時間中音楽再生
:param int num:再生回数(-1で無限ループ,これを使って止めたいときにstopするのが良いかと)
:param int playtime:再生時間(基本-1で無限ループしてるので、使わない.デフォルト値Noneで良い)
: rtype : None
: return : なし
"""
pygame.mixer.init() # 初期設定
pygame.mixer.music.load(title) # 音楽ファイルの読み込み
pygame.mixer.music.set_volume(self.volume)
pygame.mixer.music.play(num) # 音楽の再生回数(1回)
def _music_stop(self) -> None:
"""再生中の音楽停止
: rtype : None
: return : なし
"""
pygame.mixer.music.stop() # 再生の終了
def _voice_play(self,num:int, title):
"""音楽再生中のキャラボイス再生用
: rtype : None
: return : なし
"""
pygame.mixer.init() # 初期設定
sound = pygame.mixer.Sound(title) # 音楽ファイルの読み込み
sound.set_volume(self.volume)
sound.play()
def _check_hit_blow(self,num,ans) -> None:
"""メインで使用
2つの引数を入力し, その2数のhit,blowを計算してself.hit, self.blowに格納
: rtype : None
: return : なし
"""
self.hit = 0
self.blow = 0
for i in range(self.digits):
if num[i] == ans[i]:
self.hit += 1
else:
if num[i] in ans:
self.blow += 1
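    # Worked example: num="1a2b3" vs ans="1b2a4" -> positions 0 and 2 match
    # ("1", "2": 2 Hit); "a" and "b" appear elsewhere in ans (2 Blow); "3" misses.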
def _play_game_manual(self) -> None:
""" 手動一人対戦の数当てゲーム
対戦中の表示を出してから部屋を作成して答えをポストして対戦開始, 終わったら対戦終了と結果の表示
: rtype : None
: return : なし
"""
#self._music_stop()
place = col6.empty()
place.write("対戦中・・・")
if st.session_state.turn_count == 0:
self._music_stop()
time.sleep(3)
self._play_song(num = 1,title = "bgm/game_start.wav")
self._voice_play(num = 1, title ='voice/'+st.session_state.chara_name+'/game_start.wav')
time.sleep(3)
self._play_song(num = -1,title = "bgm/Battle.wav")
print("aaaaa")
self._check_hit_blow(self.num,st.session_state.ans)
st.session_state.history[self.num] = [str(self.hit)+"hit", str(self.blow)+"blow"]
st.session_state.turn_count += 1
print("!! {} Hit, {} Blow !!".format(self.hit,self.blow))
col6.subheader("{} Hit, {} Blowだ!".format(self.hit,self.blow))
col6.write("現在のターン数,{}".format(st.session_state.turn_count))
col6.write("今までの入力履歴,{}".format(st.session_state.history))
if self.hit == self.digits:
print("!! 正解です !!")
place.write("対戦終了!")
self._show_result_vscode()
self._show_result_streamlit()
def _show_result_vscode(self) -> None:
"""対戦終了後, お互いの結果を表示(vscode上に表示する分)
: rtype : None
: return : なし
"""
print("------------------------")
print("show history")
print(st.session_state.history)
print("------------------------")
print("正解は{}です. おめでとうございます! {}回で正解しました.".format(st.session_state.ans,st.session_state.turn_count))
print("------------------------")
def _get_information(self) -> str:
"""対戦終了後,web画面に表示する内容を計算
勝敗,連勝に応じて獲得経験値を求め, 経験値に加える.レベルや次のレベルまでの必要経験値も求める
進化やレベルアップの判定も行う
: rtype : str
: return : 獲得経験値と次のレベルまでの必要経験値
"""
# st.session_state.win_in_a_row += 1
level_up = False
evolution = False
new_exp = round(3000*(1+(st.session_state.win_in_a_row-1)/4)/st.session_state.turn_count)
st.session_state.exp += new_exp
for i in range(200):
if i**3/3 <= st.session_state.exp and st.session_state.exp < (i+1)**3/3:
remaining_exp = round((i+1)**3/3 - st.session_state.exp)
new_level = i
if new_level != st.session_state.level:
level_up = True
if new_level == 20:
evolution = True
st.session_state.level = new_level
break
return new_exp,remaining_exp,level_up,evolution
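    # Level curve sketch: level i covers total exp in [i**3/3, (i+1)**3/3), so
    # e.g. 2000 exp gives level 18 (1944 <= 2000 < ~2286.3), 286 exp to level 19.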
def _show_result_streamlit(self) -> None:
"""対戦終了後, お互いの結果を表示(web画面上に表示する分)
勝敗、連勝数に応じて表示を変える, 経験値やレベル, 対戦回数も表示
進化とレベルアップの時は追加エフェクト
: rtype : None
: return : なし
"""
new_exp,remaining_exp,level_up,evolution = self._get_information()
self._music_stop()
self._play_song(num = -1, title = "bgm/winner.wav")
self._voice_play(num = 1, title ='voice/'+st.session_state.chara_name+'/winner.wav')
col6.subheader("")
col6.subheader("勝利だ,おめでとう!")
col6.subheader("正解は‥【{}】{}回で正解できた!".format(self.num,st.session_state.turn_count))
col6.subheader("")
# if st.session_state.win_in_a_row >= 2:
# col6.subheader("すごいぞ,{}連勝だ!その調子!".format(st.session_state.win_in_a_row))
time.sleep(3)
st.balloons()
col6.write("{}は{}経験値を得た!".format(st.session_state.chara_name,new_exp))
col6.write("")
time.sleep(13)
if level_up:
if evolution:
col4.subheader("おや?{}の様子が...".format(st.session_state.chara_name))
image_light = Image.open('picture/evolution_light.png')
col4.image(image_light)
self._play_song(num = 1,title = "bgm/evolution_light.mp3")
time.sleep(3)
col4.subheader("やったね, 進化した!")
pic_url2 = "picture/"+st.session_state.chara_name+"-2.jpg"
image = Image.open(pic_url2)
col4.image(image)
img = Image.open('picture/evolution.gif')
col6.image(img)
self._play_song(num = 1,title = "bgm/evolution.mp3")
time.sleep(3)
else:
col6.subheader("レベルアップだ!")
self._music_stop()
self._play_song(num = 1,title = "bgm/level_up.wav")
img = Image.open('picture/level-up.gif')
time.sleep(1)
col6.image(img)
col6.write("次のレベルまでの経験値:{}".format(remaining_exp))
col6.write("今まで得た合計経験値:{}".format(st.session_state.exp))
col6.subheader("")
col6.subheader("{}の現在のレベル : {}".format(st.session_state.chara_name,st.session_state.level))
col6.write("対戦回数 : {}".format(st.session_state.game_count))
col6.subheader("また新たな秘密の数字が現れた!当てに行こう!")
st.session_state.game_count += 1
st.session_state.turn_count = 0
st.session_state.history = {}
st.session_state.ans = self._define_answer()
def get_parser() -> argparse.Namespace:
"""コマンドライン引数を解析したものを持つ
:rtype : argparse.Namespace
:return : コマンド値
"""
parser = argparse.ArgumentParser(description="Hit&Blow, 数当てゲーム")
parser.add_argument("--ans",default=None)
args = parser.parse_args()
return args
def main() -> None:
"""Hit&Blowのメイン
"""
args = get_parser()
ans= args.ans
num = col6.text_input("予想する数字を入力してね")
initialize_streamlit()
if args.ans is not None:
runner = Playgame_solo_manual(ans=ans,num=num)
else:
runner = Playgame_solo_manual(num=num)
if st.session_state.turn_count == 0:
runner._play_song(num = -1,title = 'bgm/waiting.wav')
runner._voice_play(num = 1, title ='voice/'+st.session_state.chara_name+'/waiting.wav')
if col6.button("クリックすると数字をチェックするよ!"):
runner._play_game_manual()
if __name__ == "__main__":
main()
| 2.234375 | 2 |