Dataset schema (one row per source file; ⌀ = nullable column):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 .. 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 .. 248 |
| max_stars_repo_name | string | length 5 .. 125 |
| max_stars_repo_head_hexsha | string | length 40 .. 78 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count ⌀ | int64 | 1 .. 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 .. 248 |
| max_issues_repo_name | string | length 5 .. 125 |
| max_issues_repo_head_hexsha | string | length 40 .. 78 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count ⌀ | int64 | 1 .. 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 .. 248 |
| max_forks_repo_name | string | length 5 .. 125 |
| max_forks_repo_head_hexsha | string | length 40 .. 78 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count ⌀ | int64 | 1 .. 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 5 .. 2.06M |
| avg_line_length | float64 | 1 .. 1.02M |
| max_line_length | int64 | 3 .. 1.03M |
| alphanum_fraction | float64 | 0 .. 1 |
| count_classes | int64 | 0 .. 1.6M |
| score_classes | float64 | 0 .. 1 |
| count_generators | int64 | 0 .. 651k |
| score_generators | float64 | 0 .. 1 |
| count_decorators | int64 | 0 .. 990k |
| score_decorators | float64 | 0 .. 1 |
| count_async_functions | int64 | 0 .. 235k |
| score_async_functions | float64 | 0 .. 1 |
| count_documentation | int64 | 0 .. 1.04M |
| score_documentation | float64 | 0 .. 1 |
hexsha: 1f5755adc834fa964d8b57abac91fbc6499d9935 | size: 4,608 | ext: py | lang: Python
max_stars: menucard/migrations/0001_initial.py @ baniasbaabe/happy-qr (bf44ac19306ea6405cc7c9a100e6f83afca125b4) | licenses: ["MIT"] | count: 1 | dates: 2021-01-23T21:42:10.000Z to 2021-01-23T21:42:10.000Z
max_issues: menucard/migrations/0001_initial.py @ baniasbaabe/happy-qr (bf44ac19306ea6405cc7c9a100e6f83afca125b4) | licenses: ["MIT"] | count: null | dates: null
max_forks: menucard/migrations/0001_initial.py @ baniasbaabe/happy-qr (bf44ac19306ea6405cc7c9a100e6f83afca125b4) | licenses: ["MIT"] | count: null | dates: null
# Generated by Django 3.1.2 on 2020-12-27 10:36
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
('crm', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Vorspeise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Snacks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Nachspeise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Hauptspeise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='Besucher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vorname', models.CharField(max_length=45)),
('nachname', models.CharField(max_length=45)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('telefon', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, null=True, region=None)),
('strasse', models.CharField(max_length=45)),
('hausnummer', models.CharField(max_length=5)),
('plz', models.CharField(max_length=45)),
('stadt', models.CharField(max_length=45)),
('besucht_am', models.DateTimeField(auto_now_add=True, null=True)),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='AlkoholhaltigeDrinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('centiliter', models.FloatField()),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
migrations.CreateModel(
name='AlkoholfreieDrinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=55)),
('liter', models.FloatField()),
('beschreibung', models.TextField(blank=True, default='')),
('preis', models.FloatField()),
('kundeId', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='crm.kunde')),
],
),
]
| 48 | 128 | 0.567491 | 4,445 | 0.964627 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.136936 |
hexsha: 1f5774372518e14045e4add17d37c16fbf360cfe | size: 10,289 | ext: py | lang: Python
max_stars: episim/model.py @ jm-begon/episim (705f80b782c5653a0d8b6e53614f34c12917cb43) | licenses: ["BSD-3-Clause"] | count: null | dates: null
max_issues: episim/model.py @ jm-begon/episim (705f80b782c5653a0d8b6e53614f34c12917cb43) | licenses: ["BSD-3-Clause"] | count: null | dates: null
max_forks: episim/model.py @ jm-begon/episim (705f80b782c5653a0d8b6e53614f34c12917cb43) | licenses: ["BSD-3-Clause"] | count: null | dates: null
import os
import datetime
from collections import defaultdict
import numpy as np
from scipy import sparse
from episim.ontology import Ontology
from episim.plot.modeling import System, Accumulator
from .data import State
class EulerSimulator(object):
"""
Explicit Euler method
"""
def __init__(self, *dx_dt, step_size=1.):
self.step_size = step_size
self.dx_dt = dx_dt
self.N = len(dx_dt)
def __call__(self, *x, dt=1):
dx = np.zeros(self.N)
h = self.step_size
x = np.array(x)
n_steps_per_dt = int(1. / self.step_size)
        for _ in range(int(dt)):
for t in range(n_steps_per_dt):
for i, dxi_dt in enumerate(self.dx_dt):
dx[i] = dxi_dt(*x)
x = x + h * dx
yield x
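# --- Illustrative usage sketch (not part of the original file) ---
# A minimal check of the explicit Euler integrator above on the scalar ODE
# dx/dt = -0.5 * x, whose exact solution is x(t) = x0 * exp(-0.5 * t).
# Kept as comments so the module has no import-time side effects.
#
#   sim = EulerSimulator(lambda x: -0.5 * x, step_size=0.1)
#   for day, x in enumerate(sim(1.0, dt=5), start=1):
#       print(day, x[0])   # x[0] should stay close to exp(-0.5 * day)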
class LinNonLinEulerSimulator(object):
"""
P : p
"""
def __init__(self, dx_dt_lin, dx_dt_dict, step_size=1.):
        if hasattr(dx_dt_lin, "tocsr"):
dx_dt_lin = dx_dt_lin.tocsr()
self.dx_dt_matrix = dx_dt_lin
self.dx_dt_dict = dx_dt_dict
self.N = len(dx_dt_lin)
self.step_size = step_size
def __call__(self, *x, dt=1):
dx = np.zeros(self.N)
x = np.array(x)
h = self.step_size
n_steps_per_dt = int(1. / self.step_size)
        for _ in range(int(dt)):
for t in range(n_steps_per_dt):
dx *= 0
# Linear part
dx[:] = self.dx_dt_matrix.dot(x)
# Non linear
for i, f in self.dx_dt_dict.items():
dx[i] += f(*x)
x = x + h * dx
yield x
class F(object):
def __init__(self, callable, label):
self.label = label
self.callable = callable
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
def __str__(self):
return self.label
class Dynamic(object):
@classmethod
def from_nodes(cls, *node_and_time_deriv):
nodes = []
dx_dt = []
for node, dxi_dt in node_and_time_deriv:
nodes.append(node)
dx_dt.append(dxi_dt)
sorted_nodes = [x for x in nodes]
sorted_nodes.sort(key=lambda n: n.index)
names = [x.name for x in sorted_nodes]
dynamic = cls(*names)
for name, dxi_dt in zip(names, dx_dt):
dynamic[name] = dxi_dt
return dynamic
def __init__(self, *variable_names):
self.variable_names = variable_names
self.var2idx = {s: i for i, s in enumerate(variable_names)}
self.dx_dt = [F(lambda *x: 0, "0") for _ in range(len(variable_names))]
def _idx(self, key):
try:
idx = int(key)
except (TypeError, ValueError):
idx = self.var2idx[key]
return idx
def __setitem__(self, key, value):
self.dx_dt[self._idx(key)] = value
def __getitem__(self, item):
return self.dx_dt[self._idx(item)]
def long_repr(self):
s = ""
for idx, name in enumerate(self.variable_names):
s += "d{}/dt = {}{}".format(name, self.dx_dt[idx], os.linesep)
return s
def __iter__(self):
return iter(self.dx_dt)
class Model(object):
@classmethod
def compute_parameters(cls, virus, population):
return tuple()
@classmethod
def factory(cls, initial_state, virus, population, resolution=0.1):
t = cls.compute_parameters(virus, population)
model = cls(*t, resolution=resolution)
return model.set_state(initial_state)
def __init__(self, resolution=0.1):
self.current_state = None
self.resolution = resolution
self.ontology = Ontology.default_ontology()
def _compute_reproduction_number(self, n_susceptible, n_total):
return 0
def set_state(self, state):
queriable = self.ontology(state)
R = self._compute_reproduction_number(queriable.susceptible,
queriable.population)
state.reproduction_number = R
if state.n_infection is None:
state.n_infection = queriable.infected
self.current_state = state
return self
def _state2variables(self, state):
return tuple()
def _variables2state(self, date, *values):
return State(date)
def run(self, n_steps=1):
variables = self._state2variables(self.current_state)
date = self.current_state.date
plus_one = datetime.timedelta(days=1)
for variables in self.simulator(*variables, dt=n_steps):
date = date + plus_one
state = self._variables2state(date, *variables)
self.set_state(state)
yield state
class SEIRS(Model):
"""
beta: float
transmission coefficient: average number of contact per person per time,
multiplied by the probability of disease transmission at a contact
between a susceptible person and an infectious person
gamma: float
1/D, where D is the average time infectious time
ksi:
re-susceptibility rate (depends on the fraction of alive, recovered
people will not develop a lasting immunity and depends on the time
before the immunity drops)
"""
@classmethod
def compute_parameters(cls, virus, population):
beta = population.contact_frequency * virus.transmission_rate
kappa = 1. / virus.exposed_duration
gamma = 1. / virus.infectious_duration
ksi = virus.immunity_drop_rate
return beta, kappa, gamma, ksi
def __init__(self, beta=0, kappa=0, gamma=0, ksi=0, resolution=0.1):
if resolution is None:
resolution = EulerSimulator
super().__init__(resolution=resolution)
self.beta = beta
self.kappa = kappa
self.gamma = gamma
self.ksi = ksi
self.current_state = None
S, E, I, R = System.new("S", "E", "I", "R")
N = S + E + I + R
N.override_name("N")
S2E = self.beta * S * I / N
S2E_acc = Accumulator(S2E, self.resolution)
E2I = self.kappa * E
I2R = self.gamma * I
R2S = self.ksi * R
dS_dt = -S2E + R2S
dE_dt = S2E_acc - E2I
dI_dt = E2I - I2R
dR_dt = I2R - R2S
self.dynamic = Dynamic.from_nodes((S, dS_dt), (E, dE_dt),
(I, dI_dt), (R, dR_dt))
self.acc_n_infect = S2E_acc
self.simulator = EulerSimulator(*iter(self.dynamic),
step_size=resolution)
def __repr__(self):
s = "{}(beta={}, kappa={}, gamma={}, ksi={}, resolution={})".format(
self.__class__.__name__,
repr(self.beta),
repr(self.kappa),
repr(self.gamma),
repr(self.ksi),
repr(self.resolution),
)
if self.current_state is None:
return s
return s + ".set_state({})".format(repr(self.current_state))
def __str__(self):
return "{}(beta={:.2e}, kappa={:.2e}, gamma={:.2e}, ksi={:.2e})" \
"".format(self.__class__.__name__,
self.beta, self.kappa,
self.gamma, self.ksi)
# def __str__(self):
# return self.dynamic.long_repr()
def _compute_reproduction_number(self, n_susceptible, n_total):
return self.beta / self.gamma * n_susceptible / float(n_total)
def _state2variables(self, state):
zero = lambda x: 0 if x is None else x
S = zero(state.susceptible)
E = zero(state.exposed)
I = zero(state.infectious)
R = zero(state.recovered)
return S, E, I, R
def _variables2state(self, date, *values):
S, E, I, R = values
n_infection = self.current_state.n_infection
n_infection += self.acc_n_infect.value
self.acc_n_infect.reset()
state = State(date)
state.susceptible = S
state.exposed = E
state.infectious = I
state.recovered = R
state.n_infection = n_infection
return state
class SIR(Model):
@classmethod
def compute_parameters(cls, virus, population):
beta = population.contact_frequency * virus.transmission_rate
gamma = 1. / (virus.exposed_duration + virus.infectious_duration)
return beta, gamma
def __init__(self, beta, gamma, resolution=0.1):
super().__init__(resolution)
self.beta = beta
self.gamma = gamma
S, I, R = System.new("S", "I", "R")
N = S + I + R
N.override_name("N")
S2I = self.beta * S * I / N
I2R = self.gamma * I
dS_dt = -S2I
dI_dt = S2I - I2R
dR_dt = I2R
self.dynamic = Dynamic.from_nodes((S, dS_dt), (I, dI_dt), (R, dR_dt))
        self.simulator = EulerSimulator(*iter(self.dynamic), step_size=resolution)
def __repr__(self):
s = "{}(beta={}, gamma={}, resolution={})".format(
self.__class__.__name__,
repr(self.beta),
repr(self.gamma),
repr(self.resolution),
)
if self.current_state is None:
return s
return s + ".set_state({})".format(repr(self.current_state))
def __str__(self):
return "{}(beta={:.2e}, gamma={:.2e})" \
"".format(self.__class__.__name__,
self.beta, self.gamma)
def _compute_reproduction_number(self, n_susceptible, n_total):
return self.beta / self.gamma * n_susceptible / float(n_total)
def _state2variables(self, state):
zero = lambda x: 0 if x is None else x
S = zero(state.susceptible)
I = zero(state.infectious)
R = zero(state.recovered)
return S, I, R
def _variables2state(self, date, *values):
S, I, R = values
n_infection = self.current_state.n_infection
n_infection += (self.current_state.susceptible - S)
state = State(date)
state.susceptible = S
state.infectious = I
state.recovered = R
state.n_infection = n_infection
return state
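# --- Illustrative usage sketch (not part of the original file) ---
# Rough outline of how the SEIRS model above is driven. It assumes episim's
# State accepts the same attributes written in _variables2state and that the
# default Ontology can query such a state; the parameter values are
# illustrative only, not calibrated.
#
#   import datetime
#   from episim.data import State
#
#   state = State(datetime.date(2020, 3, 1))
#   state.susceptible, state.exposed, state.infectious, state.recovered = 10_000, 10, 5, 0
#   state.n_infection = 15
#
#   model = SEIRS(beta=0.3, kappa=1 / 5, gamma=1 / 7, ksi=1 / 180, resolution=0.1)
#   model.set_state(state)
#   for day_state in model.run(n_steps=30):
#       print(day_state.date, day_state.infectious, day_state.reproduction_number)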
| 26.180662 | 80 | 0.57197 | 10,032 | 0.975022 | 1,291 | 0.125474 | 1,343 | 0.130528 | 0 | 0 | 930 | 0.090388 |
hexsha: 1f57dbdd7653a2a24621940b5ab48570df0a2af1 | size: 4,620 | ext: py | lang: Python
max_stars: pytorch/camera_intrinsics.py @ abdur4373/ROS_depth_pred (63ed4d97df8b49a43aad53c4c6bf01441f05153d) | licenses: ["MIT"] | count: 1 | dates: 2019-06-29T07:48:31.000Z to 2019-06-29T07:48:31.000Z
max_issues: pytorch/camera_intrinsics.py @ abdur4373/ROS_depth_pred (63ed4d97df8b49a43aad53c4c6bf01441f05153d) | licenses: ["MIT"] | count: null | dates: null
max_forks: pytorch/camera_intrinsics.py @ abdur4373/ROS_depth_pred (63ed4d97df8b49a43aad53c4c6bf01441f05153d) | licenses: ["MIT"] | count: null | dates: null
import numpy as np
from sensor_msgs.msg import CameraInfo, RegionOfInterest
from std_msgs.msg import Header
class CameraIntrinsics(object):
"""A set of intrinsic parameters for a camera. This class is used to project
and deproject points.
"""
def __init__(self, frame, fx, fy=None, cx=0.0, cy=0.0, skew=0.0, height=None, width=None):
"""Initialize a CameraIntrinsics model.
Parameters
----------
frame : :obj:`str`
The frame of reference for the point cloud.
fx : float
The x-axis focal length of the camera in pixels.
fy : float
The y-axis focal length of the camera in pixels.
cx : float
The x-axis optical center of the camera in pixels.
cy : float
The y-axis optical center of the camera in pixels.
skew : float
The skew of the camera in pixels.
height : float
The height of the camera image in pixels.
width : float
The width of the camera image in pixels
"""
self._frame = frame
self._fx = float(fx)
        self._fy = float(fx) if fy is None else float(fy)
self._cx = float(cx)
self._cy = float(cy)
self._skew = float(skew)
self._height = int(height)
self._width = int(width)
# set focal, camera center automatically if under specified
if fy is None:
self._fy = fx
# set camera projection matrix
self._K = np.array([[self._fx, self._skew, self._cx],
[0, self._fy, self._cy],
[0, 0, 1]])
@property
def frame(self):
""":obj:`str` : The frame of reference for the point cloud.
"""
return self._frame
@property
def fx(self):
"""float : The x-axis focal length of the camera in pixels.
"""
return self._fx
@property
def fy(self):
"""float : The y-axis focal length of the camera in pixels.
"""
return self._fy
@property
def cx(self):
"""float : The x-axis optical center of the camera in pixels.
"""
return self._cx
@cx.setter
def cx(self, z):
self._cx = z
self._K = np.array([[self._fx, self._skew, self._cx],
[0, self._fy, self._cy],
[0, 0, 1]])
@property
def cy(self):
"""float : The y-axis optical center of the camera in pixels.
"""
return self._cy
@cy.setter
def cy(self, z):
self._cy = z
self._K = np.array([[self._fx, self._skew, self._cx],
[0, self._fy, self._cy],
[0, 0, 1]])
@property
def skew(self):
"""float : The skew of the camera in pixels.
"""
return self._skew
@property
def height(self):
"""float : The height of the camera image in pixels.
"""
return self._height
@property
def width(self):
"""float : The width of the camera image in pixels
"""
return self._width
@property
def proj_matrix(self):
""":obj:`numpy.ndarray` : The 3x3 projection matrix for this camera.
"""
return self._K
@property
def K(self):
""":obj:`numpy.ndarray` : The 3x3 projection matrix for this camera.
"""
return self._K
@property
def vec(self):
""":obj:`numpy.ndarray` : Vector representation for this camera.
"""
return np.r_[self.fx, self.fy, self.cx, self.cy, self.skew, self.height, self.width]
@property
def rosmsg(self):
""":obj:`sensor_msgs.CamerInfo` : Returns ROS CamerInfo msg
"""
msg_header = Header()
msg_header.frame_id = self._frame
msg_roi = RegionOfInterest()
msg_roi.x_offset = 0
msg_roi.y_offset = 0
msg_roi.height = 0
msg_roi.width = 0
msg_roi.do_rectify = 0
msg = CameraInfo()
msg.header = msg_header
msg.height = self._height
msg.width = self._width
msg.distortion_model = 'plumb_bob'
msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
msg.K = [self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0]
msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
msg.P = [self._fx, 0.0, self._cx, 0.0, 0.0, self._fx, self._cy, 0.0, 0.0, 0.0, 1.0, 0.0]
msg.binning_x = 0
msg.binning_y = 0
msg.roi = msg_roi
        print(msg)
        return msg
# CameraIntrinsics()  # calling the constructor with no arguments raises a TypeError (frame and fx are required)
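# --- Illustrative usage sketch (not part of the original file) ---
# The intrinsic values below are placeholders, not real calibration data.
if __name__ == '__main__':
    intrinsics = CameraIntrinsics('camera', fx=525.0, fy=525.0, cx=319.5, cy=239.5,
                                  height=480, width=640)
    print(intrinsics.K)      # 3x3 projection matrix
    print(intrinsics.vec)    # flat parameter vector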
| 28.695652 | 96 | 0.534632 | 4,488 | 0.971429 | 0 | 0 | 2,881 | 0.623593 | 0 | 0 | 1,772 | 0.38355 |
hexsha: 1f581484e57d7f06ab12a83feeb46bea44a7e7c3 | size: 327 | ext: py | lang: Python
max_stars: app/logger.py @ d3vzer0/reternal-backend (aeeb613c820759212e7aef9150738a66b2882d50) | licenses: ["MIT"] | count: 6 | dates: 2019-01-01T23:38:12.000Z to 2021-07-27T03:43:11.000Z
max_issues: app/logger.py @ d3vzer0/kickstart-flask-vuejs (562a829d3f3b87488035719025f2d29b4fe33a89) | licenses: ["MIT"] | count: 1 | dates: 2020-08-02T00:21:41.000Z to 2020-08-02T00:21:41.000Z
max_forks: app/logger.py @ d3vzer0/kickstart-flask-vuejs (562a829d3f3b87488035719025f2d29b4fe33a89) | licenses: ["MIT"] | count: 1 | dates: 2021-07-27T03:43:24.000Z to 2021-07-27T03:43:24.000Z
import logging
from logging.handlers import SysLogHandler
# Logging environment that can be used by the application to output syslog
logging_object = logging.getLogger(__name__)
logging_object.setLevel(logging.INFO)
syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')
logging_object.addHandler(syslog_handler)
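# --- Illustrative usage sketch (not part of the original file) ---
# Other modules reuse the configured logger; the import path assumes the file
# lives at app/logger.py as listed in the row metadata.
#
#   from app.logger import logging_object
#   logging_object.info("application started")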
| 36.333333 | 74 | 0.844037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.256881 |
hexsha: 1f5af941019a09b58bc8c7a46b832a62890985af | size: 2,446 | ext: py | lang: Python
max_stars: db/schema.py @ aatrubilin/sqlalchemy_sessions (8f99c3bf42da7224bbb6622ab23222ee1ebf1627) | licenses: ["MIT"] | count: null | dates: null
max_issues: db/schema.py @ aatrubilin/sqlalchemy_sessions (8f99c3bf42da7224bbb6622ab23222ee1ebf1627) | licenses: ["MIT"] | count: null | dates: null
max_forks: db/schema.py @ aatrubilin/sqlalchemy_sessions (8f99c3bf42da7224bbb6622ab23222ee1ebf1627) | licenses: ["MIT"] | count: null | dates: null
import logging
from datetime import datetime
import sqlalchemy as sa
import sqlalchemy.orm as so
from .base import Base, Session
__all__ = ["User", "Message"]
logger = logging.getLogger(__name__)
class User(Base):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
nickname = sa.Column(sa.String, unique=True)
first_name = sa.Column(sa.String, nullable=True)
last_name = sa.Column(sa.String, nullable=True)
utc_created_at = sa.Column(sa.DateTime, default=datetime.utcnow)
messages = so.relationship("Message", lazy='dynamic')
query = Session.query_property()
def __init__(self, nickname, first_name=None, last_name=None):
self.nickname = nickname
self.first_name = first_name
self.last_name = last_name
def __repr__(self):
return "<User({s.id!r}, {s.nickname!r})>".format(s=self)
def __str__(self):
full_name = ""
if self.first_name:
full_name += self.first_name
if self.last_name:
if full_name:
full_name += " "
full_name += self.last_name
return full_name or self.nickname
@classmethod
def get_or_create(cls, nickname, **kwargs):
user = cls.query.filter(cls.nickname == nickname).one_or_none()
if user is None:
user = cls(nickname, **kwargs)
Session.add(user)
Session.flush()
logger.info("Created %r", user)
else:
logger.debug("Got %r", user)
return user
def create_message(self, text):
return Message.create(self.id, str(text))
class Message(Base):
__tablename__ = "messages"
id = sa.Column(sa.Integer, primary_key=True)
user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id, ondelete="CASCADE"), nullable=False)
text = sa.Column(sa.String, default=str)
utc_created_at = sa.Column(sa.DateTime, default=datetime.utcnow)
query = Session.query_property()
def __init__(self, user_id, text):
self.user_id = user_id
self.text = text
def __repr__(self):
return "<Message({s.id!r}, {s.user_id!r}, {s.text!r})>".format(s=self)
def __str__(self):
return self.text
@classmethod
def create(cls, user_id, text):
message = cls(user_id, text)
Session.add(message)
Session.flush()
logger.info("Created %r", message)
return message
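# --- Illustrative usage sketch (not part of the original file) ---
# Assumes `Session` from .base is a scoped session already bound to an engine
# and that the tables have been created; names and text are illustrative only.
#
#   user = User.get_or_create("alice", first_name="Alice")
#   message = user.create_message("hello, world")
#   Session.commit()
#   print(user, message, user.messages.count())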
| 27.795455 | 95 | 0.629191 | 2,240 | 0.915781 | 0 | 0 | 581 | 0.237531 | 0 | 0 | 178 | 0.072772 |
hexsha: 1f5bf93e6d736ffce8e14bcf71e0ff664aca6f6a | size: 553 | ext: py | lang: Python
max_stars: tests/private/test_uber_string_formatter.py @ odedlaz-oss/uberlogs (09658f6ee98b4018c1c3620f56973fcdadb260d5) | licenses: ["MIT"] | count: null | dates: null
max_issues: tests/private/test_uber_string_formatter.py @ odedlaz-oss/uberlogs (09658f6ee98b4018c1c3620f56973fcdadb260d5) | licenses: ["MIT"] | count: null | dates: null
max_forks: tests/private/test_uber_string_formatter.py @ odedlaz-oss/uberlogs (09658f6ee98b4018c1c3620f56973fcdadb260d5) | licenses: ["MIT"] | count: null | dates: null
import six
from unittest import TestCase
from uberlogs.private import UberStringFormatter
class UberStringFormatterTests(TestCase):
def setUp(self):
self.formatter = UberStringFormatter()
self.invalid_format = "{[blabla]"
def test_raise_on_invalid_format_when_not_silent(self):
with self.assertRaises(Exception):
list(self.formatter.parse(self.invalid_format, silent=False))
def test_no_raise_on_invalid_format_when_silent(self):
list(self.formatter.parse(self.invalid_format, silent=True))
| 29.105263 | 73 | 0.750452 | 459 | 0.830018 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.019892 |
hexsha: 1f5c32e422bbc6af249f8c6cdbc7e36215c76758 | size: 371 | ext: py | lang: Python
max_stars: testapp/models.py @ andrewyoung1991/django-redis-pubsub (6ec9467528919a20bc9db6ebe94d5929ddd028a6) | licenses: ["BSD-3-Clause"] | count: 21 | dates: 2016-02-11T06:04:48.000Z to 2021-12-27T07:02:28.000Z
max_issues: testapp/models.py @ kashyap2108/django-redis-pubsub (6ec9467528919a20bc9db6ebe94d5929ddd028a6) | licenses: ["BSD-3-Clause"] | count: 3 | dates: 2020-05-17T13:53:50.000Z to 2021-06-10T20:38:34.000Z
max_forks: testapp/models.py @ kashyap2108/django-redis-pubsub (6ec9467528919a20bc9db6ebe94d5929ddd028a6) | licenses: ["BSD-3-Clause"] | count: 8 | dates: 2016-02-05T20:17:43.000Z to 2020-07-14T17:10:20.000Z
from django.conf import settings
from django.db import models
from redis_pubsub.models import PublishableModel
class Message(PublishableModel):
"""
"""
PUBLISH_ON_CREATE = True
PUBLISH_ON_UPDATE = True
from_user = models.ForeignKey(settings.AUTH_USER_MODEL)
to_user = models.ForeignKey(settings.AUTH_USER_MODEL)
body = models.TextField()
| 21.823529 | 59 | 0.752022 | 256 | 0.690027 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.02965 |
hexsha: 1f5d11bfac525af7eaa9a4069790c1ad9c1d4423 | size: 10,602 | ext: py | lang: Python
max_stars: src/phat/thold.py @ rskene/phat (84a946e1e638642f36ce5fd81dc85aa89f7b66f0) | licenses: ["MIT"] | count: 2 | dates: 2021-07-23T11:34:21.000Z to 2022-01-09T17:22:45.000Z
max_issues: src/phat/thold.py @ rjskene/phat (84a946e1e638642f36ce5fd81dc85aa89f7b66f0) | licenses: ["MIT"] | count: 3 | dates: 2022-01-18T09:27:16.000Z to 2022-01-18T09:28:43.000Z
max_forks: src/phat/thold.py @ rskene/phat (84a946e1e638642f36ce5fd81dc85aa89f7b66f0) | licenses: ["MIT"] | count: null | dates: null
from functools import wraps
from typing import Iterable
import numpy as np
import scipy.stats as scist
import matplotlib.pyplot as plt
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
from phat.utils import argsetter
base = importr('base')
utils = importr('utils')
utils.chooseCRANmirror(ind=1)
utils.install_packages('POT')
POT = importr('POT')
def fit_line_within(stacked, ival):
ivalmask = np.logical_and(stacked[:,0]>=ival[0], stacked[:,0]<=ival[1])
return (*scist.linregress(stacked[ivalmask])), ivalmask.sum()
def threshset(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
data = kwargs['data']
spacer = 45 if 'spacer' not in kwargs else kwargs['spacer']
if not hasattr(self, 'tholds') and 'tholds' not in kwargs:
step = np.quantile(data, .995)/spacer
tholds = np.arange(-.1, max(data), step=step)
self.tholds = tholds
elif 'tholds' not in kwargs:
tholds = self.tholds
else:
self.tholds = kwargs['tholds']
kwargs['tholds'] = tholds
return func(self, *args, **kwargs)
return wrapper
class Threshold:
def __init__(self, data):
self.data = data
@argsetter(['data'])
@threshset
def MRL(self, data:Iterable=None,
tholds:Iterable=None, alpha:float=.05,
fig=None, ax=None, show_plot:bool=True,
splits:Iterable=None, *args, **kwargs
):
is_excess = np.array([data > thold for thold in tholds])
excesses = np.array([data - thold for thold in tholds])
excesses = np.where(
is_excess,
excesses,
np.nan
)
self.mean_exc = np.nanmean(excesses, axis=1)
stds = np.nanstd(excesses, axis=1)
z_inverse = scist.norm.ppf(1-(alpha/2))
CI = z_inverse*stds/(len(excesses)**0.5)
if show_plot:
if fig is None or ax is None:
fig, ax = plt.subplots(1,1,figsize=(10,6))
ax.plot(tholds, self.mean_exc)
ax.fill_between(tholds, self.mean_exc - CI, self.mean_exc + CI, alpha = 0.4)
ax.set_xlabel('u')
ax.set_ylabel('Mean Excesses')
ax.set_title('Mean Residual Life Plot')
if splits is not None:
self._MRL_regrs(splits, ax)
def _MRL_regrs(self, splits:Iterable, ax):
splits = np.array(splits)
stacked = np.vstack([self.tholds, self.mean_exc]).T
sgmts = np.vstack((splits[:-1], splits[1:])).T
for i in range(sgmts.shape[0]):
sgmt = sgmts[i]
b, a, r, p, stderr, n = fit_line_within(stacked, sgmt)
count = (self.data>sgmt[1]).sum()
y = b*sgmt + a
label = '[{:.4f},{:.4f}] N<{}; N>{}'.format(*sgmt, n, count) + r' $R^2: $' + f'{r**2:.0%}'
label += f' p-value: {p:.02f}'
ax.plot(sgmt, y, label=label)
plt.legend(loc='best')
@argsetter(['data'])
@threshset
def param_stable(self, data:Iterable=None,
tholds:Iterable=None, alpha:float=.05,
fig=None, axs=None,
*args, **kwargs
):
shape = []
scale = []
mod_scale = []
CI_shape = []
CI_mod_scale = []
z = scist.norm.ppf(1-(alpha/2))
for thold in tholds:
fit, _, _ = self.fit(data=data, thold=thold.item(), est='mle')
shape.append(fit[0][1])
CI_shape.append(fit[1][1]*z)
scale.append(fit[0][0])
mod_scale.append(fit[0][0] - (fit[0][1]*thold))
Var_mod_scale = (fit[3][0] - (thold*fit[3][2]) - thold*(fit[3][1] - (fit[3][3]*thold)))
CI_mod_scale.append((Var_mod_scale**0.5)*z)
        # Plotting shape parameter against u values
axs[0].errorbar(tholds, shape, yerr = CI_shape, fmt = 'o' )
axs[0].set_xlabel('u')
axs[0].set_ylabel('Shape')
axs[0].set_title('Shape Parameter Stability')
#Plotting modified scale parameter against u values
axs[1].errorbar(tholds, mod_scale, yerr = CI_mod_scale, fmt = 'o')
axs[1].set_xlabel('u')
axs[1].set_ylabel('Modified Scale')
axs[1].set_title('Modified Scale Parameter Stability')
@argsetter(['data'])
def fit(self, data:Iterable=None, thold:float=0, est:str='mle'):
rdata = np.sort(data)
data_over_thresh = rdata[rdata > thold]
data_exc= data_over_thresh - thold
rdata = FloatVector(rdata)
fit = POT.fitgpd(rdata, thold, est=est)
return fit, data_over_thresh, data_exc
@argsetter(['data'])
def qqplot(self,
data:Iterable=None, thold:float=0, est:str='mle', alpha:float=.05,
fig=None, ax=None
):
fit, over_thresh, _ = self.fit(data=data, thold=thold, est=est)
scale, shape = fit[0][0], fit[0][1]
p = []
n = len(data)
data = np.sort(data)
i_initial = np.searchsorted(data, thold)
k = i_initial - 1
p = (np.arange(i_initial, n) - .35) / n
p0 = (k - 0.35)/(n)
quantiles = thold + ((scale/shape)*(((1-((p-p0)/(1-p0)))**-shape) - 1))
n = len(over_thresh)
y = np.arange(1,n+1)/n
#Kolmogorov-Smirnov Test for getting the confidence interval
K = (-0.5*np.log(alpha/2))**0.5
M = (len(p)**2/(2*len(p)))**0.5
CI_qq_high = []
CI_qq_low = []
for prob in y:
F1 = prob - K/M
F2 = prob + K/M
CI_qq_low.append(thold + ((scale/shape)*(((1-((F1)/(1)))**-shape) - 1)))
CI_qq_high.append(thold + ((scale/shape)*(((1-((F2)/(1)))**-shape) - 1)))
a, b, r_value, p_value, std_err = scist.linregress(quantiles, over_thresh)
ax.scatter(quantiles, over_thresh)
x = np.linspace(0,1,101)*100
ax.plot(x, a*x + b, c='black', label='Regression')
ax.plot(over_thresh, CI_qq_low, linestyle='--', color='red', alpha = 0.5, lw = 0.8, label='Confidence Bands')
ax.plot(over_thresh, CI_qq_high, linestyle='--', color='red', alpha = 0.5, lw = 0.8)
ax.set_xlabel('Theoretical GPD Quantiles')
ax.set_ylabel('Sample Quantiles')
ax.legend()
ax.set_title('Q-Q Plot')
@argsetter(['data'])
def ppplot(self, data:Iterable=None, thold:float=0, est:str='mle', alpha:float=.05,
fig=None, ax=None
):
fit, over_thresh, _ = self.fit(data=data, thold=thold, est=est)
scale, shape = fit[0][0], fit[0][1]
n = len(over_thresh)
y = np.arange(1,n+1)/n
cdf_pp = scist.genpareto.cdf(over_thresh, shape, loc=thold, scale=scale)
#Getting Confidence Intervals using the Dvoretzky–Kiefer–Wolfowitz method
data = np.sort(data)
i_initial = np.searchsorted(data, thold)
F1 = []
F2 = []
for i in range(i_initial, len(data)):
e = (((np.log(2/alpha))/(2*len(over_thresh)))**0.5)
F1.append(y[i-i_initial] - e)
F2.append(y[i-i_initial] + e)
ax.scatter(y, cdf_pp)
a, b, r_value, p_value, std_err = scist.linregress(y, cdf_pp)
ax.plot(y, a*y + b, c='black', label='Regression')
ax.plot(y, F1, linestyle='--', color='red', alpha = 0.5, lw = 0.8, label = 'Confidence Bands')
ax.plot(y, F2, linestyle='--', color='red', alpha = 0.5, lw = 0.8)
ax.set_xlabel('Empirical Probability')
        ax.set_ylabel('Theoretical Probability')
ax.legend()
ax.set_title('P-P Plot')
@argsetter(['data'])
def return_value(self, data:Iterable=None, thold:float=0, alpha:float=.05,
block_size:int=252, return_period:int=252*100, est:str='mle',
fig=None, ax=None
):
data = np.sort(data)
fit, over_thresh, _ = self.fit(data=data, thold=thold, est=est)
scale, shape = fit[0][0], fit[0][1]
#Computing the return value for a given return period with the confidence interval estimated by the Delta Method
m = return_period
Eu = len(over_thresh)/len(data)
x_m = thold + (scale/shape)*(((m*Eu)**shape) - 1)
#Solving the delta method
d = Eu*(1-Eu)/len(data)
e = fit[3][0]
f = fit[3][1]
g = fit[3][2]
h = fit[3][3]
a = (scale*(m**shape))*(Eu**(shape-1))
b = (shape**-1)*(((m*Eu)**shape) - 1)
c = (-scale*(shape**-2))*((m*Eu)**shape - 1) + (scale*(shape**-1))*((m*Eu)**shape)*np.log(m*Eu)
CI = (scist.norm.ppf(1-(alpha/2))*((((a**2)*d) + (b*((c*g) + (e*b))) + (c*((b*f) + (c*h))))**0.5))
ny = block_size
N_year = return_period/block_size
i_initial = np.searchsorted(data, thold)
p = np.arange(i_initial,len(data))/(len(data))
N = 1/(ny*(1 - p))
year_array = np.arange(min(N), N_year+0.1, 0.1)
#Algorithm to compute the return value and the confidence intervals for plotting
z_N = []
CI_z_N_high_year = []
CI_z_N_low_year = []
for year in year_array:
z_N.append(thold + (scale/shape)*(((year*ny*Eu)**shape) - 1))
a = (scale*((year*ny)**shape))*(Eu**(shape-1))
b = (shape**-1)*((((year*ny)*Eu)**shape) - 1)
c = (-scale*(shape**-2))*(((year*ny)*Eu)**shape - 1) + (scale*(shape**-1))*(((year*ny)*Eu)**shape)*np.log((year*ny)*Eu)
CIyear = (scist.norm.ppf(1-(alpha/2))*((((a**2)*d) + (b*((c*g) + (e*b))) + (c*((b*f) + (c*h))))**0.5))
CI_z_N_high_year.append(thold + (scale/shape)*(((year*ny*Eu)**shape) - 1) + CIyear)
CI_z_N_low_year.append(thold + (scale/shape)*(((year*ny*Eu)**shape) - 1) - CIyear)
#Plotting Return Value
ax.plot(year_array, CI_z_N_high_year, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Confidence Bands')
ax.plot(year_array, CI_z_N_low_year, linestyle='--', color='red', alpha = 0.8, lw = 0.9)
ax.plot(year_array, z_N, color = 'black', label = 'Theoretical Return Level')
ax.scatter(N, over_thresh, label = 'Empirical Return Level')
text = f'{N_year:.0f} Year Return Level: {x_m:.2f} \u00B1 {CI:.2f}'
ax.text(.6,.05,text, transform=ax.transAxes)
ax.set_xscale('log')
ax.set_xlabel('Return Period')
ax.set_title('Return Level Plot')
ax.legend()
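# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the rpy2/POT setup at the top of this module succeeded and that
# phat's @argsetter decorator fills `data` from the instance when omitted.
# The simulated losses and the candidate threshold are illustrative only.
#
#   rng = np.random.default_rng(0)
#   losses = np.abs(rng.standard_t(df=3, size=5_000))
#   ts = Threshold(losses)
#
#   fig, ax = plt.subplots(figsize=(10, 6))
#   ts.MRL(fig=fig, ax=ax)                                # mean residual life plot
#   fit, over_thresh, excesses = ts.fit(thold=2.0, est='mle')
#   plt.show()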
| 36.940767 | 131 | 0.54518 | 9,409 | 0.887139 | 0 | 0 | 9,232 | 0.870451 | 0 | 0 | 1,256 | 0.118424 |
hexsha: 1f5e5337671f2aa26669d1f985e1feb6f9bb2487 | size: 3,075 | ext: py | lang: Python
max_stars: app/eventFrameTemplates/forms.py @ DeschutesBrewery/brewerypi (5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f) | licenses: ["MIT"] | count: 27 | dates: 2017-11-27T05:01:05.000Z to 2020-11-14T19:52:26.000Z
max_issues: app/eventFrameTemplates/forms.py @ DeschutesBrewery/brewerypi (5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f) | licenses: ["MIT"] | count: 259 | dates: 2017-11-23T00:43:26.000Z to 2020-11-03T01:07:30.000Z
max_forks: app/eventFrameTemplates/forms.py @ DeschutesBrewery/brewerypi (5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f) | licenses: ["MIT"] | count: 8 | dates: 2018-10-29T04:39:29.000Z to 2020-10-01T22:18:12.000Z
from flask_wtf import FlaskForm
from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, ValidationError
from wtforms.validators import Length, Required
from .. models import EventFrameTemplate
class CopyEventFrameTemplateForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
toElementTemplate = SelectField("To Element Template", validators = [Required()], coerce = int)
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameTemplate = EventFrameTemplate.query.filter_by(ElementTemplateId = self.toElementTemplate.data, Name = field.data).first()
if eventFrameTemplate is not None:
# Trying to copy an eventFrameTemplate using a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
class EventFrameTemplateForm(FlaskForm):
parentEventFrameTemplateId = HiddenField()
name = StringField("Name", validators = [Required(), Length(1, 45)])
order = IntegerField("Order", validators = [Required()])
description = StringField("Description", validators = [Length(0, 255)])
eventFrameTemplateId = HiddenField()
elementTemplateId = HiddenField()
parentEventFrameTemplateId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
if self.elementTemplateId.data == "":
eventFrameTemplate = EventFrameTemplate.query.filter_by(Name = field.data,
ParentEventFrameTemplateId = self.parentEventFrameTemplateId.data).first()
else:
eventFrameTemplate = EventFrameTemplate.query.filter_by(ElementTemplateId = self.elementTemplateId.data, Name = field.data).first()
if eventFrameTemplate:
if self.eventFrameTemplateId.data == "":
# Trying to add a new eventFrameTemplate using a name that already exists.
validationError = True
else:
if int(self.eventFrameTemplateId.data) != eventFrameTemplate.EventFrameTemplateId:
# Trying to change the name of an eventFrameTemplate to a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
def validate_order(self, field):
validationError = False
eventFrameTemplate = EventFrameTemplate.query.filter_by(Order = field.data, ParentEventFrameTemplateId = self.parentEventFrameTemplateId.data).first()
if eventFrameTemplate:
if self.eventFrameTemplateId.data == "":
# Trying to add a new eventFrameTemplate using an order that already exists.
validationError = True
else:
if int(self.eventFrameTemplateId.data) != eventFrameTemplate.EventFrameTemplateId:
# Trying to change the order of an eventFrameTemplate to an order that already exists.
validationError = True
if validationError:
raise ValidationError('The order "{}" already exists.'.format(field.data))
| 45.220588 | 152 | 0.766504 | 2,848 | 0.926179 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.185041 |
hexsha: 1f605b59e4b42a83b06301dd95460d66a85a140f | size: 3,751 | ext: py | lang: Python
max_stars: flask_demo.py @ tlinc/cyber-ng-18 (40dd088b5785e75e59afded17f71ea50d64ae77f) | licenses: ["MIT"] | count: null | dates: null
max_issues: flask_demo.py @ tlinc/cyber-ng-18 (40dd088b5785e75e59afded17f71ea50d64ae77f) | licenses: ["MIT"] | count: null | dates: null
max_forks: flask_demo.py @ tlinc/cyber-ng-18 (40dd088b5785e75e59afded17f71ea50d64ae77f) | licenses: ["MIT"] | count: null | dates: null
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
from stegano import lsb
from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/home/pi/Destktop/StegyCat/pics'
app = Flask(__name__, template_folder='templates')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def stego_in(ct, mac, nonce, picture):
secret_message = {'msg': ct, 'nc': nonce, 'mc': mac}
secret_message = str(secret_message)
secret_image = lsb.hide('./pics/cat.png', secret_message)
secret_image.save('./secretpics/secret_image.png')
#print(var)
def stego_out(picture):
hidden_ct = lsb.reveal(picture)
#Parse here
dt = eval(hidden_ct)
message = dt['msg']
nonce = dt['nc']
mac = dt['mc']
return message, nonce, mac
def decrypt(message, nonce, mac):
f = open("key.txt", "r")
string = f.read()
dict = eval(string)
key = dict['key']
#ctlength = len(hidden_ct)
#nonce = hidden_ct[ctlength:]
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=backend)
decryptor = cipher.decryptor()
msg = decryptor.update(message) + decryptor.finalize()
print(msg)
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(msg)
cmpmac = digest.finalize()
if mac != cmpmac:
return 0
else:
return msg
def encrypt(msg, email):
backend = default_backend()
# Salts should be randomly generated
salt = os.urandom(16)
nonce = os.urandom(16)
# derive
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=backend
)
key = kdf.derive(email.encode('UTF-8'))
dict = {'key': key}
f = open("key.txt" ,"w")
f.write(str(dict))
# verify
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=backend
)
#kdf.verify(b"[email protected]", key)
cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=backend)
encryptor = cipher.encryptor()
ct = encryptor.update(msg.encode('UTF-8')) + encryptor.finalize()
#newct = ct + nonce
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(msg.encode('UTF-8'))
mac = digest.finalize()
return ct, mac, nonce
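# --- Illustrative usage sketch (not part of the original file) ---
# Round trip of the two helpers above, independent of the Flask routes.
# Note that encrypt() persists the derived key in key.txt, which decrypt()
# reads back; the message and email below are illustrative only.
#
#   ct, mac, nonce = encrypt("attack at dawn", "user@example.com")
#   print(decrypt(ct, nonce, mac))   # b'attack at dawn' if the MAC verifies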
@app.route('/')
def index():
return render_template('create.html')
@app.route('/get-info', methods=['POST', 'GET'])
def get_info():
if request.method == 'POST':
result = request.form
picture = result.getlist('file')
msg = result.get('message')
email = result.get('email')
#write key(email) to file
msg, mac, nonce = encrypt(msg, email)
stego_in(msg, mac, nonce, picture)
#redirect(url_for('encrypt', msg=msg, email=email))
return render_template("decrypt.html")
@app.route('/get_decrypt', methods=['POST', 'GET'])
def get_decrypt():
if request.method == 'POST':
# picture = request.form['file']
# filename = secure_filename(file.filename)
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
message, nonce, mac = stego_out('./secretpics/secret_image.png')
#get key from file
pt = decrypt(message, nonce, mac)
return render_template("display.html", message = pt)
#read key from file
if __name__ == '__main__':
app.run(debug=True)
| 27.379562 | 76 | 0.643295 | 0 | 0 | 0 | 0 | 1,042 | 0.277793 | 0 | 0 | 789 | 0.210344 |
hexsha: 1f61bacc0966d711145c05f1a6526934fd3ce1d0 | size: 1,585 | ext: py | lang: Python
max_stars: ex0095.py @ EwertonRosendo/PastaDeExercicios (68d23194b87ce1c8405c70fcceb3378955815d7d) | licenses: ["MIT"] | count: null | dates: null
max_issues: ex0095.py @ EwertonRosendo/PastaDeExercicios (68d23194b87ce1c8405c70fcceb3378955815d7d) | licenses: ["MIT"] | count: null | dates: null
max_forks: ex0095.py @ EwertonRosendo/PastaDeExercicios (68d23194b87ce1c8405c70fcceb3378955815d7d) | licenses: ["MIT"] | count: null | dates: null
jogador = dict()
lista_de_jogadores = []
lista = []
print("_"*38)
contador = 0
while True:
jogador["nome"] = str(input("Informe o nome do jogador: ")).strip()
jogador["partidas"] = int(input("Informe quantas partidas foram jogadas: "))
jogador["gols marcados"] = []
for c in range(0, jogador["partidas"]):
jogador["gols marcados"].append((int(input("Partida {}: ".format(c)))))
lista.append(jogador.copy())
lista_de_jogadores.append(lista[:])
lista.clear()
print("=-" * 20)
print("Ultimo jogador cadastrado:")
for k, v in jogador.items():
print(f"{k}: {v}")
jogador.clear()
print("=-"*20), print()
print(lista_de_jogadores), print()
print("=-" * 20)
continuar = str(input("Deseja continuar? [S/N]")).strip().upper()
while continuar not in "S N NAO SIM NÃO":
continuar = str(input("Informe um valor valido[S/N]: ")).upper().strip()
if continuar in "NAO N NÃO":
break
for cod, j in enumerate(lista_de_jogadores):
print("{} ---- {}".format(cod, j))
while True:
contador = int(input("Mostrar dados de qual jogador[999 PARA PARAR]? "))
if contador == 999:
break
print(f"-- LEVANTAMENTO DO JOGADOR {lista_de_jogadores[contador][0]['nome']}:")
while contador > (len(lista_de_jogadores)-1) or contador < 0:
contador = int(input("Informe um valor válido: "))
for p, g in enumerate(lista_de_jogadores[contador][0]['gols marcados']):
print("No jogo {:>3} fez {:>3} gols".format(p, g))
# print(lista_de_jogadores[contador][0]['gols marcados'])
| 40.641026 | 83 | 0.620189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.344458 |
hexsha: 1f6205c1effd40848344000bccb696a977da03f4 | size: 954 | ext: py | lang: Python
max_stars: app/stock/apps.py @ shift37/asx_gym (dd3d8dafae4f22ab9c9027bf362013255dbc6c36) | licenses: ["RSA-MD"] | count: null | dates: null
max_issues: app/stock/apps.py @ shift37/asx_gym (dd3d8dafae4f22ab9c9027bf362013255dbc6c36) | licenses: ["RSA-MD"] | count: 3 | dates: 2020-06-06T08:27:08.000Z to 2020-06-13T09:51:26.000Z
max_forks: app/stock/apps.py @ asxgym/asx_gym (8b7745820c0d4cd59281acf7c003ec1f1938005a) | licenses: ["RSA-MD"] | count: null | dates: null
from django.apps import AppConfig
import logging
logger = logging.getLogger(__name__)
class StockConfig(AppConfig):
name = 'stock'
def ready(self):
from background_task.models import Task
task_update_daily_stock_price = Task.objects.filter(
task_name='stock.tasks.cron_update_daily_stock_price').first()
if not task_update_daily_stock_price:
from stock.tasks import cron_update_daily_stock_price
cron_update_daily_stock_price(repeat=Task.DAILY)
logger.info("start cron_update_daily_stock_price task")
task_update_live_stock_price = Task.objects.filter(
task_name='stock.tasks.cron_update_live_stock_price').first()
if not task_update_live_stock_price:
from stock.tasks import cron_update_live_stock_price
cron_update_live_stock_price(repeat=60 * 20)
logger.info("start cron_update_live_stock_price task")
| 38.16 | 74 | 0.72327 | 864 | 0.90566 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.183438 |
hexsha: 1f634bdb1c7a7c3154dda573b13beb16dfe4e289 | size: 8,568 | ext: py | lang: Python
max_stars: slide/models.py @ AICAN-Research/learn-pathology (663f9c5f125857badf5bb41b6bfa2d9100578e2e) | licenses: ["MIT"] | count: 2 | dates: 2021-09-16T08:38:10.000Z to 2021-09-16T10:46:53.000Z
max_issues: slide/models.py @ AICAN-Research/learn-pathology (663f9c5f125857badf5bb41b6bfa2d9100578e2e) | licenses: ["MIT"] | count: 6 | dates: 2021-09-20T10:56:21.000Z to 2022-01-05T08:25:17.000Z
max_forks: slide/models.py @ AICAN-Research/learn-pathology (663f9c5f125857badf5bb41b6bfa2d9100578e2e) | licenses: ["MIT"] | count: null | dates: null
import threading
from io import BytesIO
from django.db import models
import fast
import time
import numpy as np
from PIL import Image
from django.conf import settings
from slide.timing import Timer
from tag.models import Tag
class Slide(models.Model):
"""
Model for whole slide image
"""
name = models.CharField(max_length=255)
path = models.CharField(max_length=1024)
description = models.TextField()
pathology = models.BooleanField(default=False, help_text='Does the slide show pathology or not')
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.name
def load_image(self):
if not hasattr(self, '_image'):
self.timers = {
'import': Timer('Importing WSI'),
'getPatchImage': Timer('getPatchImage function'),
'sharpening': Timer('Tile sharpening'),
'conversion': Timer('Tile FAST->PIL conversion'),
'resize': Timer('Tile resize'),
'jpeg': Timer('JPEG Conversion'),
}
self.timers['import'].start()
importer = fast.WholeSlideImageImporter.create(self.path)
try:
image = importer.runAndGetOutputData()
except:
raise RuntimeError('Failed to load slide image pyramid from ' + self.path)
self._image = image
self.timers['import'].stop()
# Count how many OSD levels we need: OSD requires that every level is downsampled by a factor of 2
# TODO This assumes that every level size of WSI in FAST is a multiple of 2
current_width = image.getFullWidth()
current_height = image.getFullHeight()
levels = image.getNrOfLevels()
smallest_width = image.getLevelWidth(levels-1)
smallest_height = image.getLevelHeight(levels-1)
osd_level = 0
tile_width = 256
tile_height = 256
if self.path.endswith('.vsi'): # TODO Hack for now
tile_width = image.getLevelTileWidth(0)
tile_height = image.getLevelTileHeight(0)
osd_tile_width = {0: tile_width}
osd_tile_height = {0: tile_height}
osd_to_fast_level_map = {0: 0}
print('Smallest width', smallest_width)
while abs(current_width - smallest_width/2) > 1:
print(osd_level, current_width, current_height)
current_width = int(current_width/2)
current_height = int(current_height/2)
if self.path.endswith('.vsi'): # TODO Hack for now
current_width += current_width % tile_width
current_height += current_height % tile_height
osd_level += 1
# If current_width is closer to previous FAST level width, than the next FAST level width, then use that.
if osd_to_fast_level_map[osd_level-1] < levels-1 and abs(current_width - image.getLevelWidth(osd_to_fast_level_map[osd_level-1]+1)) < 1:
osd_tile_width[osd_level] = tile_width
osd_tile_height[osd_level] = tile_height
osd_to_fast_level_map[osd_level] = osd_to_fast_level_map[osd_level - 1] + 1
print('Map to next: ', osd_to_fast_level_map[osd_level])
else:
osd_tile_width[osd_level] = osd_tile_width[osd_level-1]*2
osd_tile_height[osd_level] = osd_tile_height[osd_level-1]*2
osd_to_fast_level_map[osd_level] = osd_to_fast_level_map[osd_level - 1]
print('Map to previous', osd_to_fast_level_map[osd_level])
if current_width < 1024:
break
print('Total OSD levels', osd_level+1)
self._fast_levels = image.getNrOfLevels()
self._osd_levels = osd_level+1
self._width = image.getFullWidth()
self._height = image.getFullHeight()
self._tile_width = tile_width
self._tile_height = tile_height
self._osd_tile_width = osd_tile_width
self._osd_tile_height = osd_tile_height
self._osd_to_fast_level = osd_to_fast_level_map
@property
def image(self):
self.load_image()
return self._image
@property
def width(self):
self.load_image()
return self._width
@property
def height(self):
self.load_image()
return self._height
@property
def osd_levels(self):
self.load_image()
return self._osd_levels
@property
def tile_width(self):
self.load_image()
return self._tile_width
@property
def tile_height(self):
self.load_image()
return self._tile_height
def get_fast_level(self, osd_level):
"""
Get FAST image pyramid level from OSD level
"""
self.load_image()
return self._osd_to_fast_level[osd_level]
def get_osd_tile_size(self, osd_level):
self.load_image()
return self._osd_tile_width[osd_level], self._osd_tile_height[osd_level]
def get_fast_tile_size(self):
self.load_image()
return self._tile_width, self._tile_height
def get_osd_tile_as_buffer(self, osd_level, x, y):
fast_level = self.get_fast_level(osd_level)
width, height = self.get_osd_tile_size(osd_level)
access = self._image.getAccess(fast.ACCESS_READ)
tile_width = width
tile_height = height
if x*width + tile_width >= self._image.getLevelWidth(fast_level):
tile_width = self._image.getLevelWidth(fast_level) - x*width - 1
if y*height + tile_height >= self._image.getLevelHeight(fast_level):
tile_height = self._image.getLevelHeight(fast_level) - y*height - 1
self.timers['getPatchImage'].start()
image = access.getPatchAsImage(fast_level, x*width, y*height, tile_width, tile_height)
self.timers['getPatchImage'].stop()
self.timers['sharpening'].start()
sharpening = fast.ImageSharpening.create(1.5).connect(image)
image = sharpening.runAndGetOutputData()
self.timers['sharpening'].stop()
#tileAccess = image.getImageAccess(fast.ACCESS_READ)
#return Image.frombytes(size=(tile_width, tile_height), data=tileAccess.get(), mode='RGB')
# TODO get rid of asarray conversion, and read directly from bytes instead somehow
self.timers['conversion'].start()
image = np.asarray(image)
tile = Image.fromarray(image, mode='RGB')
self.timers['conversion'].stop()
if tile.width != self._tile_width: # TODO What about edges cases here.
self.timers['resize'].start()
tile.thumbnail((self._tile_height, self._tile_width), resample=Image.BICUBIC)
self.timers['resize'].stop()
# Convert PIL image to JPEG byte buffer and send back
self.timers['jpeg'].start()
buffer = BytesIO()
tile.save(buffer, 'jpeg', quality=75) # TODO Set quality
self.timers['jpeg'].stop()
if settings.PRINT_RUNTIME:
print('Runtimes')
print('==============================')
for timer in self.timers.values():
timer.print()
return buffer
class AnnotatedSlide(models.Model):
"""
Model for an annotated slide.
A slide can have multiple annotations.
A task uses an annotated slide.
"""
slide = models.ForeignKey(Slide, on_delete=models.CASCADE)
def get_html(self):
"""
Get HTML for all annotations
"""
html = ''
for pointer in Pointer.objects.filter(annotated_slide=self):
html += f'<div id="pointer-{pointer.id}" class="overlay"> {pointer.text} →</div>'
return html
def get_js(self):
"""
Get JS for all annotations
"""
js = ''
for pointer in Pointer.objects.filter(annotated_slide=self):
js += f"{{id: 'pointer-{pointer.id}', x: {pointer.position_x}, y: {pointer.position_y}, placement: 'RIGHT', checkResize: false }},"
return js
class Pointer(models.Model):
"""
A pointer on a slide consisting of a position (x,y) and a text
"""
annotated_slide = models.ForeignKey(AnnotatedSlide, on_delete=models.CASCADE)
position_x = models.FloatField()
position_y = models.FloatField()
text = models.CharField(max_length=256)
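# --- Illustrative usage sketch (not part of the original file) ---
# Assumes a Slide row exists whose path FAST can open; kept as comments to
# avoid import-time side effects in a Django models module.
#
#   slide = Slide.objects.first()
#   buffer = slide.get_osd_tile_as_buffer(osd_level=0, x=0, y=0)
#   with open('tile.jpg', 'wb') as f:
#       f.write(buffer.getvalue())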
| 38.25 | 152 | 0.615079 | 8,334 | 0.972689 | 0 | 0 | 532 | 0.062092 | 0 | 0 | 1,783 | 0.2081 |
hexsha: 1f641a14add400abd8e0ed7c75835db3c0d6d277 | size: 742 | ext: py | lang: Python
max_stars: xpresso/_utils/endpoint_dependant.py @ adriangb/xpresso (43fcc360f7b19c00e0b78480f96390bcb4d28053) | licenses: ["MIT"] | count: 75 | dates: 2022-01-18T02:17:57.000Z to 2022-03-24T02:30:04.000Z
max_issues: xpresso/_utils/endpoint_dependant.py @ adriangb/xpresso (43fcc360f7b19c00e0b78480f96390bcb4d28053) | licenses: ["MIT"] | count: 73 | dates: 2022-01-18T03:01:27.000Z to 2022-03-27T16:41:38.000Z
max_forks: xpresso/_utils/endpoint_dependant.py @ adriangb/xpresso (43fcc360f7b19c00e0b78480f96390bcb4d28053) | licenses: ["MIT"] | count: 3 | dates: 2022-01-18T22:47:06.000Z to 2022-01-25T02:03:53.000Z
from __future__ import annotations
import typing
from di.api.providers import CallableProvider, CoroutineProvider
from di.dependant import Dependant
from xpresso.dependencies._dependencies import Depends, DependsMarker
Endpoint = typing.Union[CallableProvider[typing.Any], CoroutineProvider[typing.Any]]
class EndpointDependant(Dependant[typing.Any]):
def __init__(
self,
endpoint: Endpoint,
sync_to_thread: bool = False,
) -> None:
super().__init__(
call=endpoint,
scope="endpoint",
use_cache=False,
wire=True,
sync_to_thread=sync_to_thread,
)
def get_default_marker(self) -> DependsMarker[None]:
return Depends()
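# --- Illustrative usage sketch (not part of the original file) ---
# Wraps a coroutine endpoint with the class above; only names defined in this
# module are used, and the endpoint itself is a made-up example.
if __name__ == "__main__":
    async def hello() -> str:
        return "hi"

    dependant = EndpointDependant(hello)
    print(type(dependant).__name__, dependant.get_default_marker())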
| 25.586207 | 84 | 0.677898 | 431 | 0.580863 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013477 |
hexsha: 1f643bc3dbbf8bbb68623cedf5db93412e8053a1 | size: 554 | ext: py | lang: Python
max_stars: MkSSensor.py @ MakeSenseCorp/mksdk-py (3466124288c3a89effa0e918c2f310e25db17e0e) | licenses: ["Apache-2.0"] | count: null | dates: null
max_issues: MkSSensor.py @ MakeSenseCorp/mksdk-py (3466124288c3a89effa0e918c2f310e25db17e0e) | licenses: ["Apache-2.0"] | count: 7 | dates: 2018-02-19T12:15:46.000Z to 2018-05-04T23:02:12.000Z
max_forks: MkSSensor.py @ MakeSenseCorp/mksdk-py (3466124288c3a89effa0e918c2f310e25db17e0e) | licenses: ["Apache-2.0"] | count: null | dates: null
#!/usr/bin/python
class Sensor:
Name = ""
ID = 0
UUID = 0
Type = 0
Value = 0
def __init__(self, id, type, local_id):
self.ID = local_id
self.UUID = id[:-1] + str(local_id)
self.Type = type
def SetInterval(self, interval):
self.UpdateInterval = interval
def SetUUID(self, device_uuid, local_id):
self.UUID = device_uuid[:-1] + str(local_id)
def ConvertToStr(self):
return "{\"id\":" + self.ID + ",\"uuid\":\"" + str(self.UUID) + "\",\"type\":" + str(self.Type) + ",\"name\":\"" + self.Name + "\"}"
| 24.086957 | 135 | 0.566787 | 531 | 0.958484 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.138989 |
hexsha: 1f6482191a91e02ca740cd105cd4bb4ccfd6872b | size: 1,010 | ext: py | lang: Python
max_stars: tuples.py @ ShuhaoZQGG/Python-Very-Beginner-to-Very-Intermediate (cfad98b1c1c175761d3a68861438562f7d410cb0) | licenses: ["MIT"] | count: null | dates: null
max_issues: tuples.py @ ShuhaoZQGG/Python-Very-Beginner-to-Very-Intermediate (cfad98b1c1c175761d3a68861438562f7d410cb0) | licenses: ["MIT"] | count: null | dates: null
max_forks: tuples.py @ ShuhaoZQGG/Python-Very-Beginner-to-Very-Intermediate (cfad98b1c1c175761d3a68861438562f7d410cb0) | licenses: ["MIT"] | count: null | dates: null
mytuple = ("Max", 28, "Boston")
print(mytuple)
print(type(mytuple))
mytuple2 = ("Max") ## , is needed before the closing paranthese if only one string
print(mytuple2)
print(type(mytuple2))
mt3 = tuple(["Max", 28, "Boston"]) ## mt indicates mytuple + number
print(mt3)
print(type(mt3))
item = mytuple [0]
print(item)
item2 = mytuple [-2]
print(item2)
for i in mytuple:
print(i)
if "Max" in mytuple:
print("yes")
else:
print("No")
mt4 = ('a', 'b', 'c' ,'d')
print(mt4.count('c'))
print(mt4.count('z'))
print(mt4.index('c'))
## convert tuple to list and vice versa
mylist = list(mytuple)
print(mylist)
mt5=tuple(mylist)
print(mt5)
a = (1,2,3,4,5,6,7,8,9)
b = a[::-1]
print(a)
print(b)
mt6 = "Max", 28, "Boston"
name, age, city = mt6
print(name)
print(age)
print(city)
mt7 = ("Max", 180, 90, 35, "Boston")
name2, *data, city2 = mt7
print(name2)
print(*data) ## height, weight, age
print(city2)
## tuples are immutable, use less memory than lists, and suit fixed collections of data
| 17.413793 | 104 | 0.620792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.3 |
hexsha: 1f64ad352e9b9691d83fdce5ed744e84a89c5372 | size: 13,330 | ext: py | lang: Python
max_stars: create_pretraining_data_lm.py @ twilightdema/ALBERT_Thai (2c5612237a6843c4949dd941dbcd01ca91f82f2b) | licenses: ["Apache-2.0"] | count: null | dates: null
max_issues: create_pretraining_data_lm.py @ twilightdema/ALBERT_Thai (2c5612237a6843c4949dd941dbcd01ca91f82f2b) | licenses: ["Apache-2.0"] | count: 4 | dates: 2020-09-25T22:35:29.000Z to 2022-02-09T23:37:24.000Z
max_forks: create_pretraining_data_lm.py @ twilightdema/ALBERT_Thai (2c5612237a6843c4949dd941dbcd01ca91f82f2b) | licenses: ["Apache-2.0"] | count: 1 | dates: 2020-10-17T01:36:03.000Z to 2020-10-17T01:36:03.000Z
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
# coding=utf-8
"""Create Language Model TF examples for ALBERT (Decoder-Only)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import tokenization
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"output_file", None,
"Output TF example file (or comma-separated list of files).")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string("input_file_mode", "r",
"The data format of the input file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"do_whole_word_mask", True,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_integer("max_seq_length", 256, "Maximum sequence length.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_float(
"short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the "
"maximum length.")
class LMTrainingInstance(object):
"""A single training instance."""
def __init__(self, tokens, token_boundary):
self.tokens = tokens
self.token_boundary = token_boundary
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "token_boundary: %s\n" % (" ".join(
[str(x) for x in self.token_boundary]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
output_files):
"""Create TF example files from `LMTrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
print('Saving instance ' + str(inst_index))
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
    # For LM, the input mask is a 2D array with Transformer-decoder-style masking.
    # In order to save space, we only expand the data to 2D when feeding the model.
    # Here we just need to store the 1D mask of the sequence so that the corresponding 2D map can be reconstructed later.
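    # For example, the stored 1D mask can later be expanded into a lower-triangular
    # (causal) map; an illustrative sketch, not executed here:
    #   causal = np.tril(np.ones((max_seq_length, max_seq_length)))
    #   mask_2d = causal * np.outer(input_mask, input_mask)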
input_mask = [1] * len(input_ids)
token_boundary = list(instance.token_boundary)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
token_boundary.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["token_boundary"] = create_int_feature(token_boundary)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files, tokenizer, max_seq_length,
short_seq_prob, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
line_num = 0
with tf.gfile.GFile(input_file, FLAGS.input_file_mode) as reader:
while True:
print('Reading line ' + str(line_num))
line = reader.readline()
if not FLAGS.spm_model_file:
line = tokenization.convert_to_unicode(line)
if not line:
break
if FLAGS.spm_model_file:
line = tokenization.preprocess_text(line, lower=FLAGS.do_lower_case)
else:
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
line_num = line_num + 1
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
print('all_documents length = ' + str(len(all_documents)))
vocab_words = list(tokenizer.vocab.keys())
instances = []
for document_index in range(len(all_documents)):
print('Creating instance for doc ' + str(document_index))
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP]
  # Note that in LM, [CLS] is at the end of the string (because of the attention constraint).
max_num_tokens = max_seq_length - 2
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# In LM, we only have tokens_a
tokens_a = []
for j in range(len(current_chunk)):
tokens_a.extend(current_chunk[j])
truncate_seq(tokens_a, max_num_tokens, rng)
assert len(tokens_a) >= 1
tokens = []
for token in tokens_a:
tokens.append(token)
tokens.append("[SEP]")
tokens.append("[CLS]")
(tokens, token_boundary) = create_lm_predictions(
tokens, vocab_words, rng)
instance = LMTrainingInstance(
tokens=tokens,
token_boundary=token_boundary,
)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
def _is_start_piece_sp(piece):
"""Check if the current word piece is the starting piece (sentence piece)."""
special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~'))
special_pieces.add(u"€".encode("utf-8"))
special_pieces.add(u"£".encode("utf-8"))
# Note(mingdachen):
# For foreign characters, we always treat them as a whole piece.
english_chars = set(list("abcdefghijklmnopqrstuvwxyz"))
if (six.ensure_str(piece).startswith("▁") or
six.ensure_str(piece).startswith("<") or piece in special_pieces or
not all([str(i).lower() in english_chars.union(special_pieces)
for i in piece])):
return True
else:
return False
def _is_start_piece_bert(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not six.ensure_str(piece).startswith("##")
def is_start_piece(piece):
if FLAGS.spm_model_file:
return _is_start_piece_sp(piece)
else:
return _is_start_piece_bert(piece)
def create_lm_predictions(tokens, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
token_boundary[i] = 1
continue
    # Whole Word Masking means that we mask all of the wordpieces
    # corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (FLAGS.do_whole_word_mask and not is_start_piece(token)):
pass
else:
if is_start_piece(token):
token_boundary[i] = 1
output_tokens = list(tokens)
return (output_tokens, token_boundary)
def truncate_seq(tokens_a, max_num_tokens, rng):
  """Truncates a sequence to a maximum sequence length."""
while True:
total_length = len(tokens_a)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
print('Create tokenizer')
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
print('Start reading input files')
tf.logging.info("*** Reading from input files ***")
for input_file in input_files:
tf.logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
instances = create_training_instances(
input_files, tokenizer, FLAGS.max_seq_length,
FLAGS.short_seq_prob,
rng)
print('Number of instance = ' + str(len(instances)))
tf.logging.info("number of instances: %i", len(instances))
output_files = FLAGS.output_file.split(",")
tf.logging.info("*** Writing to output files ***")
for output_file in output_files:
tf.logging.info(" %s", output_file)
print('Writing output files')
write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, output_files)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
tf.app.run()
| 33.076923 | 117 | 0.692798 | 485 | 0.03637 | 0 | 0 | 0 | 0 | 0 | 0 | 4,915 | 0.368579 |
1f6706c7305503eebcfb4dc0e941eec4fd99c3fd
| 3,260 |
py
|
Python
|
src/libcore/tests/test_qmc.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 7 |
2020-07-24T03:19:59.000Z
|
2022-03-30T10:56:12.000Z
|
src/libcore/tests/test_qmc.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 1 |
2021-04-07T22:30:23.000Z
|
2021-04-08T00:55:36.000Z
|
src/libcore/tests/test_qmc.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 2 |
2020-06-08T08:25:09.000Z
|
2021-04-05T22:13:08.000Z
|
import enoki as ek
import pytest
import mitsuba
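# r_inv computes the radical inverse of `index` in base `divisor`: the base-`divisor`
# digits of `index`, mirrored about the radix point (the van der Corput construction
# underlying Halton sequences).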
def r_inv(divisor, index):
factor = 1
value = 0
recip = 1.0 / divisor
while index != 0:
next_val = index // divisor
factor *= recip
value = value * divisor + index - next_val * divisor
index = next_val
return value * factor
def gen_primes():
# http://code.activestate.com/recipes/117119/
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
def test01_radical_inverse(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
v = RadicalInverse()
assert(v.eval(0, 0) == 0)
assert(v.eval(0, 1) == 0.5)
assert(v.eval(0, 2) == 0.25)
assert(v.eval(0, 3) == 0.75)
for index, prime in enumerate(gen_primes()):
if index >= 1024:
break
for i in range(10):
assert ek.abs(r_inv(prime, i) - v.eval(index, i)) < 1e-7
@pytest.mark.skip(reason="RadicalInverse has no vectorized bindings")
def test02_radical_inverse_vectorized(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
v = RadicalInverse()
for index, prime in enumerate(gen_primes()):
if index >= 1024:
break
result = v.eval(index, ek.arange(10, dtype=ek.uint64))
for i in range(len(result)):
assert ek.abs(r_inv(prime, i) - result[i]) < 1e-7
def test03_faure_permutations(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
p = RadicalInverse()
assert (p.permutation(0) == [0, 1]).all()
assert (p.permutation(1) == [0, 1, 2]).all()
assert (p.permutation(2) == [0, 3, 2, 1, 4]).all()
assert (p.permutation(3) == [0, 2, 5, 3, 1, 4, 6]).all()
def test04_scrambled_radical_inverse(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
from mitsuba.core import math
p = RadicalInverse(10, -1)
assert (p.permutation(0) == [0, 1]).all()
values = [
0.0, 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875, 0.0625, 0.5625,
0.3125, 0.8125, 0.1875, 0.6875, 0.4375
]
for i in range(len(values)):
assert(p.eval_scrambled(0, i) == values[i])
p = RadicalInverse(10, 3)
assert (p.permutation(0) == [1, 0]).all()
values_scrambled = [
math.OneMinusEpsilon,
0.5, 0.75, 0.25, 0.875, 0.375, 0.625, 0.125, 0.9375, 0.4375,
0.6875, 0.1875, 0.8125, 0.3125, 0.5625
]
for i in range(len(values_scrambled)):
assert(p.eval_scrambled(0, i) == values_scrambled[i])
@pytest.mark.skip(reason="RadicalInverse has no vectorized bindings")
def test05_scrambled_radical_inverse_vectorized(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
try:
from mitsuba.packet_rgb.core.qmc import RadicalInverseP
except ImportError:
pytest.skip("packet_rgb mode not enabled")
v = RadicalInverse()
v_p = RadicalInverseP()
for index in range(1024):
result = v_p.eval_scrambled(index, ek.arange(10, dtype=ek.uint64))
for i in range(len(result)):
assert ek.abs(v.eval_scrambled(index, i) - result[i]) < 1e-7
| 28.347826 | 74 | 0.60184 | 0 | 0 | 301 | 0.092331 | 1,045 | 0.320552 | 0 | 0 | 160 | 0.04908 |
1f69bfc2c5f28e5c08c2ff64bb83de310333e32a
| 14,656 |
py
|
Python
|
train.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
train.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
train.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
import warnings
import torch
from torch.utils.data.dataloader import DataLoader
from torch.optim import lr_scheduler
import numpy as np
from models import *
from dataloader import Aff2CompDataset, SubsetSequentialSampler, SubsetRandomSampler, Prefetcher
from tqdm import tqdm
import os
import time
from sklearn.metrics import f1_score, accuracy_score
from metrics import AccF1Metric, CCCMetric, MultiLabelAccF1
from collections import defaultdict
import opts
from utils import setup_seed, save_checkpoint, AverageMeter
import random
import logging
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
class RecorderMeter(object):
"""Computes and stores the minimum loss value and its epoch index"""
def __init__(self, total_epoch):
self.reset(total_epoch)
def reset(self, total_epoch):
self.total_epoch = total_epoch
self.current_epoch = 0
self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_accuracy = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
def update(self, idx, train_loss, train_acc, val_loss, val_acc):
self.epoch_losses[idx, 0] = train_loss * 50
self.epoch_losses[idx, 1] = val_loss * 50
self.epoch_accuracy[idx, 0] = train_acc
self.epoch_accuracy[idx, 1] = val_acc
self.current_epoch = idx + 1
def plot_curve(self, save_path):
title = 'the accuracy/loss curve of train/val'
dpi = 80
width, height = 1600, 800
legend_fontsize = 10
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
x_axis = np.array([i for i in range(self.total_epoch)]) # epochs
y_axis = np.zeros(self.total_epoch)
plt.xlim(0, self.total_epoch)
plt.ylim(0, 100)
interval_y = 5
interval_x = 1
plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))
plt.yticks(np.arange(0, 100 + interval_y, interval_y))
plt.grid()
plt.title(title, fontsize=20)
plt.xlabel('the training epoch', fontsize=16)
plt.ylabel('accuracy', fontsize=16)
y_axis[:] = self.epoch_accuracy[:, 0]
plt.plot(x_axis, y_axis, color='g', linestyle='-', label='train-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_accuracy[:, 1]
plt.plot(x_axis, y_axis, color='y', linestyle='-', label='valid-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 0]
plt.plot(x_axis, y_axis, color='g', linestyle=':', label='train-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 1]
plt.plot(x_axis, y_axis, color='y', linestyle=':', label='valid-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
# print('Curve was saved')
plt.close(fig)
class EarlyStopper(object):
def __init__(self, num_trials, save_path):
self.num_trials = num_trials
self.trial_counter = 0
self.best_accuracy = 0
self.save_path = save_path
os.makedirs(os.path.dirname(self.save_path), exist_ok=True)
def is_continuable(self, model, accuracy):
if accuracy > self.best_accuracy:
self.best_accuracy = accuracy
self.trial_counter = 0
torch.save(model.state_dict(), self.save_path)
return True
elif self.trial_counter + 1 < self.num_trials:
self.trial_counter += 1
return True
else:
return False
@torch.no_grad()
def evaluate(model, loader, loader_iter, device, num_step=1000):
model.eval()
bar = tqdm(range(int(num_step)), desc=f'Validation, {model.task}', colour='green', position=0, leave=False)
metric_ex = AccF1Metric(ignore_index=7)
metric_va = CCCMetric(ignore_index=-5.0)
metric_au = MultiLabelAccF1(ignore_index=-1)
total_loss = 0
scores = defaultdict()
for step in bar:
t1 = time.time()
try:
data = next(loader_iter)
except StopIteration as e:
print(e)
loader_iter = iter(loader)
break
t2 = time.time()
data_time = t2 - t1
label_ex = data['EX'].long().to(device)
label_ex[label_ex == -1] = 7
labels = {
'VA': data['VA'].float().to(device),
'AU': data['AU'].float().to(device),
'EX': label_ex,
}
x = {}
for modality in data:
x[modality] = data[modality].to(device)
result = model(x) # batchx22 12 + 8 + 2
logits_ex = result[:, 12:19]
logits_au = result[:, :12]
logits_va = result[:, 19:21] #tanh??
if model.task.lower() == 'ex':
loss = model.get_ex_loss(result, labels['EX'])
elif model.task.lower() == 'au':
loss = model.get_au_loss(result, labels['AU'])
elif model.task.lower() == 'va':
loss = model.get_va_loss(result, labels['VA'])
else:
losses = model.get_mt_loss(result, labels)
loss = losses[0] + losses[1] + losses[2]
total_loss += loss.item()
pred = torch.argmax(logits_ex, dim=1).detach().cpu().numpy().reshape(-1)
label = label_ex.detach().cpu().numpy().reshape(-1)
metric_ex.update(pred, label)
metric_va.update(y_pred=torch.tanh(logits_va).detach().cpu().numpy(), y_true=labels['VA'].detach().cpu().numpy())
metric_au.update(y_pred=np.round(torch.sigmoid(logits_au).detach().cpu().numpy()), y_true=labels['AU'].detach().cpu().numpy())
acc_ex = accuracy_score(y_true=label, y_pred=pred)
bar.set_postfix(data_fetch_time=data_time, batch_loss=loss.item(), avg_loss=total_loss / (step + 1), acc=acc_ex)
acc_ex, f1_ex = metric_ex.get()
acc_au, f1_au = metric_au.get()
scores['EX'] = {'EX:acc': acc_ex, 'f1': f1_ex, 'score': 0.67 * f1_ex + 0.33 * acc_ex}
scores['AU'] = {'AU:acc': acc_au, 'f1': f1_au, 'score': 0.5 * f1_au + 0.5 * acc_au}
scores['VA'] = {'VA:ccc_v': metric_va.get()[0],'ccc_a': metric_va.get()[1], 'score': metric_va.get()[2]}
model.train()
metric_va.clear()
metric_au.clear()
metric_ex.clear()
return scores, loader_iter
def train(args, model, dataset, optimizer, epochs, device):
early_stopper = EarlyStopper(num_trials=args['early_stop_step'], save_path=f'{args["checkpoint_path"]}/best.pth')
downsample_rate = args.get('downsample_rate')
downsample = np.zeros(len(dataset), dtype=int)
downsample[np.arange(0, len(dataset) - 1, downsample_rate)] = 1
start_epoch = 0
if args['resume'] == True:
start_epoch = args['start_epoch']
learning_rate = args['learning_rate']
    for epoch in range(start_epoch, epochs):
        if epoch == 30:
            learning_rate = learning_rate * 0.1
        if epoch == 60:
            learning_rate = learning_rate * 0.1
        # apply the (possibly decayed) learning rate to the optimizer's parameter groups
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
random.shuffle(downsample)
dataset.set_aug(True)
train_sampler = SubsetSequentialSampler(np.nonzero(dataset.train_ids*downsample)[0], shuffle=True)
train_loader = DataLoader(dataset, batch_size=args['batch_size'], sampler=train_sampler, num_workers=0,
pin_memory=False,
drop_last=True)
print('Training set length: ' + str(sum(dataset.train_ids*downsample)))
bar = tqdm(train_loader, desc=f'Training {model.task}, Epoch:{epoch}', colour='blue', position=0, leave=True)
logging.info(f'Training {model.task}, Epoch:{epoch}')
t1 = time.time()
total_loss, ex_loss_record,au_loss_record,va_loss_record = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
prefetcher = Prefetcher(bar)
data = prefetcher.next()
step = -1
while data is not None:
step += 1
t2 = time.time()
data_time = t2 - t1
optimizer.zero_grad()
label_ex = data['EX'].long().to(device)
label_ex[label_ex == -1] = 7
labels = {
'VA': data['VA'].float().to(device),
'AU': data['AU'].float().to(device),
'EX': label_ex,
}
# ids = data['Index'].long()
x = {}
for modality in data:
x[modality] = data[modality].to(device)
#x['clip'] = data['clip'].to(device)
#x['audio_features'] = data['audio_features'].to(device)
result = model(x) # batchx22 12 + 8 + 2
if model.task.lower() == 'ex':
loss = model.get_ex_loss(result, labels['EX'])
elif model.task.lower() == 'au':
loss = model.get_au_loss(result, labels['AU'])
elif model.task.lower() == 'va':
loss = model.get_va_loss(result, labels['VA'])
else:
losses = model.get_mt_loss(result, labels, normalize = False)
loss = 3*losses[0] + losses[1] + losses[2]
ex_loss_record.update(losses[0].item())
au_loss_record.update(losses[1].item())
va_loss_record.update(losses[2].item())
loss.backward()
optimizer.step()
total_loss.update(loss.item())
if model.task.lower() == 'all':
bar.set_postfix(total = total_loss.avg, ex=ex_loss_record.avg, au=au_loss_record.avg, va=va_loss_record.avg)
else:
bar.set_postfix(data_fetch_time=data_time, batch_loss=loss.item(), avg_loss=total_loss.avg)
t1 = time.time()
data = prefetcher.next()
logging.info(f'Total Loss,{total_loss.avg}, Ex:{ex_loss_record.avg}, AU:{au_loss_record.avg}, VA:{va_loss_record.avg}')
save_checkpoint(state=model.state_dict(), filepath=args["checkpoint_path"], filename='latest.pth')
#if step % eval_step == 0 and step != 0:
dataset.set_aug(False)
val_sampler = SubsetSequentialSampler(np.nonzero(dataset.val_ids*downsample)[0], shuffle=True)
val_loader = DataLoader(dataset, batch_size=args['batch_size'] * 4, sampler=val_sampler, num_workers=0,
pin_memory=False,
drop_last=True)
print('Validation set length: ' + str(sum(dataset.val_ids*downsample)))
val_loader_iter = iter(val_loader)
scores, val_loader_iter = evaluate(model, val_loader, val_loader_iter, device,
num_step=int(sum(dataset.val_ids*downsample)/(args['batch_size']*4)))
score_str = ''
if model.task == 'ALL':
total_score = 0
for task in ['EX','AU','VA']:
score_dict = scores[task]
for k, v in score_dict.items():
score_str += f'{k}:{v:.3},'
total_score = total_score + score_dict["score"]
else:
score_dict = scores[model.task]
for k, v in score_dict.items():
score_str += f'{k}:{v:.3}, '
total_score = score_dict["score"]
print(f'Training,{args["task"]}, Epoch:{epoch}, {score_str}')
logging.info(f'Training,{args["task"]}, Epoch:{epoch}, {score_str}')
if not early_stopper.is_continuable(model, total_score):
print(f'validation: best score: {early_stopper.best_accuracy}')
logging.info(f'validation: best score: {early_stopper.best_accuracy}')
break
def main(args):
setup_seed(args.get('seed'))
task = args.get('task')
print(f'Task: {task}')
print('Model:',opt['model_name'])
print('Modality:',opt['modality'])
print('clip size',opt['n_frames'],opt['image_size'])
log_file_name = opt['model_name']+'_'+opt['modality']+'_log.txt'
logging.basicConfig(filename=os.path.join(args['exp_dir'],log_file_name), level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger()
# model
if opt['model_name'] == 'avformer':
model = TwoStreamAuralVisualFormer(modality=args['modality'], task=task)
elif opt['model_name'] == 'vformer':
model = VisualFormer(modality=args['modality'], task=task)
elif opt['model_name'] == 'vggformer':
model = VGGVisualFormer(modality=args['modality'], task=task)
elif opt['model_name'] == 'emonet':
model = ImageEmoNetModel(modality=args['modality'], task=task)
elif opt['model_name'] == 'tformer':
model = SpatialTemporalFormer(modality=args['modality'], task=task)
elif opt['model_name'] == 'sformer':
model = SpatialFormer(modality=args['modality'], task=task)
elif opt['model_name'] == 'dsformer':
model = DualSpatialFormer(modality=args['modality'], task=task)
elif opt['model_name'] == 'i3d':
model = VisualI3DModel(modality=args['modality'], task=task)
elif opt['model_name'] == 'mc3d':
model = VisualMC3DModel(modality=args['modality'], task=task)
elif opt['model_name'] == 'van':
model = SpatialVAN(modality=args['modality'], task=task)
elif opt['model_name'] == 'audio':
model = Audio_only(modality=args['modality'], task=task)
else:
model = ImageResNetModel(task)
modes = model.modes
model = model.to(torch.cuda.current_device())
args['checkpoint_path'] = os.path.join(args['exp_dir'], 'pretrain')
if args['resume'] and os.path.exists(f'{args["checkpoint_path"]}/latest.pth'):
print('Loading weight from:{}'.format(f'{args["checkpoint_path"]}/latest.pth'))
pretrained_dict = torch.load(f'{args["checkpoint_path"]}/latest.pth')
model.load_state_dict(pretrained_dict,strict= False)
model.train()
# load dataset (first time this takes longer)
dataset = Aff2CompDataset(args)
dataset.set_modes(modes)
optimizer = torch.optim.Adam(params=model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
#train(args, model, train_loader, val_loader, optimizer, epochs=args['epochs'], device=torch.cuda.current_device())
train(args, model, dataset, optimizer, epochs=args['epochs'], device=torch.cuda.current_device())
if __name__ == '__main__':
opt = opts.parse_opt()
torch.cuda.set_device(opt.gpu_id)
opt = vars(opt)
main(opt)
| 42.604651 | 134 | 0.60917 | 3,170 | 0.216294 | 0 | 0 | 2,707 | 0.184703 | 0 | 0 | 2,354 | 0.160617 |
1f6aef11602a1e5873d6782928e3986a359ca69a
| 123 |
py
|
Python
|
carin/help.py
|
fiskurgit/Carin
|
41f5e8003d169f1f0454e7b674daf341d238f061
|
[
"Unlicense"
] | null | null | null |
carin/help.py
|
fiskurgit/Carin
|
41f5e8003d169f1f0454e7b674daf341d238f061
|
[
"Unlicense"
] | null | null | null |
carin/help.py
|
fiskurgit/Carin
|
41f5e8003d169f1f0454e7b674daf341d238f061
|
[
"Unlicense"
] | null | null | null |
def show_help():
print("Carbon Intensity API Help")
def show_bad_argument_help():
print("app -e generation")
| 11.181818 | 38 | 0.674797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.373984 |
1f6c4abc3f836e517354bb086cbc395ddcb5e9b2
| 251 |
py
|
Python
|
utils.py
|
Ls-Dai/Pytorch_FL_CNN
|
59bfd017dc21a4d11e7dafb382cdae3c57086071
|
[
"MIT"
] | 3 |
2021-03-22T01:54:43.000Z
|
2021-03-28T10:48:35.000Z
|
utils.py
|
Ls-Dai/Pytorch_FL_CNN
|
59bfd017dc21a4d11e7dafb382cdae3c57086071
|
[
"MIT"
] | null | null | null |
utils.py
|
Ls-Dai/Pytorch_FL_CNN
|
59bfd017dc21a4d11e7dafb382cdae3c57086071
|
[
"MIT"
] | null | null | null |
import os
def dir_setup(path):
if not os.path.exists(path):
os.makedirs(path)
"""def dir_setup(path):
if not os.path.isdir(path):
dir_setup(os.path.split(path)[0])
else:
return
os.mkdir(path)"""
| 17.928571 | 42 | 0.561753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.593625 |
1f6e05df1cdce06badd2f76abc8f4fd50f6739ab
| 212 |
py
|
Python
|
1011 - Esfera.py
|
le16bits/URI---Python
|
9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db
|
[
"Apache-2.0"
] | null | null | null |
1011 - Esfera.py
|
le16bits/URI---Python
|
9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db
|
[
"Apache-2.0"
] | null | null | null |
1011 - Esfera.py
|
le16bits/URI---Python
|
9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''Author: Alessandra Souza
Date: 05/05/2017
Objective: Calculate the volume of a sphere.
URI Online Judge ID: 1011'''
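# Volume of a sphere: V = (4/3) * pi * R**3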
R = float(input())
vol = (4.0 / 3) * 3.14159 * R ** 3
print("VOLUME = %.3f" % vol)
| 21.2 | 42 | 0.646226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.716981 |
1f6f98de6468e928dedff399ac6db135e5b7f2ec
| 18,002 |
py
|
Python
|
src/agent.py
|
Lukeeeeee/DataCenterJobSchedulingSolution
|
9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3
|
[
"MIT"
] | null | null | null |
src/agent.py
|
Lukeeeeee/DataCenterJobSchedulingSolution
|
9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3
|
[
"MIT"
] | null | null | null |
src/agent.py
|
Lukeeeeee/DataCenterJobSchedulingSolution
|
9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import tensorlayer as tl
import datetime
from log import LOG_PATH
import os
import src.visualization as vis
from src.config import Config as con
import tensorflow.contrib as tfcontrib
server_count = con.server_count
server_state_dim = con.server_state_dim
total_server_state_dim = con.total_server_state_dim
server_feature_dim = con.server_feature_dim
job_state_dim = con.job_state_dim
dc_state_dim = con.dc_state_dim
action_dim = con.action_dim
# NET SIZE
server_feature_layer1_size = con.server_feature_layer1_size
q_net_layer1_size = con.q_net_layer1_size
q_net_layer2_size = con.q_net_layer2_size
# TRAIN PARAMETERS
gamma = con.gamma
learning_rate = con.learning_rate
batch_size = con.batch_size
epsilon = con.epsilon
update_target_q_every_iter = con.update_target_q_every_iter
ti = datetime.datetime.now()
log_dir = (LOG_PATH + '/' + str(ti.month) + '-' + str(ti.day) + '-' + str(ti.hour) + '-' + str(ti.minute) + '-' + str(
ti.second) + '/')
if os.path.exists(log_dir) is False:
os.mkdir(log_dir)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name):
tf.summary.scalar('value', var)
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
# tf.summary.scalar('max', tf.reduce_max(var))
# tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
class Agent(object):
def __init__(self):
self.sess = tf.InteractiveSession()
self.server_state_input = tf.placeholder(tf.float32, shape=[None, server_count, server_state_dim])
# self.server_state_input_flatten = contrib.layers.flatten(inputs=self.server_state_input)
self.job_state_input = tf.placeholder(tf.float32, shape=[None, job_state_dim])
self.dc_state_input = tf.placeholder(tf.float32, shape=[None, dc_state_dim])
self.action_input = tf.placeholder(tf.uint8, shape=[None])
self.reward_input = tf.placeholder(tf.float32, shape=[None, server_count])
self.action_is_valid = tf.placeholder(tf.float32, shape=[None, server_count])
self.target_q_off_by_action_input = tf.placeholder(tf.float32, shape=[None, server_count])
self.action_one_hot = tf.one_hot(indices=self.action_input, depth=server_count)
self.q_net = self.create_q_network()
self.q = self.q_net.outputs
self.target_q_net = self.create_q_network(prefix='TARGET_')
self.target_q = self.target_q_net.outputs
self.update_target_q_op = self.create_target_update_op_list()
# Define greedy policy to choose a valid action
temp = tf.multiply(x=self.action_is_valid,
y=tf.constant(1000.0, shape=[batch_size, server_count]))
self.temp = tf.add(x=self.q, y=temp)
self.greedy_policy_action = tf.argmax(self.temp, axis=1)
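        # Adding a large constant (1000) to each valid action's Q-value ensures the argmax
        # above only selects actions flagged as valid, as long as the Q-values themselves
        # stay well below 1000.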
# Define op for q and target q with corresponding action
self.q_off_by_action = tf.multiply(self.q, tf.cast(self.action_one_hot, tf.float32))
# self.q_off_by_action = self.q
self.target_q_off_by_action = tf.multiply(self.reward_input + gamma * self.q,
tf.cast(self.action_one_hot, tf.float32))
# self.target_q_off_by_action = self.reward_input + gamma * self.target_q,
self.loss, self.optimizer, self.optimize_op, self.compute_gradients_op = self.create_training_method(
target_q_off_by_action=self.target_q_off_by_action_input)
self.gradients = self.optimizer.compute_gradients(loss=self.loss)
# Some op for test and visualization
self.max_q = tf.reduce_max(self.q, axis=1)
self.action = tf.argmax(self.q, axis=1)
self.mean_max_q = tf.reduce_mean(self.max_q)
variable_summaries(self.mean_max_q, 'mean_q')
# variable_summaries(self.compute_gradients_op, 'gradients')
# variable_summaries(self.loss, 'loss')
self.merged_summary = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
# Init op
tl.layers.initialize_global_variables(sess=self.sess)
self.q_net.print_params()
self.q_net.print_layers()
# def eplison_greedy_action_selection(self):
# temp = tf.multiply(x=self.action_is_valid,
# y=tf.constant(1000.0, shape=[batch_size, server_count]))
# self.temp = tf.add(x=self.q, y=temp)
# unpacked_q = tf.unstack(self.temp, axis=0)
#
# greedy_policy_action_list = []
#
# for tensor in unpacked_q:
# if np.random.uniform(0, 1.0) < epsilon:
# greedy_policy_action_list.append(tf.argmax(tensor, axis=1))
# else:
# k = np.random.randint(0, server_count)
# greedy_policy_action_list.append(k)
# self.greedy_policy_action = tf.argmax(self.temp, axis=1)
def define_server_feature_extraction_net(self, input, reuse=False, prefix=''):
with tf.variable_scope("SEVER_STATE", reuse=reuse):
tl.layers.set_name_reuse(reuse)
server_feature_extraction_net = tl.layers.InputLayer(inputs=input,
name=prefix + 'SERVER_STATE_INPUT')
server_feature_extraction_net = tl.layers.DenseLayer(layer=server_feature_extraction_net,
n_units=server_feature_layer1_size,
act=tf.nn.leaky_relu,
name=prefix + 'SERVER_STATE_LAYER_1')
server_feature_extraction_net = tl.layers.DenseLayer(layer=server_feature_extraction_net,
n_units=server_feature_dim,
name=prefix + 'SERVER_STATE_LAYER_2')
return server_feature_extraction_net
def create_q_network(self, prefix=''):
server_state_tensor_list = tf.split(self.server_state_input, server_count, axis=1)
server_feature_tensor_layer_list = []
for i in range(server_count):
tensor = tf.reshape(server_state_tensor_list[i], shape=(-1, server_state_dim))
if i == 0:
reuse = False
else:
reuse = True
server_feature_tensor_layer_list.append(self.define_server_feature_extraction_net(input=tensor,
reuse=reuse,
prefix=prefix))
job_input_layer = tl.layers.InputLayer(inputs=self.job_state_input,
name=prefix + 'JOB_STATE_INPUT')
dc_input_layer = tl.layers.InputLayer(inputs=self.dc_state_input,
name=prefix + 'DC_STATE_INPUT')
all_state_layer = tl.layers.ConcatLayer(
layer=server_feature_tensor_layer_list + [job_input_layer, dc_input_layer],
concat_dim=1,
name=prefix + 'SERVER_FEATURE')
q_net = tl.layers.DenseLayer(layer=all_state_layer,
n_units=q_net_layer1_size,
act=tf.nn.leaky_relu,
name=prefix + 'Q_NET_LAYER_1')
q_net = tl.layers.DenseLayer(layer=q_net,
n_units=q_net_layer2_size,
act=tf.nn.leaky_relu,
name=prefix + 'Q_NET_LAYER_2')
q_net = tl.layers.DenseLayer(layer=q_net,
n_units=server_count,
name=prefix + 'Q_NET_LAYER_3')
return q_net
def create_training_method(self, target_q_off_by_action):
loss = tf.reduce_mean(tf.squared_difference(target_q_off_by_action, self.q_off_by_action))
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.3)
optimize = optimizer.minimize(loss=loss, var_list=self.q_net.all_params)
compute_gradients = optimizer.compute_gradients(loss=loss, var_list=self.q_net.all_params)
regularizer = tfcontrib.layers.l1_l2_regularizer()
loss = loss + tfcontrib.layers.apply_regularization(regularizer, weights_list=self.q_net.all_params)
return loss, optimizer, optimize, compute_gradients
def create_target_update_op_list(self):
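        # Hard update: copy each online Q-network parameter into the matching
        # target-network parameter (the standard DQN target synchronization).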
op = []
for (q_para, target_q_para) in zip(self.q_net.all_params, self.target_q_net.all_params):
op.append(target_q_para.assign(q_para))
return op
def eval_some_tensor(self, tensor, mini_batch):
# For test and visual
res = self.sess.run(fetches=[tensor],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION'],
})
return res
def eval_q_off_by_action(self, state_dict, action):
return self.sess.run(fetches=[self.q_off_by_action],
feed_dict={
self.server_state_input: state_dict['SERVER_STATE'],
self.job_state_input: state_dict['JOB_STATE'],
self.dc_state_input: state_dict['DC'],
self.action_input: action
})
def eval_greedy_policy_action(self, state_dict):
res, temp = self.sess.run(fetches=[self.greedy_policy_action, self.temp],
feed_dict={
self.server_state_input: state_dict['SERVER_STATE'],
self.job_state_input: state_dict['JOB_STATE'],
self.dc_state_input: state_dict['DC'],
self.action_is_valid: state_dict['VALID_ACTION']
})
return np.reshape(np.array(res), [-1])
def eval_action(self, state_dict):
# For test and visual
res = self.sess.run(fetches=[self.action],
feed_dict={
self.server_state_input: state_dict['SERVER_STATE'],
self.job_state_input: state_dict['JOB_STATE'],
self.dc_state_input: state_dict['DC'],
self.action_is_valid: state_dict['VALID_ACTION']
})
return np.reshape(np.array(res), [-1])
def eval_target_q_off_by_action(self, next_state_dict, next_action, reward):
res = self.sess.run(fetches=[self.target_q_off_by_action],
feed_dict={
self.reward_input: reward,
self.server_state_input: next_state_dict['SERVER_STATE'],
self.job_state_input: next_state_dict['JOB_STATE'],
self.dc_state_input: next_state_dict['DC'],
self.action_input: next_action
})
return np.reshape(np.array(res), newshape=[-1, server_count])
def eval_gradients(self, mini_batch):
next_action = self.eval_greedy_policy_action(state_dict=mini_batch['NEXT_STATE'])
target_q_off_by_action = self.eval_target_q_off_by_action(next_state_dict=mini_batch['NEXT_STATE'],
next_action=next_action,
reward=mini_batch['REWARD'])
gradients = self.sess.run(fetches=[self.compute_gradients_op],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION'],
self.target_q_off_by_action_input: target_q_off_by_action
})
return gradients
def train(self, mini_batch):
next_action = self.eval_greedy_policy_action(state_dict=mini_batch['NEXT_STATE'])
target_q_off_by_action = self.eval_target_q_off_by_action(next_state_dict=mini_batch['NEXT_STATE'],
next_action=next_action,
reward=mini_batch['REWARD'])
_, loss = self.sess.run(fetches=[self.optimize_op, self.loss],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION'],
self.target_q_off_by_action_input: target_q_off_by_action
})
# gradients = self.sess.run(fetches=[self.compute_gradients_op],
# feed_dict={
# self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
# self.job_state_input: mini_batch['STATE']['JOB_STATE'],
# self.dc_state_input: mini_batch['STATE']['DC'],
# self.action_input: mini_batch['ACTION'],
# self.target_q_off_by_action_input: target_q_off_by_action
# })
# print(target_q_off_by_action)
# print(self.eval_some_tensor(tensor=self.q_off_by_action, mini_batch=mini_batch))
# print(self.eval_some_tensor(tensor=self.reward_input, mini_batch=mini_batch))
# print(self.eval_some_tensor(tensor=self.target_q_off_by_action))
# print (gradients)
return loss
def update_target_net(self):
res = self.sess.run(self.update_target_q_op)
# res = self.sess.run(self.target_q_net.all_params[0])
# print(res)
def do_summary(self, mini_batch, epoch):
summary = self.sess.run(fetches=[self.merged_summary, self.max_q, self.action],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION']
})
self.file_writer.add_summary(summary=summary[0], global_step=epoch)
training_data_list = []
def do_print(test_batch, epoch, iter, print_flag=False):
    global training_data_list
server_state = np.array(test_batch['STATE']['SERVER_STATE'])
action = a.eval_action(state_dict=test_batch['STATE'])
q = a.eval_some_tensor(a.q, mini_batch=test_batch)[0]
q_off_by_action = a.eval_some_tensor(tensor=a.q_off_by_action, mini_batch=test_batch)
next_action = a.eval_greedy_policy_action(state_dict=test_batch['NEXT_STATE'])
target_q_off_by_action = a.eval_target_q_off_by_action(next_state_dict=test_batch['NEXT_STATE'],
next_action=next_action,
reward=test_batch['REWARD'])
grad = a.eval_gradients(test_batch)
if print_flag is True:
print("choosed action", action)
print("Q", q)
print("Input Action", test_batch['ACTION'])
print("Q off by action", q_off_by_action)
print ("target Q off by action", target_q_off_by_action)
dict = {
'EPOCH': epoch,
'ITER': iter,
'SERVER_STATE': server_state,
'ACTION': action,
'Q': q
}
training_data_list.append(dict)
pass
if __name__ == '__main__':
from src.environment import Environment
global training_data_list
import src.visualization as vis
a = Agent()
env = Environment(file_name="1-21-1-21-57.data")
batch_iter = con.batch_iter
epoch = con.epoch
for T in range(epoch):
print("Epoch %d" % T)
total_loss = 0.0
for i in range(batch_iter):
if i % update_target_q_every_iter == 0:
a.update_target_net()
data_batch = env.return_mini_batch(i, batch_size)
loss = a.train(mini_batch=data_batch)
total_loss = total_loss + loss
if T % con.save_data_every_epoch == 0:
do_print(test_batch=data_batch, epoch=T, iter=i, print_flag=True)
print("Aver loss = %f" % (total_loss / batch_iter))
res = np.array(training_data_list)
np.save(file=log_dir + '/training_data', arr=res)
vis.visual(res)
| 47.750663 | 118 | 0.575436 | 14,204 | 0.789023 | 0 | 0 | 0 | 0 | 0 | 0 | 3,149 | 0.174925 |
1f704e7278b9ef779559e766cacb1ef676546a1a
| 194 |
py
|
Python
|
programmers/lv3/2n_tiling.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
programmers/lv3/2n_tiling.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
programmers/lv3/2n_tiling.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
# 2 * n tiling
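# dp[i] counts the tilings of a 2 x i board with 2 x 1 tiles: the last column is either
# one vertical tile (leaving a 2 x (i-1) board) or two horizontal tiles (leaving 2 x (i-2)),
# so dp[i] = dp[i-1] + dp[i-2], taken modulo 1,000,000,007.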
def solution(n):
dp = [0] * 60001
dp[0], dp[1] = 1, 1
for i in range(2, n + 1):
dp[i] = (dp[i - 1] + dp[i - 2]) % 1000000007
return dp[n]
print(solution(4))
| 19.4 | 52 | 0.469072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.085 |
1f7102de5fec932b92f1cdaf56485f914e53929e
| 79 |
py
|
Python
|
pyup/settings.py
|
Callek/pyup
|
e29014320accdca2947b9e18c215d2144752081a
|
[
"MIT"
] | 445 |
2016-01-14T09:19:26.000Z
|
2022-03-16T13:19:33.000Z
|
pyup/settings.py
|
Callek/pyup
|
e29014320accdca2947b9e18c215d2144752081a
|
[
"MIT"
] | 387 |
2015-12-28T09:54:32.000Z
|
2022-01-04T00:45:00.000Z
|
pyup/settings.py
|
Callek/pyup
|
e29014320accdca2947b9e18c215d2144752081a
|
[
"MIT"
] | 96 |
2016-01-19T19:25:00.000Z
|
2021-09-30T18:22:02.000Z
|
api_key = None
def configure(key=None):
global api_key
api_key = key
| 11.285714 | 24 | 0.670886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1f746fdd19456f3c70b06319cd8440bf69081a77
| 190 |
py
|
Python
|
setup.py
|
alexisjihyeross/actionable-recourse
|
a00a0221484d1cf66ff6c0bcba6aaca2220bf9d1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
alexisjihyeross/actionable-recourse
|
a00a0221484d1cf66ff6c0bcba6aaca2220bf9d1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
alexisjihyeross/actionable-recourse
|
a00a0221484d1cf66ff6c0bcba6aaca2220bf9d1
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="Recourse",
version="0.1.1",
packages=find_packages(),
install_requires=open('requirements.txt').read().split('\n')
)
| 23.75 | 64 | 0.689474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.205263 |
1f74a2f22700c0cecd865a836091b95cf438f84d
| 536 |
py
|
Python
|
ch06/data.py
|
stoneflyop1/py_machine_learning
|
18fd635d312f957ca4fcc23d856a1bcd4cf95f48
|
[
"MIT"
] | null | null | null |
ch06/data.py
|
stoneflyop1/py_machine_learning
|
18fd635d312f957ca4fcc23d856a1bcd4cf95f48
|
[
"MIT"
] | null | null | null |
ch06/data.py
|
stoneflyop1/py_machine_learning
|
18fd635d312f957ca4fcc23d856a1bcd4cf95f48
|
[
"MIT"
] | null | null | null |
import pandas as pd
#####################
# Load Dataset
# https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data
df = pd.read_csv('../data/wdbc.data', header=None)
from sklearn.preprocessing import LabelEncoder
X = df.loc[:, 2:].values
y = df.loc[:,1].values
le = LabelEncoder()
y = le.fit_transform(y)
print(repr(le.transform(['M', 'B'])))
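# LabelEncoder assigns integer labels in sorted order, so 'B' (benign) -> 0 and
# 'M' (malignant) -> 1; the print above shows the mapping for ['M', 'B'] as [1, 0].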
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.20, random_state=1)
| 28.210526 | 93 | 0.699627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.285448 |
1f75dc40de3440e94dfc62ec31434b5e0206507e
| 733 |
py
|
Python
|
src/tga_to_jpg.py
|
NicolasGrosjean/HoI4_Stats
|
b2b6341e8a0b400255302b277407ea33c1a9833f
|
[
"MIT"
] | null | null | null |
src/tga_to_jpg.py
|
NicolasGrosjean/HoI4_Stats
|
b2b6341e8a0b400255302b277407ea33c1a9833f
|
[
"MIT"
] | null | null | null |
src/tga_to_jpg.py
|
NicolasGrosjean/HoI4_Stats
|
b2b6341e8a0b400255302b277407ea33c1a9833f
|
[
"MIT"
] | null | null | null |
import argparse
import os
from PIL import Image
def get_args():
parser = argparse.ArgumentParser(description='Transform tga files to jpg')
parser.add_argument('input_dir', type=str, help='Path of input directory containing tga files')
parser.add_argument('output_dir', type=str, help='Path of output directory containing jpg files')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
os.makedirs(args.output_dir, exist_ok=True)
for file in os.listdir(args.input_dir):
if file.endswith('.tga'):
im = Image.open(os.path.join(args.input_dir, file))
rgb_im = im.convert('RGB')
rgb_im.save(os.path.join(args.output_dir, file[:-4] + '.jpg'))
| 34.904762 | 101 | 0.682128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.233288 |
1f7833d48966d1ed49519df9d13f101196d3956c
| 3,057 |
py
|
Python
|
db/csvs_test_examples/project/project_availability/project_availability_types/doc.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | 44 |
2020-10-27T19:05:44.000Z
|
2022-03-22T17:17:37.000Z
|
db/csvs_test_examples/project/project_availability/project_availability_types/doc.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | 67 |
2020-10-08T22:36:53.000Z
|
2022-03-22T22:58:33.000Z
|
db/csvs_test_examples/project/project_availability/project_availability_types/doc.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | 21 |
2020-10-08T23:23:48.000Z
|
2022-03-28T01:21:21.000Z
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
**Relevant tables:**
+--------------------------------+----------------------------------------------+
|:code:`scenarios` table column |:code:`project_availability_scenario_id` |
+--------------------------------+----------------------------------------------+
|:code:`scenarios` table feature |N/A |
+--------------------------------+----------------------------------------------+
|:code:`subscenario_` table |:code:`subscenarios_project_availability` |
+--------------------------------+----------------------------------------------+
|:code:`input_` tables |:code:`inputs_project_availability` |
+--------------------------------+----------------------------------------------+
All projects in a GridPath scenario must be assigned an *availability
type*, which determines whether their capacity is operational in each
timepoint in which the capacity exists. All implemented availability types are
listed in the :code:`mod_availability_types` table.
Each project's availability type is given in the
:code:`inputs_project_availability` table. The availability types currently
implemented include :code:`exogenous` (availability is determined outside of
a GridPath model via the data fed into it) and two endogenous types,
:code:`binary` and :code:`continuous`, that require certain inputs that
determine how availability is constrained in the GridPath model. See the
:ref:`project-availability-type-section-ref` section for more info. In
addition to the project availability types, the
:code:`inputs_project_availability` table contains the information for
how to find any additional data needed to determine project availability with
the :code:`exogenous_availability_scenario_id` and
:code:`endogenous_availability_scenario_id` columns for the exogenous and
endogenous types, respectively. The IDs in the former column are linked to the
data in the :code:`inputs_project_availability_exogenous` table and in the
latter column to the :code:`inputs_project_availability_endogenous` table.
For projects of the :code:`exogenous` availability type, if the value in the
:code:`exogenous_availability_scenario_id` column is NULL, no availability
capacity derate is applied by GridPath. For projects of a :code:`binary` or
:code:`continuous` availability type, a value in the
:code:`endogenous_availability_scenario_id` column is required.
"""
if __name__ == "__main__":
print(__doc__)
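    # Illustrative sketch of how these tables might be queried (assuming a SQLite
    # database built with this schema; the file name and some column names below are
    # assumptions, not taken from the docstring above):
    #   import sqlite3
    #   conn = sqlite3.connect("io.db")
    #   cur = conn.execute(
    #       "SELECT project, availability_type, exogenous_availability_scenario_id, "
    #       "endogenous_availability_scenario_id FROM inputs_project_availability "
    #       "WHERE project_availability_scenario_id = ?", (1,))
    #   print(cur.fetchall())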
| 51.813559 | 81 | 0.669284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,005 | 0.98299 |
1f78bf747e413822fce9fdf17d1c1fc1b0c7a165
| 3,052 |
py
|
Python
|
src/construction_finder/coderack.py
|
juliakzn/construction_finder
|
92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8
|
[
"MIT"
] | null | null | null |
src/construction_finder/coderack.py
|
juliakzn/construction_finder
|
92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8
|
[
"MIT"
] | null | null | null |
src/construction_finder/coderack.py
|
juliakzn/construction_finder
|
92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8
|
[
"MIT"
] | null | null | null |
import logging
import random
from typing import Dict, List, Tuple, Union
from construction_finder import codelets, frame
logger = logging.getLogger(f"{__name__}")
class SpinResult:
def __init__(
self,
temp_modifier: float,
workspace_modifiers: Union[List[codelets.WorkSpaceModifier], None] = None,
new_active_frames: Union[Tuple[str, frame.Frame], None] = None,
):
self.temp_modifier = temp_modifier
self.workspace_modifiers = workspace_modifiers
self.new_active_frames = new_active_frames
def __str__(self):
return f"""<SpinResult>: temp_modifier={self.temp_modifier}, workspace_modifiers={self.workspace_modifiers}"""
class CodeRack:
def __init__(self, urgency_levels: List = [1, 2, 3, 4, 5]):
self.urgency_levels = urgency_levels
self.urgency_bins: Dict = dict()
for urgency_level in urgency_levels:
self.urgency_bins[urgency_level]: List = []
def add_codelet(self, codelet):
urgency_level = min(codelet.urgency_level, max(self.urgency_levels))
self.urgency_bins[urgency_level].append(codelet)
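    # assess_urgency builds a weighted pool in which each urgency level appears
    # (number of codelets in that bin) * level times, so the random choice in
    # spin_codelet favours bins that are both fuller and more urgent.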
def assess_urgency(self):
urgency = list()
for urgency_level in self.urgency_levels:
n = len(self.urgency_bins[urgency_level])
urgency.extend([urgency_level] * n * urgency_level)
return urgency
def empty(self):
total_codelets = 0
for urgency_level in self.urgency_levels:
n = len(self.urgency_bins[urgency_level])
total_codelets += n
return total_codelets == 0
def __contains__(self, codelet):
result = False
for urgency_level in self.urgency_levels:
if codelet in self.urgency_bins[urgency_level]:
result = True
return result
def spin_codelet(self):
logger.info("Spinning a new codelet")
urgency = self.assess_urgency()
logger.info(f"Current urgency = {urgency}")
workspace_modifiers = None
new_active_frames = None
if len(urgency) > 0:
chosen_bin = random.choice(urgency)
random_codelet_index = random.randint(
0, len(self.urgency_bins[chosen_bin]) - 1
)
chosen_codelet = self.urgency_bins[chosen_bin].pop(random_codelet_index)
logger.info(f"Chose codelet {chosen_codelet} from urgency bin {chosen_bin}")
codelet_result = chosen_codelet.run()
temp_modifier = codelet_result.temp_modifier
for new_codelet in codelet_result.new_codelets:
self.add_codelet(new_codelet)
if hasattr(codelet_result, "workspace_modifiers"):
workspace_modifiers = codelet_result.workspace_modifiers
if hasattr(codelet_result, "new_active_codelets"):
new_active_frames = codelet_result.new_active_frames
else:
temp_modifier = 0
return SpinResult(temp_modifier, workspace_modifiers, new_active_frames)
| 35.08046 | 118 | 0.656619 | 2,881 | 0.943971 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.090105 |
1f7babebb7eb438c1f113d421ddd85e8d4dce5ed
| 1,713 |
py
|
Python
|
configuration.py
|
ewellchen/STIN
|
0612a0b56d8caf1f8771ce13a3d8827d26a38f30
|
[
"MIT"
] | null | null | null |
configuration.py
|
ewellchen/STIN
|
0612a0b56d8caf1f8771ce13a3d8827d26a38f30
|
[
"MIT"
] | null | null | null |
configuration.py
|
ewellchen/STIN
|
0612a0b56d8caf1f8771ce13a3d8827d26a38f30
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Default configurations of model configuration, training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
from typing import Dict
CONFIG = {
'is_train': True,
'src_train_set_path': './train_data_source',
'tgt_train_set_path': './train_data_target',
'test_set_small_path': './test_data/low_resolution/P2-100',
'test_set_large_path': './test_data/high_resolution/P2-100',
'test_size_small': [72,88],
'test_size_large': [512, 512],
'checkpoint_dir': './checkpoint',
'result_dir_small': './results/STIN-small',
'result_dir_large': './results/STIN-large',
'resume': True,
'train_config': {'epoch': 5,
'batch_size': 4,
'device': 'cuda:0',
'learning_rate': 0.0005,},
'train_config_adv': {'epoch': 5,
'batch_size': 2,
'device': 'cuda:0',
'learning_rate': 0.0005, },
'test_config': {'batch_size': 1,
'device': 'cuda:0', },
}
CONFIG_NONLOCAL = {
'test_set_path': './test_data/low_resolution/P2-100',
'test_size': [72,88],
'result_dir': './result/non-local-small',
'test_config': {'batch_size': 1,
'device': 'cuda:0', },
}
CONFIG_UNETPP = {
'test_set_path': './test_data/low_resolution/P2-100',
'test_size': [72,88],
'result_dir': './result/unetpp-small',
'test_config': {'batch_size': 1,
'device': 'cuda:0', },
}
| 23.148649 | 65 | 0.549329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.535902 |
1f7d838dc8f88dc8eef76ebba1d92fdbf66fdaf5
| 54,959 |
py
|
Python
|
util/configurejson2cmake.py
|
chentoz/occQt
|
9738c26a18ac7757201342a69f95483d435a39fa
|
[
"MIT"
] | null | null | null |
util/configurejson2cmake.py
|
chentoz/occQt
|
9738c26a18ac7757201342a69f95483d435a39fa
|
[
"MIT"
] | null | null | null |
util/configurejson2cmake.py
|
chentoz/occQt
|
9738c26a18ac7757201342a69f95483d435a39fa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#############################################################################
##
## Copyright (C) 2018 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the plugins of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import json_parser
import posixpath
import re
import sys
from typing import Optional, Set
from textwrap import dedent
import os
from special_case_helper import SpecialCaseHandler
from helper import (
map_qt_library,
featureName,
map_platform,
find_3rd_party_library_mapping,
generate_find_package_info,
get_compile_test_dependent_library_mapping,
)
knownTests = set() # type: Set[str]
class LibraryMapping:
def __init__(self, package: str, resultVariable: str, appendFoundSuffix: bool = True) -> None:
self.package = package
self.resultVariable = resultVariable
self.appendFoundSuffix = appendFoundSuffix
def map_tests(test: str) -> Optional[str]:
testmap = {
"c99": "c_std_99 IN_LIST CMAKE_C_COMPILE_FEATURES",
"c11": "c_std_11 IN_LIST CMAKE_C_COMPILE_FEATURES",
"x86SimdAlways": "ON", # FIXME: Make this actually do a compile test.
"aesni": "TEST_subarch_aesni",
"avx": "TEST_subarch_avx",
"avx2": "TEST_subarch_avx2",
"avx512f": "TEST_subarch_avx512f",
"avx512cd": "TEST_subarch_avx512cd",
"avx512dq": "TEST_subarch_avx512dq",
"avx512bw": "TEST_subarch_avx512bw",
"avx512er": "TEST_subarch_avx512er",
"avx512pf": "TEST_subarch_avx512pf",
"avx512vl": "TEST_subarch_avx512vl",
"avx512ifma": "TEST_subarch_avx512ifma",
"avx512vbmi": "TEST_subarch_avx512vbmi",
"avx512vbmi2": "TEST_subarch_avx512vbmi2",
"avx512vpopcntdq": "TEST_subarch_avx512vpopcntdq",
"avx5124fmaps": "TEST_subarch_avx5124fmaps",
"avx5124vnniw": "TEST_subarch_avx5124vnniw",
"bmi": "TEST_subarch_bmi",
"bmi2": "TEST_subarch_bmi2",
"cx16": "TEST_subarch_cx16",
"f16c": "TEST_subarch_f16c",
"fma": "TEST_subarch_fma",
"fma4": "TEST_subarch_fma4",
"fsgsbase": "TEST_subarch_fsgsbase",
"gfni": "TEST_subarch_gfni",
"ibt": "TEST_subarch_ibt",
"libclang": "TEST_libclang",
"lwp": "TEST_subarch_lwp",
"lzcnt": "TEST_subarch_lzcnt",
"mmx": "TEST_subarch_mmx",
"movbe": "TEST_subarch_movbe",
"mpx": "TEST_subarch_mpx",
"no-sahf": "TEST_subarch_no_shaf",
"pclmul": "TEST_subarch_pclmul",
"popcnt": "TEST_subarch_popcnt",
"prefetchwt1": "TEST_subarch_prefetchwt1",
"prfchw": "TEST_subarch_prfchw",
"pdpid": "TEST_subarch_rdpid",
"rdpid": "TEST_subarch_rdpid",
"rdseed": "TEST_subarch_rdseed",
"rdrnd": "TEST_subarch_rdrnd",
"rtm": "TEST_subarch_rtm",
"shani": "TEST_subarch_shani",
"shstk": "TEST_subarch_shstk",
"sse2": "TEST_subarch_sse2",
"sse3": "TEST_subarch_sse3",
"ssse3": "TEST_subarch_ssse3",
"sse4a": "TEST_subarch_sse4a",
"sse4_1": "TEST_subarch_sse4_1",
"sse4_2": "TEST_subarch_sse4_2",
"tbm": "TEST_subarch_tbm",
"xop": "TEST_subarch_xop",
"neon": "TEST_subarch_neon",
"iwmmxt": "TEST_subarch_iwmmxt",
"crc32": "TEST_subarch_crc32",
"vis": "TEST_subarch_vis",
"vis2": "TEST_subarch_vis2",
"vis3": "TEST_subarch_vis3",
"dsp": "TEST_subarch_dsp",
"dspr2": "TEST_subarch_dspr2",
"altivec": "TEST_subarch_altivec",
"spe": "TEST_subarch_spe",
"vsx": "TEST_subarch_vsx",
"openssl11": '(OPENSSL_VERSION VERSION_GREATER_EQUAL "1.1.0")',
"libinput_axis_api": "ON",
"xlib": "X11_FOUND",
"wayland-scanner": "WaylandScanner_FOUND",
"3rdparty-hunspell": "VKB_HAVE_3RDPARTY_HUNSPELL",
"t9write-alphabetic": "VKB_HAVE_T9WRITE_ALPHA",
"t9write-cjk": "VKB_HAVE_T9WRITE_CJK",
}
if test in testmap:
return testmap.get(test, None)
if test in knownTests:
return f"TEST_{featureName(test)}"
return None
def cm(ctx, *output):
txt = ctx["output"]
if txt != "" and not txt.endswith("\n"):
txt += "\n"
txt += "\n".join(output)
ctx["output"] = txt
return ctx
def readJsonFromDir(path: str) -> str:
path = posixpath.join(path, "configure.json")
print(f"Reading {path}...")
assert posixpath.exists(path)
parser = json_parser.QMakeSpecificJSONParser()
return parser.parse(path)
def processFiles(ctx, data):
print(" files:")
if "files" in data:
for (k, v) in data["files"].items():
ctx[k] = v
return ctx
def parseLib(ctx, lib, data, cm_fh, cmake_find_packages_set):
newlib = find_3rd_party_library_mapping(lib)
if not newlib:
print(f' XXXX Unknown library "{lib}".')
return
if newlib.packageName is None:
print(f' **** Skipping library "{lib}" -- was masked.')
return
print(f" mapped library {lib} to {newlib.targetName}.")
# Avoid duplicate find_package calls.
if newlib.targetName in cmake_find_packages_set:
return
# If certain libraries are used within a feature, but the feature
# is only emitted conditionally with a simple condition (like
# 'on Windows' or 'on Linux'), we should enclose the find_package
# call for the library into the same condition.
emit_if = newlib.emit_if
# Only look through features if a custom emit_if wasn't provided.
if not emit_if:
for feature in data["features"]:
feature_data = data["features"][feature]
if (
"condition" in feature_data
and f"libs.{lib}" in feature_data["condition"]
and "emitIf" in feature_data
and "config." in feature_data["emitIf"]
):
emit_if = feature_data["emitIf"]
break
if emit_if:
emit_if = map_condition(emit_if)
cmake_find_packages_set.add(newlib.targetName)
find_package_kwargs = {"emit_if": emit_if}
if newlib.is_bundled_with_qt:
# If a library is bundled with Qt, it has 2 FindFoo.cmake
# modules: WrapFoo and WrapSystemFoo.
# FindWrapSystemFoo.cmake will try to find the 'Foo' library in
# the usual CMake locations, and will create a
# WrapSystemFoo::WrapSystemFoo target pointing to the library.
#
# FindWrapFoo.cmake will create a WrapFoo::WrapFoo target which
# will link either against the WrapSystemFoo or QtBundledFoo
# target depending on certain feature values.
#
# Because the following qt_find_package call is for
# configure.cmake consumption, we make the assumption that
# configure.cmake is interested in finding the system library
# for the purpose of enabling or disabling a system_foo feature.
find_package_kwargs["use_system_package_name"] = True
find_package_kwargs["module"] = ctx["module"]
cm_fh.write(generate_find_package_info(newlib, **find_package_kwargs))
if "use" in data["libraries"][lib]:
use_entry = data["libraries"][lib]["use"]
if isinstance(use_entry, str):
print(f"1use: {use_entry}")
cm_fh.write(f"qt_add_qmake_lib_dependency({newlib.soName} {use_entry})\n")
else:
for use in use_entry:
print(f"2use: {use}")
indentation = ""
has_condition = False
if "condition" in use:
has_condition = True
indentation = " "
condition = map_condition(use["condition"])
cm_fh.write(f"if({condition})\n")
cm_fh.write(
f"{indentation}qt_add_qmake_lib_dependency({newlib.soName} {use['lib']})\n"
)
if has_condition:
cm_fh.write("endif()\n")
run_library_test = False
mapped_library = find_3rd_party_library_mapping(lib)
if mapped_library:
run_library_test = mapped_library.run_library_test
if run_library_test and "test" in data["libraries"][lib]:
test = data["libraries"][lib]["test"]
write_compile_test(
ctx, lib, test, data, cm_fh, manual_library_list=[lib], is_library_test=True
)
def lineify(label, value, quote=True):
if value:
if quote:
escaped_value = value.replace('"', '\\"')
return f' {label} "{escaped_value}"\n'
return f" {label} {value}\n"
return ""
def map_condition(condition):
# Handle NOT:
if isinstance(condition, list):
condition = "(" + ") AND (".join(condition) + ")"
if isinstance(condition, bool):
if condition:
return "ON"
else:
return "OFF"
assert isinstance(condition, str)
mapped_features = {"gbm": "gbm_FOUND"}
# Turn foo != "bar" into (NOT foo STREQUAL 'bar')
condition = re.sub(r"([^ ]+)\s*!=\s*('.*?')", "(! \\1 == \\2)", condition)
# Turn foo != 156 into (NOT foo EQUAL 156)
condition = re.sub(r"([^ ]+)\s*!=\s*([0-9]?)", "(! \\1 EQUAL \\2)", condition)
condition = condition.replace("!", "NOT ")
condition = condition.replace("&&", " AND ")
condition = condition.replace("||", " OR ")
condition = condition.replace("==", " STREQUAL ")
# explicitly handle input.sdk == '':
condition = re.sub(r"input\.sdk\s*==\s*''", "NOT INPUT_SDK", condition)
last_pos = 0
mapped_condition = ""
has_failed = False
for match in re.finditer(r"([a-zA-Z0-9_]+)\.([a-zA-Z0-9_+-]+)", condition):
substitution = None
# appendFoundSuffix = True
if match.group(1) == "libs":
libmapping = find_3rd_party_library_mapping(match.group(2))
if libmapping and libmapping.packageName:
substitution = libmapping.packageName
if libmapping.resultVariable:
substitution = libmapping.resultVariable
if libmapping.appendFoundSuffix:
substitution += "_FOUND"
# Assume that feature conditions are interested whether
# a system library is found, rather than the bundled one
# which we always know we can build.
if libmapping.is_bundled_with_qt:
substitution = substitution.replace("Wrap", "WrapSystem")
elif match.group(1) == "features":
feature = match.group(2)
if feature in mapped_features:
substitution = mapped_features.get(feature)
else:
substitution = f"QT_FEATURE_{featureName(match.group(2))}"
elif match.group(1) == "subarch":
substitution = f"TEST_arch_{'${TEST_architecture_arch}'}_subarch_{match.group(2)}"
elif match.group(1) == "call":
if match.group(2) == "crossCompile":
substitution = "CMAKE_CROSSCOMPILING"
elif match.group(1) == "tests":
substitution = map_tests(match.group(2))
elif match.group(1) == "input":
substitution = f"INPUT_{featureName(match.group(2))}"
elif match.group(1) == "config":
substitution = map_platform(match.group(2))
elif match.group(1) == "module":
substitution = f"TARGET {map_qt_library(match.group(2))}"
elif match.group(1) == "arch":
if match.group(2) == "i386":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL i386)"
elif match.group(2) == "x86_64":
substitution = "(TEST_architecture_arch STREQUAL x86_64)"
elif match.group(2) == "arm":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL arm)"
elif match.group(2) == "arm64":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL arm64)"
elif match.group(2) == "mips":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL mips)"
if substitution is None:
print(f' XXXX Unknown condition "{match.group(0)}"')
has_failed = True
else:
mapped_condition += condition[last_pos : match.start(1)] + substitution
last_pos = match.end(2)
mapped_condition += condition[last_pos:]
# Space out '(' and ')':
mapped_condition = mapped_condition.replace("(", " ( ")
mapped_condition = mapped_condition.replace(")", " ) ")
# Prettify:
condition = re.sub("\\s+", " ", mapped_condition)
condition = condition.strip()
# Special case for WrapLibClang in qttools
condition = condition.replace("TEST_libclang.has_clangcpp", "TEST_libclang")
if has_failed:
condition += " OR FIXME"
return condition
def parseInput(ctx, sinput, data, cm_fh):
skip_inputs = {
"prefix",
"hostprefix",
"extprefix",
"archdatadir",
"bindir",
"datadir",
"docdir",
"examplesdir",
"external-hostbindir",
"headerdir",
"hostbindir",
"hostdatadir",
"hostlibdir",
"importdir",
"libdir",
"libexecdir",
"plugindir",
"qmldir",
"settingsdir",
"sysconfdir",
"testsdir",
"translationdir",
"android-arch",
"android-ndk",
"android-ndk-host",
"android-ndk-platform",
"android-sdk",
"android-toolchain-version",
"android-style-assets",
"appstore-compliant",
"avx",
"avx2",
"avx512",
"c++std",
"ccache",
"commercial",
"confirm-license",
"dbus",
"dbus-runtime",
"debug",
"debug-and-release",
"developer-build",
"device",
"device-option",
"f16c",
"force-asserts",
"force-debug-info",
"force-pkg-config",
"framework",
"gc-binaries",
"gdb-index",
"gcc-sysroot",
"gcov",
"gnumake",
"gui",
"headersclean",
"incredibuild-xge",
"libudev",
"ltcg",
"make",
"make-tool",
"mips_dsp",
"mips_dspr2",
"mp",
"nomake",
"opensource",
"optimize-debug",
"optimize-size",
"optimized-qmake",
"optimized-tools",
"pch",
"pkg-config",
"platform",
"plugin-manifests",
"profile",
"qreal",
"reduce-exports",
"reduce-relocations",
"release",
"rpath",
"sanitize",
"sdk",
"separate-debug-info",
"shared",
"silent",
"qdbus",
"sse2",
"sse3",
"sse4.1",
"sse4.2",
"ssse3",
"static",
"static-runtime",
"strip",
"syncqt",
"sysroot",
"testcocoon",
"use-gold-linker",
"warnings-are-errors",
"Werror",
"widgets",
"xplatform",
"zlib",
"eventfd",
"glib",
"icu",
"inotify",
"journald",
"pcre",
"posix-ipc",
"pps",
"slog2",
"syslog",
}
if sinput in skip_inputs:
print(f" **** Skipping input {sinput}: masked.")
return
dtype = data
if isinstance(data, dict):
dtype = data["type"]
if dtype == "boolean":
print(f" **** Skipping boolean input {sinput}: masked.")
return
if dtype == "enum":
values_line = " ".join(data["values"])
cm_fh.write(f"# input {sinput}\n")
cm_fh.write(f'set(INPUT_{featureName(sinput)} "undefined" CACHE STRING "")\n')
cm_fh.write(
f"set_property(CACHE INPUT_{featureName(sinput)} PROPERTY STRINGS undefined {values_line})\n\n"
)
return
print(f" XXXX UNHANDLED INPUT TYPE {dtype} in input description")
return
def get_library_usage_for_compile_test(library):
result = {}
mapped_library = find_3rd_party_library_mapping(library)
if not mapped_library:
result["fixme"] = f"# FIXME: use: unmapped library: {library}\n"
return result
if mapped_library.test_library_overwrite:
target_name = mapped_library.test_library_overwrite
else:
target_name = mapped_library.targetName
result["target_name"] = target_name
result["package_name"] = mapped_library.packageName
result["extra"] = mapped_library.extra
return result
# Handles config.test/foo/foo.pro projects.
def write_standalone_compile_test(cm_fh, ctx, data, config_test_name, is_library_test):
rel_test_project_path = f"{ctx['test_dir']}/{config_test_name}"
if posixpath.exists(f"{ctx['project_dir']}/{rel_test_project_path}/CMakeLists.txt"):
label = ""
libraries = []
packages = []
if "label" in data:
label = data["label"]
if is_library_test and config_test_name in data["libraries"]:
if "label" in data["libraries"][config_test_name]:
label = data["libraries"][config_test_name]["label"]
# If a library entry in configure.json has a test, and
# the test uses a config.tests standalone project, we
# need to get the package and target info for the
# library, and pass it to the test so compiling and
# linking succeeds.
library_usage = get_library_usage_for_compile_test(config_test_name)
if "target_name" in library_usage:
libraries.append(library_usage["target_name"])
if "package_name" in library_usage:
find_package_arguments = []
find_package_arguments.append(library_usage["package_name"])
if "extra" in library_usage:
find_package_arguments.extend(library_usage["extra"])
package_line = "PACKAGE " + " ".join(find_package_arguments)
packages.append(package_line)
cm_fh.write(
f"""
qt_config_compile_test("{config_test_name}"
LABEL "{label}"
PROJECT_PATH "${{CMAKE_CURRENT_SOURCE_DIR}}/{rel_test_project_path}"
"""
)
if libraries:
libraries_string = " ".join(libraries)
cm_fh.write(f" LIBRARIES {libraries_string}\n")
if packages:
packages_string = " ".join(packages)
cm_fh.write(f" PACKAGES {packages_string}")
cm_fh.write(")\n")
def write_compile_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
if manual_library_list is None:
manual_library_list = []
inherited_test_name = details["inherit"] if "inherit" in details else None
inherit_details = None
if inherited_test_name and is_library_test:
inherit_details = data["libraries"][inherited_test_name]["test"]
if not inherit_details:
print(f" XXXX Failed to locate inherited library test {inherited_test_name}")
if isinstance(details, str):
write_standalone_compile_test(cm_fh, ctx, data, details, is_library_test)
return
def resolve_head(detail):
head = detail.get("head", "")
if isinstance(head, list):
head = "\n".join(head)
return head
head = ""
if inherit_details:
head += resolve_head(inherit_details)
head += resolve_head(details)
sourceCode = head + "\n"
def resolve_include(detail, keyword):
include = detail.get(keyword, "")
if isinstance(include, list):
include = "#include <" + ">\n#include <".join(include) + ">"
elif include:
include = f"#include <{include}>"
return include
include = ""
if is_library_test:
if inherit_details:
inherited_lib_data = data["libraries"][inherited_test_name]
include += resolve_include(inherited_lib_data, "headers")
this_lib_data = data["libraries"][name]
include += resolve_include(this_lib_data, "headers")
else:
if inherit_details:
include += resolve_include(inherit_details, "include")
include += resolve_include(details, "include")
sourceCode += include + "\n"
def resolve_tail(detail):
tail = detail.get("tail", "")
if isinstance(tail, list):
tail = "\n".join(tail)
return tail
tail = ""
if inherit_details:
tail += resolve_tail(inherit_details)
tail += resolve_tail(details)
sourceCode += tail + "\n"
sourceCode += "int main(int argc, char **argv)\n"
sourceCode += "{\n"
sourceCode += " (void)argc; (void)argv;\n"
sourceCode += " /* BEGIN TEST: */\n"
def resolve_main(detail):
main = detail.get("main", "")
if isinstance(main, list):
main = "\n".join(main)
return main
main = ""
if inherit_details:
main += resolve_main(inherit_details)
main += resolve_main(details)
sourceCode += main + "\n"
sourceCode += " /* END TEST: */\n"
sourceCode += " return 0;\n"
sourceCode += "}\n"
sourceCode = sourceCode.replace('"', '\\"')
librariesCmakeName = ""
languageStandard = ""
compileOptions = ""
qmakeFixme = ""
cm_fh.write(f"# {name}\n")
if "qmake" in details: # We don't really have many so we can just enumerate them all
if details["qmake"] == "unix:LIBS += -lpthread":
librariesCmakeName = format(featureName(name)) + "_TEST_LIBRARIES"
cm_fh.write("if (UNIX)\n")
cm_fh.write(" set(" + librariesCmakeName + " pthread)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "linux: LIBS += -lpthread -lrt":
librariesCmakeName = format(featureName(name)) + "_TEST_LIBRARIES"
cm_fh.write("if (LINUX)\n")
cm_fh.write(" set(" + librariesCmakeName + " pthread rt)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "!winrt: LIBS += runtimeobject.lib":
librariesCmakeName = format(featureName(name)) + "_TEST_LIBRARIES"
cm_fh.write("if (NOT WINRT)\n")
cm_fh.write(" set(" + librariesCmakeName + " runtimeobject)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "CONFIG += c++11":
# do nothing we're always in c++11 mode
pass
elif details["qmake"] == "CONFIG += c++11 c++14":
languageStandard = "CXX_STANDARD 14"
elif details["qmake"] == "CONFIG += c++11 c++14 c++17":
languageStandard = "CXX_STANDARD 17"
elif details["qmake"] == "CONFIG += c++11 c++14 c++17 c++2a":
languageStandard = "CXX_STANDARD 20"
elif details["qmake"] == "QMAKE_CXXFLAGS += -fstack-protector-strong":
compileOptions = details["qmake"][18:]
else:
qmakeFixme = f"# FIXME: qmake: {details['qmake']}\n"
library_list = []
test_libraries = manual_library_list
if "use" in data:
test_libraries += data["use"].split(" ")
for library in test_libraries:
if len(library) == 0:
continue
adjusted_library = get_compile_test_dependent_library_mapping(name, library)
library_usage = get_library_usage_for_compile_test(adjusted_library)
if "fixme" in library_usage:
qmakeFixme += library_usage["fixme"]
continue
else:
library_list.append(library_usage["target_name"])
cm_fh.write(f"qt_config_compile_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
if librariesCmakeName != "" or len(library_list) != 0:
cm_fh.write(" LIBRARIES\n")
if librariesCmakeName != "":
cm_fh.write(lineify("", "${" + librariesCmakeName + "}"))
if len(library_list) != 0:
cm_fh.write(" ")
cm_fh.write("\n ".join(library_list))
cm_fh.write("\n")
if compileOptions != "":
cm_fh.write(f" COMPILE_OPTIONS {compileOptions}\n")
cm_fh.write(" CODE\n")
cm_fh.write('"' + sourceCode + '"')
if qmakeFixme != "":
cm_fh.write(qmakeFixme)
if languageStandard != "":
cm_fh.write(f"\n {languageStandard}\n")
cm_fh.write(")\n\n")
# "tests": {
# "cxx11_future": {
# "label": "C++11 <future>",
# "type": "compile",
# "test": {
# "include": "future",
# "main": [
# "std::future<int> f = std::async([]() { return 42; });",
# "(void)f.get();"
# ],
# "qmake": "unix:LIBS += -lpthread"
# }
# },
def write_compiler_supports_flag_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
cm_fh.write(f"qt_config_compiler_supports_flag_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
cm_fh.write(lineify("FLAG", data.get("flag", "")))
cm_fh.write(")\n\n")
def write_linker_supports_flag_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
cm_fh.write(f"qt_config_linker_supports_flag_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
cm_fh.write(lineify("FLAG", data.get("flag", "")))
cm_fh.write(")\n\n")
def parseTest(ctx, test, data, cm_fh):
skip_tests = {
"c11",
"c99",
"gc_binaries",
"precomile_header",
"reduce_exports",
"gc_binaries",
"libinput_axis_api",
"wayland-scanner",
"xlib",
}
if test in skip_tests:
print(f" **** Skipping features {test}: masked.")
return
if data["type"] == "compile":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_compile_test(ctx, test, details, data, cm_fh)
if data["type"] == "compilerSupportsFlag":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_compiler_supports_flag_test(ctx, test, details, data, cm_fh)
if data["type"] == "linkerSupportsFlag":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_linker_supports_flag_test(ctx, test, details, data, cm_fh)
elif data["type"] == "libclang":
knownTests.add(test)
cm_fh.write(f"# {test}\n")
lib_clang_lib = find_3rd_party_library_mapping("libclang")
cm_fh.write(generate_find_package_info(lib_clang_lib))
cm_fh.write(
dedent(
"""
if(TARGET WrapLibClang::WrapLibClang)
set(TEST_libclang "ON" CACHE BOOL "Required libclang version found." FORCE)
endif()
"""
)
)
cm_fh.write("\n")
elif data["type"] == "x86Simd":
knownTests.add(test)
label = data["label"]
cm_fh.write(f"# {test}\n")
cm_fh.write(f'qt_config_compile_test_x86simd({test} "{label}")\n')
cm_fh.write("\n")
elif data["type"] == "machineTuple":
knownTests.add(test)
label = data["label"]
cm_fh.write(f"# {test}\n")
cm_fh.write(f'qt_config_compile_test_machine_tuple("{label}")\n')
cm_fh.write("\n")
# "features": {
# "android-style-assets": {
# "label": "Android Style Assets",
# "condition": "config.android",
# "output": [ "privateFeature" ],
# "comment": "This belongs into gui, but the license check needs it here already."
# },
else:
print(f" XXXX UNHANDLED TEST TYPE {data['type']} in test description")
def get_feature_mapping():
# This is *before* the feature name gets normalized! So keep - and + chars, etc.
feature_mapping = {
"alloc_h": None, # handled by alloc target
"alloc_malloc_h": None,
"alloc_stdlib_h": None,
"build_all": None,
"ccache": {"autoDetect": "1", "condition": "QT_USE_CCACHE"},
"compiler-flags": None,
"cross_compile": {"condition": "CMAKE_CROSSCOMPILING"},
"debug_and_release": {
"autoDetect": "1", # Setting this to None has weird effects...
"condition": "QT_GENERATOR_IS_MULTI_CONFIG",
},
"debug": {
"autoDetect": "ON",
"condition": "CMAKE_BUILD_TYPE STREQUAL Debug OR Debug IN_LIST CMAKE_CONFIGURATION_TYPES",
},
"dlopen": {"condition": "UNIX"},
"force_debug_info": {
"autoDetect": "CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo OR RelWithDebInfo IN_LIST CMAKE_CONFIGURATION_TYPES"
},
"framework": {
"condition": "APPLE AND BUILD_SHARED_LIBS AND NOT CMAKE_BUILD_TYPE STREQUAL Debug"
},
"gc_binaries": {"condition": "NOT QT_FEATURE_shared"},
"gcc-sysroot": None,
"gcov": None,
"GNUmake": None,
"host-dbus": None,
"iconv": {
"condition": "NOT QT_FEATURE_icu AND QT_FEATURE_textcodec AND NOT WIN32 AND NOT QNX AND NOT ANDROID AND NOT APPLE AND WrapIconv_FOUND",
},
"incredibuild_xge": None,
"ltcg": {
"autoDetect": "ON",
"cmakePrelude": """set(__qt_ltcg_detected FALSE)
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION)
set(__qt_ltcg_detected TRUE)
else()
foreach(config ${CMAKE_BUILD_TYPE} ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${config}" __qt_uc_config)
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION_${__qt_uc_config})
set(__qt_ltcg_detected TRUE)
break()
endif()
endforeach()
unset(__qt_uc_config)
endif()""",
"condition": "__qt_ltcg_detected",
},
"msvc_mp": None,
"simulator_and_device": {"condition": "UIKIT AND NOT QT_UIKIT_SDK"},
"pkg-config": {"condition": "PKG_CONFIG_FOUND"},
"precompile_header": {"condition": "BUILD_WITH_PCH"},
"profile": None,
"qmakeargs": None,
"qpa_default_platform": None, # Not a bool!
"qreal": {
"condition": 'DEFINED QT_COORD_TYPE AND NOT QT_COORD_TYPE STREQUAL "double"',
"output": [
{"type": "define", "name": "QT_COORD_TYPE", "value": "${QT_COORD_TYPE}",},
{
"type": "define",
"name": "QT_COORD_TYPE_STRING",
"value": '\\"${QT_COORD_TYPE}\\"',
},
],
},
"reduce_exports": {"condition": "NOT MSVC",},
"release": None,
"release_tools": None,
"rpath": {
"autoDetect": "1",
"condition": "BUILD_SHARED_LIBS AND UNIX AND NOT WIN32 AND NOT ANDROID",
},
"shared": {
"condition": "BUILD_SHARED_LIBS",
"output": [
"publicFeature",
"publicQtConfig",
"publicConfig",
{
"type": "define",
"name": "QT_STATIC",
"prerequisite": "!defined(QT_SHARED) && !defined(QT_STATIC)",
"negative": True,
},
],
},
"silent": None,
"sql-sqlite": {"condition": "QT_FEATURE_datestring"},
"stl": None, # Do we really need to test for this in 2018?!
"strip": None,
"verifyspec": None, # qmake specific...
"warnings_are_errors": None, # FIXME: Do we need these?
"xkbcommon-system": None, # another system library, just named a bit different from the rest
}
return feature_mapping
def parseFeature(ctx, feature, data, cm_fh):
feature_mapping = get_feature_mapping()
mapping = feature_mapping.get(feature, {})
if mapping is None:
print(f" **** Skipping features {feature}: masked.")
return
handled = {
"autoDetect",
"comment",
"condition",
"description",
"disable",
"emitIf",
"enable",
"label",
"output",
"purpose",
"section",
}
label = mapping.get("label", data.get("label", ""))
purpose = mapping.get("purpose", data.get("purpose", data.get("description", label)))
autoDetect = map_condition(mapping.get("autoDetect", data.get("autoDetect", "")))
condition = map_condition(mapping.get("condition", data.get("condition", "")))
output = mapping.get("output", data.get("output", []))
comment = mapping.get("comment", data.get("comment", ""))
section = mapping.get("section", data.get("section", ""))
enable = map_condition(mapping.get("enable", data.get("enable", "")))
disable = map_condition(mapping.get("disable", data.get("disable", "")))
emitIf = map_condition(mapping.get("emitIf", data.get("emitIf", "")))
cmakePrelude = mapping.get("cmakePrelude", None)
cmakeEpilogue = mapping.get("cmakeEpilogue", None)
for k in [k for k in data.keys() if k not in handled]:
print(f" XXXX UNHANDLED KEY {k} in feature description")
if not output:
# feature that is only used in the conditions of other features
output = ["internalFeature"]
publicFeature = False # #define QT_FEATURE_featurename in public header
privateFeature = False # #define QT_FEATURE_featurename in private header
negativeFeature = False # #define QT_NO_featurename in public header
internalFeature = False # No custom or QT_FEATURE_ defines
publicDefine = False # #define MY_CUSTOM_DEFINE in public header
publicConfig = False # add to CONFIG in public pri file
privateConfig = False # add to CONFIG in private pri file
publicQtConfig = False # add to QT_CONFIG in public pri file
for o in output:
outputType = o
if isinstance(o, dict):
outputType = o["type"]
if outputType in [
"varAssign",
"varAppend",
"varRemove",
"useBFDLinker",
"useGoldLinker",
"useLLDLinker",
]:
continue
elif outputType == "define":
publicDefine = True
elif outputType == "feature":
negativeFeature = True
elif outputType == "publicFeature":
publicFeature = True
elif outputType == "privateFeature":
privateFeature = True
elif outputType == "internalFeature":
internalFeature = True
elif outputType == "publicConfig":
publicConfig = True
elif outputType == "privateConfig":
privateConfig = True
elif outputType == "publicQtConfig":
publicQtConfig = True
else:
print(f" XXXX UNHANDLED OUTPUT TYPE {outputType} in feature {feature}.")
continue
if not any(
[
publicFeature,
privateFeature,
internalFeature,
publicDefine,
negativeFeature,
publicConfig,
privateConfig,
publicQtConfig,
]
):
print(f" **** Skipping feature {feature}: Not relevant for C++.")
return
normalized_feature_name = featureName(feature)
def writeFeature(
name,
publicFeature=False,
privateFeature=False,
labelAppend="",
superFeature=None,
autoDetect="",
cmakePrelude=None,
cmakeEpilogue=None,
):
if comment:
cm_fh.write(f"# {comment}\n")
if cmakePrelude is not None:
cm_fh.write(cmakePrelude)
cm_fh.write("\n")
cm_fh.write(f'qt_feature("{name}"')
if publicFeature:
cm_fh.write(" PUBLIC")
if privateFeature:
cm_fh.write(" PRIVATE")
cm_fh.write("\n")
cm_fh.write(lineify("SECTION", section))
cm_fh.write(lineify("LABEL", label + labelAppend))
if purpose != label:
cm_fh.write(lineify("PURPOSE", purpose))
cm_fh.write(lineify("AUTODETECT", autoDetect, quote=False))
if superFeature:
feature_condition = f"QT_FEATURE_{superFeature}"
else:
feature_condition = condition
cm_fh.write(lineify("CONDITION", feature_condition, quote=False))
cm_fh.write(lineify("ENABLE", enable, quote=False))
cm_fh.write(lineify("DISABLE", disable, quote=False))
cm_fh.write(lineify("EMIT_IF", emitIf, quote=False))
cm_fh.write(")\n")
if cmakeEpilogue is not None:
cm_fh.write(cmakeEpilogue)
cm_fh.write("\n")
# Write qt_feature() calls before any qt_feature_definition() calls
# Default internal feature case.
featureCalls = {}
featureCalls[feature] = {
"name": feature,
"labelAppend": "",
"autoDetect": autoDetect,
"cmakePrelude": cmakePrelude,
"cmakeEpilogue": cmakeEpilogue,
}
# Go over all outputs to compute the number of features that have to be declared
for o in output:
outputType = o
name = feature
# The label append is to provide a unique label for features that have more than one output
# with different names.
labelAppend = ""
if isinstance(o, dict):
outputType = o["type"]
if "name" in o:
name = o["name"]
labelAppend = f": {o['name']}"
if outputType not in ["feature", "publicFeature", "privateFeature"]:
continue
if name not in featureCalls:
featureCalls[name] = {"name": name, "labelAppend": labelAppend}
if name != feature:
featureCalls[name]["superFeature"] = normalized_feature_name
if outputType in ["feature", "publicFeature"]:
featureCalls[name]["publicFeature"] = True
elif outputType == "privateFeature":
featureCalls[name]["privateFeature"] = True
elif outputType == "publicConfig":
featureCalls[name]["publicConfig"] = True
elif outputType == "privateConfig":
featureCalls[name]["privateConfig"] = True
elif outputType == "publicQtConfig":
featureCalls[name]["publicQtConfig"] = True
# Write the qt_feature() calls from the computed feature map
for _, args in featureCalls.items():
writeFeature(**args)
# Write qt_feature_definition() calls
for o in output:
outputType = o
outputArgs = {}
if isinstance(o, dict):
outputType = o["type"]
outputArgs = o
# Map negative feature to define:
if outputType == "feature":
outputType = "define"
outputArgs = {
"name": f"QT_NO_{normalized_feature_name.upper()}",
"negative": True,
"value": 1,
"type": "define",
}
if outputType != "define":
continue
if outputArgs.get("name") is None:
print(f" XXXX DEFINE output without name in feature {feature}.")
continue
out_name = outputArgs.get("name")
cm_fh.write(f'qt_feature_definition("{feature}" "{out_name}"')
if outputArgs.get("negative", False):
cm_fh.write(" NEGATE")
if outputArgs.get("value") is not None:
cm_fh.write(f' VALUE "{outputArgs.get("value")}"')
if outputArgs.get("prerequisite") is not None:
cm_fh.write(f' PREREQUISITE "{outputArgs.get("prerequisite")}"')
cm_fh.write(")\n")
# Write qt_feature_config() calls
for o in output:
outputType = o
name = feature
modified_name = name
outputArgs = {}
if isinstance(o, dict):
outputType = o["type"]
outputArgs = o
if "name" in o:
modified_name = o["name"]
if outputType not in ["publicConfig", "privateConfig", "publicQtConfig"]:
continue
config_type = ""
if outputType == "publicConfig":
config_type = "QMAKE_PUBLIC_CONFIG"
elif outputType == "privateConfig":
config_type = "QMAKE_PRIVATE_CONFIG"
elif outputType == "publicQtConfig":
config_type = "QMAKE_PUBLIC_QT_CONFIG"
if not config_type:
print(" XXXX config output without type in feature {}.".format(feature))
continue
cm_fh.write('qt_feature_config("{}" {}'.format(name, config_type))
if outputArgs.get("negative", False):
cm_fh.write("\n NEGATE")
if modified_name != name:
cm_fh.write("\n")
cm_fh.write(lineify("NAME", modified_name, quote=True))
cm_fh.write(")\n")
def processSummaryHelper(ctx, entries, cm_fh):
for entry in entries:
if isinstance(entry, str):
name = entry
cm_fh.write(f'qt_configure_add_summary_entry(ARGS "{name}")\n')
elif "type" in entry and entry["type"] in [
"feature",
"firstAvailableFeature",
"featureList",
]:
function_args = []
entry_type = entry["type"]
if entry_type in ["firstAvailableFeature", "featureList"]:
feature_mapping = get_feature_mapping()
unhandled_feature = False
for feature_name, value in feature_mapping.items():
# Skip entries that mention a feature which is
# skipped by configurejson2cmake in the feature
# mapping. This is not ideal, but prevents errors at
# CMake configuration time.
if not value and f"{feature_name}" in entry["args"]:
unhandled_feature = True
break
if unhandled_feature:
print(f" XXXX UNHANDLED FEATURE in SUMMARY TYPE {entry}.")
continue
if entry_type != "feature":
function_args.append(lineify("TYPE", entry_type))
if "args" in entry:
args = entry["args"]
function_args.append(lineify("ARGS", args))
if "message" in entry:
message = entry["message"]
function_args.append(lineify("MESSAGE", message))
if "condition" in entry:
condition = map_condition(entry["condition"])
function_args.append(lineify("CONDITION", condition, quote=False))
entry_args_string = "".join(function_args)
cm_fh.write(f"qt_configure_add_summary_entry(\n{entry_args_string})\n")
elif "type" in entry and entry["type"] == "buildTypeAndConfig":
cm_fh.write("qt_configure_add_summary_build_type_and_config()\n")
elif "type" in entry and entry["type"] == "buildMode":
message = entry["message"]
cm_fh.write(f"qt_configure_add_summary_build_mode({message})\n")
elif "type" in entry and entry["type"] == "buildParts":
message = entry["message"]
cm_fh.write(f'qt_configure_add_summary_build_parts("{message}")\n')
elif "section" in entry:
section = entry["section"]
cm_fh.write(f'qt_configure_add_summary_section(NAME "{section}")\n')
processSummaryHelper(ctx, entry["entries"], cm_fh)
cm_fh.write(f'qt_configure_end_summary_section() # end of "{section}" section\n')
else:
print(f" XXXX UNHANDLED SUMMARY TYPE {entry}.")
report_condition_mapping = {
"(features.rpath || features.rpath_dir) && !features.shared": "(features.rpath || QT_EXTRA_RPATHS) && !features.shared",
"(features.rpath || features.rpath_dir) && var.QMAKE_LFLAGS_RPATH == ''": None,
}
def processReportHelper(ctx, entries, cm_fh):
feature_mapping = get_feature_mapping()
for entry in entries:
if isinstance(entry, dict):
entry_args = []
if "type" not in entry:
print(f" XXXX UNHANDLED REPORT TYPE missing type in {entry}.")
continue
report_type = entry["type"]
if report_type not in ["note", "warning", "error"]:
print(f" XXXX UNHANDLED REPORT TYPE unknown type in {entry}.")
continue
report_type = report_type.upper()
entry_args.append(lineify("TYPE", report_type, quote=False))
message = entry["message"]
# Replace semicolons, qt_parse_all_arguments can't handle
# them due to an escaping bug in CMake regarding escaping
# macro arguments.
# https://gitlab.kitware.com/cmake/cmake/issues/19972
message = message.replace(";", ",")
entry_args.append(lineify("MESSAGE", message))
# Need to overhaul everything to fix conditions.
if "condition" in entry:
condition = entry["condition"]
unhandled_condition = False
for feature_name, value in feature_mapping.items():
# Skip reports that mention a feature which is
# skipped by configurejson2cmake in the feature
# mapping. This is not ideal, but prevents errors at
# CMake configuration time.
if not value and f"features.{feature_name}" in condition:
unhandled_condition = True
break
if unhandled_condition:
print(f" XXXX UNHANDLED CONDITION in REPORT TYPE {entry}.")
continue
if isinstance(condition, str) and condition in report_condition_mapping:
new_condition = report_condition_mapping[condition]
if new_condition is None:
continue
else:
condition = new_condition
condition = map_condition(condition)
entry_args.append(lineify("CONDITION", condition, quote=False))
entry_args_string = "".join(entry_args)
cm_fh.write(f"qt_configure_add_report_entry(\n{entry_args_string})\n")
else:
print(f" XXXX UNHANDLED REPORT TYPE {entry}.")
def parseCommandLineCustomHandler(ctx, data, cm_fh):
cm_fh.write(f"qt_commandline_custom({data})\n")
def parseCommandLineOptions(ctx, data, cm_fh):
for key in data:
args = [key]
option = data[key]
if isinstance(option, str):
args += ["TYPE", option]
else:
if "type" in option:
args += ["TYPE", option["type"]]
if "name" in option:
args += ["NAME", option["name"]]
if "value" in option:
args += ["VALUE", option["value"]]
if "values" in option:
values = option["values"]
if isinstance(values, list):
args += ["VALUES", " ".join(option["values"])]
else:
args += ["MAPPING"]
for lhs in values:
args += [lhs, values[lhs]]
cm_fh.write(f"qt_commandline_option({' '.join(args)})\n")
def parseCommandLinePrefixes(ctx, data, cm_fh):
for key in data:
cm_fh.write(f"qt_commandline_prefix({key} {data[key]})\n")
def parseCommandLineAssignments(ctx, data, cm_fh):
for key in data:
cm_fh.write(f"qt_commandline_assignment({key} {data[key]})\n")
def processCommandLine(ctx, data, cm_fh):
print(" commandline:")
if "subconfigs" in data:
for subconf in data["subconfigs"]:
cm_fh.write(f"qt_commandline_subconfig({subconf})\n")
if "commandline" not in data:
return
commandLine = data["commandline"]
if "custom" in commandLine:
print(" custom:")
parseCommandLineCustomHandler(ctx, commandLine["custom"], cm_fh)
if "options" in commandLine:
print(" options:")
parseCommandLineOptions(ctx, commandLine["options"], cm_fh)
if "prefix" in commandLine:
print(" prefix:")
parseCommandLinePrefixes(ctx, commandLine["prefix"], cm_fh)
if "assignments" in commandLine:
print(" assignments:")
parseCommandLineAssignments(ctx, commandLine["assignments"], cm_fh)
def processInputs(ctx, data, cm_fh):
print(" inputs:")
if "commandline" not in data:
return
commandLine = data["commandline"]
if "options" not in commandLine:
return
for input_option in commandLine["options"]:
parseInput(ctx, input_option, commandLine["options"][input_option], cm_fh)
def processTests(ctx, data, cm_fh):
print(" tests:")
if "tests" not in data:
return
for test in data["tests"]:
parseTest(ctx, test, data["tests"][test], cm_fh)
def processFeatures(ctx, data, cm_fh):
print(" features:")
if "features" not in data:
return
for feature in data["features"]:
parseFeature(ctx, feature, data["features"][feature], cm_fh)
def processLibraries(ctx, data, cm_fh):
cmake_find_packages_set = set()
print(" libraries:")
if "libraries" not in data:
return
for lib in data["libraries"]:
parseLib(ctx, lib, data, cm_fh, cmake_find_packages_set)
def processReports(ctx, data, cm_fh):
if "summary" in data:
print(" summary:")
processSummaryHelper(ctx, data["summary"], cm_fh)
if "report" in data:
print(" report:")
processReportHelper(ctx, data["report"], cm_fh)
if "earlyReport" in data:
print(" earlyReport:")
processReportHelper(ctx, data["earlyReport"], cm_fh)
def processSubconfigs(path, ctx, data):
assert ctx is not None
if "subconfigs" in data:
for subconf in data["subconfigs"]:
subconfDir = posixpath.join(path, subconf)
subconfData = readJsonFromDir(subconfDir)
subconfCtx = ctx
processJson(subconfDir, subconfCtx, subconfData)
class special_cased_file:
def __init__(self, base_dir: str, file_name: str, skip_special_case_preservation: bool):
self.base_dir = base_dir
self.file_path = posixpath.join(base_dir, file_name)
self.gen_file_path = self.file_path + ".gen"
self.preserve_special_cases = not skip_special_case_preservation
def __enter__(self):
self.file = open(self.gen_file_path, "w")
if self.preserve_special_cases:
self.sc_handler = SpecialCaseHandler(
os.path.abspath(self.file_path),
os.path.abspath(self.gen_file_path),
os.path.abspath(self.base_dir),
debug=False,
)
return self.file
def __exit__(self, type, value, trace_back):
self.file.close()
if self.preserve_special_cases and self.sc_handler.handle_special_cases():
os.replace(self.gen_file_path, self.file_path)
else:
os.replace(self.gen_file_path, self.file_path)
def processJson(path, ctx, data, skip_special_case_preservation=False):
ctx["project_dir"] = path
ctx["module"] = data.get("module", "global")
ctx["test_dir"] = data.get("testDir", "config.tests")
ctx = processFiles(ctx, data)
with special_cased_file(path, "qt_cmdline.cmake", skip_special_case_preservation) as cm_fh:
processCommandLine(ctx, data, cm_fh)
with special_cased_file(path, "configure.cmake", skip_special_case_preservation) as cm_fh:
cm_fh.write("\n\n#### Inputs\n\n")
processInputs(ctx, data, cm_fh)
cm_fh.write("\n\n#### Libraries\n\n")
processLibraries(ctx, data, cm_fh)
cm_fh.write("\n\n#### Tests\n\n")
processTests(ctx, data, cm_fh)
cm_fh.write("\n\n#### Features\n\n")
processFeatures(ctx, data, cm_fh)
processReports(ctx, data, cm_fh)
if ctx.get("module") == "global":
cm_fh.write(
'\nqt_extra_definition("QT_VERSION_STR" "\\"${PROJECT_VERSION}\\"" PUBLIC)\n'
)
cm_fh.write('qt_extra_definition("QT_VERSION_MAJOR" ${PROJECT_VERSION_MAJOR} PUBLIC)\n')
cm_fh.write('qt_extra_definition("QT_VERSION_MINOR" ${PROJECT_VERSION_MINOR} PUBLIC)\n')
cm_fh.write('qt_extra_definition("QT_VERSION_PATCH" ${PROJECT_VERSION_PATCH} PUBLIC)\n')
# do this late:
processSubconfigs(path, ctx, data)
def main():
if len(sys.argv) < 2:
print("This scripts needs one directory to process!")
quit(1)
skip_special_case_preservation = False
if len(sys.argv) > 2 and sys.argv[2] == "-s":
skip_special_case_preservation = True
directory = sys.argv[1]
print(f"Processing: {directory}.")
data = readJsonFromDir(directory)
processJson(directory, {}, data, skip_special_case_preservation=skip_special_case_preservation)
if __name__ == "__main__":
main()
| 34.608942 | 147 | 0.580251 | 1,260 | 0.022926 | 0 | 0 | 0 | 0 | 0 | 0 | 20,959 | 0.381357 |
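The heart of the script above is the token substitution performed in map_condition. The following simplified, self-contained sketch illustrates that regex-driven rewriting idea only; the mapping table and operator handling of the real Qt script are far richer.

import re

# Toy mapping from configure.json condition tokens to CMake expressions.
TOKEN_MAP = {
    ("features", "shared"): "QT_FEATURE_shared",
    ("config", "win32"): "WIN32",
}

def toy_map_condition(condition):
    def repl(match):
        key = (match.group(1), match.group(2))
        # Fall back to a FIXME marker for unknown tokens, like the real script.
        return TOKEN_MAP.get(key, f"FIXME_{match.group(2)}")
    return re.sub(r"([a-zA-Z0-9_]+)\.([a-zA-Z0-9_+-]+)", repl, condition)

print(toy_map_condition("features.shared && config.win32"))
# -> "QT_FEATURE_shared && WIN32" (operator rewriting is omitted in this sketch)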
1f7e10137722c6fcc224fdac359159dee3d532fc
| 819 |
py
|
Python
|
easy_scrapy/2_beautifulsoup/bs4_3_regex.py
|
cyfu/web_scrapying
|
b59a75d3db289032bb9005f062470e8ce745539a
|
[
"MIT"
] | null | null | null |
easy_scrapy/2_beautifulsoup/bs4_3_regex.py
|
cyfu/web_scrapying
|
b59a75d3db289032bb9005f062470e8ce745539a
|
[
"MIT"
] | null | null | null |
easy_scrapy/2_beautifulsoup/bs4_3_regex.py
|
cyfu/web_scrapying
|
b59a75d3db289032bb9005f062470e8ce745539a
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
# open and read web page, decode it if it contains Chinese
html = urlopen('https://mofanpy.com/static/scraping/table.html').read().decode('utf-8')
print(html)
# 'lxml' is parser name
soup = BeautifulSoup(html, features='lxml')
# search by tag name and attribute name (src), use a regex to match the src value
img_list = soup.find_all('img', {'src': re.compile('.*?\.jpg')})
print( [img['src'] for img in img_list] )
# another example
course_links = soup.find_all('a', {'href': re.compile('\/tutorials.*')})
for link in course_links:
print(link['href'])
# another example
tables = soup.find_all('table', {'id': 'course-list'})
for table in tables:
courses = table.find_all('tr', {'class': 'ml'})
print([course['id'] for course in courses])
| 32.76 | 87 | 0.693529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.420024 |
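The same regex-filtered find_all pattern also works on an inline document, which makes it easy to try without network access. A minimal sketch follows; it uses the stdlib 'html.parser' backend so lxml is not required.

from bs4 import BeautifulSoup
import re

sample = """
<ul>
  <li><img src="a.jpg"></li>
  <li><img src="b.png"></li>
  <li><a href="/tutorials/python">python</a></li>
</ul>
"""

soup = BeautifulSoup(sample, features='html.parser')
# Compile the patterns once and reuse them.
jpg = re.compile(r'.*\.jpg$')
tutorials = re.compile(r'/tutorials.*')
print([img['src'] for img in soup.find_all('img', {'src': jpg})])    # ['a.jpg']
print([a['href'] for a in soup.find_all('a', {'href': tutorials})])  # ['/tutorials/python']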
1f839d58efbf61c5507f3e11ca4b447b2e8e7b82
| 1,826 |
py
|
Python
|
ssrlsim/scripts/start_RE.py
|
tangkong/ssrlsim
|
62f8a07989ebc187ecf6d2dc3bd8d97ae4c56536
|
[
"BSD-3-Clause"
] | null | null | null |
ssrlsim/scripts/start_RE.py
|
tangkong/ssrlsim
|
62f8a07989ebc187ecf6d2dc3bd8d97ae4c56536
|
[
"BSD-3-Clause"
] | 2 |
2020-06-18T05:18:15.000Z
|
2021-09-08T21:44:29.000Z
|
ssrlsim/scripts/start_RE.py
|
tangkong/ssrlsim
|
62f8a07989ebc187ecf6d2dc3bd8d97ae4c56536
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import matplotlib
# get_ipython().run_line_magic("matplotlib", "widget") # i.e. %matplotlib widget
import matplotlib.pyplot as plt
from ophyd import Device, Component, EpicsSignal
from ophyd.signal import EpicsSignalBase
from ophyd.areadetector.filestore_mixins import resource_factory
import uuid
from pathlib import Path
import numpy as np
from IPython import get_ipython
# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import PersistentDict
RE = RunEngine({})
# RE.md = PersistentDict(str(Path("~/.bluesky_history").expanduser()))
# Set up SupplementalData.
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)
# Set up a Broker.
from databroker import Broker
db = Broker.named("temp") #mongo-intake")
print(f'Using databroker: {db.name}')
# and subscribe it to the RunEngine
RE.subscribe(db.insert)
# Add a progress bar.
from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
RE.waiting_hook = pbar_manager
# # Register bluesky IPython magics.
# from bluesky.magics import BlueskyMagics
# get_ipython().register_magics(BlueskyMagics)
# Set up the BestEffortCallback.
from bluesky.callbacks.best_effort import BestEffortCallback
bec = BestEffortCallback()
RE.subscribe(bec)
peaks = bec.peaks
# Make plots update live while scans run.
from bluesky.utils import install_nb_kicker
install_nb_kicker()
# convenience imports
# some of the * imports are for 'back-compatibility' of a sort -- we have
# taught BL staff to expect LiveTable and LivePlot etc. to be in their
# namespace
import numpy as np
from bluesky.callbacks.mpl_plotting import LivePlot, LiveGrid
import bluesky.plans as bp
import bluesky.plan_stubs as bps
import bluesky.preprocessors as bpp
| 24.675676 | 81 | 0.793538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 736 | 0.403067 |
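Assuming the setup above has been executed (so RE, db and the callbacks exist), a short sketch of a first scan using the simulated hardware that ships with ophyd; the simulated devices are illustrative stand-ins for real beamline hardware.

# Sketch: run a simple count and a motor scan against ophyd's simulated devices.
from ophyd.sim import det, motor
import bluesky.plans as bp

RE(bp.count([det], num=5))            # five readings of the simulated detector
RE(bp.scan([det], motor, -1, 1, 11))  # 11-point scan of the simulated motor
hdr = db[-1]                          # fetch the most recent run back from the broker
print(hdr.table().head())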
1f85f7ae96d69285ca7b29169676435a4ce6e57d
| 5,021 |
py
|
Python
|
snakeskin/protos/peer/peer_pb2.py
|
healthverity/snakeskin-fabric
|
31ba7fa5a71445eba76f89723c998d603704e0f9
|
[
"Apache-2.0"
] | 5 |
2019-08-08T17:16:02.000Z
|
2021-05-15T07:28:31.000Z
|
snakeskin/protos/peer/peer_pb2.py
|
healthverity/snakeskin-fabric
|
31ba7fa5a71445eba76f89723c998d603704e0f9
|
[
"Apache-2.0"
] | 4 |
2019-08-20T15:07:12.000Z
|
2020-07-31T17:50:51.000Z
|
snakeskin/protos/peer/peer_pb2.py
|
healthverity/snakeskin-fabric
|
31ba7fa5a71445eba76f89723c998d603704e0f9
|
[
"Apache-2.0"
] | 2 |
2019-08-20T15:22:48.000Z
|
2019-12-17T19:38:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: snakeskin/protos/peer/peer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from snakeskin.protos.peer import proposal_pb2 as snakeskin_dot_protos_dot_peer_dot_proposal__pb2
from snakeskin.protos.peer import proposal_response_pb2 as snakeskin_dot_protos_dot_peer_dot_proposal__response__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='snakeskin/protos/peer/peer.proto',
package='protos',
syntax='proto3',
serialized_options=_b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer'),
serialized_pb=_b('\n snakeskin/protos/peer/peer.proto\x12\x06protos\x1a$snakeskin/protos/peer/proposal.proto\x1a-snakeskin/protos/peer/proposal_response.proto\"\x16\n\x06PeerID\x12\x0c\n\x04name\x18\x01 \x01(\t\";\n\x0cPeerEndpoint\x12\x1a\n\x02id\x18\x01 \x01(\x0b\x32\x0e.protos.PeerID\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t2Q\n\x08\x45ndorser\x12\x45\n\x0fProcessProposal\x12\x16.protos.SignedProposal\x1a\x18.protos.ProposalResponse\"\x00\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3')
,
dependencies=[snakeskin_dot_protos_dot_peer_dot_proposal__pb2.DESCRIPTOR,snakeskin_dot_protos_dot_peer_dot_proposal__response__pb2.DESCRIPTOR,])
_PEERID = _descriptor.Descriptor(
name='PeerID',
full_name='protos.PeerID',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='protos.PeerID.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=151,
)
_PEERENDPOINT = _descriptor.Descriptor(
name='PeerEndpoint',
full_name='protos.PeerEndpoint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='protos.PeerEndpoint.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='address', full_name='protos.PeerEndpoint.address', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=153,
serialized_end=212,
)
_PEERENDPOINT.fields_by_name['id'].message_type = _PEERID
DESCRIPTOR.message_types_by_name['PeerID'] = _PEERID
DESCRIPTOR.message_types_by_name['PeerEndpoint'] = _PEERENDPOINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PeerID = _reflection.GeneratedProtocolMessageType('PeerID', (_message.Message,), {
'DESCRIPTOR' : _PEERID,
'__module__' : 'snakeskin.protos.peer.peer_pb2'
# @@protoc_insertion_point(class_scope:protos.PeerID)
})
_sym_db.RegisterMessage(PeerID)
PeerEndpoint = _reflection.GeneratedProtocolMessageType('PeerEndpoint', (_message.Message,), {
'DESCRIPTOR' : _PEERENDPOINT,
'__module__' : 'snakeskin.protos.peer.peer_pb2'
# @@protoc_insertion_point(class_scope:protos.PeerEndpoint)
})
_sym_db.RegisterMessage(PeerEndpoint)
DESCRIPTOR._options = None
_ENDORSER = _descriptor.ServiceDescriptor(
name='Endorser',
full_name='protos.Endorser',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=214,
serialized_end=295,
methods=[
_descriptor.MethodDescriptor(
name='ProcessProposal',
full_name='protos.Endorser.ProcessProposal',
index=0,
containing_service=None,
input_type=snakeskin_dot_protos_dot_peer_dot_proposal__pb2._SIGNEDPROPOSAL,
output_type=snakeskin_dot_protos_dot_peer_dot_proposal__response__pb2._PROPOSALRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ENDORSER)
DESCRIPTOR.services_by_name['Endorser'] = _ENDORSER
# @@protoc_insertion_point(module_scope)
| 34.390411 | 550 | 0.768174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,412 | 0.281219 |
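Generated modules like the one above are normally used only through the message classes they register. A small sketch of constructing and round-tripping the two messages defined here; it assumes the protobuf runtime and the repository's package layout are on the import path.

from snakeskin.protos.peer.peer_pb2 import PeerID, PeerEndpoint

endpoint = PeerEndpoint(id=PeerID(name='peer0.org1'), address='peer0.org1:7051')
wire = endpoint.SerializeToString()      # bytes suitable for gRPC transport
decoded = PeerEndpoint.FromString(wire)  # round-trip back into a message
assert decoded.id.name == 'peer0.org1'
print(decoded.address)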
1f8ae5e3724e8e2ef10be9cc46bc0a4381cac952
| 3,236 |
py
|
Python
|
tests/test_models/test_place.py
|
lepc1972/AirBnB_clone
|
3ba7f0332e926ff7715272866a00d3a23341b3c3
|
[
"MIT"
] | 2 |
2021-07-16T01:07:40.000Z
|
2021-07-16T01:23:15.000Z
|
tests/test_models/test_place.py
|
lepc1972/AirBnB_clone
|
3ba7f0332e926ff7715272866a00d3a23341b3c3
|
[
"MIT"
] | null | null | null |
tests/test_models/test_place.py
|
lepc1972/AirBnB_clone
|
3ba7f0332e926ff7715272866a00d3a23341b3c3
|
[
"MIT"
] | 1 |
2021-07-09T01:41:16.000Z
|
2021-07-09T01:41:16.000Z
|
#!/usr/bin/python3
'''
test for the place model here.
'''
import unittest
from models.base_model import BaseModel
from models.place import Place
class TestPlace(unittest.TestCase):
'''
Testing Place class
'''
def setUp(self):
'''
Create instance for place.
'''
self.new_place = Place()
def test_Place_inheritance(self):
'''
tests City Inherits BaseModel
'''
self.assertIsInstance(self.new_place, BaseModel)
def test_Place_attributes(self):
'''
test attribute is there.
'''
self.assertTrue("city_id" in self.new_place.__dir__())
self.assertTrue("user_id" in self.new_place.__dir__())
self.assertTrue("description" in self.new_place.__dir__())
self.assertTrue("name" in self.new_place.__dir__())
self.assertTrue("number_rooms" in self.new_place.__dir__())
self.assertTrue("max_guest" in self.new_place.__dir__())
self.assertTrue("price_by_night" in self.new_place.__dir__())
self.assertTrue("latitude" in self.new_place.__dir__())
self.assertTrue("longitude" in self.new_place.__dir__())
self.assertTrue("amenity_ids" in self.new_place.__dir__())
def test_type_longitude(self):
'''
Test type long.
'''
longitude = getattr(self.new_place, "longitude")
self.assertIsInstance(longitude, float)
def test_type_latitude(self):
'''
Test type lat
'''
latitude = getattr(self.new_place, "latitude")
self.assertIsInstance(latitude, float)
def test_type_price_by_night(self):
'''
Test type price night
'''
price_by_night = getattr(self.new_place, "price_by_night")
self.assertIsInstance(price_by_night, int)
def test_type_max_guest(self):
'''
Test type max guest
'''
max_guest = getattr(self.new_place, "max_guest")
self.assertIsInstance(max_guest, int)
def test_type_number_bathrooms(self):
'''
Test number bathrooms
'''
number_bathrooms = getattr(self.new_place, "number_bathrooms")
self.assertIsInstance(number_bathrooms, int)
def test_type_number_rooms(self):
'''
Test type number rooms
'''
number_rooms = getattr(self.new_place, "number_rooms")
self.assertIsInstance(number_rooms, int)
def test_type_description(self):
'''
Test type description
'''
description = getattr(self.new_place, "description")
self.assertIsInstance(description, str)
def test_type_name(self):
'''
Test type name
'''
name = getattr(self.new_place, "name")
self.assertIsInstance(name, str)
def test_type_user_id(self):
'''
Test type user id
'''
user_id = getattr(self.new_place, "user_id")
self.assertIsInstance(user_id, str)
def test_type_city_id(self):
'''
Test type city id
'''
city_id = getattr(self.new_place, "city_id")
self.assertIsInstance(city_id, str)
| 28.637168 | 70 | 0.603523 | 3,082 | 0.95241 | 0 | 0 | 0 | 0 | 0 | 0 | 952 | 0.29419 |
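The repeated per-attribute type checks above could also be expressed with subTest, which reports each attribute separately from a single test method. A sketch of that alternative, making the same assumption as the original file that models.place is importable:

import unittest
from models.place import Place

class TestPlaceTypes(unittest.TestCase):
    EXPECTED_TYPES = {
        'city_id': str, 'user_id': str, 'name': str, 'description': str,
        'number_rooms': int, 'number_bathrooms': int, 'max_guest': int,
        'price_by_night': int, 'latitude': float, 'longitude': float,
    }

    def test_attribute_types(self):
        place = Place()
        for attr, expected in self.EXPECTED_TYPES.items():
            with self.subTest(attribute=attr):
                self.assertIsInstance(getattr(place, attr), expected)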
1f8af70835313ee879d71169d774cac9ba7f41c9
| 1,714 |
py
|
Python
|
app/__init__.py
|
zjyfdu/flask_huxiaofei
|
2193bfe0aa45626fdb4b270f7532a1e04c5be556
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
zjyfdu/flask_huxiaofei
|
2193bfe0aa45626fdb4b270f7532a1e04c5be556
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
zjyfdu/flask_huxiaofei
|
2193bfe0aa45626fdb4b270f7532a1e04c5be556
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import MetaData
from flask_login import LoginManager
from flask_msearch import Search
from config import config
from jieba.analyse import ChineseAnalyzer
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
db = SQLAlchemy(metadata=MetaData(naming_convention=naming_convention))
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
# db = SQLAlchemy()
search = Search(analyzer=ChineseAnalyzer())
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__, static_url_path='')
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.app = app
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
search.init_app(app)
from .alipay import alipay as alipay_blueprint
app.register_blueprint(alipay_blueprint, url_prefix='/alipay')
from .main import main as main_blueprint
app.register_blueprint(main_blueprint, url_prefix='/community')
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .course import course as course_blueprint
app.register_blueprint(course_blueprint)
return app
from models import *
| 28.566667 | 72 | 0.757876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.155193 |
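A minimal sketch of how the create_app factory above is typically driven from a separate entry point. The manage.py name and the 'default' configuration key are assumptions, not files or keys confirmed from the repository.

# manage.py (hypothetical entry point)
import os
from app import create_app

# 'default' is an assumed key in the config mapping; adjust to the real one.
app = create_app(os.environ.get('FLASK_CONFIG', 'default'))

if __name__ == '__main__':
    app.run(debug=True)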
1f8f15b75dc5ee4ca1fc697ef1e5c0863cf598a7
| 1,893 |
py
|
Python
|
easyTCP/CLIENT/backend/Protocol.py
|
dsal3389/easyTCP
|
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
|
[
"MIT"
] | 4 |
2018-12-09T13:57:59.000Z
|
2019-10-19T19:34:28.000Z
|
easyTCP/CLIENT/backend/Protocol.py
|
dsal3389/easyTCP
|
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
|
[
"MIT"
] | null | null | null |
easyTCP/CLIENT/backend/Protocol.py
|
dsal3389/easyTCP
|
0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f
|
[
"MIT"
] | null | null | null |
import asyncio
import json
from ..utils import DEFAULT_SETTINGS
from ..utils.DEFAULT_ENCRYPTION import SERVER_encryption, CLIENT_encryption
def json_dumper(data):
return bytes(json.dumps(data), encoding=DEFAULT_SETTINGS.ENCODING)
def json_loader(data):
return json.loads(str(data, encoding=DEFAULT_SETTINGS.ENCODING))
class Protocol(object):
def __init__(self, reader=None, writer=None, *, loop=None, client_encryption=None):
self.reader=reader
self.writer=writer
self.loop=loop or asyncio.get_event_loop()
self.server_encryption = SERVER_encryption(DEFAULT_SETTINGS.ENCODING)
self.client_encryption = client_encryption or CLIENT_encryption(encoding=DEFAULT_SETTINGS.ENCODING)
self.jload = json_loader
self.jdump = json_dumper
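    # send(): serialize {'method': ..., **kwargs} to JSON and, unless encrypt is
    # False, encrypt it with the server key before writing it to the transport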
@asyncio.coroutine
def send(self, method, *, drain=False, encrypt=True, **kwargs):
data = self.jdump({'method':method.upper(), **kwargs})
if encrypt: # we don't need to encrypt the data when we want to send the public key
data = self.server_encryption.encrypt(data) # the client wont be able to read the encrypted packet
self.writer.write(data)
if drain:
yield from self.writer.drain()
@asyncio.coroutine
def recv(self, dencrypt=True):
data = yield from self.reader.read(DEFAULT_SETTINGS.READ_SIZE)
if dencrypt:
data = self.client_encryption.dencrypt(data)
data = self.jload(data)
return data['method'], {k:i for k, i in data.items() if k != 'method'}
@asyncio.coroutine
def expected(self, *args, dencrypt=True):
method, _ = yield from self.recv(dencrypt)
        if args and method not in args:
            raise ValueError('expected %s received %s' % (args, method))
return method, _
| 36.403846 | 111 | 0.661912 | 1,537 | 0.811939 | 961 | 0.50766 | 1,033 | 0.545695 | 0 | 0 | 175 | 0.092446 |
1f8f9e391109c41227336b2bb762cb77a40123c1
| 6,413 |
py
|
Python
|
src/harvester.py
|
bmoxon/azfinsim
|
3e203855410abd6c9636377b93ed5d33ac896c41
|
[
"MIT"
] | 5 |
2021-02-24T19:10:34.000Z
|
2022-02-24T21:11:24.000Z
|
src/harvester.py
|
bmoxon/azfinsim
|
3e203855410abd6c9636377b93ed5d33ac896c41
|
[
"MIT"
] | null | null | null |
src/harvester.py
|
bmoxon/azfinsim
|
3e203855410abd6c9636377b93ed5d33ac896c41
|
[
"MIT"
] | 2 |
2021-05-03T11:57:31.000Z
|
2021-12-09T10:24:29.000Z
|
#! /usr/bin/env python3
#-- harvest scheduler that runs on the compute pool nodes
import argparse
import time
import sys
import logging
import os
import psutil
from applicationinsights import TelemetryClient
from applicationinsights.logging import LoggingHandler
from getargs import getargs
import azlog
azlog.color=False
#-- Timeout between polling the harvest #cores api/file
HARVESTPOLLTIMEOUT = 30
#-- Executable to launch per cpu slot
#ENGINE="burn.sh" # (for testing)
ENGINE="/azfinsim/azfinsim.py"
#KVP_MONITOR="/var/lib/hyperv/.kvp_pool_0"
#-- mounted via: sudo docker run -v /var/lib/hyperv:/kvp -it mkharvestazcr.azurecr.io/azfinsim/azfinsimub1804
KVP_MONITOR="/kvp/.kvp_pool_0"
def read_harvest_cores() :
vcores = psutil.cpu_count(logical=True)
pcores = psutil.cpu_count(logical=False)
log.info("Polling Harvester: Physical Cores: %d Logical Cores: %d" % (pcores,vcores))
kvp=KVP_MONITOR
try:
f = open(kvp, "r")
        contents = f.read()
        if (len(contents) > 0):
            contents = contents.replace("CurrentCoreCount","")
            contents = contents.replace('\0','')
            ncores = int(contents.split('.')[0])
log.info("Harvest file %s has current physical core count: %d" % (kvp,ncores))
else:
ncores = vcores
log.warn("Harvest file %s is empty; using static vcore count: %d" % (kvp,ncores))
except OSError:
ncores = vcores
log.warn("Harvest file %s doesn't exist; using static vcore count: %d" % (kvp,ncores))
tc.track_metric('HARVESTCORES', ncores)
tc.flush()
return ncores
def spawn(ncores) :
env = {"PATH":"."}
args = ("null","null")
log.info("spawning %d processes" % ncores)
for i in range(ncores):
pid = os.fork()
if not pid:
try:
os.execvpe("burn.sh", args, env)
except OSError as e:
log.error("Exec failed: %s\n" % (e.strerror))
os._exit(1)
else:
pid = os.waitpid(pid,0)
def spawn_one(start_trade,trade_window,inputargs):
#path = os.environ['PATH']
argtup = tuple(inputargs)
pid = os.fork()
if not pid:
#-- child process
log.info("spawning new process %s: pid %d: start_trade=%d, ntrades=%d" % (ENGINE,os.getpid(),start_trade,trade_window))
#logging.info(argtup)
try:
os.execve(ENGINE, argtup, os.environ.copy())
except OSError as e:
log.error("Exec failed: %s\n" % (e.strerror))
os._exit(1)
#else:
#pid = os.waitpid(pid,0)
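#-- replace_args: rewrite the start_trade / trade_window values in the argument
#-- list so each spawned engine process works on its own slice of trades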
def replace_args(start_trade,trade_window,inputargs):
result = []
skip=False
for i in range(len(inputargs)):
if (skip==True):
skip=False
continue
if (inputargs[i]=='start_trade'):
result.append('start_trade')
result.append(str(start_trade))
skip=True
elif (inputargs[i]=='trade_window'):
result.append('trade_window')
result.append(str(trade_window))
skip=True
else:
result.append(inputargs[i])
skip=False
return(result)
#-- register the absolute start time
#launch=time.time_ns() #-- python3.8 only
launch=time.time()
log = azlog.getLogger(__name__)
if __name__ == "__main__":
#-- grab cli args: will be passed through to child processes
args = getargs("harvester")
#-- reformat args into a list of strings for execvpe
inputargs = []
inputargs.append(ENGINE) #-- first arg to execvpe() should be progname
for arg in vars(args):
#print(arg, getattr(args,arg))
val = str(getattr(args,arg))
arg=arg.replace("_","-")
inputargs.append(str("--" + arg)) #-- re-add the stripped "--" prefix
inputargs.append(val)
#print(inputargs)
#-- setup azure application insights handle for telemetry
tc = TelemetryClient("%s" % args.appinsights_key)
# set up logging - STDOUT & Azure AppInsights EventLog
#handler = LoggingHandler(args.appinsights_key)
#logging.basicConfig(
# format="%(asctime)s harvester: %(name)s %(threadName)-10.10s %(levelname)-5.5s %(message)s",
# handlers=[
# LoggingHandler(args.appinsights_key), #-- send to AZURE
# logging.StreamHandler(stream=sys.stdout) #-- send to STDOUT
# ],level=args.loglevel)
#-- log start time
log.info("TRADE %10d: LAUNCH : %d" % (args.start_trade,launch))
tc.track_metric('STARTTIME', launch)
tc.flush()
#-- get initial harvest core count
slots = read_harvest_cores()
log.info("%d x Cores available." % slots)
#-- calculate number of trades per process/batch/cpu
max_batch_size = 10
total_trades = args.trade_window
lastbatch = total_trades % max_batch_size
nbatchesfl = total_trades / max_batch_size
nbatches = int(nbatchesfl)
offset = args.start_trade
log.info("%d trades to process in this task (%.2f batches of %d)" % (total_trades,nbatchesfl,max_batch_size))
#-- Main loop: monitor harvest api/file & dispatch processes to available cores
batchesdone=0
trades_processed=0
while (batchesdone <= nbatches):
procs = psutil.Process().children()
gone, alive = psutil.wait_procs(procs,timeout=1,callback=None)
nprocs = len(alive)
freeslots = slots - nprocs
log.info("%d processes running on %d total slots: %d slots available." % (nprocs,slots,freeslots))
if (nprocs < slots):
for i in range(freeslots):
if (batchesdone == nbatches): batch_size = lastbatch
else: batch_size = max_batch_size
inputargs = replace_args(offset,batch_size,inputargs) # substitute the command line args
spawn_one(offset,batch_size,inputargs)
trades_processed += batch_size
offset += batch_size
batchesdone+=1
if (batch_size == lastbatch):
break
time.sleep(HARVESTPOLLTIMEOUT)
#-- re-read the harvest file - check if #slots has changed
slots = read_harvest_cores()
log.info("%d trades processed. No trades left to process; relinquishing cores" % trades_processed)
# flush all un-sent telemetry items
tc.flush()
#logging.shutdown()
#-- when all work done, exit and allow orchestration to recover node.
exit(0)
| 34.478495 | 127 | 0.626072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,412 | 0.376111 |
1f8faaab50ba1792d26b495c5cba37135b67c989
| 7,758 |
py
|
Python
|
old/model.py
|
samhippie/shallow-red
|
5690cdf380c6e138e25d88e85093738951438298
|
[
"MIT"
] | null | null | null |
old/model.py
|
samhippie/shallow-red
|
5690cdf380c6e138e25d88e85093738951438298
|
[
"MIT"
] | null | null | null |
old/model.py
|
samhippie/shallow-red
|
5690cdf380c6e138e25d88e85093738951438298
|
[
"MIT"
] | 1 |
2020-03-13T12:53:35.000Z
|
2020-03-13T12:53:35.000Z
|
#!/usr/bin/env python3
#loading tf is slow, so don't do it unless we're using it
USE_TENSORFLOW = False
import collections
import numpy as np
import os
import pickle
if USE_TENSORFLOW:
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import modelInput
#used to compare a trained model to a basic model for the same inputs
#can also be used if we want to train a model using the behavior of a basic model
class CombinedModel:
def __init__(self, trainedModel, basicModel):
self.trainedModel = trainedModel
self.basicModel = basicModel
#t controls output of getExpValue
#0 for basic model, 1 for trained, in between for weighted average
self.t = 0
self.compare = False
self.compPointsBasic = []
self.compPointsTrained = []
def getExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
basicValue = self.basicModel.getExpValue(stateHash, stateObj, action1, action2, bulk_input)
trainedValue = self.trainedModel.getExpValue(stateHash, stateObj, action1, action2, bulk_input)
if type(basicValue) == list:
value = []
for i in range(len(basicValue)):
value.append([None if basicValue[i][0] == None else basicValue[i][0] * (1-self.t) + trainedValue[i][0] * self.t])
else:
value = None if basicValue == None else basicValue * (1-self.t) + trainedValue * self.t
if self.compare:
if type(basicValue) == list:
for i in range(len(basicValue)):
#None means basic has never seen it, so we have no good data
if basicValue[i][0] != None:
self.compPointsBasic.append(basicValue[i][0])
self.compPointsTrained.append(trainedValue[i][0])
else:
self.compPointsBasic.append(basicValue)
self.compPointsTrained.append(trainedValue)
return value
def addReward(self, *args):
self.basicModel.addReward(*args)
self.trainedModel.addReward(*args)
def train(self, epochs=1, batch_size=None):
self.trainedModel.train(epochs, batch_size)
def purge(self, seenStates):
self.basicModel.purge(seenStates)
self.trainedModel.purge(seenStates)
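    #mean squared difference between the basic and trained predictions collected
    #while self.compare was enabled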
def getMSE(self, clear=False):
sum = 0
count = 0
for i in range(len(self.compPointsBasic)):
b = self.compPointsBasic[i]
t = self.compPointsTrained[i]
sum += (b - t) ** 2
count += 1
if clear:
self.compPointsBasic = []
self.compPointsTrained = []
self.compare = False
if count == 0:
return 0
else:
return sum / count
class TrainedModel:
def __init__(self, alpha=0.001, model=None, width=256):
self.alpha = alpha
if model == None:
#simple feedforward
inputs = keras.Input(modelInput.inputShape)
x = keras.layers.Dense(width, activation='relu')(inputs)
y = keras.layers.Dense(width, activation='relu')(x)
prediction = keras.layers.Dense(1, activation='sigmoid')(y)
self.model = keras.Model(inputs=inputs, outputs=prediction)
self._compile()
else:
self.model = model
#used for training
self.training = True
self.savedInputs = []
self.savedLabels = []
self.expValueCache = {}
def _compile(self):
self.model.compile(
optimizer=tf.train.AdamOptimizer(self.alpha),
loss='logcosh')
#uses the cached expValue if possible
#otherwise generates it, adds it to cache
def OLDgetExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
if (stateHash, action1, action2) in self.expValueCache:
return self.expValueCache[(stateHash, action1, action2)]
value = self.genExpValue(stateHash, stateObj, action1, action2)
self.expValueCache[(stateHash, action1, action2)] = value
return value
#returns the expected value from the network
def getExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
if bulk_input:
data = [modelInput.toInput(so, a1, a2) for _, so, a1, a2 in bulk_input]
return self.model.predict(np.array(data))
else:
data = modelInput.toInput(stateObj, action1, action2)
return self.model.predict(np.array([data]))[0][0]
#saves the data-label pair for training later
def addReward(self, stateHash, stateObj, action1, action2, reward):
if not self.training:
return
data = modelInput.toInput(stateObj, action1, action2)
self.savedInputs.append(data)
self.savedLabels.append(np.array([reward]))
#trains on all the saved data-label pairs, then removing
def train(self, epochs=1, batch_size=None):
self.model.fit(np.array(self.savedInputs),
np.array(self.savedLabels),
verbose=0,
epochs=epochs,
batch_size=batch_size)
self.savedInputs = []
self.savedLabels = []
self.expValueCache = {}
#this doesn't need to purge, as memory usage doesn't grow much
def purge(self, seenStates):
pass
#Save and load, also saves/loads the idMap from modeInput
#dir should not include a trailing /
def saveModel(self, dir, name):
self.model.save(dir + '/' + name + '-model.h5', include_optimizer=False)
idMapData = pickle.dumps(modelInput.idMap)
with open(dir + '/' + name + '-map.pickle', 'wb') as mapFile:
mapFile.write(idMapData)
def loadModel(self, dir, name):
self.model = keras.models.load_model(dir + '/' + name + '-model.h5', compile=False)
self._compile()
with open(dir + '/' + name + '-map.pickle', 'rb') as mapFile:
idMapData = mapFile.read()
modelInput.idMap = pickle.loads(idMapData)
class BasicModel:
def __init__(self):
self.rewardTable = collections.defaultdict(int)
self.countTable = collections.defaultdict(int)
#log holds a list of (stateHash, stateObj, action1, action2, reward) tuples
        #so these can be written out at some point and analyzed
self.shouldLog = False
self.log = []
#returns the actual average reward for the (s,a,a) tuple
def getExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
if bulk_input:
#have to make this look like it came out of tf
return [[self.getExpValue(*b, bulk_input=None)] for b in bulk_input]
        cumReward = self.rewardTable[(stateHash, action1, action2)]
        count = self.countTable[(stateHash, action1, action2)]
        return None if count == 0 else cumReward / count
    #adds the count and reward for the (s,a,a) tuple
    def addReward(self, stateHash, stateObj, action1, action2, reward):
        if self.shouldLog:
            self.log.append((stateHash, stateObj, action1, action2, reward))
        self.rewardTable[(stateHash, action1, action2)] += reward
        self.countTable[(stateHash, action1, action2)] += 1
#removes information on states that haven't been seen
def purge(self, seenStates):
keys = list(self.rewardTable)
for key in keys:
stateHash = key[0]
if not stateHash in seenStates:
del self.rewardTable[key]
del self.countTable[key]
| 37.298077 | 130 | 0.620907 | 7,284 | 0.938902 | 0 | 0 | 0 | 0 | 0 | 0 | 1,258 | 0.162155 |
1f907167fd216693dde972de5a46db5460599384
| 183 |
py
|
Python
|
src/emuvim/api/util/process_utils.py
|
RafaelSche/vim-emu
|
6503ba9fcbe13ca73c94d318157a1ba78ef26b5b
|
[
"Apache-2.0"
] | 34 |
2016-09-05T06:11:12.000Z
|
2021-12-24T08:45:24.000Z
|
src/emuvim/api/util/process_utils.py
|
RafaelSche/vim-emu
|
6503ba9fcbe13ca73c94d318157a1ba78ef26b5b
|
[
"Apache-2.0"
] | 89 |
2016-07-19T14:14:27.000Z
|
2020-01-09T07:19:45.000Z
|
src/emuvim/api/util/process_utils.py
|
RafaelSche/vim-emu
|
6503ba9fcbe13ca73c94d318157a1ba78ef26b5b
|
[
"Apache-2.0"
] | 32 |
2016-07-19T14:58:06.000Z
|
2020-05-05T13:30:01.000Z
|
import logging
import subprocess
import time
def wait_until(cmd):
logging.debug('waiting for %s\n' % cmd)
while subprocess.call(cmd, shell=True) != 0:
time.sleep(1)
| 18.3 | 48 | 0.677596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.098361 |
1f944de947e6f066710ae464c5f7cd8435c93b21
| 189 |
py
|
Python
|
zesty_metrics/defaults.py
|
Crossway/django-zesty-metrics
|
863532dce1379039e3db99355c90e84ac2288534
|
[
"BSD-3-Clause"
] | 1 |
2015-07-07T19:22:42.000Z
|
2015-07-07T19:22:42.000Z
|
zesty_metrics/defaults.py
|
Crossway/django-zesty-metrics
|
863532dce1379039e3db99355c90e84ac2288534
|
[
"BSD-3-Clause"
] | 4 |
2016-08-01T18:11:18.000Z
|
2018-02-06T18:02:02.000Z
|
zesty_metrics/defaults.py
|
Crossway/django-zesty-metrics
|
863532dce1379039e3db99355c90e84ac2288534
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
ZESTY_TRACKING_CLASSES = [
'zesty_metrics.tracking.UserAccounts',
]
ZESTY_TIMING_SAMPLE_RATE = 1
ZESTY_TIME_RESPONSES = True
ZESTY_TRACK_USER_ACTIVITY = True
| 17.181818 | 42 | 0.756614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.31746 |
1f9508b579771bc7e41b7b6de9c4a49ddf05f51e
| 3,368 |
py
|
Python
|
models/generatorUnet.py
|
ctyler9/cartoon-gan
|
48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8
|
[
"MIT"
] | 177 |
2020-01-31T08:32:07.000Z
|
2022-03-28T02:20:29.000Z
|
models/generatorUnet.py
|
ctyler9/cartoon-gan
|
48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8
|
[
"MIT"
] | 10 |
2020-06-26T04:46:26.000Z
|
2022-02-01T18:17:10.000Z
|
models/generatorUnet.py
|
ctyler9/cartoon-gan
|
48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8
|
[
"MIT"
] | 44 |
2020-03-11T17:21:51.000Z
|
2022-03-16T16:09:22.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_channels, out_channels):
super(Bottleneck, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, in_channels, 1, padding=0, bias=False),
nn.ReLU(inplace=True),
single_conv(in_channels, out_channels, 3),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 1, padding=0, bias=False),
)
def forward(self, x):
return F.relu(self.conv(x) + x, inplace=True)
class Up(nn.Module):
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
Bottleneck(out_channels, out_channels)
)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.pool = nn.Sequential(
nn.AvgPool2d(2, 1),
nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
single_conv(in_channels, out_channels)
)
def forward(self, x):
return self.pool(x)
def single_conv(in_channels, out_channels, ks=3):
return nn.Sequential(
nn.ReflectionPad2d(ks//2),
        nn.Conv2d(in_channels, out_channels, ks, bias=False),
nn.ReLU(inplace=True)
)
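# UNet: 4-level encoder/decoder with residual bottlenecks at the lowest
# resolution and skip connections concatenated on the upsampling path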
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = single_conv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
self.down4 = Down(512, 512)
self.res = nn.Sequential(
Bottleneck(512, 512),
Bottleneck(512, 512),
Bottleneck(512, 512),
)
self.up1 = Up(1024, 256, bilinear)
self.up2 = Up(512, 128, bilinear)
self.up3 = Up(256, 64, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = nn.Conv2d(64, n_classes, 1, padding=0)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x5 = self.res(x5)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
| 32.384615 | 101 | 0.561758 | 3,080 | 0.914489 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.007126 |
1f964a207f38c7145c92fc77855d4848bb25de63
| 1,716 |
py
|
Python
|
app/calc/utility.py
|
sajeeshen/WebCalculatorAPI
|
d951e688e84741cc594877914d292fbddb4e9542
|
[
"MIT"
] | null | null | null |
app/calc/utility.py
|
sajeeshen/WebCalculatorAPI
|
d951e688e84741cc594877914d292fbddb4e9542
|
[
"MIT"
] | null | null | null |
app/calc/utility.py
|
sajeeshen/WebCalculatorAPI
|
d951e688e84741cc594877914d292fbddb4e9542
|
[
"MIT"
] | null | null | null |
import math
from datetime import datetime
AVAILABLE_ACTIONS = [{'action': 'add', 'admin_required': False,
'operator': '+'},
{'action': 'subtract', 'admin_required': False,
'operator': '-'},
{'action': 'multiply', 'admin_required': False,
'operator': '*'},
{'action': 'divide', 'admin_required': False,
'operator': '/'},
{'action': 'power', 'admin_required': True,
'operator': '**'},
{'action': 'sqrt', 'admin_required': True,
'operator': 'sqrt'},
]
def get_available_options(action):
"""
    Go through the available options and return the entries matching the given action
:param action: string
:return: list
"""
return [obj for obj in AVAILABLE_ACTIONS
if obj['action'] == action.lower()]
def do_calculation(action, x, y):
"""
    This function performs the requested calculation on x and y
:param action: string
:param x: int
:param y: int
:return: int ( the result )
"""
    operator = get_available_options(action)[0]['operator']
ops = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x / y if y else 0,
'**': lambda x, y: x ** y,
'sqrt': lambda x, y: math.sqrt(int(x))
}
return ops[operator](int(x), int(y))
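# e.g. do_calculation('add', 2, 3) -> 5; do_calculation('sqrt', 9, 0) -> 3.0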
def get_current_month():
now = datetime.now()
return now.month
def get_current_year():
now = datetime.now()
return now.year
def get_current_date():
return datetime.now().date()
| 28.131148 | 73 | 0.501166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 594 | 0.346154 |
2f06bad44169797de0c1276f26ece53ea110fad2
| 6,009 |
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3 |
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1 |
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
""" API v1 models. """
import logging
from itertools import groupby
from django.db import transaction
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.course_modes.models import CourseMode
from lms.djangoapps.verify_student.models import VerificationDeadline
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
UNDEFINED = object()
class Course:
""" Pseudo-course model used to group CourseMode objects. """
id = None # pylint: disable=invalid-name
modes = None
_deleted_modes = None
def __init__(self, id, modes, **kwargs): # pylint: disable=redefined-builtin
self.id = CourseKey.from_string(str(id)) # pylint: disable=invalid-name
self.modes = list(modes)
self.verification_deadline = UNDEFINED
if 'verification_deadline' in kwargs:
self.verification_deadline = kwargs['verification_deadline']
self._deleted_modes = []
@property
def name(self):
""" Return course name. """
course_id = CourseKey.from_string(str(self.id))
try:
return CourseOverview.get_from_id(course_id).display_name
except CourseOverview.DoesNotExist:
# NOTE (CCB): Ideally, the course modes table should only contain data for courses that exist in
# modulestore. If that is not the case, say for local development/testing, carry on without failure.
log.warning('Failed to retrieve CourseOverview for [%s]. Using empty course name.', course_id)
return None
def get_mode_display_name(self, mode):
""" Returns display name for the given mode. """
slug = mode.mode_slug.strip().lower()
if slug == 'credit':
return 'Credit'
if 'professional' in slug:
return 'Professional Education'
elif slug == 'verified':
return 'Verified Certificate'
elif slug == 'honor':
return 'Honor Certificate'
elif slug == 'audit':
return 'Audit'
return mode.mode_slug
@transaction.atomic
def save(self, *args, **kwargs): # pylint: disable=unused-argument
""" Save the CourseMode objects to the database. """
if self.verification_deadline is not UNDEFINED:
# Override the verification deadline for the course (not the individual modes)
# This will delete verification deadlines for the course if self.verification_deadline is null
VerificationDeadline.set_deadline(self.id, self.verification_deadline, is_explicit=True)
for mode in self.modes:
mode.course_id = self.id
mode.mode_display_name = self.get_mode_display_name(mode)
mode.save()
deleted_mode_ids = [mode.id for mode in self._deleted_modes]
CourseMode.objects.filter(id__in=deleted_mode_ids).delete()
self._deleted_modes = []
def update(self, attrs):
""" Update the model with external data (usually passed via API call). """
# There are possible downstream effects of settings self.verification_deadline to null,
# so don't assign it a value here unless it is specifically included in attrs.
if 'verification_deadline' in attrs:
self.verification_deadline = attrs.get('verification_deadline')
existing_modes = {mode.mode_slug: mode for mode in self.modes}
merged_modes = set()
merged_mode_keys = set()
for posted_mode in attrs.get('modes', []):
merged_mode = existing_modes.get(posted_mode.mode_slug, CourseMode())
merged_mode.course_id = self.id
merged_mode.mode_slug = posted_mode.mode_slug
merged_mode.mode_display_name = posted_mode.mode_slug
merged_mode.min_price = posted_mode.min_price
merged_mode.currency = posted_mode.currency
merged_mode.sku = posted_mode.sku
merged_mode.bulk_sku = posted_mode.bulk_sku
merged_mode.expiration_datetime = posted_mode.expiration_datetime
merged_mode.save()
merged_modes.add(merged_mode)
merged_mode_keys.add(merged_mode.mode_slug)
# Masters degrees are not sold through the eCommerce site.
# So, Masters course modes are not included in PUT calls to this API,
# and their omission which would normally cause them to be deleted.
# We don't want that to happen, but for the time being,
# we cannot include in Masters modes in the PUT calls from eCommerce.
# So, here's hack to handle Masters course modes, along with any other
# modes that end up in that boat.
MODES_TO_NOT_DELETE = {
CourseMode.MASTERS,
}
modes_to_delete = set(existing_modes.keys()) - merged_mode_keys
modes_to_delete -= MODES_TO_NOT_DELETE
self._deleted_modes = [existing_modes[mode] for mode in modes_to_delete]
self.modes = list(merged_modes)
@classmethod
def get(cls, course_id):
""" Retrieve a single course. """
try:
course_id = CourseKey.from_string(str(course_id))
except InvalidKeyError:
log.debug('[%s] is not a valid course key.', course_id)
raise ValueError # lint-amnesty, pylint: disable=raise-missing-from
course_modes = CourseMode.objects.filter(course_id=course_id)
if course_modes:
verification_deadline = VerificationDeadline.deadline_for_course(course_id)
return cls(course_id, list(course_modes), verification_deadline=verification_deadline)
return None
@classmethod
def iterator(cls):
""" Generator that yields all courses. """
course_modes = CourseMode.objects.order_by('course_id')
for course_id, modes in groupby(course_modes, lambda o: o.course_id):
yield cls(course_id, list(modes))
| 40.328859 | 112 | 0.669496 | 5,546 | 0.922949 | 258 | 0.042936 | 2,364 | 0.39341 | 0 | 0 | 1,830 | 0.304543 |
2f082e2906c7c51226d4204e5140aa52273e420e
| 984 |
py
|
Python
|
model_code/grid_search/DecisionTreeClassifier.py
|
lacava/sklearn-benchmarks
|
bec1d5468f40b1fea08b605a11d5f7795fe5bb1b
|
[
"MIT"
] | 213 |
2016-02-03T02:56:40.000Z
|
2022-02-26T06:44:27.000Z
|
model_code/grid_search/DecisionTreeClassifier.py
|
lacava/sklearn-benchmarks
|
bec1d5468f40b1fea08b605a11d5f7795fe5bb1b
|
[
"MIT"
] | 30 |
2016-02-03T14:32:27.000Z
|
2020-05-12T17:32:40.000Z
|
model_code/grid_search/DecisionTreeClassifier.py
|
arunsinghyadav/sklearn-benchmarks
|
a917336f6fd3ffb89efd94b1c7f60b3a05ba780f
|
[
"MIT"
] | 59 |
2016-02-03T14:32:58.000Z
|
2021-01-12T23:48:46.000Z
|
import sys
import pandas as pd
import numpy as np
import itertools
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
pipeline_components = [RobustScaler, DecisionTreeClassifier]
pipeline_parameters = {}
min_impurity_decrease_values = np.arange(0., 0.005, 0.00025)
max_features_values = [0.1, 0.25, 0.5, 0.75, 'sqrt', 'log2', None]
criterion_values = ['gini', 'entropy']
random_state = [324089]
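# build one parameter dict per combination of the hyperparameter grids above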
all_param_combinations = itertools.product(min_impurity_decrease_values, max_features_values, criterion_values, random_state)
pipeline_parameters[DecisionTreeClassifier] = \
[{'min_impurity_decrease': min_impurity_decrease, 'max_features': max_features, 'criterion': criterion, 'random_state': random_state}
for (min_impurity_decrease, max_features, criterion, random_state) in all_param_combinations]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| 39.36 | 136 | 0.813008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.090447 |
2f0914ec0565214e9bbc4b09ca688ebda76940dd
| 3,428 |
py
|
Python
|
training_v1_backup/training/PPO/run_ppo.py
|
prasoonpatidar/multiagentRL-resource-sharing
|
e63ba7fc3c7ab019e9fd109cd45b739e3322152f
|
[
"MIT"
] | null | null | null |
training_v1_backup/training/PPO/run_ppo.py
|
prasoonpatidar/multiagentRL-resource-sharing
|
e63ba7fc3c7ab019e9fd109cd45b739e3322152f
|
[
"MIT"
] | null | null | null |
training_v1_backup/training/PPO/run_ppo.py
|
prasoonpatidar/multiagentRL-resource-sharing
|
e63ba7fc3c7ab019e9fd109cd45b739e3322152f
|
[
"MIT"
] | null | null | null |
'''
Wrapper function to run PPO algorithm for training
'''
import numpy as np
import matplotlib.pyplot as plt
import time
import math
import logging
from scipy.optimize import minimize, LinearConstraint
# custom libraries
from training.PPO.run_helper import buyerPenaltiesCalculator, buyerUtilitiesCalculator, evaluation
from training.PPO.run_helper import logger_handle, initialize_agent, get_ys, choose_prob, cumlativeBuyerExp, getPurchases
def learn_policy(run_config, seller_info, buyer_info, train_config, logger_pass):
# Initialize the logger
logger = logger_handle(logger_pass)
# get required parameters for WolFPHC algorithm
aux_price_min = 1 / seller_info.max_price
aux_price_max = 1 / seller_info.min_price
logger.info("Fetched raw market information..")
# initialize seller agents
sellers, logger = initialize_agent(seller_info, buyer_info, train_config, logger)
# Get Containers to record history(Interesting insight: append in python list is O(1))
price_history = []
purchase_history = []
provided_resource_history = []
seller_utility_history = []
seller_penalty_history = []
buyer_utility_history = []
buyer_penalty_history = []
# Start Loop for training
logger.info("Starting training iterations...")
start_time = time.time()
for train_iter in range(0, train_config.iterations):
if train_iter % 1000 == 0:
logger.info("Finished %d training iterations in %.3f secs..." % (train_iter, time.time() - start_time))
# get the prices for all seller agents
ys = get_ys(sellers, train_config, seller_info)
# print(ys, '==', train_iter)
probAll, yAll = choose_prob(ys, compare=False, yAll=None)
# Save prices in history
prices = 1 / ys
price_history.append(prices)
cumulativeBuyerExperience = cumlativeBuyerExp(buyer_info, sellers)
X = getPurchases(buyer_info, cumulativeBuyerExperience, ys, probAll)
# Save purchased history
purchases = X.sum(axis=0)
purchase_history.append(purchases)
# Get Buyer utilities and penalties in history
buyerUtilities = buyerUtilitiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, probAll,
buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_utility_history.append(buyerUtilities)
buyerPenalties = buyerPenaltiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_penalty_history.append(buyerPenalties)
# loop parameters
lr = 1 / (20 + train_iter)
seller_utilities, seller_penalties, seller_provided_resources = evaluation(sellers, train_config, yAll, X, lr, train=True)
# Get seller utilties and penalties in history
seller_utilities = np.array(seller_utilities)
seller_penalties = np.array(seller_penalties)
seller_utility_history.append(seller_utilities)
seller_penalty_history.append(seller_penalties)
# update provided resources history
seller_provided_resources = np.array(seller_provided_resources)
provided_resource_history.append(seller_provided_resources)
...
| 38.516854 | 130 | 0.698658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.191949 |
2f093dab61a4920e6658955efc331ab3c70a322c
| 850 |
py
|
Python
|
tests/custom/test_clean_dateTime.py
|
arkhn/cleaning-scripts
|
ffe88598b476b2e6b53fd06e8ce6092ef0351b19
|
[
"Apache-2.0"
] | 9 |
2019-03-31T03:46:51.000Z
|
2020-05-20T13:05:06.000Z
|
tests/custom/test_clean_dateTime.py
|
arkhn/cleaning-scripts
|
ffe88598b476b2e6b53fd06e8ce6092ef0351b19
|
[
"Apache-2.0"
] | 18 |
2019-09-11T09:19:45.000Z
|
2021-07-13T09:16:23.000Z
|
tests/custom/test_clean_dateTime.py
|
arkhn/cleaning-scripts
|
ffe88598b476b2e6b53fd06e8ce6092ef0351b19
|
[
"Apache-2.0"
] | 2 |
2019-09-18T15:20:10.000Z
|
2021-07-25T06:46:57.000Z
|
import pytest
from scripts.custom import clean_dateTime
@pytest.mark.parametrize(
"test_input,expected",
[
("2015", "2015"),
("2015-02", "2015-02"),
("201502", "2015-02"),
("2015-02-07", "2015-02-07"),
("20150207", "2015-02-07"),
("2015-02-07T13:28:17", "2015-02-07T13:28:17+02:00"),
("2015-02-07 13:28:17", "2015-02-07T13:28:17+02:00"),
("2015-02-07T13:28:17+05:00", "2015-02-07T13:28:17+05:00"),
("2015-02-07T13:28:17-05:00", "2015-02-07T13:28:17-05:00"),
("Wed, 13 Mar 2075 00:00:00 GMT", "2075-03-13T00:00:00+00:00"),
("201502071740", "2015-02-07T17:40:00+02:00"),
("", ""),
("0010-04-30", "0010-04-30"),
],
)
def test_clean_dateTime(test_input, expected):
output = clean_dateTime(test_input)
assert output == expected
| 31.481481 | 71 | 0.555294 | 0 | 0 | 0 | 0 | 790 | 0.929412 | 0 | 0 | 445 | 0.523529 |
2f0957f3db94b5ef71452361a51b110a5a627030
| 14,927 |
py
|
Python
|
mlprogram/entrypoint/train.py
|
HiroakiMikami/mlprogram
|
573e94c567064705fa65267dd83946bf183197de
|
[
"MIT"
] | 9 |
2020-05-24T11:25:01.000Z
|
2022-03-28T15:32:10.000Z
|
mlprogram/entrypoint/train.py
|
HiroakiMikami/mlprogram
|
573e94c567064705fa65267dd83946bf183197de
|
[
"MIT"
] | 87 |
2020-05-09T08:56:55.000Z
|
2022-03-31T14:46:45.000Z
|
mlprogram/entrypoint/train.py
|
HiroakiMikami/NL2Prog
|
573e94c567064705fa65267dd83946bf183197de
|
[
"MIT"
] | 3 |
2021-02-22T20:38:29.000Z
|
2021-11-11T18:48:44.000Z
|
import os
import traceback
from dataclasses import dataclass
from typing import Any, Callable, List, Optional, Union
import pytorch_pfn_extras as ppe
import torch
from pytorch_pfn_extras.training import extension, extensions
from torch import nn
from torch.utils.data import DataLoader
from mlprogram import distributed, logging
from mlprogram.builtins import Environment
from mlprogram.pytorch_pfn_extras import SaveTopKModel, StopByThreshold
from mlprogram.synthesizers import Synthesizer
logger = logging.Logger(__name__)
@dataclass
class Epoch:
n: int
def n_iter(self, iter_per_epoch: int) -> int:
return self.n * iter_per_epoch
@dataclass
class Iteration:
n: int
def n_iter(self, iter_per_epoch: int) -> int:
return self.n
Length = Union[Epoch, Iteration]
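# Trigger: fires every `interval` iterations and once more at the final iteration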
class Trigger:
def __init__(self, interval: int, n_iter: int):
self.interval = interval
self.n_iter = n_iter
def __call__(self, manager):
return (manager.iteration == self.n_iter) or \
(manager.iteration % self.interval == 0)
class Call(extension.Extension):
def __init__(self, f: Callable[[], None]):
super().__init__()
self.f = f
def __call__(self, manager):
self.f()
def create_extensions_manager(n_iter: int, evaluation_interval_iter: int,
snapshot_interval_iter: int,
iter_per_epoch: int,
model: nn.Module,
optimizer: torch.optim.Optimizer,
evaluate: Optional[Callable[[], None]],
metric: str, maximize: bool,
threshold: Optional[float],
output_dir: str,
report_metrics: Optional[List[str]] = None):
model_dir = os.path.join(output_dir, "model")
logger.info("Prepare pytorch-pfn-extras")
manager = ppe.training.ExtensionsManager(
model, optimizer, n_iter / iter_per_epoch,
out_dir=os.path.join(output_dir),
extensions=[],
iters_per_epoch=iter_per_epoch,
)
manager.extend(
extensions.FailOnNonNumber(),
trigger=Trigger(evaluation_interval_iter, n_iter)
)
if evaluate is not None:
manager.extend(
Call(evaluate),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
if distributed.is_main_process():
manager.extend(
extensions.LogReport(
trigger=Trigger(100, n_iter),
filename="log.json",
)
)
manager.extend(extensions.ProgressBar())
manager.extend(
SaveTopKModel(model_dir, 1, metric, model, maximize=maximize),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
metrics = report_metrics or []
manager.extend(
extensions.PrintReport(entries=[
"loss", *metrics,
"iteration", "epoch",
"time.iteration", "gpu.time.iteration", "elapsed_time"
]),
trigger=Trigger(100, n_iter),
)
if threshold is not None:
manager.extend(
StopByThreshold(metric, threshold, maximize=maximize),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
if distributed.is_initialized():
snapshot = extensions.snapshot(autoload=True, n_retains=1,
saver_rank=0)
snapshot._rank = distributed.rank()
snapshot._size = distributed.size()
snapshot._local_rank = distributed.rank()
else:
snapshot = extensions.snapshot(autoload=True, n_retains=1)
manager.extend(snapshot, trigger=Trigger(snapshot_interval_iter, n_iter))
return manager
def create_dataloader(dataset: torch.utils.data.Dataset,
batch_size: int, n_worker: int, collate_fn: Callable) \
-> torch.utils.data.DataLoader:
if hasattr(dataset, "__len__"):
is_iterable = False
else:
is_iterable = True
if is_iterable:
return DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=n_worker,
collate_fn=collate_fn)
else:
return DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=n_worker,
collate_fn=collate_fn)
def get_world_process_group(device: torch.device) \
-> Optional[torch.distributed.group]:
if not distributed.is_initialized():
return None
else:
if device.type == "cuda":
return distributed.groups["world_nccl"]
else:
return distributed.groups["world_gloo"]
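# Wrap the model and its loss into a single module so the combined forward pass
# can be mirrored by DistributedDataParallel when a process group is available.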
def setup_distributed_training(
model: nn.Module,
loss: nn.Module,
group: torch.distributed.group
):
class TrainModule(nn.Module):
def __init__(self, model: nn.Module, loss: nn.Module):
super().__init__()
self.model = model
self.loss = loss
def forward(self, *args, **kwargs):
return self.loss(self.model(*args, **kwargs))
model = TrainModule(model, loss)
if group is None:
return model
else:
return ppe.nn.parallel.distributed.DistributedDataParallel(
module=model,
process_group=group,
)
def save_results(output_dir: str,
model: nn.Module, optimizer: torch.optim.Optimizer) -> None:
if distributed.is_main_process():
logger.info("Dump the last model")
torch.save(model.state_dict(), os.path.join(output_dir, "model.pt"))
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
def train_supervised(output_dir: str,
dataset: torch.utils.data.Dataset,
model: nn.Module,
optimizer: torch.optim.Optimizer,
loss: Callable[[Any], torch.Tensor],
evaluate: Optional[Callable[[], None]],
metric: str,
collate: Callable[[List[Any]], Any],
batch_size: int,
length: Length,
evaluation_interval: Optional[Length] = None,
snapshot_interval: Optional[Length] = None,
maximize: bool = True,
threshold: Optional[float] = None,
n_dataloader_worker: int = 1,
device: torch.device = torch.device("cpu")) \
-> None:
logger.info("Prepare model")
model.to(device)
model.train()
group = get_world_process_group(device)
global_batch_size = batch_size * distributed.size(group)
if hasattr(dataset, "__len__"):
iter_per_epoch = len(dataset) // global_batch_size
else:
iter_per_epoch = 1
evaluation_interval = evaluation_interval or Epoch(1)
snapshot_interval = snapshot_interval or Epoch(1)
n_iter = length.n_iter(iter_per_epoch)
evaluation_interval_iter = evaluation_interval.n_iter(iter_per_epoch)
snapshot_interval_iter = snapshot_interval.n_iter(iter_per_epoch)
# Initialize extensions manager
manager = \
create_extensions_manager(
n_iter, evaluation_interval_iter, snapshot_interval_iter,
iter_per_epoch,
model, optimizer,
evaluate, metric, maximize, threshold, output_dir)
train_model = setup_distributed_training(model, loss, group)
logger.info("Start training")
try:
while manager.iteration < n_iter:
loader = create_dataloader(dataset, batch_size, n_dataloader_worker,
collate)
for batch in logger.iterable_block("iteration", loader, True):
if manager.iteration >= n_iter:
break
if len(batch.to_dict()) == 0:
logger.warning(f"Skip {manager.iteration} th batch")
continue
with manager.run_iteration():
train_model.train()
with logger.block("to"):
batch.to(device=device)
with logger.block("forward"):
bloss = train_model(batch)
with logger.block("backward"):
optimizer.zero_grad(set_to_none=True)
bloss.backward()
with logger.block("optimizer.step"):
optimizer.step()
ppe.reporting.report({"loss": bloss.item()})
logger.dump_elapsed_time_log()
if device.type == "cuda":
ppe.reporting.report({
"gpu.max_memory_allocated":
torch.cuda.max_memory_allocated(device)
})
except RuntimeError as e: # noqa
logger.critical(traceback.format_exc())
save_results(output_dir, model, optimizer)
def train_REINFORCE(input_dir: str, output_dir: str,
dataset: torch.utils.data.Dataset,
synthesizer: Synthesizer,
model: nn.Module,
optimizer: torch.optim.Optimizer,
loss: Callable[[Any], torch.Tensor],
evaluate: Optional[Callable[[], None]],
metric: str,
reward: Callable[[Environment, Any], float],
collate: Callable[[List[Any]], Any],
batch_size: int,
n_rollout: int,
length: Length,
evaluation_interval: Optional[Length] = None,
snapshot_interval: Optional[Length] = None,
maximize: bool = True,
threshold: Optional[float] = None,
use_pretrained_model: bool = False,
use_pretrained_optimizer: bool = False,
n_dataloader_worker: int = 2,
device: torch.device = torch.device("cpu")) \
-> None:
logger.info("Prepare model")
model.to(device)
model.train()
group = get_world_process_group(device)
if hasattr(dataset, "__len__"):
iter_per_epoch = len(dataset) // batch_size
else:
iter_per_epoch = 1
evaluation_interval = evaluation_interval or Epoch(1)
snapshot_interval = snapshot_interval or Epoch(1)
n_iter = length.n_iter(iter_per_epoch)
evaluation_interval_iter = evaluation_interval.n_iter(iter_per_epoch)
snapshot_interval_iter = snapshot_interval.n_iter(iter_per_epoch)
if use_pretrained_model:
logger.info("Load pretrained model")
pretrained_model = os.path.join(input_dir, "model.pt")
state_dict = torch.load(pretrained_model,
map_location=torch.device("cpu"))
model.load_state_dict(state_dict)
if use_pretrained_optimizer:
logger.info("Load pretrained optimizer")
pretrained_optimizer = os.path.join(input_dir, "optimizer.pt")
state_dict = torch.load(pretrained_optimizer,
map_location=torch.device("cpu"))
optimizer.load_state_dict(state_dict)
# Initialize extensions manager
manager = \
create_extensions_manager(
n_iter, evaluation_interval_iter, snapshot_interval_iter,
iter_per_epoch,
model, optimizer,
evaluate, metric, maximize, threshold, output_dir,
report_metrics=["reward"])
train_model = setup_distributed_training(model, loss, group)
logger.info("Start training")
try:
while manager.iteration < n_iter:
loader = create_dataloader(dataset, batch_size, n_dataloader_worker,
lambda x: x)
for samples in logger.iterable_block("iteration", loader, True):
if manager.iteration >= n_iter:
break
# Rollout
rollouts = []
train_model.train()
with torch.no_grad():
for sample in logger.iterable_block("rollout", samples):
sample_inputs = sample.clone_without_supervision()
sample_inputs.to(device)
for rollout in logger.iterable_block(
"sample",
synthesizer(sample_inputs,
n_required_output=n_rollout)):
if not rollout.is_finished:
continue
for _ in range(rollout.num):
output = sample.clone()
output["ground_truth"] = rollout.output
output.mark_as_supervision("ground_truth")
output["reward"] = \
torch.tensor(reward(sample.clone(), rollout.output))
rollouts.append(output)
if len(rollouts) == 0:
logger.warning("No rollout")
continue
if len(rollouts) != n_rollout:
logger.warning(
"#rollout is unexpected: "
f"expected={n_rollout} actual={len(rollouts)}")
with manager.run_iteration():
model.train()
with logger.block("collate"):
batch2 = collate(rollouts)
with logger.block("to"):
batch2.to(device)
with logger.block("forward"):
train_model.train()
bloss = train_model(batch2)
with logger.block("backward"):
optimizer.zero_grad(set_to_none=True)
bloss.backward()
with logger.block("optimizer.step"):
optimizer.step()
ppe.reporting.report({"loss": bloss.item()})
ppe.reporting.report({
"reward": batch2["reward"].float().mean().item()
})
logger.dump_elapsed_time_log()
if device.type == "cuda":
ppe.reporting.report({
"gpu.max_memory_allocated":
torch.cuda.max_memory_allocated(device)
})
except RuntimeError as e: # noqa
logger.critical(traceback.format_exc())
save_results(output_dir, model, optimizer)
| 37.599496 | 88 | 0.554231 | 945 | 0.063308 | 0 | 0 | 235 | 0.015743 | 0 | 0 | 842 | 0.056408 |
2f09b816cae5d16accf1cca62376da23fd995e52
| 3,381 |
py
|
Python
|
visualization.py
|
aditya-srikanth/Data-Mining-Assignment-3
|
7dc44d7ca8884680130db9b52a75e3036cf2f8a7
|
[
"MIT"
] | null | null | null |
visualization.py
|
aditya-srikanth/Data-Mining-Assignment-3
|
7dc44d7ca8884680130db9b52a75e3036cf2f8a7
|
[
"MIT"
] | null | null | null |
visualization.py
|
aditya-srikanth/Data-Mining-Assignment-3
|
7dc44d7ca8884680130db9b52a75e3036cf2f8a7
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import math
import numpy as np
class Visualization:
"""
This class contains methods for reducing the dimensions of the points to 2-D and
visualization of the reduced points.
Attributes
----------
OUTLIERS : list
List of points marked as outliers.
NON_OUTLIERS : list
List of points that are not marked as outliers.
"""
def __init__(self):
self.OUTLIERS = []
self.NON_OUTLIERS = []
self.K = 1
def dimension_reduction(self, point):
"""
This method is used for reducing the dimensions of the given point to 2-D.
Parameters
----------
point : list
A list of coordinates representing an n-dimensional vector.
Returns
-------
type list
A list representing a 2-D point in the x-y plane.
"""
temp_point = []
reduced_point = [0,0]
index = 1
for element in point:
if not math.isnan(element % index):
# Using modulo operation to spread values of coordinates.
temp_point.append(element % index)
index = index + 1
for element in temp_point:
# The modulo results are distributed among the two coordinates according to
            # their divisibility by 2.
if element % 2 == 0:
reduced_point[1] = reduced_point[1] + element
else:
reduced_point[0] = reduced_point[0] + element
reduced_point[0] = round(reduced_point[0], 2)
reduced_point[1] = round(reduced_point[1], 2)
return reduced_point
def outlier_plot(self,save_path=None):
"""
        This method takes the points marked as outliers and non-outliers and plots them as
a scatter plot.
Returns
-------
None
The result of this method is a matplotlib scatter plot.
"""
for element in self.OUTLIERS:
            plt.scatter(element[0], element[1], facecolors='none', edgecolors='r', marker='o')
for element in self.NON_OUTLIERS:
plt.scatter(element[0], element[1], facecolors='none', edgecolors='b', marker = 'o')
plt.xlabel("K = " + str(self.K))
if save_path != None:
plt.savefig(save_path+'.png')
else:
plt.show()
def outlier_plot_numpy(self,save_path=None):
"""
        This method takes the points marked as outliers and non-outliers and plots them as
a scatter plot.
Returns
-------
None
The result of this method is a matplotlib scatter plot.
"""
if len(self.OUTLIERS) > 0:
self.OUTLIERS = np.array(self.OUTLIERS)
            plt.scatter(self.OUTLIERS[:,0], self.OUTLIERS[:,1], facecolors='none', edgecolors='r', marker='o')
if len(self.NON_OUTLIERS) > 0:
self.NON_OUTLIERS = np.array(self.NON_OUTLIERS)
plt.scatter(self.NON_OUTLIERS[:,0], self.NON_OUTLIERS[:,1], facecolors='none', edgecolors='b', marker = 'o')
# plt.xlabel("K = " + str(self.K))
if save_path != None:
plt.savefig(save_path+'.png')
else:
plt.show()
| 34.85567 | 121 | 0.55102 | 3,313 | 0.979888 | 0 | 0 | 0 | 0 | 0 | 0 | 1,437 | 0.425022 |
2f0b0a77f9fa1f45efa368882434f52b3044f388
| 322 |
py
|
Python
|
20211001_PythonIntro/ex2/ex2.py
|
alessandro-massarenti/Cybersec2021
|
3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0
|
[
"BSD-3-Clause"
] | 15 |
2021-10-01T16:10:48.000Z
|
2022-02-19T20:45:35.000Z
|
20211001_PythonIntro/ex2/ex2.py
|
alessandro-massarenti/Cybersec2021
|
3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0
|
[
"BSD-3-Clause"
] | null | null | null |
20211001_PythonIntro/ex2/ex2.py
|
alessandro-massarenti/Cybersec2021
|
3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0
|
[
"BSD-3-Clause"
] | 2 |
2021-11-06T08:32:41.000Z
|
2021-12-11T16:18:54.000Z
|
from operator import add, itruediv, mul, sub
ops = [add, sub, mul, itruediv]
a = float(input("Enter a number: "))
b = float(input("Enter another number: "))
op = int(
    input("Enter an operator (0 for addition, 1 for subtraction, 2 for multiplication, or 3 for division): ")
)
print(ops[op](a, b))
| 29.272727 | 119 | 0.695652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.496894 |
2f0df6e28987fcaa913b236b22575fcae954bfe4
| 3,639 |
py
|
Python
|
robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
from robot.api.parsing import ModelTransformer, get_model, ModelVisitor, Token
import os, sys
keywordlist = []
other_keywords = []
used_keywords = []
class ext_ExtraIndentForKeywordArguments(ModelTransformer):
def __init__(self):
self.cont = 0
def visit_File(self, node):
# Get keywords in python libraries
for path in sys.path:
if 'site-packages' in path:
goodpath = path
for path, subdirs, files in os.walk(goodpath.replace('\\', '\\\\')):
for name in files:
if '.py' in name and '.pyc' not in name and '_init_' not in name and ('robot' in path or 'wslw' in path or 'gurux' in path):
# print(os.path.join(path, name))
with open(os.path.join(path, name), 'r', errors='ignore') as f:
for line in f.readlines():
if 'def' == line.lstrip()[0:3] and '__init__' not in line:
# print(line.split('def')[1].split('(')[0].lstrip().rstrip())
other_keywords.append(line.split('def')[1].split('(')[0].lstrip().rstrip().lower().replace('_', ' '))
# Get keywords in resource files
for path, subdirs, files in os.walk(os.getcwd().replace('in_dev', 'keywords').replace('\\', '\\\\')):
for name in files:
if('.robot' in name):
# print(os.path.join(path, name))
model = get_model(os.path.join(path, name))
printer = TestNamePrinter()
printer.visit(model)
# Get keywords in the Keywords section
model = get_model(node.source)
printer = TestNamePrinter()
printer.visit(model)
# Get keywords used in the test
model = get_model(node.source)
printer = KeywordsNamePrinter()
printer.visit(model)
self.generic_visit(node)
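    # prefix ARGUMENT tokens with 4 extra spaces per nesting level; arguments that
    # are themselves keyword names sit one level shallower than their own arguments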
def visit_KeywordCall(self, node):
keywords_name = [sec[0].value for sec in used_keywords]
for token in node.data_tokens:
for i, sec in enumerate(used_keywords[:-1]):
if token.lineno >= sec[1] and token.lineno < used_keywords[i + 1][1]:
                    # print(repr(token) + ' goes with section: ' + sec[0].value + ' and indent_level: ' + str(sec[3]))
if token.type == Token.ARGUMENT and token.value in keywords_name:
token.value = ' ' * 4*(sec[3] - 1) + token.value
elif token.type == Token.ARGUMENT and token.value not in keywords_name:
token.value = ' ' * 4*(sec[3]) + token.value
return node
class TestNamePrinter(ModelVisitor):
def visit_KeywordName(self, node):
# print(node.name)
keywordlist.append(node.name.lower())
class KeywordsNamePrinter(ModelVisitor):
def visit_KeywordCall(self, node):
for token in node.data_tokens:
if((token.value.lower() in keywordlist or token.value.lower() in other_keywords) and token.type == Token.KEYWORD):
used_keywords.append([token, token.lineno, True, 0])
                # print(repr(token) + ' IS A RECOGNIZED KEYWORD')
elif((token.value.lower() in keywordlist or token.value.lower() in other_keywords) and token.type == Token.ARGUMENT):
extra_indent_level = used_keywords[-1][3] + 1
used_keywords.append([token, token.lineno, False, extra_indent_level])
                # print(repr(token) + ' IS AN UNRECOGNIZED KEYWORD' + ' extra_indent_level: ' + str(used_keywords[-1][3]))
| 50.541667 | 140 | 0.569387 | 3,466 | 0.952459 | 0 | 0 | 0 | 0 | 0 | 0 | 674 | 0.185216 |
2f0e2ccc0b7fb78f69f72c37d56b7289930132ef
| 6,581 |
py
|
Python
|
Common/Strategies/TechIndicators/MacdStrategy.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 2 |
2020-03-04T11:18:38.000Z
|
2020-05-10T15:36:42.000Z
|
Common/Strategies/TechIndicators/MacdStrategy.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 6 |
2020-03-30T16:42:47.000Z
|
2021-12-13T20:37:21.000Z
|
Common/Strategies/TechIndicators/MacdStrategy.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 1 |
2020-04-14T11:26:16.000Z
|
2020-04-14T11:26:16.000Z
|
from typing import Tuple
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from Common.Strategies.TechIndicators.AbstractTechStrategy import AbstractTechStrategy
from Common.TechIndicators.MacdIndicator import MacdIndicator
class MacdStrategy(AbstractTechStrategy):
_macd_indicator: MacdIndicator
_summary: pd.DataFrame
def __init__(self, macd_indicator: MacdIndicator):
self._macd_indicator = macd_indicator
a_df: pd.DataFrame = self._macd_indicator.GetData()
self._col = self._macd_indicator.Column
self._lower_label = a_df.columns[self._macd_indicator.LowMedHighTuple[0]]
#
self._upper_label = a_df.columns[self._macd_indicator.LowMedHighTuple[1]]
self._data = a_df[self._macd_indicator.Column].to_frame()
self._data[self._lower_label] = a_df[self._lower_label]
#
self._data[self._upper_label] = a_df[self._upper_label]
self._buy_label = self._macd_indicator.Label + self._buy_label
self._sell_label = self._macd_indicator.Label + self._sell_label
buyNsellTuple = self._buyNsell()
self._data[self._buy_label] = buyNsellTuple[0]
self._data[self._sell_label] = buyNsellTuple[1]
print('DATA', self._data.columns)
self._setSummary()
@property
def Summary(self):
return self._summary
def PlotAx(self, ax: object) -> object:
for a_ind, col in enumerate(self._data.columns[0:1]):
an_alpha: float = 1.0 if a_ind == 0 else 0.3
self._data[col].plot(alpha=an_alpha, ax=ax)
ax.scatter(self._macd_indicator.GetData().index, self._data[self._buy_label], label=self._buy_label, marker='^',
color='green')
ax.scatter(self._macd_indicator.GetData().index, self._data[self._sell_label], label=self._sell_label,
marker='v', color='red')
return ax
def Plot(self) -> plt:
plt.figure(figsize=self._macd_indicator.FigSizeTuple)
plt.style.use(self._macd_indicator.FigStyle)
for a_ind, col in enumerate(self._data.columns[0:1]):
an_alpha: float = 1.0 if a_ind == 0 else 0.3
self._data[col].plot(alpha=an_alpha)
print('i', an_alpha)
plt.scatter(self._macd_indicator.GetData().index, self._data[self._buy_label], label=self._buy_label,
marker='^', color='green')
plt.scatter(self._macd_indicator.GetData().index, self._data[self._sell_label], label=self._sell_label,
marker='v', color='red')
plt.title(self._macd_indicator.LabelMain)
plt.xlabel(self._macd_indicator.LabelX)
plt.xticks(rotation=self._macd_indicator.LabelXangle)
plt.ylabel(self._macd_indicator.LabelY)
plt.legend(loc=self._macd_indicator.LegendPlace)
plt.tight_layout()
return plt
def PlotAll(self) -> plt:
n_col: int = 1
n_row: int = 3
a_title: str = self._macd_indicator.LabelMain
x_title: str = self._macd_indicator.LabelX
y_title: str = self._macd_indicator.LabelY
f_size: Tuple[float, float] = (self._macd_indicator.FigSizeTuple[0], self._macd_indicator.FigSizeTuple[0])
fig, ax = plt.subplots(n_row, n_col, figsize=f_size, sharex=True)
plt.style.use(self._macd_indicator.FigStyle)
# ax0 strategy
for a_ind, col in enumerate(self._data.columns[0:1]):
an_alpha: float = 1.0 if a_ind == 0 else 0.3
ax[0].plot(self._data[col], alpha=an_alpha, label=col)
ax[0].scatter(self._macd_indicator.GetData().index, self._data[self._buy_label], marker='^', color='green',
label=self._buy_label)
ax[0].scatter(self._macd_indicator.GetData().index, self._data[self._sell_label], marker='v', color='red',
label=self._sell_label)
ax[0].set(ylabel=y_title, title=a_title)
ax[0].legend(loc=self._macd_indicator.LegendPlace)
# ax1 index
for a_ind, col in enumerate(self._macd_indicator.GetData().columns[-2:self._macd_indicator.GetData().columns.size]):
an_alpha: float = 0.5 if a_ind != 0 else 1.0
ax[1].plot(self._macd_indicator.GetData()[col], alpha=an_alpha, label=col)
#ax[1].xaxis.set_tick_params(rotation=self._macd_indicator.LabelXangle)
ax[1].set(ylabel='Index')
ax[1].legend(loc=self._macd_indicator.LegendPlace)
# ax2
ax[2].plot(self._summary, alpha=an_alpha)
ax[2].legend(loc=self._macd_indicator.LegendPlace)
ax[2].xaxis.set_tick_params(rotation=self._macd_indicator.LabelXangle)
ax[2].set(ylabel='Buy & Sell', xlabel=x_title)
plt.tight_layout()
return plt
def _buyNsell(self):
buySignal = []
sellSignal = []
flag = -1
for i in range(len(self._data)):
if self._data[self._lower_label][i] > self._data[self._upper_label][i]:
sellSignal.append(np.nan)
if flag != 1:
buySignal.append(self._data[self._col][i])
flag = 1
else:
buySignal.append(np.nan)
elif self._data[self._lower_label][i] < self._data[self._upper_label][i]:
buySignal.append(np.nan)
if flag != 0:
sellSignal.append(self._data[self._col][i])
flag = 0
else:
sellSignal.append(np.nan)
else:
buySignal.append(np.nan)
sellSignal.append(np.nan)
return buySignal, sellSignal
def _setSummary(self):
self._summary = pd.DataFrame(index=self._data.index)
self._summary['Buy'] = self._data[self._buy_label].replace(np.nan, 0)
self._summary['Buy'][self._summary['Buy'] > 0] = 1
self._summary['Sell'] = self._data[self._sell_label].replace(np.nan, 0)
self._summary['Sell'][self._summary['Sell'] > 0] = 1
self._summary['BuyAndSell'] = 0
last_float: float = 0.0
for ind in self._summary.index:
if self._summary['Buy'][ind] > self._summary['Sell'][ind]:
self._summary['BuyAndSell'][ind] = 1.0
last_float = 1.0
elif self._summary['Buy'][ind] < self._summary['Sell'][ind]:
self._summary['BuyAndSell'][ind] = -1.0
last_float = -1.0
else: # row['Buy'] == row['Sell']
self._summary['BuyAndSell'][ind] = last_float
| 46.34507 | 124 | 0.621942 | 6,333 | 0.962316 | 0 | 0 | 61 | 0.009269 | 0 | 0 | 315 | 0.047865 |
2f1305b235214a028b433be662b9539aa5ea50e7
| 7,572 |
py
|
Python
|
dayu_widgets/wizard.py
|
xiaonuoAndy/dayu_widgets
|
0a87e40b5b3b10e9f1f3f98c17a252c107118257
|
[
"MIT"
] | null | null | null |
dayu_widgets/wizard.py
|
xiaonuoAndy/dayu_widgets
|
0a87e40b5b3b10e9f1f3f98c17a252c107118257
|
[
"MIT"
] | null | null | null |
dayu_widgets/wizard.py
|
xiaonuoAndy/dayu_widgets
|
0a87e40b5b3b10e9f1f3f98c17a252c107118257
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2018.5
# Email : [email protected]
###################################################################
from collections import defaultdict
import utils
from qt import *
from separator import DayuHSeparator
from field_mixin import MFieldMixin
class MWizardPage(QWidget, MFieldMixin):
sig_complete_changed = Signal()
def __init__(self, subtitle=None, parent=None):
super(MWizardPage, self).__init__(parent)
self.field_dict = defaultdict(None)
self.wizard = parent
self.initialized = False
self.subtitle = subtitle
def init_page(self):
pass
def _is_complete(self):
for name, f_obj in self.field_dict.items():
if f_obj.required:
if not self.field(name):
return False
return True
def callback(self, *args, **kwargs):
pass
class MStepLabel(QLabel, MFieldMixin):
def __init__(self, parent=None):
super(MStepLabel, self).__init__(parent)
self.setProperty('status', 'waiting')
self.register_field('my_index', -1)
self.register_field('parent_index', -1)
self.register_field('title', '')
self.register_field('title_text', self.computed_title_text)
self.register_field('current_status', self.computed_status)
self.register_field('enable', self.computed_enable)
self.setObjectName('wizard-step')
self.setAlignment(Qt.AlignCenter)
self.bind('title_text', self, 'text')
self.bind('enable', self, 'enabled')
self.bind('current_status', self, 'status', callback=self.polish_qss)
def polish_qss(self):
self.style().polish(self)
def computed_title_text(self):
return '<span style="font-size:13pt;font-weight:bold;">Step {}</span><br/>{}'.format(
self.field('my_index') + 1,
self.field('title'))
def computed_enable(self):
return self.field('current_status') == 'waiting'
def computed_status(self):
if self.field('parent_index') == self.field('my_index'):
return 'current'
elif self.field('parent_index') < self.field('my_index'):
return 'waiting'
else:
return 'passed'
class MWizard(QDialog, MFieldMixin):
@utils.dayu_css()
def __init__(self, parent=None):
super(MWizard, self).__init__(parent)
self.field_dict = defaultdict(None)
title_label = QLabel()
title_label.setObjectName('wizard-title')
title_label.setAlignment(Qt.AlignCenter)
step_frame = QFrame()
step_frame.setObjectName('wizard-frame')
self.step_lay = QHBoxLayout()
self.step_lay.setContentsMargins(0, 0, 0, 0)
self.step_lay.setSpacing(0)
step_frame.setLayout(self.step_lay)
subtitle_label = QLabel()
subtitle_label.setObjectName('wizard-subtitle')
self.stacked_lay = QStackedLayout()
self.next_button = QPushButton('Next')
self.previous_button = QPushButton('Previous')
self.previous_button.clicked.connect(self.slot_back)
self.next_button.clicked.connect(self.slot_next)
button_lay = QHBoxLayout()
button_lay.addStretch()
button_lay.addWidget(self.previous_button)
button_lay.addWidget(self.next_button)
main_lay = QVBoxLayout()
main_lay.addWidget(title_label)
main_lay.addWidget(step_frame)
main_lay.addSpacing(20)
main_lay.addWidget(subtitle_label)
main_lay.addWidget(DayuHSeparator())
main_lay.addLayout(self.stacked_lay)
main_lay.addWidget(DayuHSeparator())
main_lay.addLayout(button_lay)
self.setLayout(main_lay)
self.register_field('current_index', 1)
self.register_field('current_subtitle', '')
self.register_field('window_title', '')
self.register_field('next_button_text', self.computed_next_button_text)
self.register_field('previous_visible', self.computed_previous_visible)
self.register_field('next_button_enable', self.computed_next_button_enable)
self.bind('window_title', title_label, 'text')
self.bind('current_index', self.stacked_lay, 'currentIndex')
self.bind('window_title', self, 'windowTitle')
self.bind('current_subtitle', subtitle_label, 'text')
self.bind('next_button_text', self.next_button, 'text')
self.bind('previous_visible', self.previous_button, 'visible')
self.bind('next_button_enable', self.next_button, 'enabled')
def computed_next_button_text(self):
return 'Finish' if self.field('current_index') >= (self.stacked_lay.count() - 1) else 'Next'
def computed_previous_visible(self):
return self.field('current_index') != 0
def computed_next_button_enable(self):
current_widget = self.stacked_lay.currentWidget()
if current_widget:
return current_widget._is_complete()
else:
return False
def add_page(self, page):
index = self.stacked_lay.addWidget(page)
page.wizard = self
# page.sig_complete_changed.connect(self._update_button_states)
# for f in page.field_dict.values():
# self.combine_field(f)
label = MStepLabel()
label.set_field('my_index', index)
label.set_field('title', page.subtitle)
self.bind('current_index', label, 'parent_index')
self.step_lay.addWidget(label)
return index
def combine_field(self, field):
if field.name in self.fields():
raise Exception('Field name {} already exists'.format(field.name))
self.field_dict.update({field.name: field})
if field.required and field.signal:
field.signal.connect(field.page.sig_complete_changed)
def set_title(self, text):
self.set_field('window_title', text)
@Slot()
def slot_back(self):
self.go_to(self.field('current_index') - 1)
@Slot()
def slot_next(self):
if self.field('next_button_text') == 'Finish':
            self.accept()
            return
        self.go_to(self.field('current_index') + 1)
def go_to(self, index):
self.set_field('current_index', index)
page = self.stacked_lay.currentWidget()
self.set_field('current_subtitle', page.subtitle)
if not page.initialized:
try:
page.init_page()
except Exception:
import traceback
error_detail = traceback.format_exc()
self.set_field('current_subtitle', error_detail)
self.next_button.setEnabled(False)
self.previous_button.setEnabled(False)
page.initialized = True
return
page.initialized = True
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = MWizard()
test.register_field('formats', [])
test.register_field('type_group', 'element')
test.register_field('current_step', 'prep')
test.set_title('Publish Element')
page0 = MWizardPage('Select Publish Type')
page1 = MWizardPage('Write Comment')
page2 = MWizardPage('Upload Thumbnail')
page3 = MWizardPage('Quality Check')
test.add_page(page0)
test.add_page(page3)
test.add_page(page1)
test.add_page(page2)
test.go_to(0)
test.show()
sys.exit(app.exec_())
| 34.108108 | 100 | 0.633386 | 6,568 | 0.867406 | 0 | 0 | 2,531 | 0.334258 | 0 | 0 | 1,445 | 0.190835 |
2f14ec3187ef5944e2d523b10e6eabf13148caae
| 897 |
py
|
Python
|
examples/TechChangeModel.py
|
timkittel/PyViability
|
63b628df47ab506e9317a908a63a49a556232137
|
[
"BSD-2-Clause"
] | null | null | null |
examples/TechChangeModel.py
|
timkittel/PyViability
|
63b628df47ab506e9317a908a63a49a556232137
|
[
"BSD-2-Clause"
] | null | null | null |
examples/TechChangeModel.py
|
timkittel/PyViability
|
63b628df47ab506e9317a908a63a49a556232137
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import division, print_function, generators
import numpy as np
pi = np.pi
def techChange_rhs(uB_pB, t, rvar, pBmin, pE, delta, smax, sBmax):
uB, pB = uB_pB
if sBmax == 0.:
p = pE
else:
if smax < sBmax * uB:
p = pE + smax / uB
else:
p = sBmax + pE
duB = rvar * uB * (1 - uB) * (p - pB)
dpB = -(pB - pBmin) * ((pB - pBmin) * uB - delta)
return np.array([duB, dpB])
def techChange_sunny(p):
"""sunny constraint for techChangeModel"""
return p[:, 0] > 0.325
def techChange_rhsPS(uB_pB, t, rvar, pBmin, pE, delta, smax, sBmax):
uB, pB = uB_pB
p = np.zeros_like(pB)
p[:] = sBmax + pE
mask = (smax < sBmax * uB)
p[mask] = (pE + smax / uB[mask])
duB = rvar * uB * (1 - uB) * (p - pB)
dpB = -(pB - pBmin) * ((pB - pBmin) * uB - delta)
return np.array([duB, dpB])
| 20.860465 | 68 | 0.528428 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.046823 |
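A minimal integration sketch for the techChange_rhs function above, assuming SciPy is available; the parameter values, time grid, and initial state are illustrative only and are not taken from the original example.
import numpy as np
from scipy.integrate import odeint
# illustrative parameters: (rvar, pBmin, pE, delta, smax, sBmax)
params = (1.0, 0.1, 0.5, 0.2, 0.3, 0.5)
t = np.linspace(0.0, 50.0, 500)
uB_pB_0 = [0.05, 0.2]  # initial share uB and price pB
trajectory = odeint(techChange_rhs, uB_pB_0, t, args=params)
sunny_mask = techChange_sunny(trajectory)  # boolean mask marking states inside the sunny region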
2f1545a93541c971b7ff89f3c71a62f913a542c9
| 2,502 |
py
|
Python
|
tests/test_heif.py
|
Cykooz/cykooz.heif
|
cfd60687406763503a57fe949bdf01fb9997cae8
|
[
"MIT"
] | 5 |
2020-03-05T20:31:23.000Z
|
2021-11-24T00:22:18.000Z
|
tests/test_heif.py
|
Cykooz/cykooz.heif
|
cfd60687406763503a57fe949bdf01fb9997cae8
|
[
"MIT"
] | 3 |
2021-01-14T15:23:04.000Z
|
2021-11-24T00:30:37.000Z
|
tests/test_heif.py
|
Cykooz/cykooz.heif
|
cfd60687406763503a57fe949bdf01fb9997cae8
|
[
"MIT"
] | 1 |
2020-06-12T01:29:10.000Z
|
2020-06-12T01:29:10.000Z
|
# -*- coding: utf-8 -*-
"""
:Authors: cykooz
:Date: 23.06.2019
"""
from pathlib import Path
import piexif
import pytest
from PIL import Image
from cykooz.heif.errors import HeifError
from cykooz.heif.image import RawHeifImage
from cykooz.heif.pil import register_heif_opener
@pytest.fixture(scope='session', autouse=True)
def reg_pil_opener():
register_heif_opener()
@pytest.fixture(name='data_path')
def data_path_fixture() -> Path:
return Path(__file__).parent / 'data'
def test_raw_heif_image_form_path(data_path):
img = RawHeifImage.from_path(data_path / 'test.heic')
assert img.width == 3024
assert img.height == 4032
assert img.mode == 'RGB'
assert len(img.data) == 36578304
assert img.stride == 9072
assert len(img.exif) == 2026
def test_raw_heif_image_form_reader(data_path):
img_path = data_path / 'test.heic'
with img_path.open('rb') as f:
img = RawHeifImage.from_stream(f)
assert img.width == 3024
assert img.height == 4032
assert img.mode == 'RGB'
assert len(img.data) == 36578304
assert img.stride == 9072
assert len(img.exif) == 2026
def test_raw_heif_image_form_reader_errors(data_path):
img_path = data_path / 'test.heic'
with img_path.open('rb') as f:
img = RawHeifImage.from_stream(f)
assert img.width == 3024
assert img.height == 4032
# File is closed
with pytest.raises(HeifError):
_ = img.data
@pytest.mark.parametrize(
['source_type'],
[
('path',),
('stream',),
]
)
@pytest.mark.parametrize(
['file_name'],
[
('test.heic',),
('heic_as.jpg',),
]
)
def test_open_pillow_image(data_path, source_type, file_name):
fp = data_path / file_name
if source_type == 'stream':
fp = open(str(fp), 'rb')
img: Image.Image = Image.open(fp)
assert img.size == (3024, 4032)
assert img.mode == 'RGB'
assert 'exif' in img.info
exif = piexif.load(img.info['exif'])
assert exif['Exif'][42035] == b'Apple'
assert exif['Exif'][42036] == b'iPhone 7 Plus back dual camera 6.6mm f/2.8'
pixel = img.getpixel((100, 100))
assert pixel == (73, 74, 69)
def test_open_png_as_heif(data_path):
fp = data_path / 'png_as.heif'
img: Image.Image = Image.open(fp)
assert img.size == (1280, 720)
assert img.mode == 'RGB'
assert 'exif' not in img.info
pixel = img.getpixel((100, 100))
assert pixel == (132, 185, 255)
| 24.529412 | 79 | 0.63709 | 0 | 0 | 0 | 0 | 941 | 0.376099 | 0 | 0 | 338 | 0.135092 |
2f15770186ad88ae65932854e1cbbe4f54f58e9d
| 3,960 |
py
|
Python
|
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
|
risdenk/ambari
|
3809bdc6d5fe367c2c3207812ee42856214db8de
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
|
risdenk/ambari
|
3809bdc6d5fe367c2c3207812ee42856214db8de
|
[
"Apache-2.0"
] | 1 |
2018-10-22T17:50:00.000Z
|
2018-10-22T17:50:00.000Z
|
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
|
risdenk/ambari
|
3809bdc6d5fe367c2c3207812ee42856214db8de
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import signal
import threading
import logging
import multiprocessing
from ambari_agent.PythonReflectiveExecutor import PythonReflectiveExecutor
from ambari_agent.RemoteDebugUtils import bind_debug_signal_handlers
from ambari_agent.ExitHelper import ExitHelper
logger = logging.getLogger(__name__)
class StatusCommandsExecutor(multiprocessing.Process):
"""
A process which executes status/security status commands.
    It dies and respawns itself on timeout of the command, which is the most graceful way to end the currently running status command.
"""
def __init__(self, config, actionQueue):
multiprocessing.Process.__init__(self)
self.config = config
self.actionQueue = actionQueue
self.status_command_timeout = int(self.config.get('agent', 'status_command_timeout', 5)) # in seconds
self.hasTimeoutedEvent = multiprocessing.Event()
ExitHelper().register(self.kill)
def run(self):
try:
bind_debug_signal_handlers()
logger.info("StatusCommandsExecutor starting")
while True:
        command = self.actionQueue.statusCommandQueue.get(True) # blocks until a status command appears
logger.debug("Running status command for {0}".format(command['componentName']))
timeout_timer = threading.Timer( self.status_command_timeout, self.respawn, [command])
timeout_timer.start()
self.process_status_command(command)
timeout_timer.cancel()
logger.debug("Completed status command for {0}".format(command['componentName']))
except:
logger.exception("StatusCommandsExecutor process failed with exception:")
raise
logger.warn("StatusCommandsExecutor process has finished")
def process_status_command(self, command):
component_status_result = self.actionQueue.customServiceOrchestrator.requestComponentStatus(command)
component_security_status_result = self.actionQueue.customServiceOrchestrator.requestComponentSecurityState(command)
result = (command, component_status_result, component_security_status_result)
self.actionQueue.statusCommandResultQueue.put(result)
def respawn(self, command):
try:
if hasattr(PythonReflectiveExecutor, "last_context"):
# Force context to reset to normal. By context we mean sys.path, imports, etc. They are set by specific status command, and are not relevant to ambari-agent.
PythonReflectiveExecutor.last_context.revert()
logger.warn("Command {0} for {1} is running for more than {2} seconds. Terminating it due to timeout.".format(command['commandType'], command['componentName'], self.status_command_timeout))
self.hasTimeoutedEvent.set()
except:
logger.exception("StatusCommandsExecutor.finish thread failed with exception:")
raise
def kill(self):
os.kill(self.pid, signal.SIGKILL)
# prevent queue from ending up with non-freed semaphores, locks during put. Which would result in dead-lock in process executing get.
self.actionQueue.statusCommandResultQueue.close()
self.actionQueue.statusCommandResultQueue.join_thread()
self.actionQueue.statusCommandResultQueue = multiprocessing.Queue()
| 41.684211 | 195 | 0.769192 | 2,863 | 0.72298 | 0 | 0 | 0 | 0 | 0 | 0 | 1,786 | 0.45101 |
2f16819a3d5eb873ef8eef277cfd895042d5e5d1
| 5,630 |
py
|
Python
|
blender/addons/2.8/mifth_tools/mifth_tools_ui.py
|
feynmanliang/mifthtools
|
cf99bc5811215a8747c43d84895ba4fa806812b7
|
[
"BSD-3-Clause"
] | null | null | null |
blender/addons/2.8/mifth_tools/mifth_tools_ui.py
|
feynmanliang/mifthtools
|
cf99bc5811215a8747c43d84895ba4fa806812b7
|
[
"BSD-3-Clause"
] | null | null | null |
blender/addons/2.8/mifth_tools/mifth_tools_ui.py
|
feynmanliang/mifthtools
|
cf99bc5811215a8747c43d84895ba4fa806812b7
|
[
"BSD-3-Clause"
] | null | null | null |
import bpy
from bpy.props import *
from bpy.types import Operator, AddonPreferences
class MFT_PT_PanelPose(bpy.types.Panel):
bl_label = "Bones"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "posemode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = context.scene.mifthTools
op = layout.operator("mft.copy_bones_transform", text="CopyBonesTransform")
op.mode = 'Copy'
op = layout.operator("mft.copy_bones_transform", text="PasteBonesTransform")
op.mode = 'Paste'
class MFT_PT_PanelAnimation(bpy.types.Panel):
bl_label = "Animations"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = context.scene.mifthTools
layout.operator("mft.curveanimator", text="Curve Animator")
layout.prop(mifthTools, "doUseSceneFrames", text='UseSceneFrames')
row = layout.row()
row.prop(mifthTools, "curveAniStartFrame", text='Start')
row.prop(mifthTools, "curveAniEndFrame", text='End')
row = layout.row()
row.prop(mifthTools, "curveAniStepFrame", text='Steps')
row.prop(mifthTools, "curveAniInterpolation", text='Interpolation')
layout.separator()
layout.separator()
layout.operator("mft.morfcreator", text="Morfer")
layout.prop(mifthTools, "morfCreatorNames")
layout.prop(mifthTools, "morfUseWorldMatrix", text='useWorldMatrix')
layout.prop(mifthTools, "morfApplyModifiers", text='applyModifiers')
class MFT_PT_PanelPlaykot(bpy.types.Panel):
bl_label = "PlaykotTools"
bl_space_type = 'NODE_EDITOR'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = context.scene.mifthTools
layout.operator("mft.render_scene_2x", text="ScaleCrop")
layout.operator("mft.cropnoderegion", text="CropNodeRegion")
layout.operator("mft.crop_to_viewport", text="CropToViewport")
layout.separator()
layout.operator("mft.outputcreator", text="Create Output")
layout.prop(mifthTools, "outputFolder")
row = layout.row()
row.prop(mifthTools, "outputSubFolder")
row.prop(mifthTools, "doOutputSubFolder", text='')
layout.prop(mifthTools, "outputSequence")
layout.prop(mifthTools, "outputSequenceSize")
class MFT_PT_PanelCloning(bpy.types.Panel):
bl_label = "Cloning"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = bpy.context.scene.mifthTools
mifthCloneTools = bpy.context.scene.mifthCloneTools
layout.label(text="Draw Clones:")
layout.operator("mft.draw_clones", text="DrawClones")
layout.operator("mft.pick_obj_to_clone_draw", text="PickObjects")
layout.prop(mifthCloneTools, "drawClonesDirectionRotate", text='DirectionRotate')
layout.prop(mifthCloneTools, "drawClonesRadialRotate", text='RadialRotate')
layout.prop(mifthCloneTools, "drawClonesNormalRotate", text='NormalRotate')
#layout.prop(mifthCloneTools, "drawClonesOptimize", text='Optimize')
layout.prop(mifthCloneTools, "drawStrokeLength", text='Stroke')
layout.prop(mifthCloneTools, "drawRandomStrokeScatter", text='Scatter')
layout.prop(mifthCloneTools, "randNormalRotateClone", text='RandNormal')
layout.prop(mifthCloneTools, "randDirectionRotateClone", text='RandDirection')
layout.prop(mifthCloneTools, "randScaleClone", text='RandScale')
layout.prop(mifthCloneTools, "drawPressure", text='DrawPressure')
row = layout.row()
row.prop(mifthCloneTools, "drawPressureRelativeStroke", text='S')
row.prop(mifthCloneTools, "drawPressureScale", text='S')
row.prop(mifthCloneTools, "drawPressureScatter", text='S')
layout.prop(mifthCloneTools, "drawClonesAxis", text='Axis')
layout.separator()
layout.label(text="Clone Selected:")
layout.operator("mft.clonetoselected", text="CloneToSelected")
layout.separator()
layout.label(text="Radial Clone:")
layout.operator("mft.radialclone", text="Radial Clone")
# layout.prop(mifthTools, "radialClonesNumber", text='')
row = layout.row()
row.prop(mifthCloneTools, "radialClonesAxis", text='')
row.prop(mifthCloneTools, "radialClonesAxisType", text='')
layout.separator()
layout.label(text="Position Group:")
layout.operator("mft.group_instance_to_cursor", text="Position Group")
layout.prop(mifthCloneTools, "getGroupsLst", text='')
layout.separator()
layout.operator("mft.group_to_mesh", text="Groups To Mesh")
class MFT_PT_PanelVertexPaint(bpy.types.Panel):
bl_label = "Vertex Paint"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "vertexpaint"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = bpy.context.scene.mifthTools
layout.operator("mftv.set_colors_to_selected", text="Set Colors")
layout.operator("mftv.invert_colors", text="Invert Colors")
| 37.533333 | 89 | 0.675311 | 5,531 | 0.982416 | 0 | 0 | 0 | 0 | 0 | 0 | 1,969 | 0.349734 |
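A registration sketch for the panel classes above, assuming the usual Blender add-on layout; in the real add-on this wiring lives in the package's __init__.py, so the tuple and function names here are illustrative only.
classes = (
    MFT_PT_PanelPose,
    MFT_PT_PanelAnimation,
    MFT_PT_PanelPlaykot,
    MFT_PT_PanelCloning,
    MFT_PT_PanelVertexPaint,
)
def register():
    for cls in classes:
        bpy.utils.register_class(cls)
def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)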
2f1729df6cf48161f37c48656ac64fd0cceb2a63
| 11,830 |
py
|
Python
|
fabfile.py
|
bbayles/link
|
48cf656fac6c31c0aa82152ce68767e469ed5f06
|
[
"Apache-2.0"
] | 9 |
2015-03-18T18:23:41.000Z
|
2016-11-18T09:16:02.000Z
|
fabfile.py
|
bbayles/link
|
48cf656fac6c31c0aa82152ce68767e469ed5f06
|
[
"Apache-2.0"
] | 3 |
2015-11-07T16:56:51.000Z
|
2016-11-22T19:32:09.000Z
|
fabfile.py
|
bbayles/link
|
48cf656fac6c31c0aa82152ce68767e469ed5f06
|
[
"Apache-2.0"
] | 7 |
2015-05-15T18:12:40.000Z
|
2017-03-16T18:42:25.000Z
|
"""
Fabfile for deploying and setting up code that looks like the production
environment. It also makes it easy to start up the servers.
If you want to run on the localhost you may need to first do::
rm -rf ~/.ssh/known_hosts
"""
from __future__ import with_statement
import os
import re
from fabric.api import local, settings, abort, run , cd, env, lcd, sudo, prompt
from fabric.contrib.console import confirm
from fabric.contrib import files
env.roledefs = {'local':['localhost']}
env.use_ssh_config=True
TAG_REGEX = re.compile('^[0-9]+\.[0-9]+\.[0-9]+')
STABLE_MSG = '**stable**'
LINK_CODE_DIR = os.path.split(os.path.abspath(__file__))[0]
def dir_code_base():
"""
If you are using any localhost then it will use the current directory.
Otherwise you will use the code_dir
"""
if 'localhost' in env.host_string:
return os.getcwd()
return code_dir
def dir_scripts():
"""
The directory where you house all the scripts
"""
return '%s/scripts' % (dir_code_base())
config_dir = '~/.link'
def test_install():
import os
#set the link dir to something silly
os.environ['LNK_DIR']='saodusah'
#create a virtual environment
local('echo $LNK_DIR')
local('virtualenv env')
#remove everything from the build directory
local('rm -rf build')
#run this and see that it works
local('source env/bin/activate && python setup.py install')
def configure():
"""
Create the base configuration so that you can change it. Might want to
include the configuration in a different repo
"""
if not files.exists(config_dir):
run('mkdir %s' % config_dir)
lnk_config = '%s/link.config' % config_dir
if not files.exists(lnk_config):
run('touch %s' % lnk_config)
def script(script_name, command = 'python', **args):
"""
    Will run the script that is in the scripts folder. You can pass in a
    dictionary of args and it will pass it through to the script as command line
args in this format
fab -R local script:example.py,arg1=value1,arg2=value2
that will result in running this command
<command> <scripts_directory>/<scriptname> --arg1=value1 --arg2=value2
"""
with cd(dir_scripts()):
parameters = ''
if args:
parameters = ' '.join(['--%s=%s' % (key, value) for key,value in
args.iteritems()])
run("%s %s %s" % (command , script_name, parameters))
def commit(msg=None):
"""
Commit your changes to git
:msg: @todo
:returns: @todo
"""
    print '---Committing---'
print
msg = msg or prompt('Commit message: ')
commit = False
commit = prompt('Confirm commit? [y/n]') == 'y'
if commit:
with settings(warn_only=True):
_commit = not local('git commit -a -m "%s"' % msg).failed
if not _commit:
#nothing was committed
commit = False
print "Nothing to commit"
else:
abort('commit aborted')
print
print '---Done---'
return commit
def tag_names(number = 10, stable=False):
number = int(number)
print "fetching tags first"
local('git fetch --tags ')
print "Showing latest tags for reference"
tags = local('git tag -n1 ', capture = True)
tags = [x for x in tags.split('\n') if TAG_REGEX.findall(x) and
(not stable or STABLE_MSG in x)]
tags.sort(reverse=True)
#take the first <number> things in the list
tags = tags[0:min(len(tags), number)]
print '\n'.join(tags)
print
return tags
def check_tag_format(tag):
"""
Checks the tag format and returns the component parts
"""
parsed = tag.split('.')
try:
#allow for at most 2 minor decimals...i mean comeon
major = int(parsed[0])
minor = int(parsed[1])
build = int(parsed[2][0:2])
return (major, minor, build)
except Exception as e:
print e
abort("""Must be of the form <major_version>.<minor>.<maintence>, like
0.0.1. Only integers allowed""")
def write_version(version):
"""
Write out the version python file to the link directory before installing
version needs to be a list or tuple of the form (<major>, <minor>, <build>)
or a string in the format <major>.<minor>.<build> all ints
"""
file_name ='link/__init__.py'
init = open(file_name)
init_read = init.readlines()
init.close()
version_line = [idx for idx, x in enumerate(init_read) if '__version__ = ' in x]
if len(version_line)>1:
raise Exception('version is in there more than once')
if isinstance(version, str):
try:
version_split = map(int, version.split('.'))
except:
raise Exception("Version string must be in the format <major>.<minor>.<build>")
if not isinstance(version_split, (list, tuple)) or len(version_split)!=3:
raise Exception('invalid version %s' % version)
init_read[version_line[0]] = "__version__ = '%s'\n" % version
init = open(file_name, 'w')
try:
init.write(''.join(init_read))
finally:
init.close()
def prompt_for_tag(default_offset=1, stable_only = False):
"""
Prompt for the tag you want to use, offset for the default by input
"""
tags = tag_names(10, stable_only)
print "Showing latest tags for reference"
default = '0.0.1'
if tags:
default = tags[0]
(major, minor, build) = check_tag_format(default)
build = build+default_offset
new_default = '%s.%s.%s' % (major, minor, build)
tag = prompt('Tag name [in format x.xx] (default: %s) ? ' % new_default)
tag = tag or new_default
return tag
def push_to_pypi():
"""
Will push the code to pypi
"""
if prompt('would you like to tag a new version first [y/n]') == 'y':
tag()
local('python setup.py sdist upload')
def prompt_commit():
"""
prompts if you would like to commit
"""
local('git status')
print
print
_commit = prompt('Do you want to commit? [y/n]') == 'y'
if _commit:
msg = prompt('Commit message: ')
return commit(msg)
def tag(mark_stable=False):
"""
Tag a release, will prompt you for the tag version. You can mark it as
stable here as well
"""
tag = prompt_for_tag()
print "writing this tag version to version.py before commiting"
write_version(tag)
print
_commit = prompt_commit()
print
if not _commit and not tag:
print
print "Nothing commited, using default tag %s" % default
print
tag = default
else:
msg = ''
if mark_stable:
msg = STABLE_MSG + ' '
msg += prompt("enter msg for tag: ")
local('git tag %(ref)s -m "%(msg)s"' % { 'ref': tag, 'msg':msg})
local('git push --tags')
return tag
def merge(branch=None, merge_to = 'master'):
"""
Merge your changes and delete the old branch
"""
if not branch:
print "no branch specified, using current"
branch = current_branch()
if prompt('confirm merge with of branch %s to %s [y/N]' % (branch, merge_to)) == 'y':
prompt_commit()
local('git checkout %s ' % merge_to)
local('git merge %s ' % branch)
if prompt('delete the old branch locally and remotely? [y/N]') == 'y':
local('git branch -d %s' % branch)
local('git push origin :%s' % branch)
else:
print "leaving branch where it is"
if prompt('push results [y/N]' ) == 'y':
local('git push')
def tag_deploy(mark_stable=False):
"""
    Asks you to tag this release and figures out what branch you are on.
It then calls the deploy function
"""
local('git fetch --tags')
branch = local('git branch | grep "^*" | cut -d" " -f2', capture=True)
_tag = tag(mark_stable=mark_stable)
deploy(_tag, branch)
def retag(tag, msg):
"""
Retag a tag with a new message
"""
local('git tag %s %s -f -m "%s"' % (tag, tag, msg))
local('git push --tags')
def mark_stable(tag, msg = None):
"""
Mark a previous tag as stable
"""
retag(tag, '%s %s' % (STABLE_MSG, msg) )
def current_branch():
current_branch = local('git branch | grep "^*"', capture=True).lstrip('* ')
print "Current branch is %s" % current_branch
return current_branch
def deploy(tag=None, branch=None, stable_only=False):
"""
This is only for deployment on a dev box where everything can be owned by
    this user. This is NOT for production deployment. Puts the code in
code_dir
"""
if not tag:
tag = prompt_for_tag(0, stable_only = stable_only)
configure()
setup_environment()
#check out all the code in the right place
with cd(code_dir):
# i **THINK** you have to have the branch checked out before you can
# checkout the tag
if branch:
#then you haven't even checkout this branch
branches = run('git branch')
if branch not in branches:
run('git checkout -b %s' % branch)
_current_branch = current_branch()
if "* %s" % branch != _current_branch:
run('git checkout %s' % branch)
#pull the latest
run('git pull origin %s' % branch)
else:
run("git pull origin master")
#check out a specific tag
if tag:
run("git fetch --tags")
run("git checkout %s" % tag)
#hacky
if env.user == 'root':
#make sure everything is still owned by the deployer
run('chown -R %s %s' % (deploy_user, code_dir))
###
# How to setup a fresh box. You probably have to run this as root for it to
# work
###
def install_easy_install():
"""
Installs setup tool, this should also go into an RPM
"""
run('wget http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11-py2.7.egg#md5=fe1f997bc722265116870bc7919059ea')
run('sh setuptools-0.6c11-py2.7.egg')
def install_python():
"""
Installs python, I should be able to create an RPM eventually
"""
run('wget http://python.org/ftp/python/2.7.2/Python-2.7.2.tgz')
run('tar -xvf Python-2.7.2.tgz')
with cd('Python-2.7.2'):
run('./configure')
run('make')
run('make install')
###
# This isn't really necessary but I'll keep it for now
###
def install_python_dependancies():
"""
Easy install all the packages we need
"""
run('easy_install requests')
run('easy_install numpy')
run('easy_install pandas')
run('easy_install happybase')
run('easy_install flask')
run('easy_install ipython')
run('easy_install gunicorn')
run('easy_install link')
run('easy_install pymongo')
run('easy_install mysql-python')
run('easy_install docutils')
def install_box_libraries():
"""
Installs the libs you need like readlines and libsqlite. This will only
    run on an Ubuntu machine with apt-get
"""
with settings(warn_only=True):
has_apt = run('which apt-get')
if has_apt:
run('apt-get install make')
run('apt-get install libsqlite3-dev')
run('apt-get install libreadline6 libreadline6-dev')
run('apt-get install libmysqlclient-dev')
else:
print "this is not an ubuntu system...skipping"
def setup_box():
"""
Will install python and all libs needed to set up this box to run the
examjam code. Eventually this needs to be more RPM based
"""
#place_pub_key()
install_box_libraries()
install_python()
install_easy_install()
install_python_dependancies()
| 28.995098 | 129 | 0.608876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,752 | 0.486221 |
2f17c5de8625cd4bead31cfebf12c8291e262c52
| 183 |
py
|
Python
|
jails/routing.py
|
himrock922/jaisting
|
a1a53371043c05f0bb82fb7e2e3e16aecb1eba42
|
[
"Apache-2.0"
] | 9 |
2019-03-23T08:38:58.000Z
|
2021-01-27T05:54:32.000Z
|
jails/routing.py
|
himrock922/jaisting
|
a1a53371043c05f0bb82fb7e2e3e16aecb1eba42
|
[
"Apache-2.0"
] | 16 |
2019-03-23T07:35:01.000Z
|
2022-01-22T04:23:46.000Z
|
jails/routing.py
|
himrock922/jaisting
|
a1a53371043c05f0bb82fb7e2e3e16aecb1eba42
|
[
"Apache-2.0"
] | 1 |
2019-03-24T13:17:18.000Z
|
2019-03-24T13:17:18.000Z
|
from channels.routing import ProtocolTypeRouter
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'/websocket', consumers.VNCConsumer)
]
| 22.875 | 49 | 0.803279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.071038 |
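An illustrative sketch of how websocket_urlpatterns above is typically consumed by the imported ProtocolTypeRouter; the ASGI module layout and the use of AuthMiddlewareStack are assumptions, not part of the original file.
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from jails.routing import websocket_urlpatterns
application = ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
})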
2f190acf1519186091c3bd6551e361c43ae96fd6
| 515 |
py
|
Python
|
layers/poky/meta/lib/oeqa/runtime/case.py
|
dtischler/px30-test
|
55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f
|
[
"Apache-2.0"
] | 53 |
2018-02-28T08:51:32.000Z
|
2022-02-28T06:49:23.000Z
|
layers/poky/meta/lib/oeqa/runtime/case.py
|
dtischler/px30-test
|
55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f
|
[
"Apache-2.0"
] | 27 |
2018-01-25T00:26:53.000Z
|
2020-08-09T05:20:04.000Z
|
layers/poky/meta/lib/oeqa/runtime/case.py
|
dtischler/px30-test
|
55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f
|
[
"Apache-2.0"
] | 51 |
2018-02-21T04:46:08.000Z
|
2022-03-02T04:20:41.000Z
|
# Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)
from oeqa.core.case import OETestCase
from oeqa.utils.package_manager import install_package, uninstall_package
class OERuntimeTestCase(OETestCase):
# target instance set by OERuntimeTestLoader.
target = None
def setUp(self):
super(OERuntimeTestCase, self).setUp()
install_package(self)
def tearDown(self):
super(OERuntimeTestCase, self).tearDown()
uninstall_package(self)
| 28.611111 | 73 | 0.735922 | 310 | 0.601942 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.258252 |
2f194f4c6d0e43f1d9af761e30aabf62de1d5d85
| 393 |
py
|
Python
|
tests/analysis/test_general.py
|
trumanw/ScaffoldGraph
|
a594e5c5effe6c5e45c0061a235ccbeb64e416f9
|
[
"MIT"
] | 121 |
2019-12-12T15:30:16.000Z
|
2022-02-28T02:00:54.000Z
|
tests/analysis/test_general.py
|
trumanw/ScaffoldGraph
|
a594e5c5effe6c5e45c0061a235ccbeb64e416f9
|
[
"MIT"
] | 8 |
2020-04-04T15:37:26.000Z
|
2021-11-17T07:30:31.000Z
|
tests/analysis/test_general.py
|
trumanw/ScaffoldGraph
|
a594e5c5effe6c5e45c0061a235ccbeb64e416f9
|
[
"MIT"
] | 28 |
2019-12-16T11:58:53.000Z
|
2021-11-19T09:57:46.000Z
|
"""
scaffoldgraph tests.analysis.test_general
"""
from scaffoldgraph.analysis import get_singleton_scaffolds, get_virtual_scaffolds
from ..test_network import long_test_network
def test_get_virtual_scaffolds(network):
v = get_virtual_scaffolds(network)
assert len(v) == 19
def test_get_singleton_scaffolds(network):
s = get_singleton_scaffolds(network)
assert len(s) == 3
| 23.117647 | 81 | 0.78626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.124682 |
2f1989e325bb85e0738bbeae4175fa2a163031d0
| 1,750 |
py
|
Python
|
Problem 001-150 Python/pb035.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | 2 |
2015-02-11T05:47:42.000Z
|
2015-02-11T05:47:51.000Z
|
Problem 001-150 Python/pb035.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | 1 |
2015-04-13T06:36:21.000Z
|
2015-04-13T06:36:21.000Z
|
Problem 001-150 Python/pb035.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | null | null | null |
import math
import time
t1 = time.time()
N = 1000000
n = (N+1)//2
p = [True]*(n)
i = 1
prime = [2]
while i < n:
if p[i]:
t = 2*i+1
prime.append(t)
j = i
while j < n:
p[j] = False
j += t
i += 1
def isPrime(item):
root = math.floor(math.sqrt(item))
i = 0
t = prime[i]
while t <= root:
if item%t == 0:
return False
if t < prime[-1]:
i += 1
t = prime[i]
else:
t += 2
return True
# define a binary search
def isInList(item,lst):
firstPoint = 0
endPoint = len(lst)-1
index = -1
while firstPoint <= endPoint:
midPoint = (firstPoint+endPoint)//2
if lst[midPoint] == item:
index = midPoint
return index
elif item > lst[midPoint]:
firstPoint = midPoint +1
else:
endPoint = midPoint -1
return index
target = prime[:]
count = 0
while len(target) > 0:
#print(target)
#print (count)
test = target[0]
dig = math.floor(math.log10(test))+1
target.pop(0)
if dig == 1:
count += 1
continue
if dig > 1:
i = 1
counted = 0
tl = True
while i < dig:
test = test//10 + (test%10)*math.pow(10,dig-1)
if isPrime(test):
i += 1
ind = isInList(test,target)
if ind >= 0:
target.pop(ind)
else:
counted += 1
else:
tl = False
break
if tl:
count += dig - counted
print (count)
print("time:",time.time()-t1)
| 18.617021 | 58 | 0.430857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.034286 |
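For comparison, a more compact sieve-plus-rotation sketch that recomputes the same circular-prime count as the solution above; it is an added illustration, not part of the original file.
def count_circular_primes(limit=1000000):
    # sieve of Eratosthenes up to limit
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i::i] = [False] * len(sieve[i * i::i])
    total = 0
    for p in range(2, limit):
        if sieve[p]:
            s = str(p)
            # every rotation of p must also be prime
            if all(sieve[int(s[k:] + s[:k])] for k in range(1, len(s))):
                total += 1
    return total
print(count_circular_primes())  # expected to print 55 for the one-million limit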
2f19e1c9987607e703c57f23deb45035eb248b71
| 87 |
py
|
Python
|
izone/apps/secret/apps.py
|
shenjl/vmatrix
|
8f510d04005aa707cb6b296825f459f852cb59f6
|
[
"MIT"
] | null | null | null |
izone/apps/secret/apps.py
|
shenjl/vmatrix
|
8f510d04005aa707cb6b296825f459f852cb59f6
|
[
"MIT"
] | 2 |
2020-02-11T23:34:28.000Z
|
2020-06-05T17:33:09.000Z
|
izone/apps/secret/apps.py
|
selonsy/vmatrix
|
8f510d04005aa707cb6b296825f459f852cb59f6
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class SecretConfig(AppConfig):
name = 'secret'
| 14.5 | 33 | 0.747126 | 50 | 0.574713 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.091954 |
2f1a5c2760e9a1b86d6eb2f562c21e3dbc87be05
| 2,190 |
py
|
Python
|
BAP/adapters.py
|
EleutherAGI/summarisation
|
d432873e1ba171f47371b8b0df7235478b52ca99
|
[
"CC-BY-4.0"
] | 11 |
2021-05-12T14:11:58.000Z
|
2022-01-25T04:23:38.000Z
|
BAP/adapters.py
|
EleutherAGI/summarisation
|
d432873e1ba171f47371b8b0df7235478b52ca99
|
[
"CC-BY-4.0"
] | 3 |
2021-05-13T11:37:35.000Z
|
2021-05-13T11:50:15.000Z
|
BAP/adapters.py
|
EleutherAGI/summarisation
|
d432873e1ba171f47371b8b0df7235478b52ca99
|
[
"CC-BY-4.0"
] | null | null | null |
import torch
import torch.nn as nn
from collections import OrderedDict
class AdapterLayer(nn.Module):
def __init__(self, input_size, reduction_factor):
super(AdapterLayer, self).__init__()
self.skip_adapter = False
self.adapter = nn.Sequential(nn.Linear(input_size, input_size//reduction_factor),
nn.ReLU(),
nn.Linear(input_size//reduction_factor, input_size))
self.adapter.apply(self.init_weights)
def init_weights(self, m, std = 1e-2):
if type(m) == nn.Linear:
torch.nn.init.normal_(m.weight, std = std)
torch.nn.init.normal_(m.bias, std = std)
m.weight.data = torch.clamp(m.weight.data, min = -2*std, max = 2*std)
m.bias.data = torch.clamp(m.bias.data, min = -2*std, max = 2*std)
def forward(self, X):
if self.skip_adapter:
return X
else:
return self.adapter(X) + X
### GPT NEO VERSION ######
'''
# couldn't get it to work with class inheritance
def add_adapters(model, reduction_factor):
n_layers = len(model.h)
hidden_size = model.config.hidden_size
for n in range(n_layers):
model.h[n].mlp = nn.Sequential(OrderedDict([('MLP', model.h[n].mlp),
('Adapter', AdapterLayer(hidden_size, reduction_factor))]))
return model
'''
# couldn't get it to work with class inheritance
def add_adapters(model, reduction_factor):
n_layers = len(model.transformer.h)
hidden_size = model.config.hidden_size
for n in range(n_layers):
model.transformer.h[n].mlp = nn.Sequential(OrderedDict([('MLP', model.transformer.h[n].mlp),
('Adapter', AdapterLayer(hidden_size, reduction_factor))]))
return model
def add_adapter_skip(model):
def adapter_skip(self, skip):
n_layers = len(self.transformer.h)
for n in range(n_layers):
self.transformer.h[n].mlp.Adapter.skip_adapter = skip
model.adapter_skip = adapter_skip.__get__(model)
return model
| 39.818182 | 111 | 0.594977 | 937 | 0.427854 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.230137 |
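A usage sketch for the adapter helpers above, assuming a GPT-2-style Hugging Face checkpoint whose blocks live under model.transformer.h and whose config exposes hidden_size; the checkpoint name and reduction factor are illustrative.
from transformers import GPT2LMHeadModel
model = GPT2LMHeadModel.from_pretrained('gpt2')
model = add_adapters(model, reduction_factor=16)
model = add_adapter_skip(model)
# train only the adapter weights, keeping the pretrained transformer frozen
for name, param in model.named_parameters():
    param.requires_grad = 'Adapter' in name
model.adapter_skip(False)  # adapters active
model.adapter_skip(True)   # adapters bypassed, recovering the base model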
2f1b669092b8b167d53d53cce79bec39a591e1c1
| 3,934 |
py
|
Python
|
tests/test_PrependError.py
|
hutoTUM/macke-opt-llvm
|
95830cb4e1416a6d1fb538f2b91d1c4720d4bde7
|
[
"Apache-2.0"
] | 4 |
2018-05-11T08:33:46.000Z
|
2019-12-16T01:49:37.000Z
|
tests/test_PrependError.py
|
aheroine/use-llvm-opt
|
407102740f563f57a7abb952e198f6a65800deaa
|
[
"Apache-2.0"
] | null | null | null |
tests/test_PrependError.py
|
aheroine/use-llvm-opt
|
407102740f563f57a7abb952e198f6a65800deaa
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
import re
import subprocess
class TestPrependError(unittest.TestCase):
def test_symmain_directory(self):
self.assertIn("LLVMBIN", os.environ, "Path to llvm-bin not set")
self.assertIn("KLEEBIN", os.environ, "Path to klee-bin not set")
bitcodefile = "bin/klee_symmain.bc"
prependtofunction = "faulty"
modedbitcodefile = "bin/mod-klee_symmain.bc"
        # First run KLEE normally
out = subprocess.check_output([
os.environ["KLEEBIN"] + "/klee",
"--optimize", "--only-output-states-covering-new",
bitcodefile],
stderr=subprocess.STDOUT)
# Read the directory with all error asserts
kleedir = re.search(
r"^KLEE: output directory is \"(.*)\"",
out.decode("utf-8")).group(1)
# Prepend the error summaries
subprocess.check_output([
os.environ["LLVMBIN"] + "/opt",
"-load", "bin/libMackeOpt.so",
"-preprenderror", bitcodefile,
"-prependtofunction", prependtofunction,
"-previouskleerundirectory", kleedir,
"-o", modedbitcodefile])
out = subprocess.check_output([
os.environ["KLEEBIN"] + "/klee",
"--optimize", "--only-output-states-covering-new",
modedbitcodefile],
stderr=subprocess.STDOUT)
self.assertTrue(b"KLEE: done: generated tests = 7" in out)
self.assertEqual(6, out.count(b"KLEE: ERROR:"))
out = subprocess.check_output(
[os.environ["KLEEBIN"] + "/ktest-tool"] +
["bin/klee-last/test00000%d.ktest" % i for i in range(1, 8)])
self.assertEqual(2, out.count(b"\\x15\\x00\\x00\\x00"))
self.assertEqual(2, out.count(b"*\\x00\\x00\\x00"))
self.assertEqual(2, out.count(b"9\\x05\\x00\\x00"))
def test_symmain_direct_files(self):
self.assertIn("LLVMBIN", os.environ, "Path to llvm-bin not set")
self.assertIn("KLEEBIN", os.environ, "Path to klee-bin not set")
bitcodefile = "bin/klee_symmain.bc"
prependtofunction = "faulty"
modedbitcodefile = "bin/mod-klee_symmain.bc"
        # First run KLEE normally
out = subprocess.check_output([
os.environ["KLEEBIN"] + "/klee",
"--optimize", "--only-output-states-covering-new",
bitcodefile],
stderr=subprocess.STDOUT)
# Read the directory with all error asserts
kleedir = re.search(
r"^KLEE: output directory is \"(.*)\"",
out.decode("utf-8")).group(1)
# Build a list, where all .err files are named explicitly
errfilelist = []
for file in os.listdir(kleedir):
if file.endswith(".err"):
errfilelist.append("-errorfiletoprepend")
errfilelist.append(os.path.join(kleedir, file))
# Prepend the error summaries
subprocess.check_output([
os.environ["LLVMBIN"] + "/opt",
"-load", "bin/libMackeOpt.so",
"-preprenderror", bitcodefile,
"-prependtofunction", prependtofunction] +
errfilelist +
["-o", modedbitcodefile])
out = subprocess.check_output([
os.environ["KLEEBIN"] + "/klee",
"--optimize", "--only-output-states-covering-new",
modedbitcodefile],
stderr=subprocess.STDOUT)
self.assertTrue(b"KLEE: done: generated tests = 7" in out)
self.assertEqual(6, out.count(b"KLEE: ERROR:"))
out = subprocess.check_output(
[os.environ["KLEEBIN"] + "/ktest-tool"] +
["bin/klee-last/test00000%d.ktest" % i for i in range(1, 8)])
self.assertEqual(2, out.count(b"\\x15\\x00\\x00\\x00"))
self.assertEqual(2, out.count(b"*\\x00\\x00\\x00"))
self.assertEqual(2, out.count(b"9\\x05\\x00\\x00"))
| 36.766355 | 73 | 0.576004 | 3,877 | 0.985511 | 0 | 0 | 0 | 0 | 0 | 0 | 1,387 | 0.352567 |
2f1da8ae305ab06e7ec0677f650d3ae476d39207
| 1,851 |
py
|
Python
|
water_modelling/hydrus/desktop/hydrus_desktop_deployer.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
water_modelling/hydrus/desktop/hydrus_desktop_deployer.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
water_modelling/hydrus/desktop/hydrus_desktop_deployer.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
import os
import subprocess
from typing import Optional
from hydrus import hydrus_log_analyzer
from hydrus.hydrus_deployer_interface import IHydrusDeployer
from simulation.simulation_error import SimulationError
from utils import path_formatter
class _HydrusDesktopDeployer(IHydrusDeployer):
LOG_FILE = "simulation.log"
def __init__(self, hydrus_exe_path: str, path: str):
self.hydrus_exe_path = path_formatter.convert_backslashes_to_slashes(hydrus_exe_path)
self.path = path_formatter.convert_backslashes_to_slashes(path)
self.proc = None
def run(self):
print(f"Starting Hydrus calculations for: {self.path}")
with open(self._get_path_to_log(), 'w') as handle:
self.proc = subprocess.Popen([self.hydrus_exe_path, self.path], shell=True, text=True,
stdin=subprocess.PIPE, stdout=handle, stderr=handle)
def wait_for_termination(self) -> Optional[SimulationError]:
self.proc.communicate(input="\n") # Press enter to close program (blocking)
# analyze output and return SimulationError if made
with open(self._get_path_to_log(), 'r') as handle:
log_lines = handle.readlines()
simulation_error = hydrus_log_analyzer.analyze_log(self._get_model_name(), log_lines)
if simulation_error:
print(f"{self.path}: error occurred: {simulation_error.error_description}")
return simulation_error
# successful scenario
print(f"{self.path}: calculations completed successfully")
return None
def _get_model_name(self) -> str:
return path_formatter.convert_backslashes_to_slashes(self.path).split('/hydrus/')[1]
def _get_path_to_log(self) -> str:
return os.path.join(self.path, _HydrusDesktopDeployer.LOG_FILE)
| 41.133333 | 98 | 0.703944 | 1,602 | 0.865478 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.170719 |
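A minimal driver sketch for the deployer class above; the executable path and project directory are placeholders, and in the real application the class is constructed by a higher-level deployment layer rather than directly.
deployer = _HydrusDesktopDeployer(
    hydrus_exe_path='C:/Hydrus/hydrus_calc.exe',      # placeholder executable path
    path='C:/simulations/workspace/hydrus/model_a',   # placeholder project directory
)
deployer.run()
error = deployer.wait_for_termination()
if error is not None:
    print(error.error_description)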
2f1dfd7483d1c7356a889232b88033380a6fbee8
| 3,600 |
py
|
Python
|
src/openprocurement/framework/electroniccatalogue/views/submission.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 10 |
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/framework/electroniccatalogue/views/submission.py
|
quintagroup/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 26 |
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/framework/electroniccatalogue/views/submission.py
|
ProzorroUKR/openprocurement.api
|
2855a99aa8738fb832ee0dbad4e9590bd3643511
|
[
"Apache-2.0"
] | 15 |
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
from openprocurement.api.utils import APIResource, json_view, context_unpack, get_now, generate_id
from openprocurement.framework.core.utils import (
submissionsresource,
apply_patch,
save_qualification,
)
from openprocurement.framework.core.validation import (
validate_patch_submission_data,
validate_operation_submission_in_not_allowed_period,
validate_submission_status,
validate_update_submission_in_not_allowed_status,
validate_activate_submission,
validate_action_in_not_allowed_framework_status,
)
from openprocurement.framework.electroniccatalogue.models import Qualification
@submissionsresource(
name="electronicCatalogue:Submissions",
path="/submissions/{submission_id}",
submissionType="electronicCatalogue",
description="", # TODO: add description
)
class SubmissionResource(APIResource):
@json_view(permission="view_submission")
def get(self):
"""
Get info by submission
"""
submission_data = self.context.serialize("view")
return {"data": submission_data}
@json_view(
content_type="application/json",
validators=(
validate_patch_submission_data,
validate_operation_submission_in_not_allowed_period,
validate_update_submission_in_not_allowed_status,
validate_action_in_not_allowed_framework_status("submission"),
validate_submission_status,
validate_activate_submission,
),
permission="edit_submission",
)
def patch(self):
"""
Submission edit(partial)
"""
submission = self.request.context
old_status = submission.status
new_status = self.request.validated["data"].get("status", old_status)
now = get_now()
if new_status != old_status:
submission.date = now
activated = new_status == "active" and old_status != new_status
if activated:
submission.datePublished = now
self.create_qualification()
apply_patch(self.request, src=self.request.validated["submission_src"], obj_name="submission")
self.LOGGER.info("Updated submission {}".format(submission.id),
extra=context_unpack(self.request, {"MESSAGE_ID": "submission_patch"}))
return {"data": submission.serialize("view")}
def create_qualification(self):
submission = self.request.context
framework = self.request.validated["framework"]
qualification_id = generate_id()
qualification_data = {
"id": qualification_id,
"frameworkID": framework["_id"],
"submissionID": submission.id,
"framework_owner": framework["owner"],
"framework_token": framework["owner_token"],
"qualificationType": framework["frameworkType"],
"mode": framework.get("type")
}
qualification = Qualification(qualification_data)
self.request.validated["qualification_src"] = {}
self.request.validated["qualification"] = qualification
if save_qualification(self.request):
submission.qualificationID = qualification_id
self.LOGGER.info(
"Created qualification {}".format(qualification_id),
extra=context_unpack(
self.request,
{"MESSAGE_ID": "qualification_create"},
{"qualification_id": qualification_id,
"qualification_mode": qualification.mode},
),
)
| 36.734694 | 102 | 0.656111 | 2,780 | 0.772222 | 0 | 0 | 2,976 | 0.826667 | 0 | 0 | 673 | 0.186944 |
2f222448d0c305c6158a8a8cb410ef32dcbf5429
| 7,090 |
py
|
Python
|
util.py
|
gmshashank/pytorch_yolo
|
9736006639acba9743b4e3ff56285668357097f9
|
[
"MIT"
] | null | null | null |
util.py
|
gmshashank/pytorch_yolo
|
9736006639acba9743b4e3ff56285668357097f9
|
[
"MIT"
] | null | null | null |
util.py
|
gmshashank/pytorch_yolo
|
9736006639acba9743b4e3ff56285668357097f9
|
[
"MIT"
] | null | null | null |
from __future__ import division
from torch.autograd import Variable
import cv2
import numpy as np
import torch
def bbox_iou(box1, box2):
# returns IoU of two bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
if torch.cuda.is_available():
inter_area = torch.max(
inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape).cuda()
) * torch.max(
inter_rect_y2 - inter_rect_y1 + 1, torch.zeros(inter_rect_x2.shape).cuda()
)
else:
inter_area = torch.max(
inter_rect_x2 - inter_rect_x1 + 1, torch.zeros(inter_rect_x2.shape)
) * torch.max(
inter_rect_y2 - inter_rect_y1 + 1, torch.zeros(inter_rect_x2.shape)
)
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area)
return iou
def load_classes(namesfile):
fp = open(namesfile, "r")
names = fp.read().split("\n")[:-1]
return names
def get_test_input_cv(imglist, input_dim, CUDA):
img = cv2.imread(imglist[0])
img = cv2.resize(img, (input_dim, input_dim))
img_ = img[:, :, ::-1].transpose((2, 0, 1))
img_ = img_[np.newaxis, :, :, :] / 255.0
img_ = torch.from_numpy(img_).float()
img_ = Variable(img_)
if CUDA:
img_ = img_.cuda()
return img_
def predict_transform(prediction, input_dim, anchors, num_classes, use_gpu=True):
batch_size = prediction.size(0)
stride = input_dim // prediction.size(2)
grid_size = input_dim // stride
bbox_attrs = 5 + num_classes
num_anchors = len(anchors)
prediction = prediction.view(
batch_size, bbox_attrs * num_anchors, grid_size * grid_size
)
prediction = prediction.transpose(1, 2).contiguous()
prediction = prediction.view(
batch_size, grid_size * grid_size * num_anchors, bbox_attrs
)
anchors = [(anchor[0] / stride, anchor[1] / stride) for anchor in anchors]
# Sigmoid the centerX,centerY and objectness score
prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
prediction[:, :, 4] = torch.sigmoid(prediction[:, :, 4])
# Add center offsets
grid = np.arange(grid_size)
a, b = np.meshgrid(grid, grid)
x_offset = torch.FloatTensor(a).view(-1, 1)
y_offset = torch.FloatTensor(b).view(-1, 1)
if use_gpu:
prediction = prediction.cuda()
x_offset = x_offset.cuda()
y_offset = y_offset.cuda()
x_y_offset = (
torch.cat((x_offset, y_offset), 1)
.repeat(1, num_anchors)
.view(-1, 2)
.unsqueeze(0)
)
prediction[:, :, :2] += x_y_offset
# Log Space transform of height and width
anchors = torch.FloatTensor(anchors)
if use_gpu:
anchors = anchors.cuda()
anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)
prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4]) * anchors
    # sigmoid activation on the class scores
prediction[:, :, 5 : 5 + num_classes] = torch.sigmoid(
(prediction[:, :, 5 : 5 + num_classes])
)
prediction[
:, :, :4
] *= stride # resize the detections map to the size of the input image
return prediction
def unique(tensor):
tensor_np = tensor.cpu().numpy()
unique_np = np.unique(tensor_np)
unique_tensor = torch.from_numpy(unique_np)
tensor_res = tensor.new(unique_tensor.shape)
tensor_res.copy_(unique_tensor)
return tensor_res
def write_results(prediction, confidence, num_classes, nms=True, nms_conf=0.4):
# Object Confidence Thresholding
conf_mask = (prediction[:, :, 4] > confidence).float().unsqueeze(2)
prediction = prediction * conf_mask
# NMS
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
batch_size = prediction.size(0)
output = prediction.new(1, prediction.size(2) + 1)
write = False
for ind in range(batch_size):
# select the image from the batch
img_pred = prediction[ind] # Image Tensor
max_conf, max_conf_score = torch.max(img_pred[:, 5 : 5 + num_classes], 1)
max_conf = max_conf.float().unsqueeze(1)
max_conf_score = max_conf_score.float().unsqueeze(1)
seq = (img_pred[:, :5], max_conf, max_conf_score)
img_pred = torch.cat(seq, 1)
# Get rid of the zero entries
non_zero_ind = torch.nonzero((img_pred[:, 4]))
img_pred_ = img_pred[non_zero_ind.squeeze(), :].view(-1, 7)
try:
img_classes = unique(img_pred_[:, -1])
except:
continue
for cls in img_classes:
# get detections with one particular class
cls_mask = img_pred_ * (img_pred_[:, -1] == cls).float().unsqueeze(1)
class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
img_pred_class = img_pred_[class_mask_ind].view(-1, 7)
# sort the detections for maximum objectness confidence
conf_sort_index = torch.sort(img_pred_class[:, 4], descending=True)[1]
img_pred_class = img_pred_class[conf_sort_index]
idx = img_pred_class.size(0)
if nms:
# for each detection
for i in range(idx):
try:
ious = bbox_iou(
img_pred_class[i].unsqueeze(0), img_pred_class[i + 1 :]
)
except ValueError:
break
except IndexError:
break
iou_mask = (ious < nms_conf).float().unsqueeze(1)
img_pred_class[i + 1 :] *= iou_mask
non_zero_ind = torch.nonzero(img_pred_class[:, 4]).squeeze()
img_pred_class = img_pred_class[non_zero_ind].view(-1, 7)
batch_ind = img_pred_class.new(img_pred_class.size(0), 1).fill_(ind)
seq = batch_ind, img_pred_class
if not write:
output = torch.cat(seq, 1)
write = True
else:
out = torch.cat(seq, 1)
output = torch.cat((output, out))
return output
| 34.754902 | 87 | 0.572779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.070381 |
2f2228d6057ad9c4100fbf0aed98528ab280f726
| 743 |
py
|
Python
|
922.py
|
BLUECARVIN/LeetCode
|
0d085ed2dbee47c57d22ac368872161076369ff9
|
[
"MIT"
] | null | null | null |
922.py
|
BLUECARVIN/LeetCode
|
0d085ed2dbee47c57d22ac368872161076369ff9
|
[
"MIT"
] | null | null | null |
922.py
|
BLUECARVIN/LeetCode
|
0d085ed2dbee47c57d22ac368872161076369ff9
|
[
"MIT"
] | null | null | null |
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
A.sort(key=lambda x: (x % 2 != 0))
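        # False sorts before True, so even numbers end up at the front and odd numbers at the back.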
b = []
for i in range(int(len(A) / 2)):
b.append(A[i])
b.append(A[-(1+i)])
return b
# ---------- 320ms, 15.9MB ---------- #
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
odd = []
even = []
ans = []
A.sort()
for i in range(len(A)):
if A[i] % 2 == 0:
even.append(A[i])
else:
odd.append(A[i])
for i in range(len(odd)):
ans.append(even[i])
ans.append(odd[i])
return ans
# ---------- 320ms, 16.1MB ---------- #
| 28.576923 | 61 | 0.414536 | 661 | 0.889637 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.10498 |
2f224c8f917dc2d903a60f297bdfff121e03b7dc
| 1,190 |
py
|
Python
|
mainConsumer.py
|
cmoshe390/pythonProj
|
7123255abbb53e4330c9548be16dd9e237f8a51d
|
[
"Unlicense",
"MIT"
] | null | null | null |
mainConsumer.py
|
cmoshe390/pythonProj
|
7123255abbb53e4330c9548be16dd9e237f8a51d
|
[
"Unlicense",
"MIT"
] | null | null | null |
mainConsumer.py
|
cmoshe390/pythonProj
|
7123255abbb53e4330c9548be16dd9e237f8a51d
|
[
"Unlicense",
"MIT"
] | null | null | null |
from rabbitConsumer import *
from socketConsumer import SocketConsumer
from dlx import *
import threading
import sys
if __name__ == '__main__':
work_with = sys.argv[1]
r_k = ['*.jpg', '*.jpeg', '#']
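    # AMQP topic routing keys: consumer 1 gets *.jpg, consumer 2 gets *.jpeg,
    # consumer 3 gets everything ('#' matches all routing keys).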
threads = []
dlx = ReconnectingDlx()
threads.append(threading.Thread(target=dlx.run))
for j in range(1, 4):
if work_with == 'rabbit':
# consumer = RabbitConsumer(_id_consumer=j, _exchange='exchange1',
# _queue=f'queue{j}', _routing_key=r_k[j - 1], _exchange_type='topic',
# _producer_to_dlx=dlx)
consumer = RabbitReconnectingConsumer(_id_consumer=j, _exchange='exchange1',
_queue=f'queue{j}', _routing_key=r_k[j - 1], _exchange_type='topic',
_producer_to_dlx=dlx)
elif work_with == 'socket':
consumer = SocketConsumer(_id_consumer=j)
        else:
            print("the parameter in args must be 'rabbit' or 'socket'!")
            continue  # no consumer was created for this worker, so skip it
threads.append(threading.Thread(target=consumer.run))
for thread in threads:
thread.start()
| 34 | 118 | 0.561345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.281513 |
2f237c48f402b5312560d0ad14f693b93cf182f6
| 1,797 |
py
|
Python
|
backend/flask-api/migrations/versions/6fdbb9233bd6_.py
|
lucasbibianot/inova-cnj-time16
|
e621d7027bd462d348e233ffd6ed88648c53704b
|
[
"Apache-2.0"
] | null | null | null |
backend/flask-api/migrations/versions/6fdbb9233bd6_.py
|
lucasbibianot/inova-cnj-time16
|
e621d7027bd462d348e233ffd6ed88648c53704b
|
[
"Apache-2.0"
] | null | null | null |
backend/flask-api/migrations/versions/6fdbb9233bd6_.py
|
lucasbibianot/inova-cnj-time16
|
e621d7027bd462d348e233ffd6ed88648c53704b
|
[
"Apache-2.0"
] | 2 |
2020-10-19T22:03:31.000Z
|
2020-11-29T21:22:33.000Z
|
"""Mapeamento das tabelas para persistir os processos datajud
Revision ID: 6fdbb9233bd6
Revises: 8d2eb6149b1d
Create Date: 2020-10-18 09:22:06.650559
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6fdbb9233bd6'
down_revision = '8d2eb6149b1d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tb_processo',
sa.Column('cd_processo', sa.String(length=100), nullable=False),
sa.Column('nu_processo', sa.String(length=50), nullable=False),
sa.Column('cd_classe', sa.String(length=50), nullable=False),
sa.Column('cd_orgao_julgador', sa.String(length=50), nullable=False),
sa.Column('ds_orgao_julgador', sa.String(length=4000), nullable=False),
sa.Column('sg_tribunal', sa.String(length=30), nullable=False),
sa.Column('sg_grau', sa.String(length=30), nullable=False),
sa.Column('ind_presidencia', sa.String(length=1), nullable=False),
sa.PrimaryKeyConstraint('cd_processo')
)
op.create_table('tb_processo_evento',
sa.Column('id_processo_evento', sa.Integer(), nullable=False),
sa.Column('dt_ocorrencia', sa.Time(), nullable=False),
sa.Column('cd_processo', sa.String(length=100), nullable=False),
sa.Column('id_evento', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['cd_processo'], ['tb_processo.cd_processo'], ),
sa.ForeignKeyConstraint(['id_evento'], ['tb_desc_evento.id_evento'], ),
sa.PrimaryKeyConstraint('id_processo_evento')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tb_processo_evento')
op.drop_table('tb_processo')
# ### end Alembic commands ###
| 36.673469 | 76 | 0.709516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 752 | 0.418475 |
2f239d716de5c5b3e73637e42e5427fd0197839a
| 1,991 |
py
|
Python
|
analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py
|
brendano257/Zugspitze-Schneefernerhaus
|
64bb86ece2eec147f2a7fb412f87ff2313388753
|
[
"MIT"
] | null | null | null |
analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py
|
brendano257/Zugspitze-Schneefernerhaus
|
64bb86ece2eec147f2a7fb412f87ff2313388753
|
[
"MIT"
] | null | null | null |
analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py
|
brendano257/Zugspitze-Schneefernerhaus
|
64bb86ece2eec147f2a7fb412f87ff2313388753
|
[
"MIT"
] | null | null | null |
"""
A set of CC412022 and CC416168 runs was performed back to back without blanks on 2019-11-12.
Rough quantification is done by the script below.
"""
__package__ = 'Z'
from datetime import datetime
from settings import CORE_DIR, DB_NAME
from IO.db import connect_to_db, GcRun, Integration, Standard, SampleQuant
from processing import blank_subtract
from reporting import compile_quant_report
engine, session = connect_to_db(DB_NAME, CORE_DIR)
standard_to_quantify_with = session.query(Standard).filter(Standard.name == 'cc416168').one_or_none()
# get standard cert values for the quantifier
certified_values_of_sample = (session.query(Standard)
.filter(Standard.name == 'cc412022_noaa_provided')
.one().quantifications)
# get standard cert values for the sample being quantified
vocs = session.query(Standard).filter(Standard.name == 'vocs').one_or_none()
vocs = [q.name for q in vocs.quantifications]
samples = (session.query(GcRun).join(Integration, Integration.run_id == GcRun.id)
.filter(GcRun.date > datetime(2019, 11, 12), GcRun.date < datetime(2019, 11, 13))
.filter(Integration.filename.like('%CC412022___.D'))
.order_by(GcRun.date)
.all())
standards = (session.query(GcRun).join(Integration, Integration.run_id == GcRun.id)
.filter(GcRun.date > datetime(2019, 11, 12), GcRun.date < datetime(2019, 11, 13))
.filter(Integration.filename.like('%CC416168___.D'))
.order_by(GcRun.date)
.all())
quants = []
for sample, standard in zip(samples, standards):
blank_subtract(sample, vocs, session, blank=None, force_no_blank=True)
blank_subtract(standard, vocs, session, blank=None, force_no_blank=True)
quant = SampleQuant(sample, standard, None, standard_to_quantify_with)
quant.quantify()
quants.append(quant)
compile_quant_report(quants, 'CC412022', 'CC416168', certified_values_of_sample, date=datetime(2019, 11, 12))
| 40.632653 | 109 | 0.70668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.165244 |
2f25439acb972903c75d41093b0f43be910845ab
| 310 |
py
|
Python
|
main.py
|
mesmacosta/datacatalog-fileset-enricher
|
0792632fc181b13696f89ef3335da4e2ce1dca4a
|
[
"MIT"
] | 3 |
2020-04-01T15:28:25.000Z
|
2020-06-06T18:30:34.000Z
|
main.py
|
mesmacosta/datacatalog-fileset-enricher
|
0792632fc181b13696f89ef3335da4e2ce1dca4a
|
[
"MIT"
] | null | null | null |
main.py
|
mesmacosta/datacatalog-fileset-enricher
|
0792632fc181b13696f89ef3335da4e2ce1dca4a
|
[
"MIT"
] | 1 |
2020-07-09T06:05:24.000Z
|
2020-07-09T06:05:24.000Z
|
import logging
import sys
from datacatalog_fileset_enricher import datacatalog_fileset_enricher_cli
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
argv = sys.argv
datacatalog_fileset_enricher_cli.\
DatacatalogFilesetEnricherCLI.run(argv[1:] if len(argv) > 0 else argv)
| 31 | 78 | 0.780645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.032258 |
2f2590662675a6fa11503eafa56e671b78fe7a23
| 10,473 |
py
|
Python
|
srcds/events/csgo.py
|
w4rum/pysrcds
|
a9dbc198c6f087757e40d9af14ca8de9a39cef74
|
[
"MIT"
] | 17 |
2015-06-26T08:49:07.000Z
|
2021-09-11T09:02:40.000Z
|
srcds/events/csgo.py
|
w4rum/pysrcds
|
a9dbc198c6f087757e40d9af14ca8de9a39cef74
|
[
"MIT"
] | 5 |
2015-04-27T13:44:58.000Z
|
2022-02-07T19:00:42.000Z
|
srcds/events/csgo.py
|
w4rum/pysrcds
|
a9dbc198c6f087757e40d9af14ca8de9a39cef74
|
[
"MIT"
] | 12 |
2015-02-13T15:34:47.000Z
|
2021-09-11T09:02:30.000Z
|
# Copyright (C) 2013 Peter Rowlands
"""csgo events module
Contains event classes for CS:S and CS:GO events
"""
from __future__ import absolute_import, unicode_literals
from future.utils import python_2_unicode_compatible
from .generic import (BaseEvent, PlayerEvent, PlayerTargetEvent, KillEvent,
AttackEvent)
@python_2_unicode_compatible
class SwitchTeamEvent(PlayerEvent):
"""Player switched team event"""
regex = ''.join([
BaseEvent.regex,
r'"(?P<player_name>.*)<(?P<uid>\d*)><(?P<steam_id>[\w:]*)>" ',
r'switched from team <(?P<orig_team>\w*)> to <(?P<new_team>\w*)>',
])
def __init__(self, timestamp, player_name, uid, steam_id, orig_team,
new_team):
super(SwitchTeamEvent, self).__init__(timestamp, player_name, uid,
steam_id, team=None)
self.orig_team = orig_team
self.new_team = new_team
def text(self):
player = self.player
player.team = None
msg = ' '.join([
'"%s"' % player,
'switched from team <%s> to <%s>' % (self.orig_team,
self.new_team),
])
return ' '.join([super(PlayerEvent, self).text(), msg])
__str__ = text
@python_2_unicode_compatible
class BuyEvent(PlayerEvent):
"""Player buy event"""
regex = ''.join([
PlayerEvent.regex,
r'purchased "(?P<item>\w*)"',
])
def __init__(self, timestamp, player_name, uid, steam_id, team, item):
super(BuyEvent, self).__init__(timestamp, player_name, uid, steam_id,
team)
self.item = item
def text(self):
msg = 'purchased "%s"' % (self.item)
return ' '.join([super(BuyEvent, self).text(), msg])
__str__ = text
@python_2_unicode_compatible
class ThrowEvent(PlayerEvent):
"""Player threw grenade event"""
regex = ''.join([
PlayerEvent.regex,
r'threw (?P<nade>\w*) \[(?P<location>-?\d+ -?\d+ -?\d+)\]',
])
def __init__(self, timestamp, player_name, uid, steam_id, team, nade,
location):
if not isinstance(location, tuple) or not len(location) == 3:
raise TypeError('Expected 3-tuple for location')
super(ThrowEvent, self).__init__(timestamp, player_name, uid, steam_id,
team)
self.location = location
self.nade = nade
def text(self):
msg = 'threw %s [%d %d %d]' % (self.nade, self.location[0],
self.location[1], self.location[2])
return ' '.join([super(ThrowEvent, self).text(), msg])
__str__ = text
@classmethod
def from_re_match(cls, match):
"""Return an event constructed from a self.regex match"""
kwargs = match.groupdict()
location = kwargs['location'].split()
kwargs['location'] = (int(location[0]), int(location[1]),
int(location[2]))
return cls(**kwargs)
@python_2_unicode_compatible
class CsgoAssistEvent(PlayerTargetEvent):
"""Player assist event"""
regex = ''.join([
BaseEvent.regex,
PlayerTargetEvent.player_regex,
r' assisted killing ',
PlayerTargetEvent.target_regex
])
def __init__(self, timestamp, player_name, player_uid, player_steam_id,
player_team, target_name, target_uid, target_steam_id,
target_team):
super(CsgoAssistEvent, self).__init__(timestamp, player_name,
player_uid, player_steam_id,
player_team, target_name, target_uid,
target_steam_id, target_team)
def text(self):
msg = '"%s" assisted killing "%s" ' % (self.player, self.target)
return ' '.join([super(CsgoAssistEvent, self).text(), msg])
__str__ = text
@python_2_unicode_compatible
class CsgoKillEvent(KillEvent):
"""CS:GO specific kill event"""
regex = ''.join([
BaseEvent.regex,
PlayerTargetEvent.player_regex,
r'\[(?P<player_location>-?\d+ -?\d+ -?\d+)\]',
r' killed ',
PlayerTargetEvent.target_regex,
r'\[(?P<target_location>-?\d+ -?\d+ -?\d+)\]',
r' with "(?P<weapon>\w*)"',
r'( \(headshot\))?',
])
def __init__(self, timestamp, player_name, player_uid, player_steam_id,
player_team, player_location, target_name, target_uid,
target_steam_id, target_team, target_location, weapon,
headshot=False):
super(CsgoKillEvent, self).__init__(timestamp, player_name, player_uid,
player_steam_id, player_team,
target_name, target_uid,
target_steam_id, target_team,
weapon)
if (not isinstance(player_location, tuple)
or not len(player_location) == 3):
raise TypeError('Expected 3-tuple for player_location')
if (not isinstance(target_location, tuple)
or not len(target_location) == 3):
raise TypeError('Expected 3-tuple for target_location')
self.player_location = player_location
self.target_location = target_location
self.headshot = headshot
def text(self):
msg = [
'L %s:' % (self.timestamp_to_str(self.timestamp)),
'"%s" [%d %d %d]' % (self.player, self.player_location[0],
self.player_location[1],
self.player_location[2]),
'killed',
'"%s" [%d %d %d]' % (self.target, self.target_location[0],
self.target_location[1],
self.target_location[2]),
'with "%s"' % (self.weapon),
]
if self.headshot:
msg.append('(headshot)')
return ' '.join(msg)
__str__ = text
@classmethod
def from_re_match(cls, match):
"""Return an event constructed from a self.regex match"""
kwargs = match.groupdict()
player_location = kwargs['player_location'].split()
kwargs['player_location'] = (int(player_location[0]),
int(player_location[1]),
int(player_location[2]))
target_location = kwargs['target_location'].split()
kwargs['target_location'] = (int(target_location[0]),
int(target_location[1]),
int(target_location[2]))
if match.string.endswith('(headshot)'):
kwargs['headshot'] = True
return cls(**kwargs)
@python_2_unicode_compatible
class CsgoAttackEvent(AttackEvent):
"""CS:GO specific attack event"""
regex = ''.join([
BaseEvent.regex,
PlayerTargetEvent.player_regex,
r'\[(?P<player_location>-?\d+ -?\d+ -?\d+)\]',
r' attacked ',
PlayerTargetEvent.target_regex,
r'\[(?P<target_location>-?\d+ -?\d+ -?\d+)\]',
r' with "(?P<weapon>\w*)"',
r' \(damage "(?P<damage>\d+)"\)',
r' \(damage_armor "(?P<damage_armor>\d+)"\)',
r' \(health "(?P<health>\d+)"\)',
r' \(armor "(?P<armor>\d+)"\)',
r' \(hitgroup "(?P<hitgroup>[\w ]+)"\)',
])
def __init__(self, timestamp, player_name, player_uid, player_steam_id,
player_team, player_location, target_name, target_uid,
target_steam_id, target_team, target_location, weapon,
damage, damage_armor, health, armor, hitgroup):
super(CsgoAttackEvent, self).__init__(timestamp, player_name,
player_uid, player_steam_id,
player_team, target_name,
target_uid, target_steam_id,
target_team, weapon, damage)
if (not isinstance(player_location, tuple)
or not len(player_location) == 3):
raise TypeError('Expected 3-tuple for player_location')
if (not isinstance(target_location, tuple)
or not len(target_location) == 3):
raise TypeError('Expected 3-tuple for target_location')
self.player_location = player_location
self.target_location = target_location
self.damage_armor = int(damage_armor)
self.health = int(health)
self.armor = int(armor)
self.hitgroup = hitgroup
def text(self):
msg = [
'L %s:' % (self.timestamp_to_str(self.timestamp)),
'"%s" [%d %d %d]' % (self.player, self.player_location[0],
self.player_location[1],
self.player_location[2]),
'attacked',
'"%s" [%d %d %d]' % (self.target, self.target_location[0],
self.target_location[1],
self.target_location[2]),
'with "%s"' % (self.weapon),
'(damage "%d")' % (self.damage),
'(damage_armor "%d")' % (self.damage_armor),
'(health "%d")' % (self.health),
'(armor "%d")' % (self.armor),
'(hitgroup "%s")' % (self.hitgroup),
]
return ' '.join(msg)
__str__ = text
@classmethod
def from_re_match(cls, match):
"""Return an event constructed from a self.regex match"""
kwargs = match.groupdict()
player_location = kwargs['player_location'].split()
kwargs['player_location'] = (int(player_location[0]),
int(player_location[1]),
int(player_location[2]))
target_location = kwargs['target_location'].split()
kwargs['target_location'] = (int(target_location[0]),
int(target_location[1]),
int(target_location[2]))
return cls(**kwargs)
CSGO_EVENTS = [
SwitchTeamEvent,
BuyEvent,
ThrowEvent,
CsgoAssistEvent,
CsgoKillEvent,
CsgoAttackEvent,
]
| 36.491289 | 83 | 0.531175 | 9,813 | 0.936981 | 0 | 0 | 9,987 | 0.953595 | 0 | 0 | 1,857 | 0.177313 |
2f2781811c4aeb325fd30cc295a58030636b2c7d
| 695 |
py
|
Python
|
formacao-python/brasilidades/Telefone.py
|
hollowrm08/python-alura
|
eb43be24c7160b38f1598d8da25582bfe04ade29
|
[
"MIT"
] | null | null | null |
formacao-python/brasilidades/Telefone.py
|
hollowrm08/python-alura
|
eb43be24c7160b38f1598d8da25582bfe04ade29
|
[
"MIT"
] | null | null | null |
formacao-python/brasilidades/Telefone.py
|
hollowrm08/python-alura
|
eb43be24c7160b38f1598d8da25582bfe04ade29
|
[
"MIT"
] | null | null | null |
import re
class Telefone:
padrao = "([0-9]{2,3})?([0-9]{2})([0-9]{4,5})([0-9]{4})"
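    # Regex groups: optional 2-3 digit country code, 2 digit area code,
    # 4-5 digit prefix and a 4 digit suffix (digits only, no separators).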
def __init__(self, telefone):
if self.valida_telefone(telefone):
self._numero = telefone
else:
raise ValueError("Número Incorreto!")
def __str__(self):
return self.format_numero()
def valida_telefone(self, telefone):
resposta = re.findall(self.padrao, telefone)
if resposta:
return True
else:
return False
def format_numero(self):
resposta = re.search(self.padrao, self._numero)
return f'+{resposta.group(1)} ({resposta.group(2)}) {resposta.group(3)}-{resposta.group(4)}'
| 24.821429 | 100 | 0.579856 | 683 | 0.981322 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.218391 |
2f27bd70a0bac448a69a312f5b0f06826fe66bdd
| 670 |
py
|
Python
|
Listing_19-1.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
Listing_19-1.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
Listing_19-1.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
# Listing_19-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Trying out sounds in Pygame
import pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])
pygame.time.delay(1000) # Wait a second for the mixer to finish initializing
splat = pygame.mixer.Sound("splat.wav") # Create the Sound object
splat.play() # Play the sound
running = True
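# Minimal event loop: keep the window open until the user closes it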
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
| 29.130435 | 81 | 0.649254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 318 | 0.474627 |
2f2f6a510aa43446af03b23b36744744444b6c67
| 1,532 |
py
|
Python
|
docker_emperor/commands/context/set.py
|
workon-io/docker-emperor
|
d827bb2806494dcba97920dd83c5934d0a300089
|
[
"Apache-2.0"
] | null | null | null |
docker_emperor/commands/context/set.py
|
workon-io/docker-emperor
|
d827bb2806494dcba97920dd83c5934d0a300089
|
[
"Apache-2.0"
] | null | null | null |
docker_emperor/commands/context/set.py
|
workon-io/docker-emperor
|
d827bb2806494dcba97920dd83c5934d0a300089
|
[
"Apache-2.0"
] | null | null | null |
import six
import docker_emperor.logger as logger
from docker_emperor.nodes.context import Context
def run(root, *args, **kwargs):
name = args[0].strip() if args else None
if name:
if name in root.project['contexts']:
root.project.config['context'] = name
logger.success(u'Context <b>%s</b> selected.' % root.context.name)
else:
logger.error(u'Context <b>%s</b> unknow.' % name)
exit(0)
else:
contexts = root.project['contexts']
if not contexts:
contexts['default'] = Context('default')
root.project.config['context'] = 'default'
            logger.warning(u'No context defined, using <b>%s</b>.' % root.context.name)
else:
def select_context_name(contexts):
logger.ask(u'Please select the <b>{}</b> context to work on'.format(root.project.name))
for i, c in enumerate(contexts):
logger.choice(u'<b>%s</b>] %s' % (i+1, c.name))
ci = six.moves.input(': ')
try:
if ci == '0':
raise Exception
return contexts[int(ci)-1].name
except Exception as e:
                    logger.error(u'<b>%s</b> is not a valid choice' % ci)
return select_context_name(contexts)
root.project.config['context'] = select_context_name(contexts)
logger.success(u'Context <b>%s</b> selected.' % root.context.name)
| 39.282051 | 103 | 0.539817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.198433 |
2f2f9ccd72b1ada4944e0fb6d3cba3a6b6b3d3fc
| 759 |
py
|
Python
|
bnc/scripts/instance_lock_test.py
|
dotzhou/geodesy-ausgeoid
|
7d4fbcc1d88738de6ab84ccdba362407cbaeb117
|
[
"Apache-2.0"
] | null | null | null |
bnc/scripts/instance_lock_test.py
|
dotzhou/geodesy-ausgeoid
|
7d4fbcc1d88738de6ab84ccdba362407cbaeb117
|
[
"Apache-2.0"
] | null | null | null |
bnc/scripts/instance_lock_test.py
|
dotzhou/geodesy-ausgeoid
|
7d4fbcc1d88738de6ab84ccdba362407cbaeb117
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
from instance_lock import InstanceLock
################################################################################
def main():
print(sys.argv[0])
instance_lock = InstanceLock("/home/ted/BNC/logs/.__MY_TEST_LOCK__", sys.argv[0], 3)
try:
instance_lock.lock()
except Exception as e:
print("Failed to start: " + e.message)
sys.exit(-1)
print("sleeping ..")
time.sleep(60*10)
print("Exit ..")
instance_lock.unlock()
################################################################################
if __name__ == '__main__':
main()
| 19.973684 | 88 | 0.524374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.355731 |
2f30a5cc06c93cc21cd8f006b81cb7e3a4339ab4
| 1,194 |
py
|
Python
|
examples/Sans_Sphere/guiFitSphere.py
|
DomiDre/modelexp
|
1ec25f71e739dac27716f9a8637fa6ab067499b9
|
[
"MIT"
] | null | null | null |
examples/Sans_Sphere/guiFitSphere.py
|
DomiDre/modelexp
|
1ec25f71e739dac27716f9a8637fa6ab067499b9
|
[
"MIT"
] | null | null | null |
examples/Sans_Sphere/guiFitSphere.py
|
DomiDre/modelexp
|
1ec25f71e739dac27716f9a8637fa6ab067499b9
|
[
"MIT"
] | null | null | null |
import modelexp
from modelexp.experiments.sas import Sans
from modelexp.models.sas import Sphere
from modelexp.data import XyeData
from modelexp.fit import LevenbergMarquardt
from modelexp.models.sas import InstrumentalResolution
app = modelexp.App()
app.setExperiment(Sans)
dataRef = app.setData(XyeData)
dataRef.loadFromFile('./sansSphereData_sa.xye', 'sa')
dataRef.loadFromFile('./sansSphereData_la.xye', 'la')
dataRef.plotData()
modelRef = app.setModel(Sphere, InstrumentalResolution)
modelRef.setParam("r", 50.115979438653525, minVal = 0, maxVal = 100, vary = True)
modelRef.setParam("sldSphere", 4.5e-05, minVal = 0, maxVal = 0.00045000000000000004, vary = False)
modelRef.setParam("sldSolvent", 1e-05, minVal = 0, maxVal = 0.0001, vary = False)
modelRef.setParam("sigR", 0.0446, minVal = 0, maxVal = 0.2, vary = True)
modelRef.setParam("i0", 1.0082741570299425, minVal = 0, maxVal = 10, vary = True)
modelRef.setParam("bg", 0.0, minVal = 0, maxVal = 1, vary = False)
modelRef.setParam("dTheta_sa", 0.000174, minVal = 0, maxVal = 0.001, vary = True)
modelRef.setParam("dTheta_la", 0.000765, minVal = 0, maxVal = 0.001, vary = True)
app.setFit(LevenbergMarquardt)
app.show()
| 39.8 | 99 | 0.742881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.100503 |
2f34112711a7f4d8c6fd98347f5ba592ca3f8d4f
| 345 |
py
|
Python
|
chapter03/demo_3_2_1_7_1.py
|
NetworkRanger/python-spider-project
|
f501e331a59608d9a321a0d7254fcbcf81b50ec2
|
[
"MIT"
] | 1 |
2019-02-08T03:14:17.000Z
|
2019-02-08T03:14:17.000Z
|
chapter03/demo_3_2_1_7_1.py
|
NetworkRanger/python-spider-project
|
f501e331a59608d9a321a0d7254fcbcf81b50ec2
|
[
"MIT"
] | null | null | null |
chapter03/demo_3_2_1_7_1.py
|
NetworkRanger/python-spider-project
|
f501e331a59608d9a321a0d7254fcbcf81b50ec2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/9 12:35 AM
"""
Use ProxyHandler to set an HTTP proxy for requests dynamically at runtime
"""
import urllib2
proxy = urllib2.ProxyHandler({'http': '127.0.0.1:8087'})
opener = urllib2.build_opener(proxy)  # handlers are passed positionally, not in a list
urllib2.install_opener(opener)
response = urllib2.urlopen('http://www.zhichu.com/')
print response.read()
| 21.5625 | 56 | 0.713043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.522788 |
2f34c3e2255c1aaf56cddd4bf264efb8253bf37a
| 1,254 |
py
|
Python
|
scripts/run_metasv_bed2vcf.py
|
willrockout/metasv
|
b46f15cbe8a28941661855da6587451c971dc2e3
|
[
"BSD-2-Clause"
] | 43 |
2015-01-12T20:58:24.000Z
|
2021-11-24T07:30:06.000Z
|
scripts/run_metasv_bed2vcf.py
|
willrockout/metasv
|
b46f15cbe8a28941661855da6587451c971dc2e3
|
[
"BSD-2-Clause"
] | 80 |
2015-01-08T00:34:55.000Z
|
2022-02-16T08:30:34.000Z
|
scripts/run_metasv_bed2vcf.py
|
willrockout/metasv
|
b46f15cbe8a28941661855da6587451c971dc2e3
|
[
"BSD-2-Clause"
] | 25 |
2015-04-30T06:30:28.000Z
|
2022-02-22T02:48:20.000Z
|
#!/usr/bin/env python
import argparse
import logging
from metasv.generate_final_vcf import convert_metasv_bed_to_vcf
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description="Convert MetaSV final BED to VCF",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--sample", help="Sample name", required=True)
parser.add_argument("--bed", help="MetaSV final BED", required=True)
parser.add_argument("--vcf", help="Final VCF to output", required=True)
parser.add_argument("--reference", help="Reference FASTA")
parser.add_argument("--work", help="Work directory", default="work")
parser.add_argument("--pass_only", action="store_true",
help="Output only PASS calls")
args = parser.parse_args()
convert_metasv_bed_to_vcf(bedfile=args.bed, vcf_out=args.vcf,
workdir=args.work,
sample=args.sample,
reference=args.reference,
pass_calls=args.pass_only)
| 41.8 | 75 | 0.651515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.241627 |
2f373ae8b308ab8313e26c9ce9ba782726162914
| 2,273 |
py
|
Python
|
almanac/pages/abstract_page.py
|
welchbj/almanac
|
91db5921a27f7d089b4ad8463ffb6e1453c5126a
|
[
"MIT"
] | 4 |
2020-08-04T10:59:10.000Z
|
2021-08-23T13:42:03.000Z
|
almanac/pages/abstract_page.py
|
welchbj/almanac
|
91db5921a27f7d089b4ad8463ffb6e1453c5126a
|
[
"MIT"
] | null | null | null |
almanac/pages/abstract_page.py
|
welchbj/almanac
|
91db5921a27f7d089b4ad8463ffb6e1453c5126a
|
[
"MIT"
] | 2 |
2021-07-20T04:49:22.000Z
|
2021-08-23T13:42:23.000Z
|
from __future__ import annotations
from abc import ABC, abstractmethod, abstractproperty
from typing import Any, Optional, Set
from .page_path import PagePath, PagePathLike
class AbstractPage(ABC):
"""The base abstract page interface."""
def __init__(
self,
path: PagePathLike,
) -> None:
self._path = PagePath(path)
self._parent: Optional[AbstractPage] = None
self._children: Set[AbstractPage] = set()
@abstractproperty
def help_text(
self
) -> str:
"""The help text about this page.
Think of this as a static explanation about the page type's role within the
greater application, rather than reflecting the current state of this
particular page.
"""
@abstractproperty
def info_text(
self
) -> str:
"""The info text about this page.
Think of this as a more dynamic output (in contrast to :meth:`help_text`),
        which reflects the current state of this page.
"""
@abstractmethod
def get_prompt(
self
) -> str:
"""Return the prompt text for this page.
This is what is shown on the application's current line, acting as the
input prompt.
"""
@property
def path(
self
) -> PagePath:
"""This page's path."""
return self._path
@property
def parent(
self
) -> Optional[AbstractPage]:
"""The parent page of this page."""
return self._parent
@parent.setter
def parent(
self,
new_parent: AbstractPage
) -> None:
self._parent = new_parent
@property
def children(
self
) -> Set[AbstractPage]:
"""The immediate children of this page."""
return self._children
def __hash__(
self
) -> int:
return hash(self._path)
def __eq__(
self,
other: Any
) -> bool:
if not isinstance(other, AbstractPage):
return NotImplemented
return self._path == other._path
def __str__(
self
) -> str:
return str(self.path)
def __repr__(
self
) -> str:
return f'<{self.__class__.__qualname__} [{self.path}]>'
| 21.647619 | 83 | 0.57985 | 2,095 | 0.921689 | 0 | 0 | 1,319 | 0.58029 | 0 | 0 | 760 | 0.33436 |
2f3740dbe908121e76457672fb1354e03d0a203a
| 3,022 |
py
|
Python
|
examples/VTK/PerfTests/scene-export-time.py
|
ajpmaclean/trame
|
48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787
|
[
"BSD-3-Clause"
] | null | null | null |
examples/VTK/PerfTests/scene-export-time.py
|
ajpmaclean/trame
|
48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787
|
[
"BSD-3-Clause"
] | null | null | null |
examples/VTK/PerfTests/scene-export-time.py
|
ajpmaclean/trame
|
48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787
|
[
"BSD-3-Clause"
] | null | null | null |
from trame import state
from trame.html import vuetify, vtk
from trame.layouts import SinglePage
from vtkmodules.vtkImagingCore import vtkRTAnalyticSource
from vtkmodules.vtkFiltersGeometry import vtkGeometryFilter
from vtkmodules.vtkRenderingCore import (
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkDataSetMapper,
vtkActor,
)
# VTK factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
import vtkmodules.vtkRenderingOpenGL2 # noqa
# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------
DEFAULT_RESOLUTION = 10
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
source = vtkRTAnalyticSource()
filter = vtkGeometryFilter()
filter.SetInputConnection(source.GetOutputPort())
mapper = vtkDataSetMapper()
actor = vtkActor()
mapper.SetInputConnection(filter.GetOutputPort())
actor.SetMapper(mapper)
renderer.AddActor(actor)
renderer.ResetCamera()
renderWindow.Render()
filter.Update()
_min, _max = filter.GetOutput().GetPointData().GetScalars().GetRange()
mapper.SetScalarRange(_min, _max)
actor.GetProperty().SetEdgeVisibility(1)
actor.GetProperty().SetEdgeColor(1, 1, 1)
# -----------------------------------------------------------------------------
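# Rebuild the analytic source extent and refresh the view whenever the
# "resolution" slider state changes.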
@state.change("resolution")
def update_resolution(resolution=DEFAULT_RESOLUTION, **kwargs):
source.SetWholeExtent(
-resolution, resolution, -resolution, resolution, -resolution, resolution
)
html_view.reset_camera()
html_view.update()
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
# html_view = vtk.VtkLocalView(renderWindow)
# html_view = vtk.VtkRemoteView(renderWindow)
html_view = vtk.VtkRemoteLocalView(renderWindow, mode="local")
layout = SinglePage("Geometry export", on_ready=html_view.update)
layout.logo.click = html_view.reset_camera
layout.title.set_text("Geometry export")
with layout.toolbar as tb:
vuetify.VSpacer()
tb.add_child("{{ resolution }}")
vuetify.VSlider(
v_model=("resolution", DEFAULT_RESOLUTION),
min=10,
max=100,
step=1,
hide_details=True,
dense=True,
style="max-width: 300px",
)
vuetify.VBtn("Update", click=html_view.update)
with layout.content:
vuetify.VContainer(
fluid=True,
classes="pa-0 fill-height",
children=[html_view],
)
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
layout.start()
| 29.627451 | 81 | 0.617141 | 0 | 0 | 0 | 0 | 258 | 0.085374 | 0 | 0 | 844 | 0.279285 |
2f37d9b321c1b357a652919715d0a963e96430ee
| 601 |
py
|
Python
|
server/toolz_swap_app/migrations/0021_auto_20211217_2310.py
|
minerva-university/cs162-toolz-swap-service
|
d514d9b04118f26479cba71497c12dfa824c7c42
|
[
"MIT"
] | null | null | null |
server/toolz_swap_app/migrations/0021_auto_20211217_2310.py
|
minerva-university/cs162-toolz-swap-service
|
d514d9b04118f26479cba71497c12dfa824c7c42
|
[
"MIT"
] | null | null | null |
server/toolz_swap_app/migrations/0021_auto_20211217_2310.py
|
minerva-university/cs162-toolz-swap-service
|
d514d9b04118f26479cba71497c12dfa824c7c42
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-12-17 22:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('toolz_swap_app', '0020_auto_20211217_1402'),
]
operations = [
migrations.AddField(
model_name='listing',
name='item_image',
field=models.ImageField(blank=True, null=True, upload_to='listing_images'),
),
migrations.AddField(
model_name='listing',
name='item_image_url',
field=models.TextField(blank=True, null=True),
),
]
| 25.041667 | 87 | 0.599002 | 508 | 0.845258 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.249584 |
2f382211712726ce3bebece3524ea17b01c0cd4f
| 2,540 |
py
|
Python
|
saleor/dashboard/store/special_page/views.py
|
Chaoslecion123/Diver
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/dashboard/store/special_page/views.py
|
Chaoslecion123/Diver
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/dashboard/store/special_page/views.py
|
Chaoslecion123/Diver
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
from ....store.models import SpecialPage
from ...views import staff_member_required
from .forms import SpecialPageForm
@staff_member_required
@permission_required('site.manage_settings')
def special_page_add(request, site_settings_pk):
special_page = SpecialPage(site_settings_id=site_settings_pk)
form = SpecialPageForm(request.POST or None, instance=special_page)
if form.is_valid():
special_page = form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added special page %s') % (special_page,)
messages.success(request, msg)
return redirect('dashboard:site-details', pk=site_settings_pk)
ctx = {'form': form, 'site_settings_pk': site_settings_pk,
'special_page': special_page}
return TemplateResponse(
request, 'dashboard/store/special_pages/form.html', ctx)
@staff_member_required
@permission_required('site.manage_settings')
def special_page_edit(request, site_settings_pk, pk):
special_page = get_object_or_404(SpecialPage, pk=pk)
form = SpecialPageForm(request.POST or None, instance=special_page)
if form.is_valid():
special_page = form.save()
msg = pgettext_lazy(
'dashboard message', 'Updated special page %s') % (special_page,)
messages.success(request, msg)
return redirect('dashboard:site-details', pk=site_settings_pk)
ctx = {'form': form, 'site_settings_pk': site_settings_pk,
'special_page': special_page}
return TemplateResponse(
request, 'dashboard/store/special_pages/form.html', ctx)
@staff_member_required
@permission_required('site.manage_settings')
def special_page_delete(request, site_settings_pk, pk):
special_page = get_object_or_404(SpecialPage, pk=pk)
if request.method == 'POST':
special_page.delete()
messages.success(
request,
pgettext_lazy(
'Dashboard message',
'Removed site special page %s') %
(special_page,))
return redirect(
'dashboard:site-details', pk=site_settings_pk)
return TemplateResponse(
request, 'dashboard/store/special_pages/modal/confirm_delete.html',
{'special_page': special_page, 'site_settings_pk': site_settings_pk})
| 40.31746 | 77 | 0.715748 | 0 | 0 | 0 | 0 | 2,150 | 0.846457 | 0 | 0 | 526 | 0.207087 |
2f38dea668d3c57cb5f9fffdb2e8a23821880993
| 96 |
py
|
Python
|
pacote-download/Ex24.py
|
nkonai/Curso-em-video-Python
|
c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2
|
[
"MIT"
] | null | null | null |
pacote-download/Ex24.py
|
nkonai/Curso-em-video-Python
|
c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2
|
[
"MIT"
] | null | null | null |
pacote-download/Ex24.py
|
nkonai/Curso-em-video-Python
|
c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2
|
[
"MIT"
] | null | null | null |
cidade = str(input('Qual cidade voce mora?'))
print(cidade.strip().lower().startswith('santo'))
| 32 | 49 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.322917 |
2f3a828848ad3ed2bdecff21215f6a9e0ea54453
| 8,417 |
py
|
Python
|
src/salt_finder_charts/standard_finder_charts.py
|
saltastroops/salt_finder_charts
|
f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de
|
[
"MIT"
] | null | null | null |
src/salt_finder_charts/standard_finder_charts.py
|
saltastroops/salt_finder_charts
|
f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de
|
[
"MIT"
] | null | null | null |
src/salt_finder_charts/standard_finder_charts.py
|
saltastroops/salt_finder_charts
|
f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from typing import BinaryIO, Generator, Optional, Tuple
import astropy.units as u
import pytz
from astropy.units import Quantity
from salt_finder_charts.image import Survey, SurveyImageService
from salt_finder_charts.mode import (
Mode,
ModeDetails,
ImagingModeDetails,
LongslitModeDetails,
SlotModeDetails,
MOSModeDetails,
)
from salt_finder_charts.output import output_pdf, output_png, output_svg, OutputFormat
from salt_finder_charts.util import (
MagnitudeRange,
MOSMask,
julian_day_start,
julian_day_end,
)
from salt_finder_charts import finder_charts
from salt_finder_charts.ephemerides import (
HorizonsEphemerisService,
ConstantEphemerisService,
EphemerisService,
)
TimeInterval = Tuple[datetime, datetime]
def standard_finder_charts(
# arguments which are always required
mode: Mode,
output_format: OutputFormat,
# time interval
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
# ephemerides
ra: Optional[Quantity] = None,
dec: Optional[Quantity] = None,
min_magnitude: Optional[float] = None,
max_magnitude: Optional[float] = None,
bandpass: Optional[str] = None,
horizons_id: Optional[str] = None,
horizons_stepsize: Optional[Quantity] = None,
# image
survey: Survey = Survey.POSS2UKSTU_RED,
# instrument mode details
position_angle: Optional[Quantity] = None,
slitwidth: Optional[Quantity] = None,
mos_mask_rsmt: Optional[BinaryIO] = None,
# miscellaneous
basic_annotations: bool = False,
title: Optional[str] = None,
) -> Generator[BinaryIO, None, None]:
"""
Create standard SALT finder charts.
    Some of the parameters are mutually exclusive. For example, it does not make sense
to specify a slit width if you generate finding charts for imaging mode. In some
cases such combinations will raise an error, but in others some of the parameters
may just be ignored.
If no start time is given, the beginning of the current Julian day is assumed. If no
end time is given, the end of the current Julian day is assumed.
Parameters
----------
mode : Mode
Observation mode (such as imaging or MOS).
basic_annotations : bool
Whether only basic annotations should be added to the finder chart.
output_format : OutputFormat
Output format (such as PDF) to use for the generated finder charts.
start_time : datetime
Start time from which to generate finder charts.
end_time : datetime
End time until which to generate finder charts.
ra : Quantity
Right ascension of the finder chart center.
dec : Quantity
Declination of the finder chart center.
min_magnitude : float
Minimum magnitude of the target.
    max_magnitude : float
        Maximum magnitude of the target.
    bandpass : str
        Bandpass (such as V) for the magnitudes.
horizons_id : str
Identifier for a target in the Horizons database.
horizons_stepsize : Quantity
Time between ephemerides queried from the Horizons service. The default is 5
minutes.
survey : Survey
The image survey from which the finder chart image shall be taken.
position_angle : Quantity
The position angle.
slitwidth : Quantity
The width of the longslit, as an angle.
mos_mask_rsmt : BinaryIO
Input stream containing an RSMT file for a MOS setup.
title : str
Title for the finder chart.
Returns
-------
Generator of BinaryIO
The finder charts as input streams.
"""
# time interval
# get default start and end time if need be
now = datetime.now(pytz.utc)
if not start_time:
start_time = julian_day_start(now)
if not end_time:
end_time = julian_day_end(now)
# ensure there are timezones
if start_time.tzinfo is None:
raise ValueError("The start time must be timezone-aware.")
if end_time.tzinfo is None:
raise ValueError("The end time must be timezone aware.")
# ephemerides
mos_mask: Optional[MOSMask] = None
if mode == Mode.MOS:
if mos_mask_rsmt is None:
raise ValueError(
"A RSMT file must be supplied if a finding chart is generated for MOS mode."
)
if ra or dec or position_angle:
raise ValueError(
"You must not supply a right ascension, declination or position angle in MOS mode, as they are taken from the MOS mask definition."
)
mos_mask = MOSMask(mos_mask_rsmt)
ra = mos_mask.right_ascension
dec = mos_mask.declination
position_angle = mos_mask.position_angle
if horizons_id:
# get ephemerides from Horizons
if ra is not None or dec is not None:
raise ValueError(
"No right ascension or declination must be supplied if a Horizons identifier is supplied."
)
if horizons_stepsize is None:
horizons_stepsize = 5 * u.minute
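        # The query window is padded by two days on each side, presumably so
        # interpolation near the interval edges still has ephemerides available.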
ephemeris_service: EphemerisService = HorizonsEphemerisService(
object_id=horizons_id,
start_time=start_time - timedelta(days=2),
end_time=end_time + timedelta(days=2),
stepsize=horizons_stepsize,
)
else:
        # use constant ephemerides for a sidereal target with fixed coordinates
if ra is None:
raise ValueError("The right ascension is missing.")
if dec is None:
raise ValueError("The declination is missing.")
if min_magnitude is not None and (max_magnitude is None or bandpass is None):
raise ValueError(
"You must supply a maximum magnitude and bandpass if you supply a minimum magnitude."
)
if max_magnitude is not None and (min_magnitude is None or bandpass is None):
raise ValueError(
"You must supply a minimum magnitude and bandpass if you supply a maximum magnitude."
)
if bandpass is not None and (min_magnitude is None or max_magnitude is None):
raise ValueError(
"You must supply a minimum and maximum magnitude if you supply a bandpass."
)
magnitude_range: Optional[MagnitudeRange] = None
if (
min_magnitude is not None
and max_magnitude is not None
and bandpass is not None
):
magnitude_range = MagnitudeRange(
min_magnitude=min_magnitude,
max_magnitude=max_magnitude,
bandpass=bandpass,
)
ephemeris_service = ConstantEphemerisService(
ra=ra, dec=dec, magnitude_range=magnitude_range
)
# image
image_service = SurveyImageService(survey=survey)
# mode details
if mode is None:
raise ValueError("You must specify an instrument mode.")
if mode == Mode.IMAGING or mode == Mode.HRS:
mode_details: ModeDetails = ImagingModeDetails(position_angle)
elif mode == Mode.SLOT:
mode_details = SlotModeDetails(pa=position_angle)
elif mode == Mode.LONGSLIT:
if slitwidth is None:
raise ValueError(
"A slit width is required if a finding chart is generated for longslit mode."
)
mode_details = LongslitModeDetails(
slitwidth=slitwidth, pa=position_angle, center_ra=ra, center_dec=dec
)
elif mode == Mode.MOS:
if not mos_mask:
raise ValueError("No MOS mask has been supplied.")
mode_details = MOSModeDetails(mos_mask)
else:
raise ValueError(f"Mode unsupported: {mode.value}")
# output
if output_format == OutputFormat.PDF:
output = output_pdf
elif output_format == OutputFormat.PNG:
output = output_png
elif output_format == OutputFormat.SVG:
output = output_svg
else:
raise ValueError(f"Output format unsupported: {output_format.value}")
# generate the finder charts
return finder_charts(
mode_details=mode_details,
start_time=start_time,
end_time=end_time,
ephemeris_service=ephemeris_service,
image_service=image_service,
title=title,
basic_annotations=basic_annotations,
output=output,
)
| 34.63786 | 147 | 0.661756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,244 | 0.38541 |
2f3aae6740fa544f6fcbafd5b09e5b47c616d5d2
| 2,449 |
py
|
Python
|
satstac/landsat/cli.py
|
developmentseed/sat-stac-landsat
|
f2263485043a827b4153aecc12f45a3d1363e9e2
|
[
"MIT"
] | null | null | null |
satstac/landsat/cli.py
|
developmentseed/sat-stac-landsat
|
f2263485043a827b4153aecc12f45a3d1363e9e2
|
[
"MIT"
] | null | null | null |
satstac/landsat/cli.py
|
developmentseed/sat-stac-landsat
|
f2263485043a827b4153aecc12f45a3d1363e9e2
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import sys
from datetime import datetime
import satstac
from satstac import Catalog
import satstac.landsat as landsat
from .version import __version__
# quiet loggers
logging.getLogger('urllib3').propagate = False
logging.getLogger('requests').propagate = False
logger = logging.getLogger(__name__)
def parse_args(args):
desc = 'sat-stac-landsat (v%s)' % __version__
dhf = argparse.ArgumentDefaultsHelpFormatter
parser0 = argparse.ArgumentParser(description=desc)
pparser = argparse.ArgumentParser(add_help=False)
pparser.add_argument('--version', help='Print version and exit', action='version', version=__version__)
pparser.add_argument('--log', default=2, type=int,
help='0:all, 1:debug, 2:info, 3:warning, 4:error, 5:critical')
# add subcommands
subparsers = parser0.add_subparsers(dest='command')
# command 1
parser = subparsers.add_parser('ingest', parents=[pparser], help='Ingest records into catalog', formatter_class=dhf)
parser.add_argument('catalog', help='Catalog that contains the Collection')
valid_date = lambda d: datetime.strptime(d, '%Y-%m-%d').date()
parser.add_argument('-c', '--collections', help='Collection to ingest (pre, c1, or all)', default='all')
parser.add_argument('--realtime', help='Also ingest realtime data', action='store_true', default=False)
parser.add_argument('--missing', help='Only ingest missing items', action='store_true', default=False)
parser.add_argument('--start', help='Start date of ingestion', default=None, type=valid_date)
parser.add_argument('--end', help='End date of ingestion', default=None, type=valid_date)
# command 2
#parser = subparsers.add_parser('cmd2', parents=[pparser], help='Command 2', formatter_class=dhf)
# parser.add_argument()
# turn Namespace into dictinary
parsed_args = vars(parser0.parse_args(args))
return parsed_args
def cli():
args = parse_args(sys.argv[1:])
logging.basicConfig(stream=sys.stdout, level=args.pop('log') * 10)
cmd = args.pop('command')
if cmd == 'ingest':
cat = Catalog.open(args['catalog'])
landsat.add_items(cat, collections=args['collections'], realtime=args['realtime'],
missing=args['missing'], start_date=args['start'], end_date=args['end'])
elif cmd == 'cmd2':
print(cmd)
if __name__ == "__main__":
cli()
| 37.106061 | 120 | 0.694978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 778 | 0.317681 |
2f3ae02cd059cdf4b269302e970b02d87301e8cf
| 3,005 |
py
|
Python
|
database.py
|
pratik-choudhari/squ.ez-url-shortener
|
ebd13da15501806d0ef30353fe77a9d3d6d1081a
|
[
"MIT"
] | 5 |
2020-12-20T14:50:31.000Z
|
2021-09-20T06:39:18.000Z
|
database.py
|
pratik-choudhari/squ.ez-url-shortener
|
ebd13da15501806d0ef30353fe77a9d3d6d1081a
|
[
"MIT"
] | null | null | null |
database.py
|
pratik-choudhari/squ.ez-url-shortener
|
ebd13da15501806d0ef30353fe77a9d3d6d1081a
|
[
"MIT"
] | 3 |
2020-12-20T18:18:09.000Z
|
2021-11-14T09:42:07.000Z
|
import sqlite3
import random
import string
import re
import sys
# domain name
args = sys.argv
if len(args) == 2:
if args[1] == 'localhost':
domain = "localhost:5000/"
else:
domain = "https://squez-url-shortener.herokuapp.com/"
else:
domain = "https://squez-url-shortener.herokuapp.com/"
# URL verification regex
regex = r"""(?i)\b((?:https?://|www\d{0,3}[.]{1}|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"""
# check_same_thread=False to disable thread sync
conn = sqlite3.connect("url.db", check_same_thread=False)
def check_if_exists(id: str, flag: bool):
"""
returns true if record exists
params:
id: data to check in db
flag: True if shortened URL, else False
returns:
True if record exists else False
"""
if flag:
query = f'''SELECT COUNT(*) FROM URLS WHERE ID="{id}";'''
else:
query = f'''SELECT COUNT(*) FROM URLS WHERE ORIGINAL="{id}";'''
db_res = conn.execute(query)
if [i[0] for i in db_res] == [0]:
return False
return True
def insert_data(id: str, og: str, value: int):
"""
Insert data in db
Params:
id: short url(primary key)
og: original url
value: number of visit
returns:
True if successful else False
"""
query = f'''INSERT INTO URLS (ID, ORIGINAL, VISITS) VALUES ("{str(id)}", "{str(og)}", {int(value)});'''
db_res = conn.execute(query)
conn.commit()
if not db_res:
return False
return True
def get_original_url(id: str, flag: bool):
"""
returns record data if exists
params:
id: shortened or original url
flag: True for shortened id else False
returns:
False if data doesn't exist else return data
"""
if flag:
query = f'''SELECT ORIGINAL FROM URLS WHERE ID="{str(id)}";'''
else:
query = f'''SELECT ID FROM URLS WHERE ORIGINAL="{str(id)}";'''
db_res = conn.execute(query)
url = [i[0] for i in db_res]
if url:
return url[0]
return False
def get_valid_combination(url: str) -> str:
"""
finds and returns shortened URL
params:
url: original url
returns:
False if operation failed else return whole shortened link
"""
res = re.findall(regex, url)
url = re.sub(r"^(http://|https://){0,1}(www.|ww.|w.){0,1}", "", url)
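    # Strip the scheme and leading www-style prefixes so equivalent URLs map to a single record.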
data = False
if res:
if not check_if_exists(url, False):
while 1:
shrt = ''.join(random.choice(string.ascii_letters) for _ in range(8))
if not check_if_exists(shrt, True):
if not insert_data(shrt, url, 0):
return False
data = "".join([domain, shrt])
break
else:
shrt = get_original_url(url, False)
data = "".join([domain, shrt])
return data
| 28.084112 | 200 | 0.547088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,525 | 0.505804 |
2f3e4585789dca549a8fbdd15c298b8c2bf0a041
| 1,954 |
py
|
Python
|
ball.py
|
b3mery/Python-Pong-Game
|
d0051942412c331a752cbade11815002be8d4d1e
|
[
"MIT"
] | null | null | null |
ball.py
|
b3mery/Python-Pong-Game
|
d0051942412c331a752cbade11815002be8d4d1e
|
[
"MIT"
] | null | null | null |
ball.py
|
b3mery/Python-Pong-Game
|
d0051942412c331a752cbade11815002be8d4d1e
|
[
"MIT"
] | null | null | null |
from turtle import Turtle
from scoreboard import Scoreboard
WIDTH = 800
HEIGHT = 600
START_SPEED = 0.1
class Ball(Turtle):
"""Class for creating and moving the ball. Extends Turtle"""
def __init__(self) -> None:
super().__init__()
self.y_trajectory = 10
self.x_trajectory = 10
self.shape('circle')
self.penup()
self.shapesize(stretch_len=1,stretch_wid=1)
self.color('white')
self.move_speed = START_SPEED
def move(self):
"""Move the ball forward by x and y trajectories"""
new_x = self.xcor() + self.x_trajectory
new_y = self.ycor() + self.y_trajectory
self.goto(new_x,new_y)
def detect_wall_collision(self):
"""Detect a wall colision, reverse y trajectory to "bounce" the ball"""
if self.ycor() >= HEIGHT/2 - 15 or self.ycor() <= HEIGHT/-2 + 15:
self.y_trajectory *= -1
def detect_paddle_collision(self, r_paddle, l_paddle):
"""Detect a collision with the paddles
If collision, reverse x trajectory"""
if ((self.distance(r_paddle) < 50 and self.xcor() > WIDTH/2 -60)
or (self.distance(l_paddle) < 50 and self.xcor() < WIDTH/-2 +60) ):
self.x_trajectory *= -1
self.move_speed *= 0.9
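            # move_speed is presumably used as the frame delay in the game loop,
            # so shrinking it makes the ball move faster after each paddle hit.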
def detect_goal(self,score:Scoreboard):
"""Detect a collision with walls. If collision, then goal.
        Reset the ball to its starting values and move in the opposite of the previous x trajectory."""
if self.xcor() > WIDTH/2 -20:
print("Left player scored a goal")
score.left_point()
self.goto(0,0)
self.move_speed = START_SPEED
self.x_trajectory *=-1
if self.xcor() < WIDTH/-2 +20:
print("Right player scored a goal")
score.right_point()
self.goto(0,0)
self.move_speed = START_SPEED
self.x_trajectory *=-1
| 34.892857 | 84 | 0.590583 | 1,848 | 0.945752 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.245138 |
2f3f7fbb2e9c92a49ae40445269e03dc87f8856d
| 185 |
py
|
Python
|
tsai/data/basics.py
|
radi-cho/tsai
|
32f24d55ee58df1a14d1e68618f230097a266c77
|
[
"Apache-2.0"
] | 1 |
2022-01-02T18:21:27.000Z
|
2022-01-02T18:21:27.000Z
|
tsai/data/basics.py
|
radi-cho/tsai
|
32f24d55ee58df1a14d1e68618f230097a266c77
|
[
"Apache-2.0"
] | 31 |
2021-12-01T23:08:51.000Z
|
2021-12-29T02:59:49.000Z
|
tsai/data/basics.py
|
radi-cho/tsai
|
32f24d55ee58df1a14d1e68618f230097a266c77
|
[
"Apache-2.0"
] | 1 |
2022-03-13T16:47:04.000Z
|
2022-03-13T16:47:04.000Z
|
from .validation import *
from .preparation import *
from .external import *
from .core import *
from .preprocessing import *
from .transforms import *
from .mixed_augmentation import *
| 26.428571 | 33 | 0.778378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2f3f9137757f79baedb08f68f1da6c337e1ee99a
| 703 |
py
|
Python
|
push_notifications/migrations/0002_auto_20180408_1513.py
|
walison17/pulso-api
|
b9edfc3f6042676dbdb50d7efcdb461a19ea90ed
|
[
"MIT"
] | null | null | null |
push_notifications/migrations/0002_auto_20180408_1513.py
|
walison17/pulso-api
|
b9edfc3f6042676dbdb50d7efcdb461a19ea90ed
|
[
"MIT"
] | null | null | null |
push_notifications/migrations/0002_auto_20180408_1513.py
|
walison17/pulso-api
|
b9edfc3f6042676dbdb50d7efcdb461a19ea90ed
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2018-04-08 15:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('push_notifications', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='device',
name='is_active',
),
migrations.AddField(
model_name='device',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='device',
name='device_type',
field=models.CharField(choices=[(0, 'ios'), (1, 'android')], default=1, max_length=10),
),
]
| 25.107143 | 99 | 0.5633 | 612 | 0.870555 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.217639 |
2f436e86cdf8ffd5b6c159aa475cc3ce92d884bf
| 50 |
py
|
Python
|
app/api/config.py
|
stdevelopr/Jtray
|
287a4be1e26b2dab372323cc0bd8df1f8689fd97
|
[
"MIT"
] | null | null | null |
app/api/config.py
|
stdevelopr/Jtray
|
287a4be1e26b2dab372323cc0bd8df1f8689fd97
|
[
"MIT"
] | 1 |
2020-05-01T20:37:34.000Z
|
2020-05-01T20:37:34.000Z
|
app/api/config.py
|
stdevelopr/JTray
|
287a4be1e26b2dab372323cc0bd8df1f8689fd97
|
[
"MIT"
] | null | null | null |
jira_user_url = ""
jira_email = ""
jira_token = ""
| 16.666667 | 18 | 0.66 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.12 |
2f43d99fa4ec9d66bba52027500997441d643a8e
| 1,216 |
py
|
Python
|
baseq/bed/__init__.py
|
basedata10/baseq
|
0f1786c3392a51a6ec7cb0f32355cd28eaa5df29
|
[
"MIT"
] | 1 |
2018-08-30T20:29:17.000Z
|
2018-08-30T20:29:17.000Z
|
baseq/bed/__init__.py
|
basedata10/baseq
|
0f1786c3392a51a6ec7cb0f32355cd28eaa5df29
|
[
"MIT"
] | null | null | null |
baseq/bed/__init__.py
|
basedata10/baseq
|
0f1786c3392a51a6ec7cb0f32355cd28eaa5df29
|
[
"MIT"
] | null | null | null |
import subprocess, re, os
from baseq.utils.runcommand import run_it, run_generator
import pandas as pd
import random
"""
baseq dev bed ./bed
"""
import click, os, sys
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
pass
class BEDFILE:
def __init__(self, path):
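        # Only the first three BED columns (chrom, start, end) are read;
        # lines starting with '@' are treated as comments and skipped.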
self.bed = pd.read_table(path, usecols=range(3), names=['chr', 'start', 'end'], comment='@', converters={'chr':str})
self.stats()
def stats(self):
lengths = []
for index, row in self.bed.iterrows():
length = row['end'] - row['start']
lengths.append(length)
self.length = sum(lengths)
self.counts = len(lengths)
print("[info] Intervels {} Length {}.".format(self.counts, self.length))
def sampling(self, numbers=100):
df_s = self.bed.sample(n=numbers)
return df_s.values.tolist()
def sample_split_files(self, lines=100, files=10):
paths = []
for x in range(files):
path = "sample.{}.bed".format(x)
paths.append(path)
self.bed.sample(n=lines).to_csv(path, index=False, sep="\t", header=False)
return paths
| 31.179487 | 124 | 0.612664 | 919 | 0.755757 | 0 | 0 | 67 | 0.055099 | 0 | 0 | 127 | 0.104441 |
2f44190ef14e633a5b67ab12f51b43692438c0da
| 855 |
py
|
Python
|
tests/unit/test_iostatic.py
|
Rogdham/python-xz
|
f53266dae8d4f7fcc74cd53222f22105e40d5112
|
[
"MIT"
] | 3 |
2021-07-13T16:06:38.000Z
|
2022-03-04T22:52:58.000Z
|
tests/unit/test_iostatic.py
|
Rogdham/python-xz
|
f53266dae8d4f7fcc74cd53222f22105e40d5112
|
[
"MIT"
] | 3 |
2021-09-19T09:48:35.000Z
|
2022-01-09T15:38:48.000Z
|
tests/unit/test_iostatic.py
|
Rogdham/python-xz
|
f53266dae8d4f7fcc74cd53222f22105e40d5112
|
[
"MIT"
] | null | null | null |
from io import UnsupportedOperation
import pytest
from xz.io import IOStatic
def test_read() -> None:
static = IOStatic(b"abcdefghij")
# read all
static.seek(0)
assert static.read() == b"abcdefghij"
static.seek(4)
assert static.read() == b"efghij"
# read partial
static.seek(6)
assert static.read(3) == b"ghi"
assert static.read(3) == b"j"
assert static.read(3) == b""
assert static.read(3) == b""
def test_write() -> None:
with IOStatic(b"abc") as static:
assert static.writable() is False
static.seek(3)
with pytest.raises(UnsupportedOperation):
static.write(b"def")
def test_truncate() -> None:
with IOStatic(b"abc") as static:
assert static.writable() is False
with pytest.raises(UnsupportedOperation):
static.truncate()
| 22.5 | 49 | 0.625731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.108772 |
2f4660a8cf58761bb602bec1315943879f761718
| 4,264 |
py
|
Python
|
swtstore/application.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 2 |
2015-04-28T00:35:21.000Z
|
2016-02-11T19:31:15.000Z
|
swtstore/application.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 9 |
2015-02-02T11:24:23.000Z
|
2017-12-29T07:49:07.000Z
|
swtstore/application.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
__init__.py
"""
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, request, jsonify, render_template, make_response
from classes.database import db
from config import DefaultConfig
from classes import views
#from classes import models
from classes import oauth
__all__ = ['create_app', 'getDBInstance']
DEFAULT_APP_NAME = __name__
DEFAULT_MODULES = (
(views.frontend, ''),
(views.api, '/api'),
(views.user, '/users'),
(views.context, '/contexts'),
(views.sweet, '/sweets'),
(views.app, '/apps'),
(views.Oauth, '/oauth')
)
def create_app(config=None, app_name=None, modules=None):
if app_name is None:
app_name = DEFAULT_APP_NAME
if modules is None:
modules = DEFAULT_MODULES
app = Flask(app_name)
configure_app(app, config)
configure_logging(app)
configure_errorhandlers(app)
configure_extensions(app)
#configure_beforehandlers(app)
configure_modules(app, modules)
return app
def configure_app(app, config):
app.config.from_object(DefaultConfig())
if config is not None:
app.config.from_object(config)
app.config.from_envvar('APP_CONFIG', silent=True)
def configure_modules(app, modules):
for module, url_prefix in modules:
app.register_module(module, url_prefix=url_prefix)
def configure_extensions(app):
db.init_app(app)
db.app = app
oauth.init_app(app)
# Return the current db instance.
# TODO: is this really needed?
def getDBInstance():
return db
def configure_errorhandlers(app):
if app.testing:
return
    # Each handler returns JSON for XHR requests and an HTML error page
    # otherwise, with the matching HTTP status code.
    @app.errorhandler(404)
    def not_found(error):
        if request.is_xhr:
            response = jsonify(error=str(error))
        else:
            response = make_response(render_template('errors/404.html'))
        response.status_code = 404
        return response
    @app.errorhandler(403)
    def forbidden(error):
        if request.is_xhr:
            response = jsonify(error=str(error))
        else:
            response = make_response(render_template('errors/403.html'))
        response.status_code = 403
        return response
    @app.errorhandler(401)
    def unauthorized(error):
        if request.is_xhr:
            response = jsonify(error=str(error))
        else:
            response = make_response(render_template('errors/401.html'))
        response.status_code = 401
        return response
    @app.errorhandler(400)
    def bad_request(error):
        # Check if we have any custom error messages
        #if g.error:
        #    print 'g.error:'
        #    print g.error
        #    error = g.error
        if request.is_xhr:
            response = jsonify(error=str(error))
        else:
            response = make_response(render_template('errors/400.html', error=error))
        response.status_code = 400
        return response
    @app.errorhandler(500)
    def server_error(error):
        if request.is_xhr:
            response = jsonify(error=str(error))
        else:
            response = make_response(render_template('errors/500.html'))
        response.status_code = 500
        return response
def configure_logging(app):
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
# Also error can be sent out via email. So we can also have a SMTPHandler?
log_file = os.path.join(os.path.dirname(__file__), '..',
app.config['LOG_FILE'])
max_size = 1024 * 1024 * 20 # Max Size for a log file: 20MB
log_handler = RotatingFileHandler(log_file, maxBytes=max_size,
backupCount=10)
if 'LOG_LEVEL' in app.config:
log_level = app.config['LOG_LEVEL'] or 'ERROR'
else:
log_level = 'ERROR'
log_handler.setLevel(log_level)
log_handler.setFormatter(formatter)
app.logger.addHandler(log_handler)
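# A minimal usage sketch of the factory above, assuming the module is run
# directly; the project's real entry point (e.g. a manage/WSGI script) is not
# part of this file.
if __name__ == '__main__':
    app = create_app()
    app.run(debug=True)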
| 24.090395 | 78 | 0.633912 | 0 | 0 | 0 | 0 | 1,667 | 0.390947 | 0 | 0 | 780 | 0.182927 |
2f46d633a48c16504cc0737a6f08d56b6c8d1caf
| 2,313 |
py
|
Python
|
2018/12a.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | 4 |
2018-12-04T23:33:46.000Z
|
2021-12-07T17:33:27.000Z
|
2018/12a.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | 17 |
2018-12-12T23:32:09.000Z
|
2020-01-04T15:50:31.000Z
|
2018/12a.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pytest
import fileinput
import sys
DAY=12
class Plants():
def __init__(self, in_lines):
self.generation = 0
lines = iter(in_lines)
initial_state = next(lines).replace('initial state: ', '')
self.pots = {i:s for i, s in enumerate(initial_state)
if s == '#'
}
self.rules = {
r.split('=>')[0].strip():r.split('=>')[1].strip()
for r in lines if r and r.split('=>')[1].strip() == '#'
}
def gen(self):
pots_new = {}
for p in range(min(self.pots.keys())-4, max(self.pots.keys())+1+4):
key = '{}{}{}{}{}'.format(
'#' if p-2 in self.pots else '.',
'#' if p-1 in self.pots else '.',
'#' if p-0 in self.pots else '.',
'#' if p+1 in self.pots else '.',
'#' if p+2 in self.pots else '.',
)
if key in self.rules:
pots_new[p] = '#'
pots_new_str = ''.join(['#' if i in pots_new else '.'
for i in range(min(pots_new.keys()), max(pots_new.keys())+1)])
self.pots = pots_new
self.generation += 1
def print_pots(self):
return ''.join(['#' if i in self.pots else '.'
#for i in range(min(self.pots.keys()), max(self.pots.keys())+1)])
for i in range(min(-3, *self.pots.keys()), max(35, *self.pots.keys())+1)])
def sum_pots(self):
return sum(self.pots.keys())
@pytest.fixture
def example_result():
with open('12.testresult', 'r') as in_file:
return in_file.read().split('\n')
@pytest.fixture
def example_input():
with open('12.input.test', 'r') as in_file:
return in_file.read().split('\n')
def test_answer(example_input, example_result):
plants = Plants(example_input)
print('Rules: ',plants.rules)
for i in range(0, 20+1):
if i > 0:
plants.gen()
print('Pots after {:2} generations: {}'.format(plants.generation, plants.print_pots()))
assert '{:2}: {}'.format(i, plants.print_pots()) == example_result[2+i]
assert plants.sum_pots() == 325
if __name__ == '__main__':
in_lines = [l.strip() for l in fileinput.input(sys.argv[1:] or '{:02}.input'.format(DAY))]
plants = Plants(in_lines)
for i in range(0, 20+1):
if i > 0:
plants.gen()
print('Pots after {:2} generations: {}'.format(plants.generation, plants.print_pots()))
print('Answer: {}'.format(plants.sum_pots()))
| 31.684932 | 92 | 0.587981 | 1,268 | 0.548206 | 0 | 0 | 241 | 0.104194 | 0 | 0 | 349 | 0.150886 |
2f47e0e4afa3b0ef06fd5508f958beec6b26eb72
| 826 |
py
|
Python
|
03-Spark DFs/24-Solution (Group By).py
|
PacktPublishing/PySpark-and-AWS-Master-Big-Data-with-PySpark-and-AWS
|
28726ada2a8f03557180b472eecf3efc72cab5a2
|
[
"MIT"
] | 3 |
2021-09-29T04:11:44.000Z
|
2021-12-21T06:28:48.000Z
|
Part 3/Code/03-Spark DFs/24-Solution (Group By).py
|
PacktPublishing/50-Hours-of-Big-Data-PySpark-AWS-Scala-and-Scraping
|
8993a8ee10534a29aeee18fa91bdc48e3093bec5
|
[
"MIT"
] | null | null | null |
Part 3/Code/03-Spark DFs/24-Solution (Group By).py
|
PacktPublishing/50-Hours-of-Big-Data-PySpark-AWS-Scala-and-Scraping
|
8993a8ee10534a29aeee18fa91bdc48e3093bec5
|
[
"MIT"
] | 5 |
2021-11-17T15:47:36.000Z
|
2022-03-09T05:13:09.000Z
|
# Databricks notebook source
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
from pyspark.sql.functions import sum,avg,max,min,mean,count
spark = SparkSession.builder.appName("Spark DataFrames").getOrCreate()
# COMMAND ----------
df = spark.read.options(header='True', inferSchema='True').csv('/FileStore/tables/StudentData.csv')
df.show()
# COMMAND ----------
# 1: total enrollment per course, computed two equivalent ways (count() and agg(count))
df.groupBy("course").count().show()
df.groupBy("course").agg(count("*").alias("total_enrollment")).show()
# COMMAND ----------
# 2: total enrollment per course and gender
df.groupBy("course", "gender").agg(count("*").alias("total_enrollment")).show()
# COMMAND ----------
# 3: total marks per course and gender
df.groupBy("course", "gender").agg(sum("marks").alias("total_marks")).show()
# COMMAND ----------
# 4: minimum, maximum and average marks per course and age
df.groupBy("course", "age").agg(min("marks"), max("marks"), avg("marks")).show()
| 25.8125 | 99 | 0.659806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.422518 |
2f494c01c823bdfd4b8fa27dc3e019de599fda15
| 897 |
py
|
Python
|
queues/list_queue/queue.py
|
joeb15/202Problems
|
a8ab3dc49cb899b640cc836863e28e52fb978466
|
[
"MIT"
] | null | null | null |
queues/list_queue/queue.py
|
joeb15/202Problems
|
a8ab3dc49cb899b640cc836863e28e52fb978466
|
[
"MIT"
] | null | null | null |
queues/list_queue/queue.py
|
joeb15/202Problems
|
a8ab3dc49cb899b640cc836863e28e52fb978466
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
A queue is a first-in first-out (FIFO) data structure.
For this to work, you must be able to enqueue (add) items to the queue and
dequeue (remove) items from it. A short usage sketch follows the class below.
"""
class List_Queue:
    """
    A fixed-capacity queue backed by a plain list, using front/end indices
    as a circular buffer.
    """
    def __init__(self, size):
        self.size = size
        self.num_items = 0
        self.front = 0
        self.end = 0
        self.list = [None for i in range(self.size)]
    def is_full(self):
        """Return True if the queue is full, False otherwise."""
        return self.num_items == self.size
    def enqueue(self, item):
        """
        Add a new item to the end of the queue.
        Return True if successful, False if there is not enough space in the queue.
        """
        if self.is_full():
            return False
        self.list[self.end] = item
        self.end = (self.end + 1) % self.size  # wrap around the fixed-size list
        self.num_items += 1
        return True
    def dequeue(self):
        """
        Remove the first item from the queue and return it.
        Raises an IndexError if no items are in the queue.
        """
        if self.num_items == 0:
            raise IndexError("dequeue from an empty queue")
        item = self.list[self.front]
        self.list[self.front] = None
        self.front = (self.front + 1) % self.size
        self.num_items -= 1
        return item
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.