# File: seurat/generation/maya/seurat_rig.py (repo: Asteur/vrhelper)
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a JSON manifest and a Maya camera rig for Seurat.
Example usage:
CreateRig(headbox_min=[-0.5, -0.5, -0.5],
headbox_max=[0.5, 0.5, 0.5],
num_view_groups=16, # Should be a power of two.
image_size=1024,
near_clip=0.1,
far_clip=100.0,
depth_type='EYE_Z',
depth_channel_name='A',
color_file_path_pattern='%s_color.%04d.exr',
depth_file_path_pattern='%s_depth.%04d.exr',
json_file_path='./manifest.json')
"""
import json
import math
import operator
def ProjectPoint(matrix, point):
"""Projects a 3D point using a 4x4 matrix.
Args:
matrix: A 4x4 matrix represented as a list of 16 floats.
point: A 3D point represented as a list of 3 floats.
Returns:
The projected point, represented as a list of 3 floats.
"""
result_hom = [0.0, 0.0, 0.0, 0.0]
for row in xrange(4):
for col in xrange(3):
result_hom[row] += matrix[4 * row + col] * point[col]
# point.w = 1.0 implicitly
result_hom[row] += matrix[4 * row + 3]
w = result_hom[3]
return map(operator.div, result_hom[0:3], [w, w, w])
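# Illustrative example (added; not part of the original script): with the
# identity matrix the point comes back unchanged, because w stays 1.0:
#   ProjectPoint([1.0, 0.0, 0.0, 0.0,
#                 0.0, 1.0, 0.0, 0.0,
#                 0.0, 0.0, 1.0, 0.0,
#                 0.0, 0.0, 0.0, 1.0], [1.0, 2.0, 3.0])  # -> [1.0, 2.0, 3.0]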
def WorldFromEyeMatrixFromFace(face_name):
"""Creates world-from-eye matrix for the given face of a cube map.
Args:
face_name: Name of the face. Must be one of 'front', 'back', 'left',
'right', 'bottom', 'top'.
Returns:
The world-from-eye matrix for the given face as a list in row-major order.
Raises:
ValueError: face_name is not the name of a cube map face.
"""
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
  if face_name == 'front':
return [ 1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'back':
return [-1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'left':
return [ 0.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
-1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'right':
return [ 0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'bottom':
return [ 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
  elif face_name == 'top':
return [ 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0] # pyformat: disable
else:
raise ValueError('Invalid face_name')
def CubeFaceProjectionMatrix(near, far):
"""Creates a cube-face 90 degree FOV projection matrix.
The created matrix is an OpenGL-style projection matrix.
Args:
near: Eye-space Z position of the near clipping plane.
far: Eye-space Z position of the far clipping plane.
Returns:
The clip-from-eye matrix as a list in row-major order.
Raises:
ValueError: Invalid clip planes. near <= 0.0 or far <= near.
"""
if near <= 0.0:
raise ValueError('near must be positive.')
if far <= near:
raise ValueError('far must be greater than near.')
left = -near
right = near
bottom = -near
top = near
a = (2.0 * near) / (right - left)
b = (2.0 * near) / (top - bottom)
c = (right + left) / (right - left)
d = (top + bottom) / (top - bottom)
e = (near + far) / (near - far)
f = (2.0 * near * far) / (near - far)
# pylint: disable=bad-whitespace
return [a, 0.0, c, 0.0,
0.0, b, d, 0.0,
0.0, 0.0, e, f,
0.0, 0.0, -1.0, 0.0] # pyformat: disable
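# Illustrative example (added; not part of the original script): for near=0.1
# and far=100.0 the symmetric 90-degree frustum gives a = b = 1.0,
# c = d = 0.0, e = 100.1 / -99.9 ~= -1.002 and f = 20.0 / -99.9 ~= -0.2002,
# i.e. roughly
#   [1.0, 0.0,  0.0,    0.0,
#    0.0, 1.0,  0.0,    0.0,
#    0.0, 0.0, -1.002, -0.2002,
#    0.0, 0.0, -1.0,    0.0]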
def RadicalInverse(a, base):
"""Computes the radical inverse of |a| in base |base|.
Args:
a: The integer number for which the radical inverse is computed.
base: The radical inverse is computed in this base (integer).
Returns:
The radical inverse as a float in the range [0.0, 1.0).
"""
reversed_digits = 0
base_n = 1
# Compute the reversed digits, base b.
while a > 0:
next_a = a / base
digit = a - next_a * base
reversed_digits = reversed_digits * base + digit
base_n *= base
a = next_a
# Only when done are the reversed digits divided by b^n.
return min(reversed_digits / float(base_n), 1.0)
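# Illustrative example (added; not part of the original script): 6 is 110 in
# base 2; the reversed digits 011 give 0*1/2 + 1*1/4 + 1*1/8, so
# RadicalInverse(6, 2) == 0.375.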
def PointInBox(box_min, box_max, sample):
"""Computes a sample point inside a box with arbitrary number of dimensions.
Args:
box_min: A list of floats representing the lower bounds of the box.
box_max: A list of floats representing the upper bounds of the box.
sample: A list of floats in the range [0.0, 1.0] representing the
relative sample position in the box.
Returns:
A list of floats, representing the absolute position of the sample in
the box.
"""
delta = map(operator.sub, box_max, box_min)
offset = map(operator.mul, delta, sample)
position = map(operator.add, box_min, offset)
return position
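# Illustrative example (added; not part of the original script):
#   PointInBox([0.0, 0.0, 0.0], [2.0, 4.0, 6.0], [0.5, 0.5, 0.5])
# returns the box center [1.0, 2.0, 3.0].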
def Distance(point_a, point_b):
"""Computes the euclidean distance between two points.
The points can have an aribtrary number of dimensions.
Args:
point_a: A list of numbers representing the first point.
point_b: A list of numbers representing the second point.
Returns:
The euclidean distance as a float.
"""
delta = map(operator.sub, point_a, point_b)
delta_sqr = map(operator.mul, delta, delta)
distance_sqr = 0.0
for element in delta_sqr:
distance_sqr += element
return math.sqrt(distance_sqr)
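# Illustrative example (added; not part of the original script):
#   Distance([0.0, 0.0, 0.0], [3.0, 4.0, 0.0]) == 5.0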
def RotateCamera(camera_name, face_name):
"""Rotates a Maya camera node to look at a given cube map face.
Args:
camera_name: Name of the Maya camera's transform node.
face_name: Name of the cube map face.
Raises:
ValueError: face is not a valid cube map face name.
"""
# Disable the undefined-variable lint error, because the Maya package is not
# defined in the environment where the linter runs.
#
# pylint: disable=undefined-variable
  if face_name == 'front':
    pass
  elif face_name == 'back':
    maya.cmds.setAttr(camera_name + '.rotateY', 180)
  elif face_name == 'left':
    maya.cmds.setAttr(camera_name + '.rotateY', 90)
  elif face_name == 'right':
    maya.cmds.setAttr(camera_name + '.rotateY', -90)
  elif face_name == 'bottom':
    maya.cmds.setAttr(camera_name + '.rotateX', -90)
  elif face_name == 'top':
    maya.cmds.setAttr(camera_name + '.rotateX', 90)
  else:
    raise ValueError('Invalid face_name')
def GenerateCameraPositions(headbox_min, headbox_max, num_cameras):
"""Generates camera positions in a headbox.
Camera posittions are computed as a 3D Hammersley point set. The points are
transformed such that their bounding box is exactly equal to the headbox. The
points are then sorted according to distance to the headbox center. Finally,
the point that is closest to the headbox center is replaced by the headbox
center itself to include a view from the reference camera.
Args:
headbox_min: The lower bounds of the headbox as a list of 3 floats.
headbox_max: The upper bounds of the headbox as a list of 3 floats.
num_cameras: The number of cameras to generate. Should be a power of two.
Returns:
A list of 3D points (each a list of 3 floats), representing the positions
of the generated cameras.
Raises:
ValueError: num_cameras is not positive.
"""
if num_cameras <= 0:
raise ValueError('num_cameras must be positive')
if num_cameras == 1:
# Use the headbox center if a single camera position is requested.
return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])]
samples = []
max_sample = [0.0, 0.0, 0.0]
for i in xrange(num_cameras):
# Use a 3D Hammersley point set for the samples.
sample = [
i / float(num_cameras),
RadicalInverse(i, 2),
RadicalInverse(i, 3)
]
for dim in xrange(3):
max_sample[dim] = max(max_sample[dim], sample[dim])
samples.append(sample)
headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
camera_positions = []
for sample in samples:
# Normalize the samples so that their bounding box is the unit cube.
for dim in xrange(3):
sample[dim] /= max_sample[dim]
position = PointInBox(headbox_min, headbox_max, sample)
camera_positions.append(position)
sorted_positions = sorted(
camera_positions, key=lambda point: Distance(point, headbox_center))
# Replace the point closest to the headbox center by the headbox center
# itself.
sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
return sorted_positions
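# Illustrative example (added; not part of the original script): for
# num_cameras=4 the raw Hammersley samples are
#   i=0: [0.00, 0.00, 0.0000]
#   i=1: [0.25, 0.50, 0.3333]
#   i=2: [0.50, 0.25, 0.6667]
#   i=3: [0.75, 0.75, 0.1111]
# Each dimension is then divided by its maximum, mapped into the headbox,
# sorted by distance to the headbox center, and the closest sample is replaced
# by the center itself.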
def CreateCameras(camera_positions, near_clip, far_clip):
"""Creates and animates the Maya cameras for the rig.
Six cameras, one for each cube face, are generated. Each camera is configured
with a square viewport and the given near and far clipping planes. This method
also adjusts the Maya timeline to exactly contain the frames for the rig
animation. Each of the six cameras will get one keyframe per camera position.
Args:
camera_positions: A list of 3D points (each a list of 3 floats) representing
the positions of the cameras.
near_clip: Eye-space Z position of the near clipping planes.
far_clip: Eye-space Z position of the far clipping planes.
"""
# Disable the undefined-variable lint error, because the Maya package is not
# defined in the environment where the linter runs.
#
# pylint: disable=undefined-variable
start_time = 0
end_time = len(camera_positions) - 1
maya.cmds.playbackOptions(
animationStartTime=start_time,
animationEndTime=end_time,
minTime=start_time,
maxTime=end_time)
for face in ['front', 'back', 'left', 'right', 'bottom', 'top']:
# Create a cube face camera and rotate it.
camera_name = maya.cmds.camera(
name='seurat_' + face,
focalLength=12.7,
horizontalFilmAperture=1,
verticalFilmAperture=1,
nearClipPlane=near_clip,
farClipPlane=far_clip)[0]
RotateCamera(camera_name, face)
# Set translation keyframes for all positions on this camera.
for view_group_index, position in enumerate(camera_positions):
maya.cmds.setKeyframe(
camera_name, at='translateX', t=view_group_index, v=position[0])
maya.cmds.setKeyframe(
camera_name, at='translateY', t=view_group_index, v=position[1])
maya.cmds.setKeyframe(
camera_name, at='translateZ', t=view_group_index, v=position[2])
def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip,
far_clip, depth_type, depth_channel_name,
color_file_path_pattern, depth_file_path_pattern):
"""Creates and returns the view groups for the JSON output.
Args:
headbox_center: Center of the headbox as a list of 3 floats.
camera_positions: Positions of the cameras as a list of 3D points (each a
list of 3 floats).
image_size: Size of the output images in pixels.
near_clip: Eye-space Z position of the near clipping planes.
far_clip: Eye-space Z position of the far clipping planes.
depth_type: A string representing the depth encoding. Valid values are:
'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]),
'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf);
Arnold's encoding),
'RAY_DEPTH' (distance to eye).
depth_channel_name: Name of the depth channel in the output file.
Commonly used values are 'R' (VRay) and 'A' (Arnold).
color_file_path_pattern: File name pattern for color images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
depth_file_path_pattern: File name pattern for depth images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
Returns:
A dictionary representing the view groups.
"""
view_groups = []
for view_group_index, absolute_position in enumerate(camera_positions):
views = []
for face in ['front', 'back', 'left', 'right', 'bottom', 'top']:
# Camera position relative to headbox center.
position = map(operator.sub, absolute_position, headbox_center)
clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip)
world_from_eye_matrix = WorldFromEyeMatrixFromFace(face)
# Set translation component of world-from-eye matrix.
for i in xrange(3):
world_from_eye_matrix[4 * i + 3] = position[i]
# Create camera object
camera = {
'image_width': image_size,
'image_height': image_size,
'clip_from_eye_matrix': clip_from_eye_matrix,
'world_from_eye_matrix': world_from_eye_matrix,
'depth_type': depth_type
}
# Create view object and add it to the view groups
color_image_path = (color_file_path_pattern % (face, view_group_index))
depth_image_path = (depth_file_path_pattern % (face, view_group_index))
view = {
'projective_camera': camera,
'depth_image_file': {
'color': {
'path': color_image_path,
'channel_0': 'R',
'channel_1': 'G',
'channel_2': 'B',
'channel_alpha': 'A'
},
'depth': {
'path': depth_image_path,
'channel_0': depth_channel_name
}
}
}
views.append(view)
view_group = {'views': views}
view_groups.append(view_group)
# Return the view_groups as a Python list.
return view_groups
def CreateRig(headbox_min,
headbox_max,
num_view_groups,
image_size,
near_clip,
far_clip,
depth_type,
depth_channel_name,
color_file_path_pattern,
depth_file_path_pattern,
json_file_path,
json_only=False):
"""Creates a Maya camera rig and JSON manifest for Seurat.
Args:
headbox_min: List of three floats representing the lower bounds of the
headbox in world-space.
headbox_max: List of three floats representing the upper bounds of the
headbox in world-space.
num_view_groups: Number of view groups (camera positions) to generate.
Must be a power of two.
image_size: Resolution of the output images in pixels.
near_clip: Eye-space Z position of the near clipping planes.
far_clip: Eye-space Z position of the far clipping planes.
depth_type: A string representing the depth encoding. Valid values are:
'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]),
'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf);
Arnold's encoding),
'RAY_DEPTH' (distance to eye).
depth_channel_name: Name of the depth channel in the output file.
Commonly used values are 'R' (VRay) and 'A' (Arnold).
color_file_path_pattern: File name pattern for color images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
depth_file_path_pattern: File name pattern for depth images. Must
contain a placeholder for a string (face name) and an integer (view
group number). Example: '%s.%04d.exr' for file names
'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.
json_file_path: Path to the output JSON manifest file.
json_only: A boolean value. If true, the Maya camera generation step is
bypassed.
"""
# Compute the positions of the cameras.
camera_positions = GenerateCameraPositions(headbox_min, headbox_max,
num_view_groups)
# Generate the six Maya cameras and keyframe their positions.
if not json_only:
CreateCameras(camera_positions, near_clip, far_clip)
# Compute the headbox center.
headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
# Generate the JSON manifest and write it to the file.
view_groups = CreateViewGroups(headbox_center, camera_positions, image_size,
near_clip, far_clip, depth_type,
depth_channel_name, color_file_path_pattern,
depth_file_path_pattern)
json_string = json.dumps({'view_groups': view_groups}, indent=2)
with open(json_file_path, 'w') as json_file:
json_file.write(json_string)

# File: cse/migrations/0009_auto_20210418_0602.py (repo: Kunal614/Resources)
# Generated by Django 3.0.5 on 2021-04-18 06:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cse', '0008_problemset_name'),
]
operations = [
migrations.DeleteModel(
name='cp',
),
migrations.DeleteModel(
name='problemofday',
),
migrations.DeleteModel(
name='problemset',
),
]

# File: smartcross/envs/obs/__init__.py (repo: opendilab/DI-smartcross)
from .sumo_obs import SumoObs
from .sumo_obs_runner import SumoObsRunner

# File: Answer3.py (repo: AshishK199/HackerRank)
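# Descriptive note (added): starting from the input n, the script repeatedly
# adds 1 and strips trailing zeros from the decimal representation, counting
# the steps until a previously seen value recurs, then prints that count.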
count=0
r=[]
def func1(c):
global r
global count
if c in r:
return
else:
r.append(c)
c=c+1
c=str(c)
c=c.rstrip('0')
c=int(c)
count=count+1
func1(c)
n=int(input())
count=0
func1(n)
print(count)

# File: print_lending_stats.py (repo: m-Ferstl/bitfinex_lending_stats)
import argparse
from funding_earnings_stats import FundingEarningsCalculator
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("funding_earnings_file", help="The path to the bitfinex funding earnings csv file.")
args = arg_parser.parse_args()
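# Example invocation (added; the CSV file name below is hypothetical):
#   python print_lending_stats.py funding_earnings.csv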
csv_parser = FundingEarningsCalculator(args.funding_earnings_file)
currencies = csv_parser.get_currencies()
print("Found earnings for {} currencies in file {} ".format(len(currencies), args.funding_earnings_file))
currency_stats = csv_parser.get_currency_stats()
for currency in currencies:
print("Earnings for {}".format(currency))
print()
print(currency_stats[currency])
print()
print()
print()
print("---------------------")
print()
monthly_earnings = csv_parser.get_monthly_earnings()
print("Monthly Earnings")
print()
print(csv_parser.get_monthly_earnings())
# TODO Monthly earnings sum col, Testing with multiple months
# TODO FundingEarningsCalculator so umstrukturieren, dass parsen ausgelagert ist
| 2.953125 | 3 |

# File: auto-labeler/MATCH/analysis/precision_and_recall.py (repo: nasa-petal/PeTaL-labeller)
'''
precision_and_recall.py
Run MATCH with PeTaL data.
Last modified on 10 August 2021.
DESCRIPTION
precision_and_recall.py produces three plots from results in MATCH/PeTaL.
These three plots appear in plots/YYYYMMDD_precision_recall and are
as follows:
- HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number
of labels predicted. Higher threshold means fewer labels get past the threshold.
- HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying
the threshold. As threshold decreases from 1 to 0, precision goes down but recall
goes up (because more labels get past the threshold).
- HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score
vary as threshold varies from 0 to 1.
OPTIONS
-m, --match PATH/TO/MATCH
Path of MATCH folder.
-p, --plots PATH/TO/plots
Path of plots folder.
-d, --dataset PeTaL
Name of dataset, e.g., "PeTaL".
-v, --verbose
Enable verbosity.
USAGE
python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose
Authors: <NAME> (<EMAIL>, <EMAIL>)
'''
import click
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
import logging
from collections import namedtuple
from tqdm import tqdm
Stats = namedtuple("Stats", "threshold topk precision recall f1")
@click.command()
@click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.')
@click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.')
@click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., "PeTaL".')
@click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.')
def main(match_path, plots_path, dataset, verbose):
"""Plots precision and recall and other statistics on graphs.
Args:
match_path (str): Path of MATCH folder.
plots_path (str): Path of plots folder.
verbose (bool): Verbose output.
"""
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s:%(name)s] %(message)s"
)
PRlogger = logging.getLogger("P&R")
DATASET = dataset
MODEL = 'MATCH'
res_labels = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy", allow_pickle=True)
res_scores = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy", allow_pickle=True)
test_labels = np.load(f"{match_path}/{DATASET}/test_labels.npy", allow_pickle=True)
train_labels = np.load(f"{match_path}/{DATASET}/train_labels.npy", allow_pickle=True)
if verbose:
PRlogger.info(f"Computing statistics by varying threshold for {MODEL} on {DATASET}.")
thresholds = list(x / 10000 for x in range(1, 10)) + \
list(x / 1000 for x in range(1, 10)) + \
list(x / 100 for x in range(1, 10)) + \
list(x / 20 for x in range(2, 19)) + \
list((90 + x) / 100 for x in range(1, 10)) + \
list((990 + x) / 1000 for x in range(1, 10)) + \
list((9990 + x) / 10000 for x in range(1, 10))
ps = []
rs = []
ts = []
f1s = []
topks = []
for threshold in tqdm(thresholds):
stats = compute_stats(threshold, res_labels, res_scores, test_labels)
ps.append(stats.precision)
rs.append(stats.recall)
ts.append(threshold)
f1s.append(stats.f1)
topks.append(stats.topk)
'''
Make the following plots to assess the performance of the model.
Precision-recall curve
Precision, recall, and F1 score by varying threshold
Numbers of labels predicted by varying threshold
'''
ALL_PLOTS_PATH = plots_path
if not os.path.exists(ALL_PLOTS_PATH):
os.mkdir(ALL_PLOTS_PATH)
else:
if verbose:
PRlogger.info(f"You already have a plots directory at {ALL_PLOTS_PATH}.")
now = datetime.now()
date_str = now.strftime("%Y%m%d")
time_str = now.strftime("%H%M%S")
comment = f"precision_recall" # "_on_{DATASET}"
PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f"{date_str}_{comment}")
if not os.path.exists(PLOTS_PATH):
os.mkdir(PLOTS_PATH)
if verbose:
PRlogger.info(f"New plots directory at {PLOTS_PATH}")
else:
if verbose:
PRlogger.info(f"You already have a plots directory at {PLOTS_PATH}")
########################################
# PRECISION-RECALL CURVE
########################################
plt.grid()
plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold')
plt.plot(ps, rs, linestyle='-')
plt.xlabel('Recall')
plt.xlim(0, 1)
plt.ylabel('Precision')
plt.ylim(0, 1)
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
########################################
# PRECISION, RECALL, AND F1 SCORE BY THRESHOLD
########################################
plt.grid()
plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}')
plt.plot(ts, ps, linestyle='-', label='Precision')
plt.plot(ts, rs, linestyle='-', label='Recall')
plt.plot(ts, f1s, linestyle='-', label='F1 score')
plt.xlabel('Threshold')
plt.xlim(0, 1)
plt.ylabel('Metrics')
plt.ylim(0, 1)
plt.legend()
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
########################################
# NUMBER OF LABELS PREDICTED BY THRESHOLD
########################################
plt.grid()
plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}')
plt.plot(ts, topks, linestyle='-', label='Number of Labels')
plt.xlabel('Threshold')
plt.xlim(0, 1)
plt.ylabel('Labels')
plt.legend()
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
def compute_stats(threshold, res_labels, res_scores, test_labels):
"""
compute_stats(threshold)
Parameters:
threshold: float, 0.0 < threshold < 1.0
res_labels: numpy array of predicted labels
res_scores: numpy array of predicted label scores
test_labels: numpy array of target labels
Returns:
Stats object containing
threshold
topk: average number of labels above threshold
precision: average precision across examples
recall: average recall across examples
f1: average F1 score across examples
Note:
precision, recall, and F1 scores are macro (averaged across examples, not labels)
"""
precisions = []
recalls = []
topks = []
f1s = []
for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels):
topk = np.argmax(res_score < threshold) # topk becomes the number of labels scoring above the threshold
precision = 1.0 if topk == 0 else np.mean([1 if x in test_label else 0 for x in res_label[:topk]])
recall = np.mean([1 if x in res_label[:topk] else 0 for x in test_label])
f1 = 0 if (precision + recall) == 0 else (2 * precision * recall) / (precision + recall)
topks.append(topk)
precisions.append(precision)
recalls.append(recall)
f1s.append(f1)
# print(res_label[:topk], precision, recall)
return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s))
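# Illustrative example (added; not part of the original script): with
# threshold=0.5, res_label=[3, 1, 2], res_score=[0.9, 0.6, 0.2] and
# test_label=[1, 5], topk=2 labels clear the threshold; precision = 1/2
# (only label 1 of [3, 1] is correct), recall = 1/2 (label 5 is never
# predicted), so F1 = 0.5.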
if __name__ == '__main__':
    main()

#################### Creating new columns on a DataFrame by combining its string columns, etc. ####################
# Function used: apply
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
# df = pd.DataFrame({'id' : [1,2,10,20,100,200],
# "name":['aaa','bbb','ccc','ddd','eee','fff']})
# print(df)
#
# # Create a new column by transforming an existing column
# # Build a new id column from the original id column, padded to a uniform
# # width of 5 digits (missing digits are filled with leading zeros)
# df['id_2']=df['id'].apply(lambda x:"{:0>5d}".format(x))
# print(df)
# # id name id_2
# # 0 1 aaa 00001
# # 1 2 bbb 00002
# # 2 10 ccc 00010
# # 3 20 ddd 00020
# # 4 100 eee 00100
# # 5 200 fff 00200
#
# # #format(): reshapes the argument in () according to the format spec in front of it.
# #
# # x=3.141592
# # print("{:.2f}".format(x))
# # # 3.14
# #
# # print("{:+.2f}".format(x))
# # # +3.14
# #
# # x=-3.141592
# # print("{:+.2f}".format(x))
# # # -3.14
# #
# # x=2.718
# # print("{:.0f}".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림)
# # # 3
# #
# # x=3.147592
# # print("{:.2f}".format(x)) # .2f(소수 점 셋째자리에서 반올림)
# # # 3.15
# #
# # x=5
# # print("{:0>2d}".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 )
# # # 05
# #
# # x=7777777777
# # print("{:0>5d}".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지)
# # # 7777777777
# # print("{:,}".format(x))
# # # 7,777,777,777
# #
# # x=0.25
# # print("{:.2%}".format(x))
# # # 25.00%
# #
#
# #name + id_2: recombined => the two columns are joined (apply now acts on two columns)
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) # fails unless the axis is specified
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 NaN
# # 1 2 bbb 00002 NaN
# # 2 10 ccc 00010 NaN
# # 3 20 ddd 00020 NaN
# # 4 100 eee 00100 NaN
# # 5 200 fff 00200 NaN
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # specify axis=1
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1)
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
#
# #Add a new column that shows id with decimal places
# df['id_3']=df['id'].apply(lambda x: "{:.2f}".format(x))
# print(df)
# # id name id_2 id_name id_3
# # 0 1 aaa 00001 00001_aaa 1.00
# # 1 2 bbb 00002 00002_bbb 2.00
# # 2 10 ccc 00010 00010_ccc 10.00
# # 3 20 ddd 00020 00020_ddd 20.00
# # 4 100 eee 00100 00100_eee 100.00
# # 5 200 fff 00200 00200_fff 200.00
#
# df['name_3']=df['name'].apply(lambda x:x.upper()) #upper(): converts to uppercase
# print(df)
# # id name id_2 id_name id_3 name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF
#
#
# # Add an id_name_3 column
# # id_name_3 => 1.00:AAA
#
# df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1)
# print(df)
# # id name id_2 id_name id_3 name_3 id_name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF
#
###################################################################################################################
#groupby aggregation functions
# 1. Grouping with a dictionary
# As above: build a dictionary whose keys are the original labels and whose values are the group names
# data= : pass the data and organize it with the columns and index arguments.
df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4'])
print(df)
# c1 c2 c3 c4 c5
# r1 0 1 2 3 4
# r2 5 6 7 8 9
# r3 10 11 12 13 14
# r4 15 16 17 18 19
# row_g1 = r1+r2 : create new rows by row-wise aggregation (elements in the same column are added: sum())
# row_g2 = r3+r4
mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 0 1 2 3 4
# row_g2 5 6 7 8 9
# row_g3 10 11 12 13 14
# row_g4 15 16 17 18 19
mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
print(gbr.mean())
# c1 c2 c3 c4 c5
# row_g1 2.5 3.5 4.5 5.5 6.5
# row_g2 12.5 13.5 14.5 15.5 16.5
print(gbr.std())
# c1 c2 c3 c4 c5
# row_g1 3.535534 3.535534 3.535534 3.535534 3.535534
# row_g2 3.535534 3.535534 3.535534 3.535534 3.535534
# col_g1 = c1+c2 : create new columns by column-wise aggregation (elements in the same row are added: sum()); axis=1 is required.
# col_g2 = c3+c4+c5
mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'}
gbc = df.groupby(mdc,axis=1) # axis=1 must be given here
print(gbc.sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
print(type(mdr))
# <class 'dict'>
print(mdr)
# {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'}
# dic -> Series
# Grouping with a Series
msr = Series(mdr)
print(type(msr))
# <class 'pandas.core.series.Series'>
print(msr)
# r1 row_g1
# r2 row_g1
# r3 row_g2
# r4 row_g2
# dtype: object
print(df.groupby(msr).sum()) # same result as with the dictionary
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
msc = Series(mdc)
print(df.groupby(msc,axis=1).sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
#Grouping with a function
# Group with the function rgf declared in place of a dictionary or Series (each of df's index labels is passed to rgf as x)
def rgf(x) :
if x == 'r1' or x == 'r2':
rg = 'row_g1'
else:
rg = 'row_g2'
return rg
# Group in the same way as with the dictionary/Series and build the DataFrame from the grouped aggregation results
print(df.groupby(rgf).sum())
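# For completeness (illustrative addition, not part of the original lesson):
# a function can also group columns when axis=1 is passed, mirroring mdc above.
def cgf(x):
    return 'col_g1' if x in ('c1', 'c2') else 'col_g2'
print(df.groupby(cgf, axis=1).sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54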

# File: research/slim/eval_lib.py (repo: plant-tw/models_plant)
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import yaml
from collections import Iterable, defaultdict
from itertools import cycle
import subprocess
import PIL
import math
import os
from PIL import Image
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.training import monitored_session
from datasets.plants import read_label_file
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
from matplotlib.font_manager import FontManager
import matplotlib
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
slim = tf.contrib.slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
OUTPUT_MODEL_NODE_NAMES_DICT = {
'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1',
'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1',
}
def define_tf_flags():
BATCH_SIZE = 100
tf.app.flags.DEFINE_integer(
'batch_size', BATCH_SIZE, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'max_num_batches', None,
'Max number of batches to evaluate by default use all.')
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/tfmodel/',
'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
'dataset_name', 'plants', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'validation',
'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'mobilenet_v1',
'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None,
'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
'eval_image_size', None, 'Eval image size')
FLAGS = tf.app.flags.FLAGS
def get_dataset_dir(config):
return get_config_value(config, 'dataset_dir')
def get_config_value(config, key):
return config.get(key) or getattr(FLAGS, key)
def get_checkpoint_dir_path(config):
return get_config_value(config, 'checkpoint_path')
def get_lastest_check_point(config):
checkpoint_path = get_checkpoint_dir_path(config)
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
return checkpoint_path
def inspect_tfrecords(tfrecords_filename):
record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)
examples = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
examples.append(example)
# print(example)
return examples
def get_info(config, checkpoint_path=None,
calculate_confusion_matrix=False):
dataset_dir = get_dataset_dir(config)
model_name = get_model_name(config)
# tf.logging.set_verbosity(tf.logging.INFO)
tf.Graph().as_default()
tf_global_step = slim.get_or_create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir)
####################
# Select the model #
####################
num_classes = (dataset.num_classes - FLAGS.labels_offset)
network_fn = nets_factory.get_network_fn(
model_name,
num_classes=num_classes,
is_training=False)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
        num_epochs=1,  # read every image only once
# num_readers=1,
shuffle=False,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
# common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
raw_images = image
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
allow_smaller_final_batch=True,
capacity=5 * FLAGS.batch_size)
####################
# Define the model #
####################
logits, _ = network_fn(images)
if FLAGS.moving_average_decay:
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, tf_global_step)
variables_to_restore = variable_averages.variables_to_restore(
slim.get_model_variables())
variables_to_restore[tf_global_step.op.name] = tf_global_step
else:
variables_to_restore = slim.get_variables_to_restore()
predictions = tf.argmax(logits, 1)
one_hot_predictions = slim.one_hot_encoding(
predictions, dataset.num_classes - FLAGS.labels_offset)
labels = tf.squeeze(labels)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'Recall_5': slim.metrics.streaming_recall_at_k(
logits, labels, 5),
})
if calculate_confusion_matrix:
confusion_matrix = tf.confusion_matrix(labels=labels,
num_classes=num_classes,
predictions=predictions)
else:
confusion_matrix = None
# Print the summaries to screen.
for name, value in names_to_values.items():
summary_name = 'eval/%s' % name
op = tf.summary.scalar(summary_name, value, collections=[])
op = tf.Print(op, [value], summary_name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
# TODO(sguada) use num_epochs=1
if FLAGS.max_num_batches:
num_batches = FLAGS.max_num_batches
else:
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
checkpoint_path = checkpoint_path or get_lastest_check_point(config)
tf.logging.info('Evaluating %s' % checkpoint_path)
labels_to_names = read_label_file(dataset_dir)
probabilities = tf.nn.softmax(logits)
softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy(
one_hot_predictions, logits, label_smoothing=0.0, weights=1.0)
grad_imgs = tf.gradients(softmax_cross_entropy_loss,
images)[0]
return {
'labels_to_names': labels_to_names,
'checkpoint_path': checkpoint_path,
'num_batches': num_batches,
'names_to_values': names_to_values,
'names_to_updates': names_to_updates,
'variables_to_restore': variables_to_restore,
'images': images,
'raw_images': raw_images,
'network_fn': network_fn,
'labels': labels,
'logits': logits,
'probabilities': probabilities,
'predictions': predictions,
'confusion_matrix': confusion_matrix,
'loss': softmax_cross_entropy_loss,
'grad_imgs': grad_imgs,
}
def get_monitored_session(checkpoint_path):
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
# scaffold=scaffold,
# master=master,
# config=config
)
return monitored_session.MonitoredSession(
session_creator=session_creator)
def plot_confusion_matrix(confusion_matrix, labels_to_names=None,
save_dir='.'):
import seaborn as sns
set_matplot_zh_font()
# ax = plt.subplot()
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(18, 15)
# https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black
# confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01,
# confusion_matrix)
cmap = plt.get_cmap('Accent')
# cmap = plt.get_cmap('coolwarm')
# cmap = plt.get_cmap('plasma')
# cmap = plt.get_cmap('Blues')
# cmap.set_bad(color='black')
mask = np.zeros_like(confusion_matrix)
mask[confusion_matrix == 0] = True
# sns.set(font_scale=1)
with sns.axes_style('darkgrid'):
sns.heatmap(confusion_matrix,
linewidths=0.2,
linecolor='#eeeeee',
xticklabels=True,
yticklabels=True,
mask=mask, annot=False, ax=ax, cmap=cmap)
n = confusion_matrix.shape[0]
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Confusion Matrix')
axis = [labels_to_names[i] if labels_to_names else i
for i in range(n)]
ax.xaxis.set_ticklabels(axis, rotation=270)
ax.yaxis.set_ticklabels(axis, rotation=0)
pic_path = os.path.join(save_dir, 'confusion_matrix.png')
plt.savefig(pic_path)
print(pic_path, 'saved')
print('plot shown')
plt.show()
def get_matplot_zh_font():
# From https://blog.csdn.net/kesalin/article/details/71214038
fm = FontManager()
mat_fonts = set(f.name for f in fm.ttflist)
output = subprocess.check_output('fc-list :lang=zh-tw -f "%{family}\n"',
shell=True)
zh_fonts = set(f.split(',', 1)[0] for f in output.split('\n'))
available = list(mat_fonts & zh_fonts)
return available
def set_matplot_zh_font():
available = get_matplot_zh_font()
if len(available) > 0:
        plt.rcParams['font.sans-serif'] = [available[0]]  # set the default font
plt.rcParams['axes.unicode_minus'] = False
def deprocess_image(x, target_std=0.15):
# normalize tensor
x = np.abs(x)
x = np.max(x, axis=2)
x -= x.mean()
std = x.std()
if std:
x /= std
x *= target_std
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
def plot_image_in_grids(image_list, n_columns, file_name=None):
image_table = chunks(image_list, n_columns)
n_row = len(image_table)
plt.figure(figsize=(15, 10))
i = 1
for row in image_table:
for col in row:
plt.subplot(n_row, n_columns, i)
plt.imshow(col)
i += 1
if file_name:
plt.savefig(file_name)
print(file_name, 'saved')
else:
print('plot shown')
plt.show()
def plot_saliency(saliency, image, file_name=None):
plt.figure(figsize=(15, 10))
plot_image_in_grids([
[saliency, image]
], file_name)
def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False):
checkpoint_dir_path = get_checkpoint_dir_path(config)
if use_cached:
aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5')
if aggregated is not None:
return aggregated
calculate_confusion_matrix = True
info = get_info(config,
calculate_confusion_matrix=calculate_confusion_matrix)
num_batches = info['num_batches']
aggregated = {}
checkpoint_path = checkpoint_path or get_lastest_check_point(config)
with get_monitored_session(checkpoint_path) as sess:
for i in range(int(math.ceil(num_batches))):
print('batch #{} of {}'.format(i, num_batches))
params = {
k: v
for k, v in info.items()
if isinstance(v, tf.Tensor) and (not keys or k in keys)
}
try:
feed_dict = {}
res = sess.run(params, feed_dict=feed_dict)
except:
import traceback
traceback.print_exc()
raise
for k in res.keys():
value = res[k]
if k == 'confusion_matrix':
if k not in aggregated:
aggregated[k] = np.matrix(value)
else:
aggregated[k] += np.matrix(value)
else:
if k not in aggregated:
aggregated[k] = []
if isinstance(value, Iterable):
aggregated[k].extend(value)
else:
aggregated[k].append(value)
labels = res['labels']
print('len labels', len(labels))
all_labels = aggregated['labels']
print('all_labels length', len(all_labels))
print('all_labels unique length', len(set(all_labels)))
if use_cached:
save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated)
return aggregated
def _run_saliency_maps(config, use_cached=False):
checkpoint_path = get_lastest_check_point(config)
keys = [
'labels',
'images',
'grad_imgs',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
grad_imgs = aggregated['grad_imgs']
images = aggregated['images']
prefix = ''
save_saliency_maps(config, grad_imgs, images, prefix,
labels=aggregated['labels'])
def _run_info(config, use_cached=False):
checkpoint_path = get_lastest_check_point(config)
keys = [
'labels',
'images',
# 'raw_images',
'logits',
'probabilities',
'predictions',
'confusion_matrix',
# 'loss',
'grad_imgs',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
from collections import Counter
all_labels = aggregated['labels']
c = Counter(all_labels)
kv_pairs = sorted(dict(c).items(), key=lambda p: p[0])
for k, v in kv_pairs:
print(k, v)
def save_var(directory, file_name, info):
import h5py
info_file_path = os.path.join(directory, file_name)
f = h5py.File(info_file_path, 'w')
for k, v in info.items():
f[k] = v
f.close()
print(info_file_path, 'saved')
def load_var(directory, file_name):
import h5py
info_file_path = os.path.join(directory, file_name)
try:
with h5py.File(info_file_path, 'r') as f:
return {
k: f[k][:] for k in f.keys()
}
except IOError:
return None
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
return [l[i:i + n] for i in range(0, len(l), n)]
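# Illustrative example (added; not part of the original module):
#   chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]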
def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None):
n = images.shape[0]
save_dir = 'saliency_maps'
labels_to_names = read_label_file(get_dataset_dir(config))
label_count_map = defaultdict(int)
try:
os.makedirs(save_dir)
except OSError:
pass
for j in range(n):
image = images[j]
grad_img = grad_imgs[j]
label = labels[j]
label_name = labels_to_names[label]
if label_count_map[label] >= 10:
continue
file_name = '{}/{}{:03d}.jpg'.format(
save_dir,
'{:02}_{}_{}'.format(
label, label_name.encode('utf-8'),
prefix) if labels is not None else prefix,
label_count_map[label])
saliency = deprocess_image(grad_img, target_std=0.3)
restored_image = ((image / 2 + 0.5) * 255).astype('uint8')
blend = get_image_with_saliency_map(restored_image, saliency)
plot_image_in_grids([
saliency,
restored_image,
blend,
], n_columns=2, file_name=file_name)
label_count_map[label] += 1
def _plot_roc(logits_list, labels, predictions, probabilities,
plot_all_classes=False, save_dir=None):
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
possible_labels = list(range(max(labels) + 1))
y_binary = label_binarize(labels, classes=possible_labels)
output_matrix = np.array(probabilities)
y_score_matrix = output_matrix
y_score_matrix = np.where(
y_score_matrix == np.max(y_score_matrix, axis=1)[:, None],
y_score_matrix, 0)
tpr = {}
fpr = {}
roc_auc = {}
for i in range(len(possible_labels)):
y_scores = y_score_matrix[:, i]
fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores)
roc_auc[i] = auc(fpr[i], tpr[i])
    # Reference: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
y_score_matrix_ravel = y_score_matrix.ravel()
i_positive = y_score_matrix_ravel != 0
fpr["highest_probability"], tpr[
"highest_probability"], micro_thresholds = roc_curve(
y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive])
roc_auc["highest_probability"] = auc(fpr["highest_probability"],
tpr["highest_probability"])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], micro_thresholds = roc_curve(
y_binary.ravel(), y_score_matrix.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
n_classes = len(possible_labels)
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# key_series = 'micro'
key_series = 'highest_probability'
i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series])
optimal_threshold_fpr = fpr[key_series][i_optimal_micro]
optimal_threshold_tpr = tpr[key_series][i_optimal_micro]
optimal_threshold = micro_thresholds[i_optimal_micro]
print('optimal_threshold_fpr:', optimal_threshold_fpr)
print('optimal_threshold_tpr:', optimal_threshold_tpr)
print('optimal_threshold:', optimal_threshold)
# Plot all ROC curves
plt.figure()
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
if plot_all_classes:
for i, color in zip(range(n_classes), colors):
label = 'ROC curve of class {0} (area = {1:0.2f})'.format(
i, roc_auc[i])
label = None
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label=label)
plt.plot(fpr["highest_probability"], tpr["highest_probability"],
label='ROC curve (area = {0:0.2f})'
''.format(roc_auc["highest_probability"]),
color='blue', linestyle=':', linewidth=4)
# plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
pic_path = os.path.join(save_dir, 'roc_curve.png')
plt.savefig(pic_path)
print(pic_path, 'saved')
print('ROC curve shown')
plt.show()
def _roc_analysis(config, use_cached=False):
checkpoint_dir_path = get_checkpoint_dir_path(config)
keys = [
'logits',
'labels',
'predictions',
'probabilities',
]
info = _eval_tensors(config, keys=keys, use_cached=use_cached)
logits_list = info['logits']
labels = info['labels']
predictions = info['predictions']
probabilities = info['probabilities']
_plot_roc(logits_list, labels, predictions, probabilities,
save_dir=checkpoint_dir_path)
return
def inspect_datasets(config):
dataset_dir = get_dataset_dir(config)
examples = []
for i in range(5):
tfrecords_filename = os.path.join(
dataset_dir,
'plants_validation_{:05d}-of-00005.tfrecord'.format(i))
examples.extend(inspect_tfrecords(tfrecords_filename))
print(len(examples))
examples = []
for i in range(5):
tfrecords_filename = os.path.join(
dataset_dir,
'plants_train_{:05d}-of-00005.tfrecord'.format(i))
examples.extend(inspect_tfrecords(tfrecords_filename))
print(len(examples))
def resize(im, target_smallest_size):
resize_ratio = 1.0 * target_smallest_size / min(list(im.size))
target_size = tuple(int(resize_ratio * l) for l in im.size)
return im.resize(target_size, PIL.Image.BILINEAR)
def central_crop(im, w, h):
half_w = im.size[0] / 2
half_h = im.size[1] / 2
return im.crop(
(half_w - w / 2, half_h - h / 2, half_w + w / 2, half_h + h / 2))
def pre_process_resnet(im, coreml=False):
target_smallest_size = 224
im1 = resize(im, target_smallest_size)
im2 = central_crop(im1, target_smallest_size, target_smallest_size)
arr = np.asarray(im2).astype(np.float32)
if not coreml:
arr[:, :, 0] -= _R_MEAN
arr[:, :, 1] -= _G_MEAN
arr[:, :, 2] -= _B_MEAN
return arr
def central_crop_by_fraction(im, central_fraction):
w = im.size[0]
h = im.size[1]
return central_crop(im, w * central_fraction, h * central_fraction)
def pre_process_mobilenet(im, coreml=False):
    # Reference: preprocess_for_eval in
    # https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py
im1 = central_crop_by_fraction(im, 0.875)
target_smallest_size = 224
im2 = im1.resize((target_smallest_size, target_smallest_size),
PIL.Image.BILINEAR)
arr = np.asarray(im2).astype(np.float32)
if not coreml:
arr /= 255.0
arr -= 0.5
arr *= 2.0
return arr
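# Note (added): the scaling above maps pixel values from [0, 255] to [-1, 1],
# e.g. 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0, matching the referenced
# inception-style eval preprocessing.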
def pre_process(config, im, coreml=False):
model_name = get_model_name(config)
return {
'resnet_v2_50': pre_process_resnet,
'mobilenet_v1': pre_process_mobilenet,
}[model_name](im, coreml=coreml)
def get_model_name(config):
model_name = get_config_value(config, 'model_name')
return model_name
def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None):
# http://www.cnblogs.com/arkenstone/p/7551270.html
filenames = [
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),
('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),
# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),
]
for filename, label in filenames:
filename = dataset_dir_file(config, filename)
# image_np = cv2.imread(filename)
result = run_inference_on_file_pb(
config, filename, pb_file_path=pb_file_path,
dataset_dir=dataset_dir)
index = result['prediction_label']
print("Prediction label index:", index)
prediction_name = result['prediction_name']
print("Prediction name:", prediction_name)
print("Top 3 Prediction label index:", ' '.join(result['top_n_names']))
assert prediction_name == label
def dataset_dir_file(config, filename):
filename = os.path.join(get_dataset_dir(config), filename)
return filename
def run_inference_by_pb(config, image_np, pb_file_path=None):
checkpoint_dir_path = get_checkpoint_dir_path(config)
pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path
    with tf.gfile.GFile(pb_file_path, 'rb') as f:  # binary mode for the serialized GraphDef
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return _run_inference_by_graph_def(config, graph_def, image_np)
def _run_inference_by_graph_def(config, graph_def, image_np,
enable_saliency_maps=False):
model_name = get_model_name(config)
image_size = 224
image_np = pre_process(config, image_np)
image_np = cv2.resize(image_np, (image_size, image_size))
# expand dims to shape [None, 299, 299, 3]
image_np = np.expand_dims(image_np, 0)
graph = tf.import_graph_def(graph_def, name='')
with tf.Session(graph=graph) as sess:
input_tensor_name = "input:0"
# output_tensor_name = "resnet_v2_50/predictions/Reshape_1:0"
output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[
model_name] + ":0"
input_tensor = sess.graph.get_tensor_by_name(
input_tensor_name) # get input tensor
output_tensor = sess.graph.get_tensor_by_name(
output_tensor_name) # get output tensor
tensor_map = {
'logits': output_tensor,
}
if enable_saliency_maps:
tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name(
'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0')
result = sess.run(tensor_map, feed_dict={input_tensor: image_np})
return {
'logits': result['logits'],
'grad_imgs': result.get('grad_imgs'),
}
def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None):
labels_to_names = read_label_file(get_dataset_dir(config))
dataset_dir = get_dataset_dir(config)
filenames = [
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),
('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),
# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),
]
for filename, label in filenames:
filename = os.path.join(dataset_dir, filename)
image_np = PIL.Image.open(filename)
logits = run_inference_by_coreml(
config, image_np, coreml_file_path=coreml_file_path, )
print('logits', logits)
index = np.argmax(logits)
print("Prediction label index:", index)
prediction_name = labels_to_names[index]
print("Prediction name:", prediction_name)
index_list = np.argsort(logits)
print("Top 3 Prediction label index:",
index_list,
' '.join([labels_to_names[i] for i in list(index_list)]))
assert prediction_name == label
def run_inference_by_coreml(config, image_np, coreml_file_path=None):
import coremltools
import tfcoreml
model_name = get_model_name(config)
checkpoint_dir_path = get_checkpoint_dir_path(config)
frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path
coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path
image_np = pre_process(config, image_np, coreml=True)
image = Image.fromarray(image_np.astype('int8'), 'RGB')
input_tensor_shapes = {
"input:0": [1, image_np.shape[0], image_np.shape[1],
3]} # batch size is 1
output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + ":0"
coreml_model = coremltools.models.MLModel(coreml_model_file)
convert_model = False
# convert_model = True
if convert_model:
extra_args = {
'resnet_v2_50': {
'red_bias': -_R_MEAN,
'green_bias': -_G_MEAN,
'blue_bias': -_B_MEAN,
},
'mobilenet_v1': {
'red_bias': -1.0,
'green_bias': -1.0,
'blue_bias': -1.0,
'image_scale': 2.0 / 255.,
}
}[model_name]
coreml_model = tfcoreml.convert(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file.replace('.mlmodel',
'_test.mlmodel'),
input_name_shape_dict=input_tensor_shapes,
output_feature_names=[output_tensor_name],
image_input_names=['input:0'],
**extra_args
)
coreml_inputs = {'input__0': image}
coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False)
# example output: 'resnet_v2_50__predictions__Reshape_1__0'
probs = coreml_output[
output_tensor_name.replace('/', '__').replace(':', '__')].flatten()
return probs
def run_inference_on_file_pb(config, filename, pb_file_path=None,
dataset_dir=None):
labels_to_names = read_label_file(get_dataset_dir(config))
image_np = PIL.Image.open(filename)
logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[
'logits']
index = np.argmax(logits, 1)
prediction_name = labels_to_names[index[0]]
index_list = np.argsort(logits, 1)
top_n_names = list(reversed(
[labels_to_names[i] for i in list(index_list[0])]))
print('logits', logits)
result = {
'prediction_name': prediction_name,
'prediction_label': index[0],
'top_n_names': top_n_names,
'logits': logits.tolist(),
}
return result
def test_inference_by_model_files(config, dataset_dir=None,
frozen_graph_path=None,
coreml_file_path=None):
dataset_dir = dataset_dir or get_dataset_dir(config)
test_inference_by_pb(config, pb_file_path=frozen_graph_path,
dataset_dir=dataset_dir)
test_inference_by_coreml(config, coreml_file_path=coreml_file_path,
dataset_dir=dataset_dir)
def get_image_with_saliency_map(image_np, saliency):
image_np = np.copy(np.asarray(image_np))[:, :]
w, h = image_np.shape[0:2]
l = min(w, h)
saliency = cv2.resize(saliency, (l, l))
saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB)
canvas = image_np[:, :]
w_offset = int((w - l) / 2)
h_offset = int((h - l) / 2)
roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l]
intensify_factor = 3
alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1)
paint = np.copy(1 - alpha) * 255
overlap = roi_img[paint > 128]
if overlap.mean() + overlap.std() > 128:
color = np.array([0, 0, 255]).astype(float) / 255 # blue
else:
color = np.array([255, 200, 0]).astype(float) / 255 # orange
paint[:, :] *= color
roi_img = cv2.multiply(alpha, roi_img.astype(float))
roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int)
canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img
return canvas
def test_frozen_graph_saliency_map(config):
checkpoint_dir = config['checkpoint_path']
dataset_dir = get_dataset_dir(config)
frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb')
filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg')
labels_to_names = read_label_file(dataset_dir)
image_np = PIL.Image.open(filename)
results = run_inference_by_pb(config, image_np,
pb_file_path=frozen_graph_path)
logits = results['logits']
index = np.argmax(logits, 1)[0]
prediction_name = labels_to_names[index]
grad_imgs = results['grad_imgs']
saliency = deprocess_image(grad_imgs[0])
blend = get_image_with_saliency_map(image_np, saliency)
print(prediction_name)
plot_image_in_grids([
blend, image_np,
saliency,
], 2)
@click.group()
def cli():
pass
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def run_info(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_run_info(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
def test_models(config_file):
with open(config_file) as f:
config = yaml.load(f)
test_inference_by_model_files(config)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def plot_roc(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_roc_analysis(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def saliency_maps(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_run_saliency_maps(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def confusion_matrix(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
keys = [
'confusion_matrix',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
checkpoint_dir_path = get_checkpoint_dir_path(config)
dataset_dir = get_dataset_dir(config)
labels_to_names = read_label_file(dataset_dir)
plot_confusion_matrix(aggregated['confusion_matrix'],
labels_to_names=labels_to_names,
save_dir=checkpoint_dir_path)
if __name__ == '__main__':
define_tf_flags()
cli()
| 2.359375 | 2 |
tools/agile-machine-learning-api/codes/trainer/utils/metric_utils.py | ruchirjain86/professional-services | 2,116 | 12793759 | <gh_stars>1000+
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for creating metrics in tensorflow for model training."""
import tensorflow as tf
def mean_acc(labels, predictions, num_classes):
    """Mean per-class accuracy metric.
    Arguments:
        labels: tf.Tensor objects, True values of the dependent variable
        predictions: tf.Tensor objects, Predictions from the model
        num_classes: int, Number of distinct classes in the dependent variable
    Returns:
        The mean per class accuracy
    """
return {'mean_class_acc': tf.metrics.mean_per_class_accuracy(
labels,
predictions['class_ids'], num_classes)
}
def my_auc(labels, predictions):
"""Custom AUC metric using interpolation.
Arguments:
labels: tf.Tensor objects, True values of dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
The AUC metric for the model
"""
return {'auc_ci': tf.metrics.auc(
labels,
predictions['class_ids'],
summation_method='careful_interpolation')
}
def rmse(labels, predictions):
"""Root mean squared error metric for regression tasks.
Arguments:
labels: tf.Tensor objects, True values of dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
Root mean squared error for regression model
"""
return {'root_mean_square_error': tf.metrics.root_mean_squared_error(
labels,
predictions['predictions'])
}
def mar(labels, predictions):
"""Mean absolute error for regression model.
Arguments:
labels: tf.Tensor objects, True values of dependent variable
predictions: tf.Tensor objects, Predictions from the model
Returns:
Mean absolute error for the regression model
"""
return {'mean_absolute_error': tf.metrics.mean_absolute_error(
labels,
predictions['predictions'])
}
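# Usage sketch (added for illustration, not part of the original module): one
# possible way to attach these metric functions to a canned classifier
# Estimator, assuming a TF 1.13+ environment where `tf.estimator.add_metrics`
# is available. The `estimator` and `num_classes` arguments are the caller's
# own; adapt as needed.
def attach_classification_metrics(estimator, num_classes):
    """Returns a copy of `estimator` that also reports mean_acc and my_auc."""
    import functools
    with_acc = tf.estimator.add_metrics(
        estimator, functools.partial(mean_acc, num_classes=num_classes))
    return tf.estimator.add_metrics(with_acc, my_auc)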
| 2.671875 | 3 |
2-Medium/maxEvents.py | Sma-Das/Leetcode | 0 | 12793760 | <reponame>Sma-Das/Leetcode
def maxEvents(events: list[list[int, int]]) -> int:
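    """Greedy sketch: events are sorted by (duration, start, end) and each one
    claims the earliest still-free day in its [start, end] window; the answer
    is the number of reserved days."""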
events = sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1]))
reserved = set()
for start, end in events:
if start not in reserved:
reserved.add(start)
else:
for day in range(start, end+1):
if day not in reserved:
reserved.add(day)
break
return len(reserved)
if __name__ == '__main__':
events = [
[[1, 2], [2, 3], [3, 4]],
[[1, 2], [2, 3], [3, 4], [1, 2]],
[[1, 4], [4, 4], [2, 2], [3, 4], [1, 1]],
[[1, 100000]],
[[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7]],
]
for event in events:
print(maxEvents(event))
| 2.75 | 3 |
test_app.py | suchayan01/scaler-boocamp | 0 | 12793761 | from app import app
with app.test_client() as c:
    response = c.get('/')
    assert response.data == b'Hello World!'
    assert response.status_code == 200
coding/learn_collections/defaultdict_01_demo.py | yatao91/learning_road | 3 | 12793762 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from collections import defaultdict
def default_factory():
return "default value"
d = defaultdict(default_factory, foo="bar")
print("d: ", d)
print("foo => ", d["foo"])
print("bar => ", d["bar"])
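# Expected output: "foo" maps to the explicitly supplied "bar", while the
# missing "bar" key invokes default_factory, prints "default value" and is
# inserted into the dict as a side effect of the lookup.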
| 3 | 3 |
dps.py | mattflanzer/photosort | 0 | 12793763 | # dropbox-photo-sorter
# (c)2018 <NAME>
# License: MIT
import sys
import re
import os
import shutil
from PIL import Image
import datetime
import time
import PIL.ExifTags
import sqlite3
import requests
import json
import unidecode
import argparse
class Storage:
def __init__(self, pathname):
self.year = None
self.month = None
self.country = None
self.state = None
self.city = None
if pathname:
unix = os.path.getmtime(pathname)
dt = datetime.datetime.utcfromtimestamp(unix)
self.year = "{:%Y}".format(dt)
self.month = "{:%m}".format(dt)
self.cached = False
def dict(self):
return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city}
def __str__(self):
return str(self.dict())
def __getitem__(self,key):
return self.dict()[key]
def item(self,key):
if key=='y':
return self.year
elif key=='m':
return self.month
elif key=='c':
return self.country
elif key=='s':
return self.state
elif key=='l':
return self.city
else:
return None
class Node:
prefixsz = 8
def __init__(self):
self.children={}
self.value = []
def isLeaf(self):
return len(self.children)==0
def add(self,k):
if k is None:
return self
if not k in self.children:
self.children[k] = Node()
return self.children[k]
def merge(self,n):
for k,v in n.children.iteritems():
if k in self.children:
self.children[k].merge(v)
else:
self.children[k] = v
self.value += n.value
def flatten(self):
new_node = Node()
for k,v in self.children.iteritems():
new_node.merge(v)
self.children = new_node.children
self.value += new_node.value
def collapse(self,level,minimum,current,show_collapse):
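        # Depth-first pass that folds sparse branches: once past `level`
        # levels, a child with fewer than `minimum` entries is flattened, and
        # if it is still small it gets merged into this node and removed.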
num_children = len(self.children)
for k,n in self.children.items():
n.collapse(level,minimum,current+1,show_collapse)
if (n.size() < minimum) and (current >= level-1):
if show_collapse:
print "flattening %s at %d with %s"%(k,current,n.children.keys())
n.flatten()
if (num_children < minimum) and (n.size() < minimum) and (current >= level):
if show_collapse:
print "merging %s at %d with %s"%(k,current,n.children.keys())
self.merge(n)
del self.children[k]
def dict(self,full_path):
rtrn = {}
for v, cached in self.value:
rtrn[v] = os.path.basename(v) if full_path else ''
for k,n in self.children.iteritems():
for dk,dv in n.dict(full_path).iteritems():
rtrn[dk] = "%s/%s"%(k,dv)
return rtrn
def dump(self,show_cached=True,level=0):
prefix = ' ' * (Node.prefixsz * level)
if self.value:
for v, cached in self.value:
if not cached or show_cached:
print prefix+os.path.basename(v)
for k,n in self.children.iteritems():
cached,non_cached = n.count_cached()
if (non_cached > 0) or show_cached:
print "%s%s/"%(prefix,k)
n.dump(show_cached,level+1)
def count_cached(self):
cached = 0
non_cached = 0
if self.value:
cached = len(filter(lambda (v,c): c, self.value))
non_cached = len(self.value) - cached
for k,n in self.children.iteritems():
(sub_cached, sub_non_cached) = n.count_cached()
cached += sub_cached
non_cached += sub_non_cached
return (cached, non_cached)
def size(self):
return len(self.value) + len(self.children)
class StorageTree:
default_mode='ymcsl'
def __init__(self,stores,mode=default_mode):
self.head = Node()
for k,v in stores.iteritems():
node = self.head
for m in mode:
node = node.add(v.item(m))
node.value.append((k,v.cached))
def dict(self,full_path):
return self.head.dict(full_path)
def dump(self,show_cached=True):
self.head.dump(show_cached)
def collapse(self,level,minimum,show_collapse):
self.head.collapse(level,minimum,0,show_collapse)
@classmethod
def fromDirectory(cls,root,fd,cache,mode,google):
stores = {}
if fd:
fd.write('Processing ')
lfileext = lambda f: os.path.splitext(f)[1].lower()
all_files = [os.path.join(d,filename) for d, _, files in os.walk(root) for filename in files if lfileext(filename) in ('.jpg','.jpeg')]
progress = Progress(all_files,fd)
for pathname in progress:
s = cache.get(pathname) if cache else None
if s is None:
s = Storage(pathname)
try:
im = Image.open(pathname)
exif = ExifData(im._getexif())
if exif.year and exif.month:
s.year = exif.year
s.month = exif.month
if exif.lat and exif.lon:
if google is None:
(s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc
else:
(s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc
#print "%s: %s"%(filename,s)
except Exception as e:
#print "Exception %s: %s" %(filename,str(e))
pass
if cache:
cache.put(pathname,s)
stores[pathname] = s
if cache:
cache.flush()
return cls(stores,mode)
class ExifData:
def __init__(self,exifraw):
if 0x0132 in exifraw:
self.year = exifraw[0x0132][:4]
self.month = exifraw[0x0132][5:7]
else:
self.year = None
self.month = None
if 0x8825 in exifraw:
gpsraw = exifraw[0x8825]
self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S')
self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W')
else:
self.lat = None
self.lon = None
@staticmethod
def degrees(raw,neg):
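        # Converts an EXIF GPS rational triplet (degrees, minutes, seconds),
        # each stored as (numerator, denominator), into signed decimal degrees,
        # e.g. ((40,1),(26,1),(4614,100)) with neg=False -> 40.44615.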
((degreesNumerator, degreesDenominator),
(minutesNumerator, minutesDenominator),
(secondsNumerator, secondsDenominator)) = raw
Degrees = (float(degreesNumerator) / float(degreesDenominator))
Minutes = (float(minutesNumerator) / float(minutesDenominator))
Seconds = (float(secondsNumerator) / float(secondsDenominator))
dd = Degrees + Minutes/60.0 + Seconds/3600.0
if neg:
dd *= -1.0
return dd
class Cache:
def __init__(self,rootdir,use_pending=False):
self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db'))
cursor = self.conn.cursor()
sql = """
CREATE TABLE IF NOT EXISTS storage (
hash int PRIMARY KEY,
year text,
month text,
country text,
state text,
city text,
filename text
);
"""
cursor.execute(sql)
self.conn.commit()
#print "created table storage"
self.pending = {}
self.use_pending = use_pending
def __getitem__(self,h):
cursor = self.conn.cursor()
sql = """
SELECT year, month, country, state, city
FROM storage
WHERE hash = ?
;
"""
cursor.execute(sql,(h,))
row = cursor.fetchone()
#print "found "+str(row)
if row is None:
return None
s = Storage(None)
(s.year, s.month, s.country, s.state, s.city) = row
s.cached = True
return s
def __contains__(self,h):
s = self[h]
return s is not None
def get(self,pathname):
h = self.make_hash(pathname)
return self.__getitem__(h)
def make_hash(self,pathname):
mtime = os.path.getmtime(pathname)
filename = os.path.basename(pathname)
return hash((filename,mtime))
def __setitem__(self,h,(filename,s)):
cursor = self.conn.cursor()
sql = """
INSERT INTO storage(hash, year, month, country, state, city, filename) VALUES (?, ?, ?, ?, ?, ?, ?);
"""
cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city, filename))
self.conn.commit()
#print "insert "+str(h)
return s
def put(self,pathname,s):
h = self.make_hash(pathname)
filename = os.path.basename(pathname)
if self.use_pending:
self.pending[h] = (filename,s)
return s
else:
return self.__setitem__(h,(filename,s))
def dump(self):
cursor = self.conn.cursor()
sql = """
SELECT * FROM storage;
"""
cursor.execute(sql)
rows = cursor.fetchall()
print rows
def flush(self):
if len(self.pending) > 0:
cursor = self.conn.cursor()
sql = "INSERT INTO storage(hash, year, month, country, state, city,filename) VALUES "
results = []
sqlext = []
for (k,(f,v)) in self.pending.iteritems():
sqlext.append("(?, ?, ?, ?, ?, ?, ?)")
results.append(k)
results.append(v.year)
results.append(v.month)
results.append(v.country)
results.append(v.state)
results.append(v.city)
results.append(f)
sql += ",".join(sqlext)
sql += ";"
cursor.execute(sql,results)
self.conn.commit()
self.pending = {}
class Progress:
def __init__(self,lst,fd):
self.lst = lst
self.sz = len(self.lst)
try:
if fd.isatty():
self.fd = fd
else:
self.fd = None
except:
self.fd = None
def __iter__(self):
start = datetime.datetime.now()
last_len = 0
s=''
for (i,x) in enumerate(self.lst):
yield x
elapsed = datetime.datetime.now() - start
if i+1 == self.sz: # last one
predict_display = " in %s"%(str(elapsed)[:7])
elif i > 2:
rate = float(i)/elapsed.total_seconds()
predict = datetime.timedelta(seconds = float(self.sz-i) / rate)
predict_display = " (%.2f fps, %s remaining)"%(rate,str(predict)[:7])
else:
predict_display = ''
s = "%d/%d%s"%(i+1,self.sz,predict_display)
if self.fd:
back = chr(8)*last_len
self.fd.write(back+s)
this_len = len(s)
if this_len < last_len:
self.fd.write(' '*(last_len-this_len))
else:
last_len = this_len
self.fd.flush()
if self.fd:
self.fd.write('\n')
self.fd.flush()
else:
print s
class GeoCoder(object):
def __init__(self,lat,lon):
self.loc = (None,None,None)
class GeoCoderGoogle(GeoCoder):
def __init__(self,lat,lon,key):
import googlemaps
super(GeoCoderGoogle,self).__init__(lat,lon)
# Look up an address with reverse geocoding
gmaps = googlemaps.Client(key=key)
reverse_geocode_result = gmaps.reverse_geocode((lat,lon))
address = reverse_geocode_result[0]['address_components']
city = GeoCoderGoogle.address_part(address,'locality')
state = GeoCoderGoogle.address_part(address,'administrative_area_level_1')
country = GeoCoderGoogle.address_part(address,'country')
self.loc=(city,state,country)
@staticmethod
def address_part(address,key):
for d in address:
if key in d['types']:
return d['long_name']
return None
class GeoCoderOpenStreetmap(GeoCoder):
def __init__(self,lat,lon):
super(GeoCoderOpenStreetmap,self).__init__(lat,lon)
r = requests.get("https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1"%(lat,lon))
nom = json.loads(r.text)
address = nom['address']
try:
city = unidecode.unidecode(address['city'])
except:
city = None
try:
state = unidecode.unidecode(address['state'])
except:
state = None
try:
country = unidecode.unidecode(address['country'])
except:
country = None
self.loc= (city,state,country)
def move_files(args):
(root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached)
storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google)
if show_collapse:
storage_tree.dump(show_cached)
        print 'collapsing: keep at least {} levels, fold directories with fewer than {} entries'.format(storage_levels,storage_min)
storage_tree.collapse(storage_levels,storage_min,show_collapse)
storage_tree.dump(show_cached)
for k,v in storage_tree.dict(True).iteritems():
d = os.path.join(root,os.path.dirname(v))+"/"
#print "Making directory %s"%d
if not dry_run:
try:
os.makedirs(os.path.dirname(d))
except Exception as e:
#print str(e)
pass
dst = os.path.join(root,v)
if k != dst:
print "%s->%s"%(k,v)
if not dry_run:
try:
shutil.move(k,dst)
except Exception as e:
print str(e)
def env(key,default):
try:
val = os.environ[key]
if isinstance(default,bool):
return val.lower() == 'true'
elif isinstance(default,int):
return int(val)
else:
return val
except:
return default
def main():
try:
# create the args list
parser = argparse.ArgumentParser()
parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help="Minimum number of subdirectories")
parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help="Minimum number of items in subdirectory before collapsing")
parser.add_argument('--dry-run',default=env('DRY_RUN',False),action="store_true",help="Calculate directory structure without moving files")
parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action="store_true",help="Display directory structure before collapsing")
parser.add_argument('--order',default=StorageTree.default_mode,help="Default directory structure. Must be permutation of 'YMCSL'. Y=Year; M=Month; C=Country; S=State; L=Locality/City")
parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help="Google Maps API Key. Specify this key to use Google Maps reverse geo-code service")
parser.add_argument('directory',help="Directory containing photos to be rearranged")
parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action="store_true",help="Show cached (previous) elements in directory structure")
args = parser.parse_args()
# check the order
args.order=args.order.lower()
oc = [0]*128
for ch in args.order:
oc[ord(ch)] += 1
for (i,cc) in enumerate(oc):
if cc > (1 if chr(i) in StorageTree.default_mode else 0):
raise RuntimeError("Invalid argument for --order. Must be permutation of 'YMCSL'")
# move the files
move_files(args)
print "This is DropBoxPhotoSorter"
#cache_.dump()
return 0
except Exception as e:
print e
import traceback
tb = traceback.format_exc()
print tb
return -1
if __name__=='__main__':
sys.exit(main())
| 2.703125 | 3 |
tests/test_assessment_status.py | ivan-c/truenth-portal | 0 | 12793764 | """Module to test assessment_status"""
from __future__ import unicode_literals # isort:skip
from datetime import datetime
from random import choice
from string import ascii_letters
from dateutil.relativedelta import relativedelta
from flask_webtest import SessionScope
import pytest
from sqlalchemy.orm.exc import NoResultFound
from portal.extensions import db
from portal.models.audit import Audit
from portal.models.clinical_constants import CC
from portal.models.encounter import Encounter
from portal.models.identifier import Identifier
from portal.models.intervention import INTERVENTION
from portal.models.organization import Organization
from portal.models.overall_status import OverallStatus
from portal.models.qb_status import QB_Status
from portal.models.qb_timeline import invalidate_users_QBT
from portal.models.questionnaire import Questionnaire
from portal.models.questionnaire_bank import (
QuestionnaireBank,
QuestionnaireBankQuestionnaire,
)
from portal.models.questionnaire_response import (
QuestionnaireResponse,
aggregate_responses,
qnr_document_id,
)
from portal.models.recur import Recur
from portal.models.research_protocol import ResearchProtocol
from portal.models.role import ROLE
from portal.models.user import get_user
from portal.system_uri import ICHOM
from tests import TEST_USER_ID, TestCase, associative_backdate
now = datetime.utcnow()
def mock_qr(
instrument_id, status='completed', timestamp=None, qb=None,
doc_id=None, iteration=None, user_id=TEST_USER_ID):
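    """Persist a mock QuestionnaireResponse for tests: the response is wrapped
    in a planned Encounter for the user, attached to the given (or current)
    QuestionnaireBank/iteration, and the user's cached QB timeline is
    invalidated so subsequent status lookups see the new response.
    """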
if not doc_id:
doc_id = ''.join(choice(ascii_letters) for _ in range(10))
timestamp = timestamp or datetime.utcnow()
qr_document = {
"questionnaire": {
"display": "Additional questions",
"reference":
"https://{}/api/questionnaires/{}".format(
'SERVER_NAME', instrument_id)},
"identifier": {
"use": "official",
"label": "cPRO survey session ID",
"value": doc_id,
"system": "https://stg-ae.us.truenth.org/eproms-demo"}
}
enc = Encounter(
status='planned', auth_method='url_authenticated', user_id=user_id,
start_time=timestamp)
with SessionScope(db):
db.session.add(enc)
db.session.commit()
enc = db.session.merge(enc)
if not qb:
qstats = QB_Status(get_user(user_id), timestamp)
qbd = qstats.current_qbd()
qb, iteration = qbd.questionnaire_bank, qbd.iteration
qr = QuestionnaireResponse(
subject_id=user_id,
status=status,
authored=timestamp,
document=qr_document,
encounter_id=enc.id,
questionnaire_bank=qb,
qb_iteration=iteration)
with SessionScope(db):
db.session.add(qr)
db.session.commit()
invalidate_users_QBT(user_id=user_id)
localized_instruments = {'eproms_add', 'epic26', 'comorb'}
metastatic_baseline_instruments = {
'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'}
metastatic_indefinite_instruments = {'irondemog'}
metastatic_3 = {
'eortc', 'eproms_add', 'ironmisc'}
metastatic_4 = {
'eortc', 'eproms_add', 'ironmisc', 'factfpsi'}
metastatic_6 = {
'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'}
symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'}
def mock_questionnairebanks(eproms_or_tnth):
"""Create a series of near real world questionnaire banks
:param eproms_or_tnth: controls which set of questionnairebanks are
generated. As restrictions exist, such as two QBs with the same
classification can't have the same instrument, it doesn't work to mix
them.
"""
if eproms_or_tnth == 'eproms':
return mock_eproms_questionnairebanks()
elif eproms_or_tnth == 'tnth':
return mock_tnth_questionnairebanks()
else:
        raise ValueError('expecting `eproms` or `tnth`, not `{}`'.format(
eproms_or_tnth))
def mock_eproms_questionnairebanks():
# Define base ResearchProtocols
localized_protocol = ResearchProtocol(name='localized_protocol')
metastatic_protocol = ResearchProtocol(name='metastatic_protocol')
with SessionScope(db):
db.session.add(localized_protocol)
db.session.add(metastatic_protocol)
db.session.commit()
localized_protocol = db.session.merge(localized_protocol)
metastatic_protocol = db.session.merge(metastatic_protocol)
locpro_id = localized_protocol.id
metapro_id = metastatic_protocol.id
# Define test Orgs and QuestionnaireBanks for each group
localized_org = Organization(name='localized')
localized_org.research_protocols.append(localized_protocol)
metastatic_org = Organization(name='metastatic')
metastatic_org.research_protocols.append(metastatic_protocol)
# from https://docs.google.com/spreadsheets/d/\
# 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238
three_q_recur = Recur(
start='{"months": 3}', cycle_length='{"months": 6}',
termination='{"months": 24}')
four_q_recur = Recur(
start='{"months": 6}', cycle_length='{"years": 1}',
termination='{"months": 33}')
six_q_recur = Recur(
start='{"years": 1}', cycle_length='{"years": 1}',
termination='{"years": 3, "months": 3}')
for name in (localized_instruments.union(*(
metastatic_baseline_instruments,
metastatic_indefinite_instruments,
metastatic_3,
metastatic_4,
metastatic_6))):
TestCase.add_questionnaire(name=name)
with SessionScope(db):
db.session.add(localized_org)
db.session.add(metastatic_org)
db.session.add(three_q_recur)
db.session.add(four_q_recur)
db.session.add(six_q_recur)
db.session.commit()
localized_org, metastatic_org = map(
db.session.merge, (localized_org, metastatic_org))
three_q_recur = db.session.merge(three_q_recur)
four_q_recur = db.session.merge(four_q_recur)
six_q_recur = db.session.merge(six_q_recur)
# Localized baseline
l_qb = QuestionnaireBank(
name='localized',
classification='baseline',
research_protocol_id=locpro_id,
start='{"days": 0}',
overdue='{"days": 7}',
expired='{"months": 3}')
for rank, instrument in enumerate(localized_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
l_qb.questionnaires.append(qbq)
# Metastatic baseline
mb_qb = QuestionnaireBank(
name='metastatic',
classification='baseline',
research_protocol_id=metapro_id,
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}')
for rank, instrument in enumerate(metastatic_baseline_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mb_qb.questionnaires.append(qbq)
# Metastatic indefinite
mi_qb = QuestionnaireBank(
name='metastatic_indefinite',
classification='indefinite',
research_protocol_id=metapro_id,
start='{"days": 0}',
expired='{"years": 50}')
for rank, instrument in enumerate(metastatic_indefinite_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mi_qb.questionnaires.append(qbq)
# Metastatic recurring 3
mr3_qb = QuestionnaireBank(
name='metastatic_recurring3',
classification='recurring',
research_protocol_id=metapro_id,
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}',
recurs=[three_q_recur])
for rank, instrument in enumerate(metastatic_3):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mr3_qb.questionnaires.append(qbq)
# Metastatic recurring 4
mr4_qb = QuestionnaireBank(
name='metastatic_recurring4',
classification='recurring',
research_protocol_id=metapro_id,
recurs=[four_q_recur],
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}')
for rank, instrument in enumerate(metastatic_4):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mr4_qb.questionnaires.append(qbq)
# Metastatic recurring 6
mr6_qb = QuestionnaireBank(
name='metastatic_recurring6',
classification='recurring',
research_protocol_id=metapro_id,
recurs=[six_q_recur],
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}')
for rank, instrument in enumerate(metastatic_6):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mr6_qb.questionnaires.append(qbq)
with SessionScope(db):
db.session.add(l_qb)
db.session.add(mb_qb)
db.session.add(mi_qb)
db.session.add(mr3_qb)
db.session.add(mr4_qb)
db.session.add(mr6_qb)
db.session.commit()
def mock_tnth_questionnairebanks():
for name in (symptom_tracker_instruments):
TestCase.add_questionnaire(name=name)
# Symptom Tracker Baseline
self_management = INTERVENTION.SELF_MANAGEMENT
st_qb = QuestionnaireBank(
name='symptom_tracker',
classification='baseline',
intervention_id=self_management.id,
start='{"days": 0}',
expired='{"months": 3}'
)
for rank, instrument in enumerate(symptom_tracker_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
st_qb.questionnaires.append(qbq)
# Symptom Tracker Recurrence
st_recur = Recur(
start='{"months": 3}', cycle_length='{"months": 3}',
termination='{"months": 27}')
with SessionScope(db):
db.session.add(st_qb)
db.session.add(st_recur)
db.session.commit()
self_management = INTERVENTION.SELF_MANAGEMENT
st_recur_qb = QuestionnaireBank(
name='symptom_tracker_recurring',
classification='recurring',
intervention_id=self_management.id,
start='{"days": 0}',
expired='{"months": 3}',
recurs=[st_recur]
)
for rank, instrument in enumerate(symptom_tracker_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
st_recur_qb.questionnaires.append(qbq)
with SessionScope(db):
db.session.add(st_recur_qb)
db.session.commit()
class TestQuestionnaireSetup(TestCase):
"Base for test classes needing mock questionnaire setup"
eproms_or_tnth = 'eproms' # modify in child class to test `tnth`
def setUp(self):
super(TestQuestionnaireSetup, self).setUp()
mock_questionnairebanks(self.eproms_or_tnth)
class TestAggregateResponses(TestQuestionnaireSetup):
def test_aggregate_response_timepoints(self):
# generate a few mock qr's from various qb iterations, confirm
# time points.
nineback, nowish = associative_backdate(
now=now, backdate=relativedelta(months=9, hours=1))
self.bless_with_basics(
setdate=nineback, local_metastatic='metastatic')
instrument_id = 'eortc'
for months_back in (0, 3, 6, 9):
backdate, _ = associative_backdate(
now=now, backdate=relativedelta(months=months_back))
mock_qr(instrument_id=instrument_id, timestamp=backdate)
# add staff user w/ same org association for bundle creation
staff = self.add_user(username='staff')
staff.organizations.append(Organization.query.filter(
Organization.name == 'metastatic').one())
self.promote_user(staff, role_name=ROLE.STAFF.value)
staff = db.session.merge(staff)
bundle = aggregate_responses(
instrument_ids=[instrument_id], current_user=staff)
expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'}
found = [i['timepoint'] for i in bundle['entry']]
assert set(found) == expected
def test_site_ids(self):
# bless org w/ expected identifier type
wanted_system = 'http://pcctc.org/'
unwanted_system = 'http://other.org/'
self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system]
id_value = '146-11'
org = Organization.query.filter(
Organization.name == 'metastatic').one()
id1 = Identifier(
system=wanted_system, use='secondary', value=id_value)
id2 = Identifier(
system=unwanted_system, use='secondary', value=id_value)
org.identifiers.append(id1)
org.identifiers.append(id2)
with SessionScope(db):
db.session.commit()
nineback, nowish = associative_backdate(
now=now, backdate=relativedelta(months=9, hours=1))
self.bless_with_basics(
setdate=nineback, local_metastatic='metastatic')
instrument_id = 'eortc'
mock_qr(instrument_id=instrument_id)
# add staff user w/ same org association for bundle creation
staff = self.add_user(username='staff')
staff.organizations.append(Organization.query.filter(
Organization.name == 'metastatic').one())
self.promote_user(staff, role_name=ROLE.STAFF.value)
staff = db.session.merge(staff)
bundle = aggregate_responses(
instrument_ids=[instrument_id], current_user=staff)
id1 = db.session.merge(id1)
assert 1 == len(bundle['entry'])
assert (1 ==
len(bundle['entry'][0]['subject']['careProvider']))
assert (1 ==
len(bundle['entry'][0]['subject']['careProvider'][0]
['identifier']))
assert (id1.as_fhir() ==
bundle['entry'][0]['subject']['careProvider'][0]
['identifier'][0])
class TestQB_Status(TestQuestionnaireSetup):
def test_qnr_id(self):
qb = QuestionnaireBank.query.first()
mock_qr(
instrument_id='irondemog',
status='in-progress', qb=qb,
doc_id='two11')
qb = db.session.merge(qb)
result = qnr_document_id(
subject_id=TEST_USER_ID,
questionnaire_bank_id=qb.id,
questionnaire_name='irondemog',
iteration=None,
status='in-progress')
assert result == 'two11'
def test_qnr_id_missing(self):
qb = QuestionnaireBank.query.first()
qb = db.session.merge(qb)
with pytest.raises(NoResultFound):
result = qnr_document_id(
subject_id=TEST_USER_ID,
questionnaire_bank_id=qb.id,
questionnaire_name='irondemog',
iteration=None,
status='in-progress')
def test_enrolled_in_metastatic(self):
"""metastatic should include baseline and indefinite"""
self.bless_with_basics(local_metastatic='metastatic')
user = db.session.merge(self.test_user)
a_s = QB_Status(user=user, as_of_date=now)
assert a_s.enrolled_in_classification('baseline')
assert a_s.enrolled_in_classification('indefinite')
def test_enrolled_in_localized(self):
"""localized should include baseline but not indefinite"""
self.bless_with_basics(local_metastatic='localized')
user = db.session.merge(self.test_user)
a_s = QB_Status(user=user, as_of_date=now)
assert a_s.enrolled_in_classification('baseline')
assert not a_s.enrolled_in_classification('indefinite')
def test_localized_using_org(self):
self.bless_with_basics(local_metastatic='localized', setdate=now)
self.test_user = db.session.merge(self.test_user)
# confirm appropriate instruments
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert (set(a_s.instruments_needing_full_assessment()) ==
localized_instruments)
def test_localized_on_time(self):
# User finished both on time
self.bless_with_basics(local_metastatic='localized', setdate=now)
mock_qr(instrument_id='eproms_add', timestamp=now)
mock_qr(instrument_id='epic26', timestamp=now)
mock_qr(instrument_id='comorb', timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.completed
# confirm appropriate instruments
assert not a_s.instruments_needing_full_assessment('all')
def test_localized_inprogress_on_time(self):
# User finished both on time
self.bless_with_basics(local_metastatic='localized', setdate=now)
mock_qr(
instrument_id='eproms_add', status='in-progress',
doc_id='eproms_add', timestamp=now)
mock_qr(
instrument_id='epic26', status='in-progress', doc_id='epic26',
timestamp=now)
mock_qr(
instrument_id='comorb', status='in-progress', doc_id='comorb',
timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.in_progress
# confirm appropriate instruments
assert not a_s.instruments_needing_full_assessment()
assert set(a_s.instruments_in_progress()) == localized_instruments
def test_localized_in_process(self):
# User finished one, time remains for other
self.bless_with_basics(local_metastatic='localized', setdate=now)
mock_qr(instrument_id='eproms_add', timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.in_progress
# confirm appropriate instruments
assert (localized_instruments -
set(a_s.instruments_needing_full_assessment('all')) ==
{'eproms_add'})
assert not a_s.instruments_in_progress()
def test_metastatic_on_time(self):
# User finished both on time
self.bless_with_basics(
local_metastatic='metastatic', setdate=now)
for i in metastatic_baseline_instruments:
mock_qr(instrument_id=i, timestamp=now)
mi_qb = QuestionnaireBank.query.filter_by(
name='metastatic_indefinite').first()
mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.completed
# shouldn't need full or any inprocess
assert not a_s.instruments_needing_full_assessment('all')
assert not a_s.instruments_in_progress('all')
def test_metastatic_due(self):
# hasn't taken, but still in OverallStatus.due period
self.bless_with_basics(local_metastatic='metastatic', setdate=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.due
        # confirm list of expected instruments needing attention
assert (metastatic_baseline_instruments ==
set(a_s.instruments_needing_full_assessment()))
assert not a_s.instruments_in_progress()
# metastatic indefinite should also be 'due'
assert (metastatic_indefinite_instruments ==
set(a_s.instruments_needing_full_assessment('indefinite')))
assert not a_s.instruments_in_progress('indefinite')
def test_localized_overdue(self):
# if the user completed something on time, and nothing else
# is due, should see the thank you message.
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
# backdate so the baseline q's have expired
mock_qr(
instrument_id='epic26', status='in-progress', timestamp=backdate)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.partially_completed
# with all q's expired,
# instruments_needing_full_assessment and instruments_in_progress
# should be empty
assert not a_s.instruments_needing_full_assessment()
assert not a_s.instruments_in_progress()
def test_localized_as_of_date(self):
# backdating consent beyond expired and the status lookup date
# within a valid window should show available assessments.
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
# backdate so the baseline q's have expired
mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26',
timestamp=backdate)
self.test_user = db.session.merge(self.test_user)
as_of_date = backdate + relativedelta(days=2)
a_s = QB_Status(user=self.test_user, as_of_date=as_of_date)
assert a_s.overall_status == OverallStatus.in_progress
# with only epic26 started, should see results for both
# instruments_needing_full_assessment and instruments_in_progress
assert ({'eproms_add', 'comorb'} ==
set(a_s.instruments_needing_full_assessment()))
assert ['doc-26'] == a_s.instruments_in_progress()
def test_metastatic_as_of_date(self):
# backdating consent beyond expired and the status lookup date
# within a valid window should show available assessments.
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3))
self.bless_with_basics(setdate=backdate, local_metastatic='metastatic')
# backdate so the baseline q's have expired
mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23',
timestamp=backdate)
self.test_user = db.session.merge(self.test_user)
as_of_date = backdate + relativedelta(days=2)
a_s = QB_Status(user=self.test_user, as_of_date=as_of_date)
assert a_s.overall_status == OverallStatus.in_progress
        # with only epic23 started, should see results for both
# instruments_needing_full_assessment and instruments_in_progress
assert ['doc-23'] == a_s.instruments_in_progress()
assert a_s.instruments_needing_full_assessment()
def test_initial_recur_due(self):
# backdate so baseline q's have expired, and we within the first
# recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# in the initial window w/ no questionnaires submitted
# should include all from initial recur
assert (set(a_s.instruments_needing_full_assessment()) ==
metastatic_3)
# confirm iteration 0
assert a_s.current_qbd().iteration == 0
def test_2nd_recur_due(self):
# backdate so baseline q's have expired, and we within the 2nd
# recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=9, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# in the initial window w/ no questionnaires submitted
# should include all from initial recur
assert set(a_s.instruments_needing_full_assessment()) == metastatic_3
        # however, we should be looking at the 2nd recurrence (iteration 1, zero-indexed)
assert a_s.current_qbd().iteration == 1
def test_initial_recur_baseline_done(self):
# backdate to be within the first recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, days=2))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
# add baseline QNRs, as if submitted nearly 3 months ago, during
# baseline window
backdated = nowish - relativedelta(months=2, days=25)
baseline = QuestionnaireBank.query.filter_by(
name='metastatic').one()
for instrument in metastatic_baseline_instruments:
mock_qr(instrument, qb=baseline, timestamp=backdated)
self.test_user = db.session.merge(self.test_user)
# Check status during baseline window
a_s_baseline = QB_Status(
user=self.test_user, as_of_date=backdated)
assert a_s_baseline.overall_status == OverallStatus.completed
assert not a_s_baseline.instruments_needing_full_assessment()
        # Whereas "current" status for the initial recurrence shows due.
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# in the initial window w/ no questionnaires submitted
# should include all from initial recur
assert set(a_s.instruments_needing_full_assessment()) == metastatic_3
def test_secondary_recur_due(self):
# backdate so baseline q's have expired, and we are within the
# second recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=6, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# w/ no questionnaires submitted
# should include all from second recur
assert set(a_s.instruments_needing_full_assessment()) == metastatic_4
def test_batch_lookup(self):
self.login()
self.bless_with_basics()
response = self.client.get(
'/api/consent-assessment-status?user_id=1&user_id=2')
assert response.status_code == 200
assert len(response.json['status']) == 1
assert (
response.json['status'][0]['consents'][0]['assessment_status'] ==
str(OverallStatus.expired))
def test_none_org(self):
# check users w/ none of the above org
self.test_user = db.session.merge(self.test_user)
self.test_user.organizations.append(Organization.query.get(0))
self.login()
self.bless_with_basics(
local_metastatic='metastatic', setdate=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.due
def test_boundary_overdue(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=-1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.overdue
def test_boundary_expired(self):
"At expired, should be expired"
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.expired
def test_boundary_in_progress(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=-1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
for instrument in localized_instruments:
mock_qr(
instrument_id=instrument, status='in-progress',
timestamp=nowish)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.in_progress
def test_boundary_recurring_in_progress(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=6, hours=-1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
mr3_qb = QuestionnaireBank.query.filter_by(
name='metastatic_recurring3').first()
for instrument in metastatic_3:
mock_qr(
instrument_id=instrument, status='in-progress',
qb=mr3_qb, timestamp=nowish, iteration=0)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.in_progress
def test_boundary_in_progress_expired(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
for instrument in localized_instruments:
mock_qr(
instrument_id=instrument, status='in-progress',
timestamp=nowish-relativedelta(days=1))
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.partially_completed
def test_all_expired_old_tx(self):
self.login()
# backdate outside of baseline window (which uses consent date)
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=4, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
# provide treatment date outside of all recurrences
tx_date = datetime(2000, 3, 12, 0, 0, 00, 000000)
self.add_procedure(code='7', display='Focal therapy',
system=ICHOM, setdate=tx_date)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.expired
class TestTnthQB_Status(TestQuestionnaireSetup):
"""Tests with Tnth QuestionnaireBanks"""
eproms_or_tnth = 'tnth'
def test_no_start_date(self):
        # W/O a biopsy (i.e. event start date), no questionnaires
self.promote_user(role_name=ROLE.PATIENT.value)
# toggle default setup - set biopsy false for test user
self.login()
self.test_user = db.session.merge(self.test_user)
self.test_user.save_observation(
codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE,
audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
status='final', issued=now)
qstats = QB_Status(self.test_user, now)
assert not qstats.current_qbd()
assert not qstats.enrolled_in_classification("baseline")
| 2.078125 | 2 |
resource.py | HADESAngelia/NumberedImageComparation | 1 | 12793765 | '''
File: \resource.py
Project: NumberRecongization
Created Date: Monday March 26th 2018
Author: Huisama
-----
Last Modified: Saturday March 31st 2018 11:08:21 pm
Modified By: Huisama
-----
Copyright (c) 2018 Hui
'''
import os
import scipy.misc as scm
import random
import numpy as np
import PIL
# STD_WIDTH = 667
# STD_HEIGHT = 83
STD_WIDTH = 252
STD_HEIGHT = 40
import matplotlib.pyplot as plt
'''
This class represents the dataset and provides data-processing operations
'''
class DataSet(object):
def __init__(self, data_dir, batch_size):
self.data_dir = data_dir
self.batch_size = batch_size
self.train_set_ratio = 0.8
self.validate_set_ratio = 0.1
'''
Get mean width and height of dataset
'''
def get_data_mean_size(self):
full_width, full_height = 0, 0
count = 0
def dummy(self, dir, file):
nonlocal full_width, full_height, count
filename = os.path.splitext(file)
if filename[1] == '.png':
fullfile = os.path.join(self.data_dir, dir, file)
width, height = self.get_size(fullfile)
full_width += width
full_height += height
print("%s, %s" % (width, height))
count += 1
self.lookup_dataset_dir(dummy)
return full_width / count, full_height / count
'''
Get width and height of a single image
'''
def get_size(self, image_file_path):
img = scm.imread(image_file_path)
return img.shape[1], img.shape[0]
'''
Load dataset
'''
def load_dataset(self):
self.neg_data = []
self.pos_data = []
self.poscount = 0
self.negcount = 0
def dummy(self, dir, file):
if file == 'dataset.txt':
# open and read in
with open(os.path.join(self.data_dir, dir, file)) as file:
for line in file:
newline = line.strip()
splittext = newline.split('\t')
if int(splittext[2]) == 1:
self.pos_data.append((
os.path.join(self.data_dir, dir, splittext[0]),
os.path.join(self.data_dir, dir, splittext[1]),
int(splittext[2])))
self.poscount += 1
else:
self.neg_data.append((
os.path.join(self.data_dir, dir, splittext[0]),
os.path.join(self.data_dir, dir, splittext[1]),
int(splittext[2])))
self.negcount += 1
self.lookup_dataset_dir(dummy)
# print("negcount: %d, poscount: %d" % (self.negcount, self.poscount))
return True
'''
    Check that every image has exactly 3 (RGB) channels
'''
def check_image_channels(self):
def dummy(self, dir, file):
filename = os.path.splitext(file)
if filename[1] == '.png':
fullfile = os.path.join(self.data_dir, dir, file)
img = scm.imread(fullfile)
if img.shape[2] != 3:
                    print("Wrong image: %s" % fullfile)
self.lookup_dataset_dir(dummy)
'''
Generate dataset after loading dataset
'''
def generate_dataset(self):
random.shuffle(self.neg_data)
random.shuffle(self.pos_data)
# total = len(self.data)
pos_total = len(self.pos_data)
pos_train_size = int(pos_total * self.train_set_ratio)
pos_validate_size = int(pos_total * self.validate_set_ratio)
# pos_test_size = pos_total - pos_train_size - pos_validate_size
neg_total = len(self.neg_data)
neg_train_size = int(neg_total * self.train_set_ratio)
neg_validate_size = int(neg_total * self.validate_set_ratio)
# neg_test_size = neg_total - neg_train_size - neg_validate_size
self.batch_index = 0
self.pos_train_set = self.pos_data[0 : pos_train_size]
pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size]
pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total]
self.neg_train_set = self.neg_data[0 : neg_train_size]
neg_validation_set = self.neg_data[neg_train_size : neg_train_size + neg_validate_size]
neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total]
dec = len(neg_validation_set) - len(pos_validation_set)
for _ in range(dec):
pos_validation_set.append(random.choice(self.pos_data))
dec = len(neg_test_set) - len(pos_test_set)
for _ in range(dec):
pos_test_set.append(random.choice(self.pos_data))
self.validation_set = []
self.validation_set.extend(pos_validation_set)
self.validation_set.extend(neg_validation_set)
self.test_set = []
self.test_set.extend(pos_test_set)
self.test_set.extend(neg_test_set)
'''
    Walk all files under the dataset directory and invoke callback on each
'''
def lookup_dataset_dir(self, callback):
for _, dirs, _ in os.walk(self.data_dir):
for dir in dirs:
for _, _, files in os.walk(os.path.join(self.data_dir, dir)):
for file in files:
callback(self, dir, file)
'''
    Get image data for one sample pair
'''
def get_image_data(self, tp):
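        # `tp` is a (path_1, path_2, label) tuple; both images are resized to
        # the standard size and stacked along the channel axis, yielding a
        # STD_HEIGHT x STD_WIDTH x 6 array scaled into [0, 1].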
image1, image2 = scm.imread(tp[0]), scm.imread(tp[1])
newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH)))
newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH)))
# img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis]
img_comb = np.dstack((newimg1, newimg2))
return img_comb / 255.0
'''
    Get a batch of training data, half negative and half positive samples
'''
def next_batch(self, batch_size):
random_neg = batch_size // 2
random_pos = batch_size - random_neg
org_pos_data = []
org_neg_data = []
for _ in range(random_pos):
org_pos_data.append(random.choice(self.pos_train_set))
for _ in range(random_neg):
org_neg_data.append(random.choice(self.neg_train_set))
pos_data = list(map(self.get_image_data, org_pos_data))
pos_labels = list(map(lambda e: e[2], org_pos_data))
neg_data = list(map(self.get_image_data, org_neg_data))
neg_labels = list(map(lambda e: e[2], org_neg_data))
pos_data.extend(neg_data)
pos_labels.extend(neg_labels)
return np.array(pos_data), np.array(pos_labels)
'''
Get validation dataset
'''
def get_validation_set(self):
data = np.array(list(map(self.get_image_data, self.validation_set)))
labels = np.array(list(map(lambda e: e[2], self.validation_set)))
return data, labels
'''
Get test dataset
'''
def get_test_set(self):
data = np.array(list(map(self.get_image_data, self.test_set)))
labels = np.array(list(map(lambda e: e[2], self.test_set)))
return data, labels
# obj = DataSet('./Pic', 8)
# obj.check_image_channels()
# obj.load_dataset()
# obj.generate_dataset()
# data, labels = obj.next_batch(8)
# while done != True:
# print(data[0][0].dtype)
# data, labels, done = obj.next_batch() | 2.546875 | 3 |
LeetCode/Python/container_with_most_water.py | wh-acmer/minixalpha-acm | 0 | 12793766 | #!/usr/bin/env python
#coding: utf-8
class Solution:
# @return an integer
def maxArea(self, height):
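        # Two-pointer scan: the area is capped by the shorter line, so only
        # moving the shorter side inward can ever improve the result; the whole
        # array is visited once, giving O(n) time.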
low, high = 0, len(height) - 1
max_area = 0
while low < high:
max_area = max(max_area,
(high - low) * min(height[low], height[high]))
if height[low] < height[high]:
low += 1
else:
high -= 1
return max_area
| 3.546875 | 4 |
data_prepare.py | zhengsl/SmartInvetory | 0 | 12793767 | <filename>data_prepare.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xlrd
import pymongo
import pymysql
from datetime import datetime, timedelta
import traceback
month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}
year_dict = {'13': 2013, '14': 2014, '15': 2015, '16': 2016, '17': 2017}
def read_from_excel_to_mongo(file):
mongo_client = pymongo.MongoClient()
db_test_01 = mongo_client.db_test_01
with xlrd.open_workbook(file) as excel:
table = excel.sheet_by_index(0)
row_num = table.nrows - 1
group_size = 10000
group_num = row_num // group_size
last_group = row_num - group_num * group_size
if last_group != 0:
group_num = group_num + 1
for i in range(group_num):
records = []
iter_size = group_size if i != group_num - 1 else last_group
for j in range(iter_size):
index = j + i * group_size + 1
row_v = table.row_values(index)
record = {
"owner": row_v[0],
"code": row_v[1],
"slip_code": row_v[2],
"create_time": gen_datetime(row_v[3]),
"province": row_v[4],
"city": row_v[5],
"sku_code": row_v[6],
"bar_code": row_v[7],
"wms_sku_color_original": row_v[8],
"wms_sku_size_original": row_v[9],
"sku_categories_id": int(row_v[10]) if row_v[10] != '' else -1,
"sku_type_id": int(row_v[11]) if row_v[11] != '' else -1,
"supplier_code": row_v[12],
"quantity": int(row_v[13])
}
print("{}: {}".format(index, record.values()))
records.append(record)
print("Bulk write to MongoDB...")
db_test_01.source.insert_many(records)
mongo_client.close()
def gen_datetime(date_time_str):
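    # Parses timestamps such as "12-MAY-15 03.07.09 PM" (day-MON-yy with a
    # dotted 12-hour time); that example becomes datetime(2015, 5, 12, 15, 7, 9).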
dt_array = date_time_str.split(' ')
date_array = dt_array[0].split('-')
day = int(date_array[0])
month = month_dict[date_array[1]]
year = year_dict[date_array[2]]
time_array = dt_array[1].split('.')
    hour = int(time_array[0]) % 12 + (12 if dt_array[2] == 'PM' else 0)  # handles 12 AM/PM correctly
minute = int(time_array[1])
second = int(time_array[2])
return datetime(year, month, day, hour, minute, second)
def read_from_excel_to_mysql(file):
start = 1
mysqldb = pymysql.connect(host="localhost", user="root", passwd="<PASSWORD>", db="nike_sales", charset="utf8")
cursor = mysqldb.cursor()
sql = '''insert into sales_detail(shop, tb_order_id, platform_id, out_time, province, city, sku_color,
sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu, global_category, descriptio, genderage,
msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt)
values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1}
try:
with xlrd.open_workbook(file) as excel:
table = excel.sheet_by_index(0)
for rowIndex in range(start, table.nrows):
row_v = table.row_values(rowIndex)
print(row_v)
cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6],
row_v[7], row_v[8], row_v[9], row_v[10], get_int(row_v[11]), row_v[12], row_v[13],
genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]),
get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21])))
print("ROW NO.{} inserted".format(rowIndex))
mysqldb.commit()
print("Completed")
except pymysql.err.DataError:
traceback.print_exc()
mysqldb.commit()
print("ROW NO.{} to be inserted".format(rowIndex+1))
finally:
mysqldb.close()
def read_from_excel_to_mysql_11(file, year):
start = 1
mysqldb = pymysql.connect(host="localhost", user="root", passwd="<PASSWORD>", db="nike_sales", charset="utf8")
cursor = mysqldb.cursor()
sql = '''insert into double_eleven_all(shop, repo, year, province, city, outer_sku_id, name, sku_color, sku_size,
genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt)
values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1}
try:
with xlrd.open_workbook(file) as excel:
table = excel.sheet_by_index(0)
for rowIndex in range(start, table.nrows):
row_v = table.row_values(rowIndex)
print(row_v)
cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6],
row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11],
get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]),
get_float(row_v[16])))
print("ROW NO.{} inserted".format(rowIndex))
mysqldb.commit()
print("Completed")
except pymysql.err.DataError:
traceback.print_exc()
#mysqldb.commit()
print("ROW NO.{} to be inserted".format(rowIndex+1))
finally:
mysqldb.close()
def read_from_txt_to_mysql(file, year, start=0):
mysqldb = pymysql.connect(host="localhost", user="root", passwd="<PASSWORD>", db="nike_sales", charset="utf8")
cursor = mysqldb.cursor()
sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code, sku_color,
sku_size, sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty)
values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''.format(year)
try:
count = 0
with open(file, mode='r', encoding='utf-8') as source:
line = source.readline()
while line and line.strip() != '':
line = source.readline()
if line and line.strip() != '':
count += 1
ignore = True
print("{}: {}".format(count, line))
if count >= start:
                        # strip stray ASCII and full-width question marks left by the export
                        line = line.replace('?', '').replace('?', '')
data = line.split('\t')
cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5], data[6],
data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13],
get_int(data[14])))
ignore = False
if not ignore and count % 1000000 == 0:
mysqldb.commit()
else:
break
mysqldb.commit()
except Exception:
traceback.print_exc()
finally:
mysqldb.close()
def getsqldatestr(original):
delta = int(original) - 41758
return datetime(2014, 4, 29) + timedelta(days=delta)
def getsqldatefromstr(dstr):
mdy = dstr.split('/')
return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1]))
def get_int(original):
    """Coerce a spreadsheet cell to int, falling back to 0 for blanks or junk values."""
    try:
        return int(original)
    except (TypeError, ValueError):
        return 0
def get_float(original):
    """Coerce a spreadsheet cell to float, falling back to 0 for blanks or junk values."""
    try:
        return float(original)
    except (TypeError, ValueError):
        return 0
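# Usage sketch (illustrative only; the exact workbook formats are assumptions, not taken
# from the source files): gen_datetime expects strings shaped like '29-Apr-2014 1.05.30 PM',
# resolved through the month_dict/year_dict lookups defined earlier in this module, while
# the helpers below convert Excel serial day numbers and 'M/D/YY' strings:
#   getsqldatestr(41760)        -> datetime(2014, 5, 1, 0, 0)   # 2 days after the 41758 anchor
#   getsqldatefromstr('4/29/14') -> datetime(2014, 4, 29, 0, 0)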
if __name__ == "__main__":
#read_from_excel_to_mysql_11("/Users/leon/Desktop/data/new/JORDAN双11发货分析_17.xlsx", 17)
read_from_txt_to_mysql("/Users/leon/Desktop/NIKE/NIKE17年数据.txt", 2017, 5000001)
| 2.953125 | 3 |
src/kcri/bap/data.py | zwets/kcri-cge-bap | 0 | 12793768 | <reponame>zwets/kcri-cge-bap
#!/usr/bin/env python3
#
# kcri.bap.data
#
# Defines the data structures that are shared across the BAP services.
#
import os, enum
from datetime import datetime
from pico.workflow.blackboard import Blackboard
### BAPBlackboard class
#
# Wraps the generic Blackboard with an API that adds getters and putters for
# data shared between BAP services, so they're not randomly grabbing around
# in bags of untyped data.
class BAPBlackboard(Blackboard):
'''Adds to the generic Blackboard getters and putters specific to the shared
data definitions in the current BAP.'''
def __init__(self, verbose=False):
super().__init__(verbose)
# BAP-level methods
def start_run(self, service, version, user_inputs):
self.put('bap/run_info/service', service)
self.put('bap/run_info/version', version)
self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds'))
self.put('bap/user_inputs', user_inputs)
def end_run(self, state):
start_time = datetime.fromisoformat(self.get('bap/run_info/time/start'))
end_time = datetime.now()
self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds'))
self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds())
self.put('bap/run_info/status', state)
def put_user_input(self, param, value):
return self.put('bap/user_inputs/%s' % param, value)
def get_user_input(self, param, default=None):
return self.get('bap/user_inputs/%s' % param, default)
def add_warning(self, warning):
'''Stores a warning on the 'bap' top level (note: use service warning instead).'''
self.append_to('bap/warnings', warning)
# Standard methods for BAP common data
def put_db_root(self, path):
'''Stores the root of the BAP services databases.'''
self.put_user_input('db_root', path)
def get_db_root(self):
'''Retrieve the user_input/db_root, this must be set.'''
db_root = self.get_user_input('db_root')
if not db_root:
raise Exception("database root path is not set")
elif not os.path.isdir(db_root):
raise Exception("db root path is not a directory: %s" % db_root)
return os.path.abspath(db_root)
# Sample ID
def put_sample_id(self, id):
'''Store id as the sample id in the summary.'''
self.put('bap/summary/sample_id', id)
def get_sample_id(self):
return self.get('bap/summary/sample_id', 'unknown')
# Contigs and reads
def put_fastq_paths(self, paths):
'''Stores the fastqs path as its own (pseudo) user input.'''
self.put_user_input('fastqs', paths)
def get_fastq_paths(self, default=None):
return self.get_user_input('fastqs', default)
def put_user_contigs_path(self, path):
'''Stores the contigs path as its own (pseudo) user input.'''
self.put_user_input('contigs', path)
def get_user_contigs_path(self, default=None):
return self.get_user_input('contigs', default)
def put_assembled_contigs_path(self, path):
'''Stores the path to the computed contigs.'''
self.put('bap/summary/contigs', path)
def get_assembled_contigs_path(self, default=None):
return self.get('bap/summary/contigs', default)
def put_graph_path(self, path):
'''Stores the path to the GFA file.'''
self.put('bap/summary/graph', path)
def get_graph_path(self, default=None):
return self.get('bap/summary/graph', default)
# Species
def put_user_species(self, lst):
'''Stores list of species specified by user.'''
self.put_user_input('species', lst)
def get_user_species(self, default=None):
return self.get_user_input('species', default)
def add_detected_species(self, lst):
self.append_to('bap/summary/species', lst, True)
def get_detected_species(self, default=None):
return self.get('bap/summary/species', default)
def get_species(self, default=None):
ret = list()
ret.extend(self.get_user_species(list()))
ret.extend(self.get_detected_species(list()))
return ret if ret else default
# Reference
def put_closest_reference(self, acc, desc):
'''Stores the accession and description of closest reference.'''
self.put('bap/summary/closest/accession', acc)
self.put('bap/summary/closest/name', desc)
def put_closest_reference_path(self, path):
'''Stores the path to the closest reference genome.'''
self.put('bap/summary/closest/path', path)
def put_closest_reference_length(self, length):
'''Stores the length of the closest reference genome.'''
self.put('bap/summary/closest/length', length)
def get_closest_reference(self, default=None):
'''Returns dict with fields accession, name, path, length, or the default.'''
return self.get('bap/summary/closest', default)
def get_closest_reference_path(self, default=None):
return self.get_closest_reference({}).get('path', default)
def get_closest_reference_length(self, default=None):
return self.get_closest_reference({}).get('length', default)
# MLST
    def add_mlst(self, st, loci, alleles):
        mlst = "%s[%s]" % (st, ','.join(map(lambda l: '%s:%s' % l, zip(loci, alleles))))
        self.append_to('bap/summary/mlst', mlst, True)
def get_mlsts(self):
return sorted(self.get('bap/summary/mlst', []))
# Plasmids
def put_user_plasmids(self, lst):
'''Stores list of plasmids specified by user.'''
self.put_user_input('plasmids', lst)
def get_user_plasmids(self, default=None):
return sorted(self.get_user_input('plasmids', default))
def add_detected_plasmid(self, plasmid):
self.append_to('bap/summary/plasmids', plasmid, True)
def get_detected_plasmids(self, default=None):
return sorted(self.get('bap/summary/plasmids', default))
def get_plasmids(self, default=None):
ret = list()
ret.extend(self.get_user_plasmids(list()))
ret.extend(self.get_detected_plasmids(list()))
return ret if ret else default
    def add_pmlst(self, profile, st):
        pmlst = "%s%s" % (profile, st)
        self.append_to('bap/summary/pmlsts', pmlst)
def get_pmlsts(self):
return sorted(self.get('bap/summary/pmlsts', []))
# Virulence
def add_detected_virulence_gene(self, gene):
self.append_to('bap/summary/virulence_genes', gene, True)
def get_virulence_genes(self):
return sorted(self.get('bap/summary/virulence_genes', []))
# Resistance
def add_amr_gene(self, gene):
self.append_to('bap/summary/amr_genes', gene, True)
def get_amr_genes(self):
return sorted(self.get('bap/summary/amr_genes', []))
def add_amr_classes(self, classes):
self.append_to('bap/summary/amr_classes', classes, True)
def get_amr_classes(self):
return sorted(self.get('bap/summary/amr_classes', []))
def add_amr_phenotype(self, pheno):
self.append_to('bap/summary/amr_phenotypes', pheno, True)
def get_amr_phenotypes(self):
return sorted(self.get('bap/summary/amr_phenotypes', []))
def add_amr_mutation(self, mut):
self.append_to('bap/summary/amr_mutations', mut, True)
def get_amr_mutations(self):
return sorted(self.get('bap/summary/amr_mutations', []))
# cgMLST
    def add_cgmlst(self, scheme, st, pct):
        cgmlst = '%s:%s(%s%%)' % (scheme, st, pct)
        self.append_to('bap/summary/cgmlst', cgmlst, True)
def get_cgmlsts(self):
return sorted(self.get('bap/summary/cgmlst', []))
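# Minimal usage sketch, not part of the BAP pipeline itself: it assumes the
# pico.workflow.blackboard backend imported above is installed, and the sample id,
# MLST and AMR values below are made-up illustrations rather than real outputs.
if __name__ == '__main__':
    bb = BAPBlackboard(verbose=True)
    bb.start_run('BAP', 'example', {'db_root': '/tmp/bap-db'})
    bb.put_sample_id('sample-001')
    bb.add_mlst('ST131', ['adk', 'fumC'], ['53', '40'])
    bb.add_amr_gene('blaCTX-M-15')
    print(bb.get_mlsts())      # ['ST131[adk:53,fumC:40]']
    print(bb.get_amr_genes())  # ['blaCTX-M-15']
    bb.end_run('Completed')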
| 2.140625 | 2 |
tensorflow/script/evaluation/building_iou_w_class_acc.py | christinazavou/ANNFASS_Structure | 0 | 12793769 | import os
import json
import sys
from tqdm import tqdm
from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \
nearest_neighbour_of_face_centers
from iou_calculations import *
# BuildNet directories
BUILDNET_BASE_DIR = os.path.join(os.sep, "media", "maria", "BigData1", "Maria", "buildnet_data_2k")
assert (os.path.isdir(BUILDNET_BASE_DIR))
BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, "flippedNormal_unit_obj_withtexture")
assert (os.path.isdir(BUILDNET_OBJ_DIR))
BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "nocolor")
assert (os.path.isdir(BUILDNET_PTS_DIR))
BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "point_labels_32")
assert (os.path.isdir(BUILDNET_PTS_LABELS_DIR))
BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "faceindex")
assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR))
BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "component_label_32")
assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR))
BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, "dataset")
assert (os.path.isdir(BUILDNET_SPLITS_DIR))
BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, "test_split.txt")
assert (os.path.isfile(BUILDNET_TEST_SPLIT))
# Network results directory
NET_RESULTS_DIR = sys.argv[1]
assert (os.path.isdir(NET_RESULTS_DIR))
# Create directories for best results
BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, "best_points")
os.makedirs(BEST_POINTS_DIR, exist_ok=True)
BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, "best_triangles")
os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True)
BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, "best_comp")
os.makedirs(BEST_COMP_DIR, exist_ok=True)
# Create directories for aggregated mesh features
FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, "face_feat_from_tr")
os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True)
FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, "face_feat_from_comp")
os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True)
def classification_accuracy(ground, prediction, face_area=None):
"""
Classification accuracy
:param ground: N x 1, numpy.ndarray(int)
:param prediction: N x 1, numpy.ndarray(int)
:param face_area: N x 1, numpy.ndarray(float)
:return:
accuracy: float
"""
prediction = np.copy(prediction)
ground = np.copy(ground)
non_zero_idx = np.squeeze(ground != 0).nonzero()[0]
ground = ground[non_zero_idx]
prediction = prediction[non_zero_idx]
if face_area is not None:
face_area = np.copy(face_area)
face_area = face_area[non_zero_idx]
accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area)
accuracy = accuracy[0]
else:
accuracy = np.sum(ground == prediction) / float(len(ground))
return accuracy
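# Worked example (comment only, hypothetical arrays): label 0 ("undetermined") is masked
# out, so only the last three rows below are scored and the accuracy is 2/3.
#   ground = np.array([[0], [1], [2], [2]])
#   prediction = np.array([[1], [1], [2], [3]])
#   classification_accuracy(ground, prediction)  # -> 0.666...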
def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False):
"""
Transfer point predictions to triangles and components through avg pooling
:param vertices: N x 3, numpy.ndarray(float)
:param faces: M x 3, numpy.ndarray(int)
:param components: M x 1, numpy.ndarray(int)
:param points: K x 3, numpy.ndarray(float)
:param point_feat: K x 31, numpy.ndarray(float)
:param point_face_index: K x 1, numpy.ndarray(int)
:param max_pool: bool
:return:
face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int)
face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int)
face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float)
face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float)
face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int)
face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int)
"""
n_components = len(np.unique(components))
face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))
face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))
comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1]))
if max_pool:
face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool)
face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool)
comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool)
face_point_index = {}
# Find faces that have no corresponding points
sampled = set(point_face_index.flatten())
unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample points
face_centers = compute_face_centers(faces, unsampled, vertices)
# Transfer point predictions to triangles
# Find nearest point and assign its point feature to each unsampled face
nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index,
point_feat, points, unsampled)
if max_pool: # unsampled faces have only one point, so max == avg. feat. , that of the nearest point
face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool)
# Use avg pooling for sampled faces
for face in sampled:
mask = np.squeeze(point_face_index == face)
face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0)
if max_pool:
# Use max pooling also
face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0)
face_point_index[face] = mask.nonzero()[0].tolist()
# Transfer point predictions to components
for comp_idx in range(comp_feat_avg_pool.shape[0]):
face_idx = np.squeeze(components == comp_idx).nonzero()[0]
point_idx = []
for idx in face_idx:
try:
point_idx.extend(face_point_index[int(idx)])
except:
point_idx.append(face_point_index[int(idx)])
comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0)
face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx]
if max_pool:
comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0)
face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx]
face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:,
np.newaxis] + 1 # we exclude undetermined (label 0) during training
face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1
if max_pool:
face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1
face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1
return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \
face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool
return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \
face_feat_from_comp_avg_pool
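# Label convention sketch (comment only): the per-point probability columns omit class 0
# ("undetermined"), so an argmax over a pooled feature vector maps back to labels starting
# at 1, e.g. np.argmax([0.1, 0.7, 0.2]) + 1 -> 2 for a hypothetical 3-class vector.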
def get_split_models(split_fn):
"""
Read split.txt file and return model names
:param split_fn:
:return:
models_fn: list(str)
"""
models_fn = []
with open(split_fn, 'r') as fin:
for line in fin:
models_fn.append(line.strip())
return models_fn
def get_point_cloud_data(model_name):
"""
Get point cloud data needed for evaluation
:param model_name: str
:return:
points: N x 3, numpy.ndarray(float)
point_gt_labels: N x 1, numpy.ndarray(int)
point_pred_labels: N x 1, numpy.ndarray(int)
point_pred_feat: N x 31, numpy.ndarray(float)
point_face_index: N x 1, numpy.ndarray(int)
"""
# Get points
points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + ".ply"))
# Get ground truth labels
with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + "_label.json"), 'r') as fin_json:
labels_json = json.load(fin_json)
point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis]
assert (points.shape[0] == point_gt_labels.shape[0])
# Get per point features (probabilities)
try:
        point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_name + ".npy"))
except FileNotFoundError:
point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1))
assert (point_feat.shape[0] == point_gt_labels.shape[0])
assert (point_feat.shape[1] == (len(toplabels) - 1))
# Calculate pred label
point_pred_labels = np.argmax(point_feat, axis=1)[:,
np.newaxis] + 1 # we exclude undetermined (label 0) during training
assert (point_gt_labels.shape == point_pred_labels.shape)
# Get points face index
with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + ".txt"), 'r') as fin_txt:
point_face_index = fin_txt.readlines()
point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis]
assert (point_face_index.shape == point_gt_labels.shape)
return points, point_gt_labels, point_pred_labels, point_feat, point_face_index
def get_mesh_data_n_labels(model_name):
"""
Get mesh data needed for evaluation
:param model_name: str
:return:
vertices: N x 3, numpy.ndarray(float)
faces: M x 3, numpy.ndarray(int)
face_labels: M x 1, numpy.ndarray(int)
components: M x 1, numpy.ndarray(float)
face_area: M x 1, numpy.ndarray(float)
"""
# Load obj
vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + ".obj"))
    # Convert the 1-based OBJ face indices to 0-based, then calculate face areas
    faces -= 1
    face_area = calculate_face_area(vertices=vertices, faces=faces)
assert (face_area.shape[0] == faces.shape[0])
# Read components to labels
with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + "_label.json"), 'r') as fin_json:
labels_json = json.load(fin_json)
face_labels = np.zeros_like(components)
for comp, label in labels_json.items():
face_labels[np.where(components == int(comp))[0]] = label
return vertices, faces, face_labels, components, face_area
def save_pred_in_json(labels, fn_json):
"""
Save labels in json format
:param labels: N x 1, numpy.ndarray(int)
:param fn_json: str
:return:
None
"""
# Convert numpy to dict
labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist()))
# Export json file
with open(fn_json, 'w') as fout_json:
json.dump(labels_json, fout_json)
if __name__ == "__main__":
top_k = 200
best_iou_model = np.zeros((top_k,))
best_iou_model[:] = 1e-9
best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)], \
[[] for _ in range(top_k)], \
[[] for _ in range(top_k)], \
[[] for _ in range(top_k)]
# Get model names
models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT)
point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \
mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {}
point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \
mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {}
print("Calculate part and shape IOU for point and mesh tracks")
for model_fn in tqdm(models_fn):
# Get point cloud data
points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn)
# Get mesh data
vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn)
# Infer face labels from point predictions
face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \
face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \
transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True)
# Calculate point building iou
point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels)
# Calculate mesh building iou
mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr,
face_area)
mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp,
face_area)
mesh_buildings_iou_from_tr_max_pool[model_fn] = \
get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area)
mesh_buildings_iou_from_comp_max_pool[model_fn] = \
get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area)
# Calculate classification accuracy
point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels)
mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr)
mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp)
mesh_buildings_acc_from_tr_max_pool[model_fn] = \
classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool)
mesh_buildings_acc_from_comp_max_pool[model_fn] = \
classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool)
# Save mesh feat data
np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + ".npy"), face_feat_from_tr.astype(np.float32))
np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + ".npy"), face_feat_from_comp.astype(np.float32))
# Save best and worst model
label_iou = mesh_buildings_iou_from_comp[model_fn]["label_iou"]
s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where iou=0
if s_iou > best_iou_model[-1]:
best_iou_model[top_k - 1] = s_iou
best_model_points_pred[top_k - 1] = point_pred_labels
best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr
best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp
best_model_fn[top_k - 1] = model_fn
sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist()
best_iou_model = best_iou_model[sort_idx]
best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx]
best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx]
best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx]
best_model_fn = [best_model_fn[idx] for idx in sort_idx]
best_iou_model -= 1 # restore to original values
# Calculate avg point part and shape IOU
point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou)
point_part_iou = get_part_iou(buildings_iou=point_buildings_iou)
mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr)
mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr)
mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp)
mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp)
mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool)
mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool)
mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool)
mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool)
point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc))
mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float(
len(mesh_buildings_acc_from_tr))
mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float(
len(mesh_buildings_acc_from_comp))
mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float(
len(mesh_buildings_acc_from_tr_max_pool))
mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float(
len(mesh_buildings_acc_from_comp_max_pool))
# Save best
buf = ''
for i in range(top_k):
print(best_iou_model[i]); print(best_model_fn[i])
buf += "Best model iou: " + str(best_iou_model[i]) + ", " + best_model_fn[i] + '\n'
save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + "_label.json"))
save_pred_in_json(best_model_triangles_pred[i],
os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + "_label.json"))
save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + "_label.json"))
# Log results
buf += "Point Classification Accuracy: " + str(np.round(point_acc * 100, 2)) + '\n' \
"Point Shape IoU: " + str(
np.round(point_shape_iou['all'] * 100, 2)) + '\n' \
"Point Part IoU: " + str(
np.round(point_part_iou['all'] * 100, 2)) + '\n' \
"Point Part IoU - FR: " + str(
np.round(point_part_iou['fr-part'] * 100, 2)) + '\n' \
"Per label point part IoU: " + ", ".join([label + ": " +
str(np.round(
point_part_iou[
label] * 100,
2)) for label in
toplabels.values() if
label != "undetermined"]) + '\n' \
"Average Pooling" + '\n' \
"---------------" + '\n' \
"Mesh Classification Accuracy From Triangles: " + str(
np.round(mesh_acc_from_tr * 100, 2)) + '\n' \
"Mesh Shape IoU From Triangles: " + str(
np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles: " + str(
np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles - FR: " + str(
np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\n' \
"Mesh Classification Accuracy From Comp: " + str(
np.round(mesh_acc_from_comp * 100, 2)) + '\n' \
"Mesh Shape IoU From Comp: " + str(
np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp: " + str(
np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp- FR: " + str(
np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\n' \
"Per label mesh part IoU from triangles: " + ", ".join(
[label + ": " +
str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if
label != "undetermined"]) + '\n' \
"Per label mesh part IoU from comp: " + ", ".join([label + ": " +
str(np.round(
mesh_part_iou_from_comp[
label][0] * 100, 2)) for
label in toplabels.values() if
label != "undetermined"]) + '\n' \
"Max Pooling" + '\n' \
"-----------" + '\n' \
"Mesh Classification Accuracy From Triangles: " + str(
np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\n' \
"Mesh Shape IoU From Triangles: " + str(
np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles: " + str(
np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles - FR: " + str(
np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\n' \
"Mesh Classification Accuracy From Comp: " + str(
np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\n' \
"Mesh Shape IoU From Comp: " + str(
np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp: " + str(
np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp- FR: " + str(
np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\n' \
"Per label mesh part IoU from triangles: " + ", ".join(
[label + ": " +
str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if
label != "undetermined"]) + '\n' \
"Per label mesh part IoU from comp: " + ", ".join([label + ": " +
str(np.round(
mesh_part_iou_from_comp_max_pool[
label][0] * 100, 2)) for
label in toplabels.values() if
label != "undetermined"]) + '\n'
print(buf)
with open(os.path.join(NET_RESULTS_DIR, "results_log.txt"), 'w') as fout_txt:
fout_txt.write(buf)
| 2.171875 | 2 |
lc0253_meeting_rooms_ii.py | bowen0701/python-algorithms-data-structures | 8 | 12793770 | <gh_stars>1-10
"""Leetcode 253. Meeting Rooms II (Premium)
Medium
URL: https://leetcode.com/problems/meeting-rooms-ii
Given an array of meeting time intervals consisting of start and end times
[[s1,e1],[s2,e2],...] (si < ei),
find the minimum number of conference rooms required.
Example1
Input: intervals = [[0,30],[5,10],[15,20]]
Output: 2
Explanation:
We need two meeting rooms
room1: (0,30)
room2: (5,10),(15,20)
Example2
Input: intervals = [[7, 10], [2, 4]]
Output: 1
Explanation:
Only need one meeting room
"""
class SolutionSortEndMinHeapEnd(object):
def minMeetingRooms(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
Time complexity: O(n*logn).
Space complexity: O(n).
"""
import heapq
if not intervals or not intervals[0]:
return 0
# Sort intervals by start time.
intervals.sort()
# Use min heap to store end times.
end_minhq = []
heapq.heappush(end_minhq, intervals[0][1])
for i in range(1, len(intervals)):
# If next start time is after min end time, remove min end time.
if intervals[i][0] >= end_minhq[0]:
heapq.heappop(end_minhq)
# Add next end time to min heap.
heapq.heappush(end_minhq, intervals[i][1])
return len(end_minhq)
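# Walkthrough (comment only) of the heap-based solution above on [[0,30],[5,10],[15,20]]:
# the heap starts as [30]; start 5 < 30 so push 10 -> [10, 30]; start 15 >= 10 so pop 10
# and push 20 -> [20, 30]; the final heap size, 2, is the number of rooms needed.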
class SolutionTimeCounterListInsort(object):
def minMeetingRooms(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
        Time complexity: O(n^2) worst case, since each bisect.insort inserts into a Python list.
Space complexity: O(n).
"""
from bisect import insort
# Sort times and add increment/decrement counters by start/end.
time_counters = []
for i in range(len(intervals)):
insort(time_counters, (intervals[i][0], 1))
insort(time_counters, (intervals[i][1], -1))
cur_n, max_n = 0, 0
for t, counter in time_counters:
cur_n += counter
max_n = max(max_n, cur_n)
return max_n
def main():
    # Output: 2.
    intervals = [[0,30],[5,10],[15,20]]
    print(SolutionSortEndMinHeapEnd().minMeetingRooms(intervals))
    print(SolutionTimeCounterListInsort().minMeetingRooms(intervals))
    # Output: 1.
    intervals = [[7, 10], [2, 4]]
    print(SolutionSortEndMinHeapEnd().minMeetingRooms(intervals))
    print(SolutionTimeCounterListInsort().minMeetingRooms(intervals))
if __name__ == '__main__':
main()
| 3.578125 | 4 |
tests/fixtures/create_fixtures.py | brightway-lca/bw_default_backend | 0 | 12793771 | <reponame>brightway-lca/bw_default_backend<filename>tests/fixtures/create_fixtures.py<gh_stars>0
import bw_projects as bw
import bw_default_backend as backend
import pytest
@pytest.fixture(scope="function")
def basic_fixture():
NAME = "test-fixtures"
# if NAME in bw.projects:
# bw.projects.delete_project(NAME)
bw.projects.create_project(NAME, add_base_data=True)
biosphere_collection = backend.Collection.create(name="biosphere")
food_collection = backend.Collection.create(name="food")
first = backend.Flow.create(
name="an emission", kind="biosphere", collection=biosphere_collection, unit="kg"
)
second = backend.Flow.create(
name="another emission",
kind="biosphere",
collection=biosphere_collection,
unit="kg",
)
world = backend.Geocollection.get(name="world")
canada = backend.Location.create(geocollection=world, name="Canada")
lunch_flow = backend.Flow.create(
name="lunch food", unit="kg", kind="technosphere", collection=food_collection
)
lunch_activity = backend.Activity.create(
name="eating lunch",
collection=food_collection,
reference_product=lunch_flow,
location=canada,
)
backend.Exchange.create(
activity=lunch_activity, flow=lunch_flow, direction="production", amount=0.5
)
backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05)
dinner_flow = backend.Flow.create(
name="dinner main dish",
unit="kg",
kind="technosphere",
collection=food_collection,
)
dinner_activity = backend.Activity.create(
name="eating dinner",
collection=food_collection,
reference_product=dinner_flow,
location=canada,
)
backend.Exchange.create(
activity=dinner_activity, flow=dinner_flow, direction="production", amount=0.25
)
backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15)
method = backend.Method.create(name=("test", "method"))
backend.CharacterizationFactor.create(flow=first, method=method, amount=42)
backend.CharacterizationFactor.create(flow=second, method=method, amount=99)
| 2.078125 | 2 |
python/mxnet/ndarray/contrib.py | ijkguo/mxnet | 4 | 12793772 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-wildcard-import
"""Contrib NDArray API of MXNet."""
import math
from ..context import current_context
from ..random import uniform
from ..base import _as_list
from . import ndarray
try:
from .gen_contrib import *
except ImportError:
pass
__all__ = ["rand_zipfian"]
# pylint: disable=line-too-long
def rand_zipfian(true_classes, num_sampled, range_max, ctx=None):
"""Draw random samples from an approximately log-uniform or Zipfian distribution.
This operation randomly samples *num_sampled* candidates the range of integers [0, range_max).
The elements of sampled_candidates are drawn with replacement from the base distribution.
The base distribution for this operator is an approximately log-uniform or Zipfian distribution:
P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
This sampler is useful when the true classes approximately follow such a distribution.
    For example, the classes may represent words in a lexicon sorted in decreasing order of \
    frequency. If your classes are not ordered by decreasing frequency, do not use this op.
    Additionally, it also returns the number of times each of the \
true classes and the sampled classes is expected to occur.
Parameters
----------
true_classes : NDArray
A 1-D NDArray of the target classes.
num_sampled: int
The number of classes to randomly sample.
range_max: int
The number of possible classes.
ctx : Context
Device context of output. Default is current context.
Returns
-------
samples: NDArray
The sampled candidate classes in 1-D `int64` dtype.
expected_count_true: NDArray
The expected count for true classes in 1-D `float64` dtype.
expected_count_sample: NDArray
The expected count for sampled candidates in 1-D `float64` dtype.
Examples
--------
>>> true_cls = mx.nd.array([3])
>>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5)
>>> samples
[1 3 3 3]
<NDArray 4 @cpu(0)>
>>> exp_count_true
[ 0.12453879]
<NDArray 1 @cpu(0)>
>>> exp_count_sample
[ 0.22629439 0.12453879 0.12453879 0.12453879]
<NDArray 4 @cpu(0)>
"""
if ctx is None:
ctx = current_context()
log_range = math.log(range_max + 1)
rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx)
# make sure sampled_classes are in the range of [0, range_max)
sampled_classes = (rand.exp() - 1).astype('int64') % range_max
true_cls = true_classes.as_in_context(ctx).astype('float64')
expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled
    # cast sampled classes to fp64 to avoid integer division
sampled_cls_fp64 = sampled_classes.astype('float64')
expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
expected_count_sampled = expected_prob_sampled * num_sampled
return sampled_classes, expected_count_true, expected_count_sampled
# pylint: enable=line-too-long
def foreach(body, data, init_states):
"""Run a for loop with user-defined computation over NDArrays on dimension 0.
This operator simulates a for loop and body has the computation for an iteration
of the for loop. It runs the computation in body on each slice from the input
NDArrays.
body takes two arguments as input and outputs a tuple of two elements,
as illustrated below:
out, states = body(data1, states)
data1 can be either an NDArray or a list of NDArrays. If data is an NDArray,
data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has the same
size as data. states is a list of NDArrays and have the same size as init_states.
Similarly, out can be either an NDArray or a list of NDArrays, which are concatenated
as the first output of foreach; states from the last execution of body
are the second output of foreach.
The computation done by this operator is equivalent to the pseudo code below
when the input data is NDArray:
states = init_states
outs = []
for i in data.shape[0]:
s = data[i]
out, states = body(s, states)
outs.append(out)
outs = stack(*outs)
Parameters
----------
body : a Python function.
Define computation in an iteration.
data: an NDArray or a list of NDArrays.
The input data.
init_states: an NDArray or a list of NDArrays.
The initial values of the loop states.
name: string.
The name of the operator.
Returns
-------
outputs: an NDArray or a list of NDArrays.
The output data concatenated from the output of all iterations.
states: a list of NDArrays.
The loop states in the last iteration.
Examples
--------
>>> step = lambda data, states: (data + states[0], [states[0] * 2])
>>> data = mx.nd.random.uniform(shape=(2, 10))
>>> states = [mx.nd.random.uniform(shape=(10))]
>>> outs, states = mx.nd.contrib.foreach(step, data, states)
"""
def check_input(inputs, in_type, msg):
is_NDArray_or_list = True
if isinstance(inputs, list):
for i in inputs:
if not isinstance(i, in_type):
is_NDArray_or_list = False
break
else:
is_NDArray_or_list = isinstance(inputs, in_type)
assert is_NDArray_or_list, msg
check_input(data, ndarray.NDArray, "data should be an NDArray or a list of NDArrays")
check_input(init_states, ndarray.NDArray,
"init_states should be an NDArray or a list of NDArrays")
not_data_list = isinstance(data, ndarray.NDArray)
num_iters = data.shape[0] if not_data_list else data[0].shape[0]
states = init_states
outputs = []
for i in range(num_iters):
if not_data_list:
eles = data[i]
else:
eles = [d[i] for d in data]
outs, states = body(eles, states)
outs = _as_list(outs)
outputs.append(outs)
outputs = zip(*outputs)
tmp_outputs = []
for out in outputs:
tmp_outputs.append(ndarray.op.stack(*out))
outputs = tmp_outputs
if not_data_list and len(outputs) == 1:
outputs = outputs[0]
return (outputs, states)
| 2.125 | 2 |
bisect/41997.py | simonjayhawkins/pandas | 1 | 12793773 | # 1.3: (intended?) Behavior change with empty apply #41997
import pandas as pd
print(pd.__version__)
df = pd.DataFrame(columns=["a", "b"])
df["a"] = df.apply(lambda x: x["a"], axis=1)
print(df)
| 3.421875 | 3 |
tests/examples/minlplib/graphpart_2g-0088-0088.py | ouyang-w-19/decogo | 2 | 12793774 | # MINLP written by GAMS Convert at 04/21/18 13:52:22
#
# Equation counts
# Total E G L N X C B
# 65 65 0 0 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 193 1 192 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 385 193 192 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b4 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b5 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b6 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b7 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b8 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b9 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b10 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b11 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b12 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b13 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b14 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b15 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b16 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b17 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b18 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b43 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b44 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b45 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b46 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b47 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b48 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b49 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b50 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b51 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b52 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b53 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b54 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b57 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b128 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b136 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b152 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b153 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b155 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b156 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b157 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b158 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b159 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b160 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b161 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b162 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b163 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b164 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b165 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b166 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b167 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b168 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b169 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b182 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 +
67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 +
61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172
+ 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30
- 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8*
m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9*
m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 +
199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12*
m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 -
147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15
*m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 +
153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18*
m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 -
220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21*
m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287*
m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 +
47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48
- 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29*
m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158*
m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 +
18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60
- 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39
*m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788*
m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 +
67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46*
m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871*
m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 -
97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56
- 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55*
m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440*
m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520
*m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 +
97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65*
m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754*
m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 -
169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73*
m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652*
m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 -
35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79*
m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023*
m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107
- 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86
*m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 +
204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734*
m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116
- 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590*
m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 +
90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99*
m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125
+ 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134
*m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106*
m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 -
22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716*
m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112*
m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138
+ 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321*
m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120
*m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 -
39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240*
m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125*
m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151
- 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 +
76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764*
m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134*
m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139
- 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 -
154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148*
m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143*
m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 -
105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364*
m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149*
m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154
+ 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044*
m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155*
m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181
- 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331*
m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162*
m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167
- 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908
*m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170*
m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 -
72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177
*m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184
+ 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 +
59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192
, sense=minimize)
m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 == 1)
m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 == 1)
m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 == 1)
m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12 == 1)
m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 == 1)
m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 == 1)
m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21 == 1)
m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 == 1)
m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27 == 1)
m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 == 1)
m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 == 1)
m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 == 1)
m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 == 1)
m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 == 1)
m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 == 1)
m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 == 1)
m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 == 1)
m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 == 1)
m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 == 1)
m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 == 1)
m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1)
m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1)
m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1)
m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1)
m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1)
m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1)
m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1)
m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1)
m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1)
m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1)
m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1)
m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1)
m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1)
m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1)
m.c35 = Constraint(expr= m.b103 + m.b104 + m.b105 == 1)
m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108 == 1)
m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111 == 1)
m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114 == 1)
m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117 == 1)
m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120 == 1)
m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123 == 1)
m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126 == 1)
m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129 == 1)
m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132 == 1)
m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135 == 1)
m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138 == 1)
m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141 == 1)
m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144 == 1)
m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147 == 1)
m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150 == 1)
m.c51 = Constraint(expr= m.b151 + m.b152 + m.b153 == 1)
m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156 == 1)
m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159 == 1)
m.c54 = Constraint(expr= m.b160 + m.b161 + m.b162 == 1)
m.c55 = Constraint(expr= m.b163 + m.b164 + m.b165 == 1)
m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168 == 1)
m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171 == 1)
m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174 == 1)
m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177 == 1)
m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180 == 1)
m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183 == 1)
m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186 == 1)
m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189 == 1)
m.c64 = Constraint(expr= m.b190 + m.b191 + m.b192 == 1)
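# Sketch of how this generated model could be solved (assumption: a solver that
# accepts quadratic objectives over binary variables, e.g. Gurobi or Couenne,
# is installed and on the path):
#
#   from pyomo.environ import SolverFactory
#   results = SolverFactory('gurobi').solve(m, tee=True)
#   print(results.solver.termination_condition)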
| 1.632813 | 2 |
tests/conftest.py | verotel/pyzeebe | 0 | 12793775 | from random import randint
from threading import Event
from unittest.mock import patch, MagicMock
from uuid import uuid4
import pytest
from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job
from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter
from pyzeebe.task.task import Task
from pyzeebe.worker.task_handler import ZeebeTaskHandler
from tests.unit.utils.gateway_mock import GatewayMock
from tests.unit.utils.random_utils import random_job
@pytest.fixture
def job_with_adapter(zeebe_adapter):
return random_job(zeebe_adapter=zeebe_adapter)
@pytest.fixture
def job_without_adapter():
return random_job()
@pytest.fixture
def job_from_task(task):
job = random_job(task)
job.variables = dict(x=str(uuid4()))
return job
@pytest.fixture
def zeebe_adapter(grpc_create_channel):
return ZeebeAdapter(channel=grpc_create_channel())
@pytest.fixture
def zeebe_client(grpc_create_channel):
return ZeebeClient(channel=grpc_create_channel())
@pytest.fixture
def zeebe_worker(zeebe_adapter):
worker = ZeebeWorker()
worker.zeebe_adapter = zeebe_adapter
return worker
@pytest.fixture
def task(task_type):
return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x))
@pytest.fixture
def task_type():
return str(uuid4())
@pytest.fixture
def stop_after_test():
stop_test = Event()
yield stop_test
stop_test.set()
@pytest.fixture
def handle_task_mock():
with patch("pyzeebe.worker.worker.ZeebeWorker._handle_task") as mock:
yield mock
@pytest.fixture
def stop_event_mock(zeebe_worker):
with patch.object(zeebe_worker, "stop_event") as mock:
yield mock
@pytest.fixture
def handle_not_alive_thread_spy(mocker):
spy = mocker.spy(ZeebeWorker, "_handle_not_alive_thread")
yield spy
@pytest.fixture
def router():
return ZeebeTaskRouter()
@pytest.fixture
def routers():
return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))]
@pytest.fixture
def task_handler():
return ZeebeTaskHandler()
@pytest.fixture
def decorator():
def simple_decorator(job: Job) -> Job:
return job
return MagicMock(wraps=simple_decorator)
@pytest.fixture(scope="module")
def grpc_add_to_server():
from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server
return add_GatewayServicer_to_server
@pytest.fixture(scope="module")
def grpc_servicer():
return GatewayMock()
@pytest.fixture(scope="module")
def grpc_stub_cls(grpc_channel):
from zeebe_grpc.gateway_pb2_grpc import GatewayStub
return GatewayStub
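# Example (sketch) of how a test module elsewhere in the suite would consume
# these fixtures; the assertion relies only on what the job_from_task fixture
# above sets up:
#
#   def test_job_from_task_has_variables(job_from_task):
#       assert "x" in job_from_task.variables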
| 1.921875 | 2 |
NLP/Word - Embeddings in Tensorflow.py | sunnyshah2894/Tensorflow | 1 | 12793776 | import tensorflow as tf
import numpy as np
# Training set: one row of length 5 per training example. Each row is a sentence with every word
# replaced by its unique integer index. The dataset below contains 6 unique words numbered 0-5.
# Words 4 and 5 appear in identical contexts, so after training their embedding vectors should
# converge to be (nearly) the same.
X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]])
# output dummy for testing purpose
y_train = np.array([0,1])
# Create the embeddings
with tf.name_scope("embeddings"):
    # Initialize the embedding matrix with uniformly random weights.
embedding = tf.Variable(tf.random_uniform((6,
3), -1, 1))
# create the embedding layer
embed = tf.nn.embedding_lookup(embedding, X_train)
    # Expand dims so that 2D convolution operations can be applied on top of the single-channel embedded vectors
embedded_chars_expanded = tf.expand_dims(embed, -1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer());
result,result_expanded = sess.run([embed,embedded_chars_expanded]);
print(result_expanded.shape)
print(result)
print(result_expanded)
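# Sanity check (sketch, not part of the original example): positions 0, 1, 3 and 4 hold the same
# word indices in both sentences and so look up identical embedding rows, while position 2 holds
# word 4 vs word 5 and differs.
print(np.array_equal(result[0][0], result[1][0]))  # True  -> both sentences use word index 0
print(np.array_equal(result[0][2], result[1][2]))  # False -> word 4 vs word 5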
# OUTPUT
# result
# [[[ 0.89598155 0.4275496 0.00858593]
# [ 0.21602225 -0.44228792 -0.20533657]
# [ 0.9624436 -0.99176955 0.15964746]
# [-0.29004955 0.470721 0.00804782]
# [ 0.7497003 0.6044979 -0.5612638 ]]
#
# [[ 0.89598155 0.4275496 0.00858593]
# [ 0.21602225 -0.44228792 -0.20533657]
# [-0.48809385 -0.55618596 -0.73995876]
# [-0.29004955 0.470721 0.00804782]
# [ 0.7497003 0.6044979 -0.5612638 ]]]
# result_expanded - has a dimension of (2,5,3,1)
# [[[[-0.45975637]
# [-0.5756638 ]
# [ 0.7002065 ]]
#
# [[ 0.2708087 ]
# [ 0.7985747 ]
# [ 0.57897186]]
#
# [[ 0.6642673 ]
# [ 0.6548476 ]
# [ 0.00760126]]
#
# [[-0.7074845 ]
# [ 0.5100081 ]
# [ 0.7232883 ]]
#
# [[ 0.19342017]
# [-0.46509933]
# [ 0.8361807 ]]]
#
#
# [[[-0.45975637]
# [-0.5756638 ]
# [ 0.7002065 ]]
#
# [[ 0.2708087 ]
# [ 0.7985747 ]
# [ 0.57897186]]
#
# [[-0.90803576]
# [ 0.75451994]
# [ 0.8864901 ]]
#
# [[-0.7074845 ]
# [ 0.5100081 ]
# [ 0.7232883 ]]
#
# [[ 0.19342017]
# [-0.46509933]
# [ 0.8361807 ]]]] | 3.53125 | 4 |
functions.py | krishnaaxo/Stock | 0 | 12793777 |
import matplotlib.pyplot as plt
import yfinance as yf #To access the financial data available on Yahoo Finance
import numpy as np
def get_stock_data(tickerSymbol, start_date, end_date):
tickerData = yf.Ticker(tickerSymbol)
df_ticker = tickerData.history(period='1d', start=start_date, end=end_date)
return df_ticker
def prepare_data(s):
ymax = 1000
s = s * ymax / s.max() # scale y range
    s = 1450 - s  # the image origin (0, 0) is the top-left corner and y grows downward, so flip around y=1450 to keep the plot above the horizon line
    # smooth the curve
window_size = len(s) // 150
s = s.rolling(window_size, min_periods=1).mean()
return s
def make_picture(stock_prices, img, x_width_image, horizon_height):
"""x_width_image: dedicated arg for more control, instead of taking image dim"""
fig, ax = plt.subplots()
ax.imshow(img)
x = np.linspace(0, x_width_image, len(stock_prices))
ax.fill_between(x, stock_prices, horizon_height, color='#081A1C')
plt.axis('off')
plt.tight_layout()
return fig
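# Example usage (sketch): the ticker, date range, background image file and the
# horizon height of 1450 px are assumptions -- adjust them to your own artwork.
if __name__ == '__main__':
    df_prices = get_stock_data('AAPL', '2020-01-01', '2021-01-01')
    skyline = prepare_data(df_prices['Close'])
    background = plt.imread('skyline.png')  # assumed local background image
    figure = make_picture(skyline, background,
                          x_width_image=background.shape[1],
                          horizon_height=1450)
    figure.savefig('stock_skyline.png')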
| 3.4375 | 3 |
Ubiquitous Computing/src/raspberry/RestSender.py | Alvarohf/University-work | 0 | 12793778 | <reponame>Alvarohf/University-work
import requests
import datetime
import json
from enum import Enum
class Recivers (Enum):
FLEX = 'flex'
WEIGHT = 'weight'
TEMPERATURE = 'temperature'
HUMIDITY = 'humidity'
NOISE = 'noise'
LIGHT = 'light'
POSITION = 'position'
USER = 'user'
class RestSender:
def __init__(self, url= 'http://192.168.127.12:8080/'):
self.url = url
def send(self, content, to):
return requests.post(self.url + to, content)
def add_user (self, user_name):
user = {}
user['userId'] = user_name
user['name'] = user_name
user['age'] = 0
user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com'
user['passw'] = <PASSWORD>
user['created'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
return self.send(user, Recivers.USER.value)
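# Example usage (sketch): the payload fields below are an assumption about what
# the REST endpoint expects; only the default URL above is defined in this file.
if __name__ == '__main__':
    sender = RestSender()
    reading = {'value': 21.5,
               'created': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')}
    print(sender.send(reading, Recivers.TEMPERATURE.value).status_code)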
| 2.9375 | 3 |
Drawing-On-Images/accessingPixelValues.py | TUCchkul/OpenCV-ImageProcessing-and-Numpy | 0 | 12793779 | import cv2
image=cv2.imread(r'md.jpg',flags=1)
print(image[0:100,0:100])
# Change pixel values in the original image
#image[0:100,0:100]=255  # fully white
image[0:100, 0:100] = [165, 42, 42]  # OpenCV uses BGR channel order: B=165, G=42, R=42
cv2.imshow('New Image',image)
cv2.waitKey(0)
cv2.destroyAllWindows() | 3.21875 | 3 |
notes/algo-ds-practice/problems/array/rabin_karp.py | Anmol-Singh-Jaggi/interview-notes | 6 | 12793780 | # Verified on https://leetcode.com/problems/implement-strstr
import string
from functools import lru_cache
@lru_cache(maxsize=None)
def get_char_code(ch, char_set=string.ascii_letters + string.digits):
# We could have also used:
# return ord(ch) - ord('0')
return char_set.index(ch)
def rabin_karp(haystack, needle):
# CAREFUL: Beware of these corner cases!
if needle == "":
return 0
if len(needle) == 0 or len(needle) > len(haystack):
return -1
HASH_MOD = 1000000007
# We can use any number as base, but its better to
# take the alphabet size to minimize collisions
BASE = 26
needle_hash = 0
haystack_hash = 0
    for i in range(len(needle)):
needle_char = get_char_code(needle[i])
haystack_char = get_char_code(haystack[i])
needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD
haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD
if haystack_hash == needle_hash and needle == haystack[0 : len(needle)]:
return 0
# Now compute hashes on a rolling basis
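    # Rolling update: remove the outgoing character's contribution
    # (old_char * BASE**(len(needle) - 1), i.e. old_char * base_power_up),
    # shift the remaining hash by one base position, then add the new character:
    #   new_hash = ((old_hash - old_char * base_power_up) * BASE + ch) % HASH_MOD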
base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD)
for i in range(len(needle), len(haystack)):
haystack_start_pos = i + 1 - len(needle)
haystack_end_pos = i + 1
old_char = get_char_code(haystack[haystack_start_pos - 1])
ch = get_char_code(haystack[i])
haystack_hash = (
(haystack_hash - base_power_up * old_char) * BASE + ch
) % HASH_MOD
if (
haystack_hash == needle_hash
and needle == haystack[haystack_start_pos:haystack_end_pos]
):
return haystack_start_pos
return -1
def main():
haystack = "abcs"
needle = ""
print(rabin_karp(haystack, needle))
if __name__ == "__main__":
main()
| 3.484375 | 3 |
black_list/black_list/spiders/BL_ICEFugitivesList.py | Damon-zln/ws-scrapy | 0 | 12793781 | from bs4 import BeautifulSoup
from black_list.items import BLICEFugitivesListItem
from scrapy import Spider, Request
import os
class BlIcefugitiveslistSpider(Spider):
name = 'BL_ICEFugitivesList'
allowed_domains = ['www.ice.gov']
start_urls = ['https://www.ice.gov/most-wanted']
header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning'
def parse(self, response):
with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f:
f.write(self.header + '\n')
soup = BeautifulSoup(response.body, 'lxml')
tables = soup.select('.field-item')
tables.pop(0)
for table in tables:
links = table.find_all(text='READ MORE')
for link in links:
yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info)
def get_info(self, response):
soup = BeautifulSoup(response.body, 'lxml')
item = BLICEFugitivesListItem()
item['name'] = ''
item['offense'] = ''
item['aka'] = ''
item['sex'] = ''
item['dob'] = ''
item['pob'] = ''
item['complexion'] = ''
item['reward'] = ''
item['height'] = ''
item['weight'] = ''
item['eyes'] = ''
item['haia'] = ''
item['scars'] = ''
item['address'] = ''
item['synopsis'] = ''
item['warning'] = ''
if soup.find(text='Name') is not None:
item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text
if soup.select('div.wanted-for') is not None:
item['offense'] = soup.select('div.wanted-for')[0].text
if soup.find(text='Alias') is not None:
item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text
if soup.find(text='Gender') is not None:
item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text
if soup.find(text='Date of Birth') is not None:
item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text
if soup.find(text='Place of Birth') is not None:
item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text
if soup.find(text='Skin Tone') is not None:
item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text
if soup.find(text='Reward') is not None:
item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text
if soup.find(text='Height') is not None:
item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text
if soup.find(text='Weight') is not None:
item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text
if soup.find(text='Eyes') is not None:
item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text
if soup.find(text='Hair') is not None:
item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text
if soup.find(text='Scars/Marks') is not None:
item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text
if soup.find(text='Last Known Location') is not None:
item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text
values = soup.select('div[class="field-label"]')
if values:
for i in values:
if "Summary:" in i.text:
item['synopsis'] = i.next_sibling.text
if "Warning:" in i.text:
item['warning'] = i.next_sibling.text
yield item
| 3.015625 | 3 |
src/exception-handling/custom_exception.py | mrdulin/python-codelab | 0 | 12793782 | class UserDefinedException(Exception):
def __init__(self, eid, message):
self.eid = eid
self.message = message
class ExceptionDemo:
def draw(self, number):
print('called compute(%s)' % str(number))
if number > 500 or number <= 0:
raise UserDefinedException(101, 'number out of bound')
else:
print('normal exit')
demo = ExceptionDemo()
try:
demo.draw(125)
demo.draw(900)
except UserDefinedException as e:
print('Exception caught: id: {}, message: {}'.format(e.eid, e.message))
| 3.296875 | 3 |
xotl/ql/core.py | merchise/xotl.ql | 1 | 12793783 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) <NAME> [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
'''The query language core.
'''
import ast
import types
from xoutil.symbols import Unset
from xoutil.objects import memoized_property
from collections import MappingView, Mapping
from xoutil.decorator.meta import decorator
from xotl.ql import interfaces
class Universe:
'''The class of the `this`:obj: object.
The `this` object is simply a name from which objects can be drawn in a
query.
'''
def __new__(cls):
res = getattr(cls, 'instance', None)
if not res:
res = super().__new__(cls)
cls.instance = res
return res
def __getitem__(self, key):
return self
def __getattr__(self, name):
return self
def __iter__(self):
return self
def next(self):
raise StopIteration
__next__ = next
this = Universe()
RESERVED_ARGUMENTS = (
'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame'
)
class QueryObject:
frame_type = 'xotl.ql.core.Frame'
def __init__(self, qst, _frame, **kwargs):
self.qst = qst
self._frame = _frame
if any(name in RESERVED_ARGUMENTS for name in kwargs):
raise TypeError('Invalid keyword argument')
self.expression = kwargs.pop('expression', None)
for attr, val in kwargs.items():
setattr(self, attr, val)
def get_value(self, name, only_globals=False):
if not only_globals:
res = self._frame.f_locals.get(name, Unset)
else:
res = Unset
if res is Unset:
res = self._frame.f_globals.get(name, Unset)
if res is not Unset:
return res
else:
raise NameError(name)
@memoized_property
def locals(self):
return self._frame.f_locals
@memoized_property
def globals(self):
return self._frame.f_globals
@memoized_property
def source(self):
builder = SourceBuilder()
return builder.get_source(self.qst)
def get_query_object(generator,
query_type='xotl.ql.core.QueryObject',
frame_type=None,
**kwargs):
'''Get the query object from a query expression.
'''
from xoutil.objects import import_object
from xotl.ql.revenge import Uncompyled
uncompiled = Uncompyled(generator)
gi_frame = generator.gi_frame
QueryObjectType = import_object(query_type)
FrameType = import_object(frame_type or QueryObjectType.frame_type)
return QueryObjectType(
uncompiled.qst,
FrameType(gi_frame.f_locals, gi_frame.f_globals),
expression=generator,
**kwargs
)
# Alias to the old API.
these = get_query_object
def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject',
frame_type=None, **kwargs):
'''Get a predicate object from a predicate expression.
'''
from xoutil.objects import import_object
from .revenge import Uncompyled
uncompiled = Uncompyled(func)
PredicateClass = import_object(predicate_type)
FrameClass = import_object(frame_type or PredicateClass.frame_type)
return PredicateClass(
uncompiled.qst,
FrameClass(_get_closure(func), func.__globals__),
predicate=func,
**kwargs
)
def normalize_query(which, **kwargs):
'''Ensure a query object.
If `which` is a query expression (more precisely a generator object) it is
passed to `get_query_object`:func: along with all keyword arguments.
If `which` is not a query expression it must be a `query object`:term:,
other types are a TypeError.
'''
from types import GeneratorType
if isinstance(which, GeneratorType):
return get_query_object(which, **kwargs)
else:
if not isinstance(which, interfaces.QueryObject):
raise TypeError('Query object expected, but object provided '
'is not: %r' % type(which))
return which
@decorator
def thesefy(target, make_subquery=True):
'''Allow an object to participate in queries.
Example as a wrapper::
class People:
# ...
pass
query = (who for who in thesefy(People))
Example as a decorator::
@thesefy
class People:
pass
query = (who for who in People)
If `target` already support the iterable protocol (i.e implement
``__iter__``), return it unchanged.
If `make_subquery` is True, then the query shown above will be equivalent
to::
query = (who for who in (x for x in this if isinstance(x, People)))
If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that
simply returns the same object and a ``next()`` method that immediately
stops the iteration.
Notice that in order to use `make_subquery` you call `thesefy`:func: as a
decorator-returning function::
class Person:
pass
query = (x for x in thesefy(make_subquery=False)(Person))
# or simply as a decorator
@thesefy(make_subquery=False)
class Person:
pass
'''
if getattr(target, '__iter__', None):
return target
class new_meta(type(target)):
if make_subquery:
def __iter__(self):
return (x for x in this if isinstance(x, self))
else:
def __iter__(self):
return self
def next(self):
raise StopIteration
__next__ = next
from xoutil.objects import copy_class
new_class = copy_class(target, meta=new_meta)
return new_class
class Frame:
def __init__(self, locals, globals, **kwargs):
self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries',
True)
self.f_locals = _FrameView(locals)
self.f_globals = _FrameView(globals)
self.f_locals.owner = self.f_globals.owner = self
class _FrameView(MappingView, Mapping):
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def __getitem__(self, key):
res = self._mapping[key]
if self.owner.auto_expand_subqueries and key == '.0':
return sub_query_or_value(res)
else:
return res
def get(self, key, default=None):
res = self._mapping.get(key, default)
if self.owner.auto_expand_subqueries and key == '.0':
return sub_query_or_value(res)
else:
return res
def __iter__(self):
return iter(self._mapping)
def _get_closure(obj):
assert isinstance(obj, types.FunctionType)
if obj.__closure__:
return {
name: cell.cell_contents
for name, cell in zip(obj.__code__.co_freevars, obj.__closure__)
}
else:
return {}
def sub_query_or_value(v):
if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>':
return get_query_object(v)
else:
return v
class SourceBuilder(ast.NodeVisitor):
def get_source(self, node):
stack = self.stack = []
self.visit(node)
assert len(stack) == 1, 'Remaining items %r at %r' % (stack, node)
return stack.pop()
def visit_And(self, node):
self.stack.append(' and ')
def visit_Or(self, node):
self.stack.append(' or ')
def visit_Name(self, node):
self.stack.append(node.id)
def visit_BoolOp(self, node):
self.visit(node.op)
for val in node.values:
self.visit(val)
exprs = []
for _ in range(len(node.values)):
exprs.insert(0, self.stack.pop(-1))
op = self.stack.pop(-1)
self.stack.append('(%s)' % op.join(exprs))
def visit_BinOp(self, node):
stack = self.stack
self.visit(node.op)
self.visit(node.right)
self.visit(node.left)
left = stack.pop(-1)
right = stack.pop(-1)
op = stack.pop(-1)
stack.append('(%s%s%s)' % (left, op, right))
def visit_Add(self, node):
self.stack.append(' + ')
def visit_Sub(self, node):
self.stack.append(' - ')
def visit_Mult(self, node):
self.stack.append(' * ')
def visit_Div(self, node):
self.stack.append(' / ')
def visit_Mod(self, node):
self.stack.append(' % ')
def visit_Pow(self, node):
self.stack.append(' ** ')
def visit_LShift(self, node):
self.stack.append(' << ')
def visit_RShift(self, node):
self.stack.append(' >> ')
def visit_BitOr(self, node):
self.stack.append(' | ')
def visit_BitAnd(self, node):
self.stack.append(' & ')
def visit_BitXor(self, node):
self.stack.append(' ^ ')
def visit_FloorDiv(self, node):
self.stack.append(' // ')
def visit_Num(self, node):
self.stack.append('%s' % node.n)
def visit_UnaryOp(self, node):
stack = self.stack
self.visit(node.op)
self.visit(node.operand)
operand = stack.pop(-1)
op = stack.pop(-1)
stack.append('(%s%s)' % (op, operand))
def visit_Invert(self, node):
self.stack.append('~')
def visit_Not(self, node):
self.stack.append('not ')
def visit_UAdd(self, node):
self.stack.append('+')
def visit_USub(self, node):
self.stack.append('-')
def visit_IfExp(self, node):
self.visit(node.orelse)
self.visit(node.test)
self.visit(node.body)
body = self.stack.pop(-1)
test = self.stack.pop(-1)
orelse = self.stack.pop(-1)
self.stack.append('(%s if %s else %s)' % (body, test, orelse))
def visit_Lambda(self, node):
raise NotImplementedError()
def visit_Dict(self, node):
# order does not really matter but I'm picky
        for k, v in reversed(list(zip(node.keys, node.values))):
self.visit(v)
self.visit(k)
dictbody = ', '.join(
'%s: %s' % (self.stack.pop(-1), self.stack.pop(-1))
for _ in range(len(node.keys))
)
self.stack.append('{%s}' % dictbody)
def visit_Set(self, node):
for elt in reversed(node.elts):
self.visit(elt)
setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts)))
self.stack.append('{%s}' % setbody)
def visit_ListComp(self, node):
self._visit_comp(node)
self.stack.append('[%s]' % self.stack.pop(-1))
def visit_SetComp(self, node):
self._visit_comp(node)
self.stack.append('{%s}' % self.stack.pop(-1))
def visit_DictComp(self, node):
self.visit(node.value)
self.visit(node.key)
pop = lambda: self.stack.pop(-1)
lines = ['%s: %s' % (pop(), pop())]
self._visit_generators(node)
lines.append(pop())
self.stack.append('{%s}' % ' '.join(lines))
def visit_GeneratorExp(self, node):
self._visit_comp(node)
self.stack.append('(%s)' % self.stack.pop(-1))
def _visit_comp(self, node):
self.visit(node.elt)
pop = lambda: self.stack.pop(-1)
lines = [pop()]
self._visit_generators(node)
lines.append(pop())
self.stack.append(' '.join(lines))
def _visit_generators(self, node):
for comp in reversed(node.generators):
for if_ in reversed(comp.ifs):
self.visit(if_)
self.stack.append(len(comp.ifs)) # save the length of ifs [*]
self.visit(comp.iter)
self.visit(comp.target)
pop = lambda: self.stack.pop(-1)
lines = []
for _ in range(len(node.generators)):
lines.append('for %s in %s' % (pop(), pop()))
for if_ in range(pop()): # [*] pop the length of ifs
lines.append('if %s' % pop())
self.stack.append(' '.join(lines))
def visit_Yield(self, node):
raise TypeError('Invalid node Yield')
def visit_Eq(self, node):
self.stack.append(' == ')
def visit_NotEq(self, node):
self.stack.append(' != ')
def visit_Lt(self, node):
self.stack.append(' < ')
def visit_LtE(self, node):
self.stack.append(' <= ')
def visit_Gt(self, node):
self.stack.append(' > ')
def visit_GtE(self, node):
self.stack.append(' >= ')
def visit_Is(self, node):
self.stack.append(' is ')
def visit_IsNot(self, node):
self.stack.append(' is not ')
def visit_In(self, node):
self.stack.append(' in ')
def visit_NotIn(self, node):
self.stack.append(' not in ')
def visit_Compare(self, node):
self.visit(node.left)
        for op, expr in reversed(list(zip(node.ops, node.comparators))):
self.visit(expr)
self.visit(op)
right = ''.join(
# I assume each operator has spaces around it
'%s%s' % (self.stack.pop(-1), self.stack.pop(-1))
for _ in range(len(node.ops))
)
self.stack.append('%s%s' % (self.stack.pop(-1), right))
def visit_Call(self, node):
if node.kwargs:
self.visit(node.kwargs)
if node.starargs:
self.visit(node.starargs)
for kw in reversed(node.keywords):
self.visit(kw.value)
self.stack.append(kw.arg)
for arg in reversed(node.args):
self.visit(arg)
self.visit(node.func)
func = self.stack.pop(-1)
args = [self.stack.pop(-1) for _ in range(len(node.args))]
keywords = [
(self.stack.pop(-1), self.stack.pop(-1))
for _ in range(len(node.keywords))
]
starargs = self.stack.pop(-1) if node.starargs else ''
kwargs = self.stack.pop(-1) if node.kwargs else ''
call = ', '.join(args)
if keywords:
if call:
call += ', '
call += ', '.join('%s=%s' % (k, v) for k, v in keywords)
if starargs:
if call:
call += ', '
call += '*%s' % starargs
if kwargs:
if call:
call += ', '
call += '**%s' % kwargs
self.stack.append('%s(%s)' % (func, call))
def visit_Str(self, node):
self.stack.append('%r' % node.s)
visit_Bytes = visit_Str
def visit_Repr(self, node):
raise NotImplementedError
def visit_Attribute(self, node):
self.visit(node.value)
self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr))
def visit_Subscript(self, node):
self.visit(node.slice)
self.visit(node.value)
self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1)))
def visit_Ellipsis(self, node):
self.stack.append('...')
def visit_Slice(self, node):
if node.step:
self.visit(node.step)
step = self.stack.pop(-1)
else:
step = None
if node.upper:
self.visit(node.upper)
upper = self.stack.pop(-1)
else:
upper = None
if node.lower:
self.visit(node.lower)
lower = self.stack.pop(-1)
else:
lower = None
if lower:
res = '%s:' % lower
else:
res = ':'
if upper:
res += '%s' % upper
if step:
res += ':%s' % step
self.stack.append(res)
def visit_List(self, node):
for elt in reversed(node.elts):
self.visit(elt)
self.stack.append(
'[%s]' % ', '.join(
self.stack.pop(-1) for _ in range(len(node.elts))
)
)
def visit_Tuple(self, node):
for elt in reversed(node.elts):
self.visit(elt)
result = (
'(%s' % ', '.join(
self.stack.pop(-1) for _ in range(len(node.elts))
)
)
if len(node.elts) == 1:
result += ', )'
else:
result += ')'
self.stack.append(result)
del decorator
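# Minimal usage sketch for SourceBuilder: its visitor methods follow the ast
# node names, so a plain ast expression (rather than a revenge qst) is enough
# to illustrate the tree-to-source round trip:
#
#   import ast
#   SourceBuilder().get_source(ast.parse('a + b * c', mode='eval').body)
#   # -> '(a + (b * c))'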
| 2.21875 | 2 |
src/CIA_InternationalOrgnizationsAndGroups.py | Larz60p/WorldFactBook | 1 | 12793784 | # copyright (c) 2018 Larz60+
import ScraperPaths
import GetPage
import CIA_ScanTools
from lxml import html
from lxml.cssselect import CSSSelector
from lxml import etree
from lxml.etree import XPath
import re
import os
import sys
class CIA_InternationalOrgnizationsAndGroups:
def __init__(self):
self.spath = ScraperPaths.ScraperPaths()
self.gp = GetPage.GetPage()
self.getpage = self.gp.get_page
self.get_filename = self.gp.get_filename
self.cst = CIA_ScanTools.CIA_Scan_Tools()
self.fact_links = self.cst.fact_links
self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html'
self.filename = self.get_filename(self.mainurl)
self.mainpage = self.getpage(self.mainurl, self.filename)
self.scrape_text()
self.cst.save_fact_links()
def remove_fluff(self, item):
if '\r\n' in item or '\n' in item:
nitem = ''
parts = item.split('\n')
for part in parts:
nitem = f'{nitem.strip()} {part.strip()}'
return nitem
else:
return item
def scrape_text(self):
tree = html.fromstring(self.mainpage)
# html.open_in_browser(tree)
c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {}
childno = 1
while True:
xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)')
# print(xx[0].text)
if len(xx) == 0:
break
title = self.remove_fluff(xx[0].text.strip())
# print(f'Title: {title}')
c2 = c1[title] = {}
# yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)')
yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)')
if len(yy[0]) > 1:
# print(f'\n...length yy: {len(yy[0])}')
c3 = c2['Description'] = []
# print(f'{html.tostring(yy[0])}')
for n, element in enumerate(yy[0]):
if n % 2 == 0:
desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8'))
c3.append(desc)
else:
c3 = c2['Description'] = []
description = self.remove_fluff(yy[0].text.strip())
c3.append(description)
# print(f'Description: {description}')
childno += 1
if __name__ == '__main__':
CIA_InternationalOrgnizationsAndGroups()
| 2.65625 | 3 |
Char6 AlphaGo/test_qd.py | rh01/Deep-reinforcement-learning-with-pytorch | 1 | 12793785 | <gh_stars>1-10
# coding:utf-8
import sys
sys.path.append('../../pythonModules')
import wg4script, AI_QD, wgdensestranet
if __name__ == '__main__':
num_xd, strategy_id_r, strategy_id_b = 0, 0, 0
num_plays, num_objcutility = 50, 1
dic2_rolloutaiparas = {
'red': {'type_ai': AI_QD.AI_QD_BASE,
'type_stra': 'rule-base',
'type_stranet': wgdensestranet.StraDenseNet,
},
'blue': {'type_ai': AI_QD.AI_QD_STRA,
'type_stra': 'random',
'type_stranet': wgdensestranet.StraDenseNet,
},
}
dic_mainparas = {'str_wgrootdir':'../../',
'str_global_flag': 'QD',
'num_plays': num_plays,
'num_objcutility': num_objcutility,
'num_xd': num_xd,
'strategy_ids': (strategy_id_r, strategy_id_b),
'flag_show': True,
'flag_action_cache': False,
                     'flag_qd_rm': True,  # flag_qd_rm: save the roomrd action sequence to the database
'flag_cache': False,
'flag_gpu': False,
                     'flag_afm': True,  # when both AIs are BASE, flag_afm=False; otherwise True
'flag_dllnum': 0,
'cuda_id': 0,
                     'flag_savestate': False,  # flag_savestate: save the data generated by MCTS
'dic2_aiparas': {
'flag_color4acai': 0,
'blue': {'type_ai': AI_QD.AI_QD_HA,
'type_stra': 'rule-base',
# type of stratree of nodes, how to select next path, [rule-base, random, net]
'type_stranet': wgdensestranet.StraDenseNet,
'dic2_rolloutaiparas': dic2_rolloutaiparas,
'flag_candidateactions': 'rule-base'
# [rule-base, stra] how to get candidate actions
},
'red': {'type_ai': AI_QD.AI_QD_BASE,
'type_stra': 'net',
'type_stranet': wgdensestranet.StraDenseNet,
'dic2_rolloutaiparas': dic2_rolloutaiparas,
'flag_candidateactions': 'stra'
},
},
}
wg4script.simulateframe(dic_mainparas= dic_mainparas) | 1.351563 | 1 |
permutations/permutations.py | QQuinn03/LeetHub | 0 | 12793786 | <filename>permutations/permutations.py<gh_stars>0
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
if not nums:
return [[]]
path=[]
res=[]
self.helper(nums,res,path)
return res
def helper(self,nums,res,path):
if not nums:
res.append(path)
return res
for i in range(len(nums)):
cur = nums[i]
left=nums[:i]+nums[i+1:]
self.helper(left,res,path+[cur])
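# Example (sketch):
#   Solution().permute([1, 2, 3])
#   # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]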
| 3.59375 | 4 |
tempest/lib/api_schema/response/compute/v2_48/servers.py | mail2nsrajesh/tempest | 0 | 12793787 | # Copyright 2017 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247
show_server_diagnostics = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'state': {
'type': 'string', 'enum': [
'pending', 'running', 'paused', 'shutdown', 'crashed',
'suspended']
},
'driver': {
'type': 'string', 'enum': [
'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv']
},
'hypervisor': {'type': ['string', 'null']},
'hypervisor_os': {'type': ['string', 'null']},
'uptime': {'type': ['integer', 'null']},
'config_drive': {'type': 'boolean'},
'num_cpus': {'type': 'integer'},
'num_nics': {'type': 'integer'},
'num_disks': {'type': 'integer'},
'memory_details': {
'type': 'object',
'properties': {
'maximum': {'type': ['integer', 'null']},
'used': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['maximum', 'used']
},
'cpu_details': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'null']},
'time': {'type': ['integer', 'null']},
'utilisation': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['id', 'time', 'utilisation']
}
},
'nic_details': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'mac_address': {'oneOf': [parameter_types.mac_address,
{'type': 'null'}]},
'rx_octets': {'type': ['integer', 'null']},
'rx_errors': {'type': ['integer', 'null']},
'rx_drop': {'type': ['integer', 'null']},
'rx_packets': {'type': ['integer', 'null']},
'rx_rate': {'type': ['integer', 'null']},
'tx_octets': {'type': ['integer', 'null']},
'tx_errors': {'type': ['integer', 'null']},
'tx_drop': {'type': ['integer', 'null']},
'tx_packets': {'type': ['integer', 'null']},
'tx_rate': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['mac_address', 'rx_octets', 'rx_errors',
'rx_drop',
'rx_packets', 'rx_rate', 'tx_octets',
'tx_errors',
'tx_drop', 'tx_packets', 'tx_rate']
}
},
'disk_details': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'read_bytes': {'type': ['integer', 'null']},
'read_requests': {'type': ['integer', 'null']},
'write_bytes': {'type': ['integer', 'null']},
'write_requests': {'type': ['integer', 'null']},
'errors_count': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['read_bytes', 'read_requests', 'write_bytes',
'write_requests', 'errors_count']
}
}
},
'additionalProperties': False,
'required': [
'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime',
'config_drive', 'num_cpus', 'num_nics', 'num_disks',
'memory_details', 'cpu_details', 'nic_details', 'disk_details'],
}
}
get_server = copy.deepcopy(servers247.get_server)
| 1.546875 | 2 |
recipes/Python/389203_Series_generator_using_multiple_generators_/recipe-389203.py | tdiprima/code | 2,023 | 12793788 | <gh_stars>1000+
# Simple series generator with
# multiple generators & decorators.
# Author : <NAME>
def myfunc(**kwds):
def func(f):
cond = kwds['condition']
proc = kwds['process']
num = kwds['number']
x = 0
for item in f():
            if cond is None or cond(item):
if proc: item = proc(item)
yield item
x += 1
if x==num:
break
return func
def series(condition=None, process=None, number=10):
@myfunc(condition=condition,process=process,number=number)
def wrapper():
x = 1
while 1:
yield x
x += 1
return wrapper
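# Example usage: yield the squares of the first five accepted integers
# (here the condition accepts every integer).
print(list(series(condition=lambda n: True,
                  process=lambda n: n * n,
                  number=5)))
# -> [1, 4, 9, 16, 25]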
| 2.890625 | 3 |
oferty/migrations/0012_auto_20181203_1119.py | minikdo/estates | 0 | 12793789 | # Generated by Django 2.1.2 on 2018-12-03 10:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oferty', '0011_ofertyest_kto_prowadzi'),
]
operations = [
migrations.AlterField(
model_name='ofertyest',
name='kto_prowadzi',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='oferty.OfertyUsers'),
),
]
| 1.429688 | 1 |
Listas de Python/Lista 2/L02EX04.py | 4RandomProgrammer/Python | 0 | 12793790 | <filename>Listas de Python/Lista 2/L02EX04.py
#L02EX04
#inputs
CircJose = int(input())
PosJose = int(input())
CircFlav = int(input())
PosFlav = int(input())
#var
counterJose = 1
CounterFlav = 1
vivos = 0
PosVivo1 = 1
PosVivo2 = 1
while counterJose <= CircJose:
PosVivo1 += 2
if PosVivo1 > counterJose:
PosVivo1 = 1
counterJose += 1
if PosVivo1 == PosJose:
vivos += 1
while CounterFlav <= CircFlav:
PosVivo2 += 2
if PosVivo2 > CounterFlav:
PosVivo2 = 1
CounterFlav += 1
if PosVivo2 == PosFlav:
vivos += 1
print(vivos) | 3.453125 | 3 |
finds/alfred.py | terence-lim/investment-data-science | 2 | 12793791 | <reponame>terence-lim/investment-data-science
"""Convenience class and methods to access ALFRED/FRED apis and FRED-MD/FRED-QD
- FRED, ALFRED, revisions vintages
- PCA, approximate factor model, EM algorithm
Author: <NAME>
License: MIT
"""
import os
import sys
import json
import io
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd
from datetime import datetime, date
import requests
from bs4 import BeautifulSoup
from io import StringIO
import pickle
import zipfile
import re
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from pandas.api import types
import time
from .edgar import requests_get
from .busday import str2date, to_monthend
import config
# From https://research.stlouisfed.org/econ/mccracken/fred-databases/
_fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/'
def _print(*args, echo=config.ECHO, **kwargs):
"""helper to echo debugging messages"""
if echo: print(*args, **kwargs)
def _int2date(date):
"""helper method to convert int date to FRED api string format"""
return ([_int2date(d) for d in date] if types.is_list_like(date)
else "-".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]]))
def _date2int(date):
"""helper method to convert FRED api string format to int date"""
return ([_date2int(d) for d in date] if types.is_list_like(date)
else int(re.sub('\D', '', str(date)[:10])))
def multpl(page):
"""Helper method to retrieve shiller series by parsing multpl.com web page"""
url = f"https://www.multpl.com/{page}/table/by-month"
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
tables = soup.findChildren('table')
df = pd.read_html(tables[0].decode())[0]
df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d')
df['date'] = to_monthend(df.iloc[:, 0])
df = df.sort_values('Date').groupby('date').last().iloc[:,-1]
if not types.is_numeric_dtype(df):
df = df.map(lambda x: re.sub('[^\d\.\-]','',x)).astype(float)
return df
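# Example (sketch): multpl('shiller-pe') returns the monthly Shiller P/E series and
# multpl('s-p-500-dividend-yield') the S&P 500 dividend yield -- the same two page
# names that Alfred.adjusted_series maps to further below.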
def fred_md(vintage=0, url=None, echo=config.ECHO):
"""Retrieve and parse current or vintage csv from McCracken FRED-MD site
Parameters
----------
vintage : str or int, default 0 (for current.csv)
file name relative to base url or zipfile archive, or int date YYYYMM
url : str, default is None
base name of url, local file path or zipfile archive
Returns
-------
df : DataFrame
indexed by end-of-month date
Notes
-----
if vintage is int: then derive vintage csv file name from input date YYYYMM
if url is None: then derive subfolder or zip archive name, from vintage
Examples
--------
    md_df, mt = fred_md(vintage='Historical FRED-MD Vintages Final/2013-12.csv',
                        url=_fred_md_url + 'Historical_FRED-MD.zip')  # pre-2015
    md_df, mt = fred_md(vintage='monthly/2015-05.csv',
                        url=_fred_md_url + 'FRED_MD.zip')  # post-2015
"""
url_ = _fred_md_url
if isinstance(vintage, int) and vintage:
csvfile_ = f"{vintage // 100}-{vintage % 100:02d}.csv"
if vintage < 201500:
url_ = url_ + 'Historical_FRED-MD.zip'
csvfile_ = 'Historical FRED-MD Vintages Final/' + csvfile_
else:
csvfile_ = 'monthly/' + csvfile_
vintage = csvfile_
else:
vintage = vintage or 'monthly/current.csv'
_print(vintage, echo=echo)
url = url or url_
if url.endswith('.zip'):
if url.startswith('http'):
url = io.BytesIO(requests.get(url).content)
with zipfile.ZipFile(url).open(vintage) as f:
df = pd.read_csv(f, header=0)
else:
df = pd.read_csv(os.path.join(url, vintage), header=0)
df.columns = df.columns.str.rstrip('x')
meta = dict()
for _, row in df.iloc[:5].iterrows():
if '/' not in row[0]: # this row has metadata, e.g. transform codes
label = re.sub("[^a-z]", '', row[0].lower()) # simplify label str
meta[label] = row[1:].astype(int).to_dict() # as dict of int codes
df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid date
df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d')
df.index = to_monthend(df.index)
return df.iloc[:, 1:], DataFrame(meta)
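# Example usage (sketch): load the latest monthly vintage and apply the published
# transform code to one column (the 'RPI' name is illustrative; columns vary by vintage).
#
#   data, meta = fred_md()                 # monthly/current.csv
#   codes = meta['transform']              # FRED-MD transformation code per series
#   rpi_growth = Alfred.transform(data['RPI'], tcode=codes['RPI'])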
def fred_qd(vintage=0, url=None, echo=False):
"""Retrieve and parse current or vintage csv from McCracken FRED-MD site
Parameters
----------
vintage : str or int, default 0 (i.e. current.csv)
file name relative to base url or zipfile archive, or int date YYYYMM
url : str, default is None
base name of url, local file path or zipfile archive
Returns
-------
df : DataFrame
indexed by end-of-month date
Notes
-----
    if vintage is int: then derive vintage csv file name from input date YYYYMM
if url is None: then derive subfolder name from vintage
"""
url = url or _fred_md_url
if isinstance(vintage, int) and vintage:
vintage = f"quarterly/{vintage // 100}-{vintage % 100:02d}.csv"
else:
vintage = 'quarterly/current.csv'
_print(vintage, echo=echo)
df = pd.read_csv(os.path.join(url, vintage), header=0)
df.columns = df.columns.str.rstrip('x')
meta = dict()
for _, row in df.iloc[:5].iterrows():
if '/' not in row[0]: # this row has metadata, e.g. transform codes
label = re.sub("[^a-z]", '', row[0].lower()) # simplify label str
meta[label] = row[1:].astype(int).to_dict() # as dict of int codes
df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid date
df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d')
df.index = to_monthend(df.index)
return df.iloc[:, 1:], DataFrame(meta)
class Alfred:
"""Base class for Alfred/Fred access, and manipulating retrieved data series
Parameters
----------
cache_ : dict
cached series and observations
tcode_ : dict
transformation codes
Notes
-----
lin = Levels (No transformation) [default]
chg = Change x(t) - x(t-1)
ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr)
pch = Percent Change ((x(t)/x(t-1)) - 1) * 100
pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100
pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1
cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1)))
cca = Continuously Compounded Annual Rate of Change
(ln(x(t)) - ln(x(t-1))) * n_obs_per_yr
log = Natural Log ln(x(t))
"""
tcode_ = {1: {'diff': 0, 'log': 0},
2: {'diff': 1, 'log': 0},
3: {'diff': 2, 'log': 0},
4: {'diff': 0, 'log': 1},
5: {'diff': 1, 'log': 1},
6: {'diff': 2, 'log': 1},
7: {'diff': 1, 'log': 0, 'pct_change': True},
'lin': {'diff': 0, 'log': 0},
'chg': {'diff': 1, 'log': 0},
              'ch1': {'diff': 1, 'log': 0, 'periods': 12},
'pch': {'diff': 0, 'log': 0, 'pct_change': True},
'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12},
'pca': {'diff': 1, 'log': 1, 'annualize': 12},
'cch': {'diff': 1, 'log': 1},
'cca': {'diff': 1, 'log': 1, 'annualize': 12},
              'log': {'diff': 0, 'log': 1}}
header_ = {
k : {'id': k, 'title': v} for k,v in
[['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'],
['CLAIMS', 'Initial Claims'],
['HWIURATIO', 'Ratio of Help Wanted/No. Unemployed'],
['HWI', 'Help Wanted Index for United States'],
['AMDMNO', 'New Orders for Durable Goods'],
['S&P 500', "S&P's Common Stock Price Index: Composite"],
['RETAIL', "Retail and Food Services Sales"],
['OILPRICE', 'Crude Oil, spliced WTI and Cushing'],
['COMPAPFF', "3-Month Commercial Paper Minus FEDFUNDS"],
['CP3M', "3-Month AA Financial Commercial Paper Rates"],
['CONSPI', 'Nonrevolving consumer credit to Personal Income'],
['S&P div yield', "S&P's Composite Common Stock: Dividend Yield"],
['S&P PE ratio', "S&P's Composite Common Stock: Price-Earnings Ratio"],
['S&P: indust', "S&P's Common Stock Price Index: Industrials"]]}
@classmethod
def transform(self, data, tcode=1, freq=None, **kwargs):
"""Classmethod to apply time series transformations
Parameters
----------
data : DataFrame
input data
tcode : int in {1, ..., 7}, default is 1
transformation code
freq : str in {'M', 'Q', 'A'}, default is None
set periodicity of dates
log : int, default is 0
number of times to take log
diff : int, default is 0
number of times to take difference
pct_change : bool
whether to apply pct_change operator
periods : int, default is 1
number of periods to lag for pct_change or diff operator
annualize : int. default is 1
annualization factor
shift : int, default is 0
number of rows to shift output (negative to lag)
"""
t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1}
t.update(self.tcode_[tcode])
t.update(kwargs)
df = data.sort_index()
if t['pct_change']:
#df = df.pct_change(fill_method='pad')
            df = df.pct_change(periods=t['periods'], fill_method=None)
df = ((1 + df) ** t['annualize']) - 1 # by compounding
for _ in range(t['log']):
df = np.log(df)
for _ in range(t['diff']):
#df = df.fillna(method='pad').diff(periods=t['periods'])
df = df.diff(periods=t['periods'])
df = df * t['annualize'] # by adding
return df.shift(t['shift'])
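    # Example (sketch): tcode 5 is the log first difference, so
    #   Alfred.transform(Series([100., 110., 121.]), tcode=5)
    # returns approximately [NaN, 0.0953, 0.0953].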
alfred_api = ("https://api.stlouisfed.org/fred/{api}?series_id={series_id}"
"&realtime_start={start}&realtime_end={end}"
"&api_key={api_key}&file_type=json").format
fred_api = ("https://api.stlouisfed.org/fred/{api}?series_id={series_id}"
"&api_key={api_key}&file_type=json").format
category_api = ("https://api.stlouisfed.org/fred/{api}?"
"category_id={category_id}&api_key={api_key}&"
"file_type=json{args}").format
start = 17760704
end = 99991231
echo_ = config.ECHO
api_key = None
def header(self, series_id, column='title'):
"""Returns a column from last meta record of a series"""
if series_id not in self.header_:
try:
if series_id not in self.cache_: # load via api if not in cache
self.get(series_id)
self.header_[series_id] = self[series_id]['series'].iloc[-1]
except:
return f"*** {series_id} ***"
return self.header_[series_id].get(column, f"*** {series_id} ***")
def keys(self):
"""Return id names of all loaded series data"""
return list(self.cache_.keys())
def values(self, columns=None):
"""Return headers (last metadata row) of all loaded series
Parameters
----------
columns: list of str, default is None
subset of header columns to return
Returns
-------
df : DataFrame
headers of all series loaded
"""
df = DataFrame()
keep = ['id', 'observation_start', 'observation_end', 'frequency_short',
'title', 'popularity', 'seasonal_adjustment_short',
'units_short'] # default list of columns to display
for v in self.cache_.values():
df = df.append(v['series'].iloc[-1], ignore_index=True)
df = df.set_index('id', drop=False)
return df[columns or keep]
def __init__(self, api_key, start=17760704, end=99991231, savefile=None,
echo=config.ECHO):
"""Create object, with api_key, for FRED access and data manipulation"""
self.api_key = api_key
self.start = start
self.end = end
self.savefile = savefile
self.cache_ = dict()
self.header_ = Alfred.header_.copy()
self.echo_ = echo
def _print(self, *args, echo=None):
if echo or self.echo_:
print(*args)
def load(self, savefile=None):
"""Load series data to memory cache from saved file"""
with open(savefile or self.savefile, 'rb') as f:
self.cache_.update(**pickle.load(f))
return len(self.cache_)
def dump(self, savefile=None):
"""Save all memory-cached series data to an output file"""
with open(savefile or self.savefile, 'wb') as f:
pickle.dump(self.cache_, f)
return len(self.cache_)
def clear(self):
self.cache_.clear()
def pop(self, series_id):
return self.cache_.pop(series_id, None)
def get(self, series_id, api_key=None, start=None, end=None):
"""Retrieve metadata and full observations of a series with FRED api
Parameters
----------
series_id : str or list of str
ids of series to retrieve
Returns
-------
n : int
length of observations dataframe
"""
if types.is_list_like(series_id):
return [self.get(s, start=start, end=end) for s in series_id]
series = self.series(series_id, api_key=api_key, start=start, end=end,
echo=self.echo_)
if series is None or series.empty:
return 0
self.cache_[series_id] = {
'observations': self.series_observations(
series_id, api_key=api_key, start=start, end=end,
alfred_mode=True, echo=self.echo_),
'series': series}
return len(self.cache_[series_id]['observations'])
def __call__(self, series_id, start=None, end=None, release=0,
vintage=99991231, label=None, realtime=False, freq=True,
**kwargs):
"""Select from full observations of a series and apply transforms
Parameters
----------
series_id : str or list of str
Labels of series to retrieve
start, end : int, default is None
start and end period dates (inclusive) to keep
label : str, default is None
New label to rename returned series
release : pd.DateOffset or int (default is 0)
maximum release number or date offset (inclusive). If 0: latest
vintage : int, default is None
latest realtime_start date of observations to keep
diff, log, pct_change : int
number of difference, log and pct_change operations to apply
freq : str in {'M', 'A'. 'Q', 'D', 'Y'} or bool (default is True)
resample and replace date index with month ends at selected freqs
Returns
-------
Series or DataFrame
transformed values, name set to label if provided else series_id
"""
if (series_id not in self.cache_ and not self.get(series_id)):
return None
if freq is True:
freq = self.header(series_id, 'frequency_short')
df = self.as_series(
self[series_id]['observations'],
release=release,
vintage=vintage,
start=start or self.start,
end=end or self.end,
freq=freq)
if realtime:
s = self.transform(df['value'], **kwargs).to_frame()
s['realtime_start'] = df['realtime_start'].values
s['realtime_end'] = df['realtime_end'].values
return s.rename(columns={'value': label or series_id})
return self.transform(df['value'], **kwargs).rename(label or series_id)
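    # Example (sketch; the api_key is a placeholder):
    #   alf = Alfred(api_key='...')
    #   unrate_chg = alf('UNRATE', start=20000131, freq='M', diff=1)
    # fetches the series once, caches it, and returns its month-over-month change.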
def __getitem__(self, series_id):
"""Get observations and metadata for {series_id}"""
return self.cache_.get(series_id, None)
@classmethod
def as_series(self, observations, release=0, vintage=99991231,
start=0, end=99991231, freq=None):
"""Classmethod to select a series from alfred observations set
Parameters
----------
observations: DataFrame
from FRED 'series/observations' api call
release : pd.DateOffset or int (default is 0)
maximum release number or date offset (inclusive). If 0: latest
vintage : int, default is None
Latest realtime_start date (inclusive) allowed
Returns
-------
out: Series
value of each period date, optionally indexed by realtime_start
Examples
--------
"""
df = observations.copy()
df['value'] = pd.to_numeric(observations['value'], errors='coerce')
df['date'] = pd.to_datetime(df['date'])
df = df.dropna().reset_index(drop=True)
if freq:
if freq.upper()[0] in ['A']:
df['date'] += YearEnd(0)
if freq.upper()[0] in ['S']:
df['date'] += QuarterEnd(1)
if freq.upper()[0] in ['Q']:
df['date'] += QuarterEnd(0)
if freq.upper()[0] in ['M']:
df['date'] += MonthEnd(0)
if freq.upper()[0] in ['B']:
df['date'] += pd.DateOffset(days=13)
if freq.upper()[0] in ['W']:
df['date'] += pd.DateOffset(days=6)
if np.any(df['realtime_start'] <= _int2date(vintage)):
df = df[df['realtime_start'] <= _int2date(vintage)]
df['value'] = pd.to_numeric(df['value'], errors='coerce')
df = df.sort_values(by=['date', 'realtime_start'])
if isinstance(release, int): # keep latest up to max release
df['release'] = df.groupby('date').cumcount()
df = df[df['release'] + 1 == (release or 99999999)]\
.append(df.drop_duplicates('date', keep='last'))\
.drop_duplicates('date', keep='first')
else: # else latest release up through date offset
df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d')
df = df[df['realtime_start'] <= df['release']]\
.drop_duplicates('date', keep='last')
df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int)
df['realtime_start'] = _date2int(df['realtime_start'])
df['realtime_end'] = _date2int(df['realtime_end'])
df = df.set_index('date').sort_index().drop(columns=['release'])
return df[(df.index <= min(end, vintage)) & (df.index >= start)]
def series(self, series_id, api_key=None, start=None, end=None,
               echo=config.ECHO):
"""API wrapper to retrieve series metadata as dataframe"""
url = self.alfred_api(api="series",
series_id=series_id,
start=_int2date(start or self.start),
end=_int2date(end or self.end),
api_key=api_key or self.api_key)
r = requests_get(url, echo=echo)
if r is None:
url = self.fred_api(api="series",
series_id=series_id,
api_key=api_key or self.api_key)
r = requests_get(url, echo=echo)
if r is None:
return DataFrame()
v = json.loads(r.content)
df = DataFrame(v['seriess'])
df.index.name = str(datetime.now())
return df
def series_observations(self, series_id, api_key=None, start=None, end=None,
                            alfred_mode=False, echo=config.ECHO):
"""API wrapper to retrieve full observations of a series as dataframe"""
url = self.alfred_api(api="series/observations",
series_id=series_id,
start=_int2date(start or self.start),
end=_int2date(end or self.end),
api_key=api_key or self.api_key)
r = requests_get(url, echo=echo)
if r is None:
url = self.fred_api(api="series/observations",
series_id=series_id,
api_key=api_key or self.api_key)
r = requests_get(url, echo=echo)
if r is None:
return DataFrame()
contents = json.loads(r.content)
df = DataFrame(contents['observations'])
if alfred_mode: # convert fred to alfred by backfilling realtime_start
f = (df['realtime_start'].eq(contents['realtime_start']) &
df['realtime_end'].eq(contents['realtime_end'])).values
df.loc[f, 'realtime_start'] = df.loc[f, 'date']
return df
def get_category(self, category_id, api_key=None):
c = self.category(category_id, api="category", api_key=api_key)
if 'categories' not in c:
return None
c = c['categories'][0]
c['children'] = self.category(category_id,
api="category/children",
api_key=api_key).get('categories', [])
c['series'] = []
offset = 0
while True:
s = self.category(category_id,
api="category/series",
api_key=api_key,
offset=offset)
if not s['seriess']:
break
c['series'].extend(s['seriess'])
offset += s['limit']
return c
    def category(self, category_id, api="category", api_key=None, echo=config.ECHO,
**kwargs):
"""API wrapper to retrieve category data as dict"""
args = "&".join([f"{k}={v}" for k,v in kwargs.items()])
url = self.category_api(api=api,
category_id=category_id,
api_key=api_key or self.api_key,
args="&" + args if args else '')
r = requests_get(url, echo=echo)
return dict() if r is None else json.loads(r.content)
@classmethod
def popular(self, page=1):
"""Classmethod to web scrape popular series names, by page number"""
assert(page > 0)
url = f"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}"
data = requests.get(url).content
soup = BeautifulSoup(data, 'lxml')
tags = soup.findAll(name='a', attrs={'class': 'series-title'})
details = [tag.get('href').split('/')[-1] for tag in tags]
return details
#tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'})
#details = [tag.get('value') for tag in tags]
#return details
fred_adjust = {'HWI': 'JTSJOL',
'AMDMNO': 'DGORDER',
'S&P 500': 'SP500',
'RETAIL': 'RSAFS',
'OILPRICE': 'MCOILWTICO',
'COMPAPFF': 'CPFF',
'CP3M': 'CPF3M',
'CLAIMS': 'ICNSA', # weekly
'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'],
'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'],
'CONSPI': [Series.div, 'NONREVSL', 'PI']}
def adjusted_series(self, series_id, start=19590101, freq='M'):
"""Retrieve a raw series to update FRED-MD dataset
Notes
-----
http://www.econ.yale.edu/~shiller/data/ie_data.xls
"""
shiller = {'S&P div yield': 's-p-500-dividend-yield',
'S&P PE ratio': 'shiller-pe'}
if series_id in ['S&P: indust']:
s = Series()
elif series_id in ['CLAIMS']:
df = DataFrame(self('ICNSA'))
df['Date'] = to_monthend(df.index)
s = df.groupby('Date').mean().iloc[:,0]
elif series_id in shiller.keys():
v = shiller[series_id]
s = multpl(v)
elif series_id in self.fred_adjust.keys():
            v = self.fred_adjust[series_id]
s = (self(v, freq=freq) if isinstance(v, str) \
else v[0](self(v[1], freq=freq),
self(v[2], freq=freq)))
else:
s = self(series_id, auto_request=True, freq=freq)
return s[s.index >= start].rename(series_id)
def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO):
"""Fill in missing data with factor model and EM algorithm of
<NAME> (1982), Stock & Watson (1998) and Bai & Ng (2002)
Parameters
----------
X : 2D array
T observations/samples in rows, N variables/features in columns
kmax : int, default is None
Maximum number of factors. If None, set to rank from SVD minus 1
p : int in [0, 1, 2, 3], default is 2 (i.e. 'ICp2' criterion)
If 0, number of factors is fixed as kmax. Else picks one of three
methods in Bai & Ng (2002) to auto-determine number in every iteration
Returns
-------
    x : 2D array
X with nan's replaced by PCA EM
model : dict
Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter'
"""
X = X.copy() # passed by reference
Y = np.isnan(X) # identify missing entries
assert(not np.any(np.all(Y, axis=1))) # no row can be all missing
assert(not np.any(np.all(Y, axis=0))) # no column can be all missing
for col in np.flatnonzero(np.any(Y, axis=0)): # replace with column means
X[Y[:, col], col] = np.nanmean(X[:, col])
M = dict() # latest fitted model parameters
for M['n_iter'] in range(1, n_iter + 1):
old = X.copy()
mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1)
X = (X - mean) / std # standardize
# "M" step: estimate factors
M['u'], M['s'], M['vT'] = np.linalg.svd(X)
# auto-select number of factors if p>0 else fix number of factors
r = BaiNg(X, p, kmax or len(M['s'])-1) if p else kmax or len(M['s'])-1
# "E" step: update missing entries
y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # "E" step
X[Y] = y[Y]
X = (X * std) + mean # undo standardization
M['kmax'] = r
M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2
if echo:
print(f"{M['n_iter']:4d} {M['converge']:8.3g} {r}")
if M['converge'] < tol:
break
return X, M
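# Hedged usage sketch (synthetic data, not part of the original module): fill a
# T x N panel with ~10% missing entries and let the ICp2 criterion pick the
# number of factors on every EM pass. Assumes numpy's Generator API; np is the
# module-level numpy import already used above.
def _demo_pcaEM(T=200, N=50, seed=0):
    """Minimal illustration of pcaEM/BaiNg on synthetic factor data."""
    rng = np.random.default_rng(seed)
    F = rng.standard_normal((T, 3))                # 3 latent factors
    L = rng.standard_normal((3, N))                # loadings
    X = F @ L + 0.1 * rng.standard_normal((T, N))  # observed panel plus noise
    X[rng.random((T, N)) < 0.1] = np.nan           # knock out ~10% of entries
    filled, model = pcaEM(X, p=2, echo=False)      # EM fill with ICp2 selection
    return filled, model['kmax'], model['n_iter']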
def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO):
"""Determine number of factors based on Bai & Ng (2002) criterion
Parameters
----------
x : 2D array
T observations/samples in rows, N variables/features in columns
p : int in [1, 2, 3], default is 2
use PCp1 or PCp2 or PCp3 penalty
kmax : int, default is None
maximum number of factors. If None, set to rank from SVD
standardize : bool, default is False
if True, then standardize data before processing (works better)
Returns
-------
r : int
best number of factors based on ICp{p} criterion, or 0 if not determined
Notes
-----
See Bai and Ng (2002) and McCracken at
https://research.stlouisfed.org/econ/mccracken/fred-databases/
"""
if standardize:
x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1))
T, N = x.shape
#mR2 = np.sum(marginalR2(x), axis=1)
u, s, vT = np.linalg.svd(x, full_matrices=False)
kmax = min(len(s), kmax or len(s))
mR2 = [0] + list(s**2 / (N * T)) # first case is when no factors used
var = (sum(mR2) - np.cumsum(mR2)) # variance of residuals after k components
lnvar = np.log(np.where(var > 0, var, 1e-26))
NT2 = (N * T)/(N + T)
C2 = min(N, T)
penalty = [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) / C2][p - 1]
ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)]
sign = np.sign(ic[1:] - ic[:-1])
r = np.flatnonzero(sign>0)
return min(r) if len(r) else 0 # first min point
def marginalR2(x, kmax=None, standardize=False):
"""Return marginal R2 of each variable from incrementally adding factors
Parameters
----------
x : 2D array
T observations/samples in rows, N variables/features in columns
kmax : int, default is None
maximum number of factors. If None, set to rank from SVD
standardize : bool, default is False
if True, then standardize data before processing (works better)
Returns
-------
mR2 : 2D array
each row corresponds to adding one factor component
values are the incremental R2 for the variable in the column
Notes
-----
See <NAME> Ng (2002) and McCracken at
https://research.stlouisfed.org/econ/mccracken/fred-databases/
pca.components_[i,:] is vT[i, :]
pca.explained_variance_ is s**2/(T-1)
y = pca.transform(x) # y = s * u: T x n "projection"
beta = np.diag(pca.singular_values_) @ pca.components_ # "loadings"
x.T @ x = beta.T @ beta is covariance matrix
"""
if standardize:
x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1)
u, s, vT = np.linalg.svd(x, full_matrices=False)
# increase in R2 from adding kth (orthogonal) factor as a regressor
mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0)
for k in (np.arange(kmax or len(s)) + 1)])
mR2 = mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1)
return mR2
# units - string that indicates a data value transformation.
# lin = Levels (No transformation) [default]
# chg = Change x(t) - x(t-1)
# ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr)
# pch = Percent Change ((x(t)/x(t-1)) - 1) * 100
# pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100
# pca = Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100
# cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) * 100
# cca = Continuously Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr
# log = Natural Log ln(x(t))
# Frequency
# A = Annual
# SA = Semiannual
# Q = Quarterly
# M = Monthly
# BW = Biweekly
# W = Weekly
# D = Daily
# Seasonal Adjustment
# SA = Seasonally Adjusted
# NSA = Not Seasonally Adjusted
# SAAR = Seasonally Adjusted Annual Rate
# SSA = Smoothed Seasonally Adjusted
# NA = Not Applicable
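# Hedged sketch (not part of the original module): how the units codes listed
# above might be applied to a pandas Series of observations; the helper name
# and its default n_obs_per_yr are illustrative only (np is the module-level
# numpy import).
def _apply_units(series, units='lin', n_obs_per_yr=12):
    """Apply one of the documented units transformations to a Series."""
    if units == 'lin':                                   # levels, no transform
        return series
    if units == 'chg':                                   # x(t) - x(t-1)
        return series.diff()
    if units == 'ch1':                                   # change from year ago
        return series.diff(n_obs_per_yr)
    if units == 'pch':                                   # percent change
        return series.pct_change() * 100
    if units == 'pc1':                                   # percent change from year ago
        return series.pct_change(n_obs_per_yr) * 100
    if units == 'log':                                   # natural log
        return np.log(series)
    raise ValueError(f"unsupported units code: {units}")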
| 2.671875 | 3 |
prompt.py | co/TheLastRogue | 8 | 12793792 | from compositecore import Leaf
import menu
import state
__author__ = 'co'
def start_accept_reject_prompt(state_stack, game_state, message):
prompt = menu.AcceptRejectPrompt(state_stack, message)
game_state.start_prompt(state.UIState(prompt))
return prompt.result
class PromptPlayer(Leaf):
def __init__(self, message):
super(PromptPlayer, self).__init__()
self.tags = ["prompt_player"]
self.text = message
def prompt_player(self, **kwargs):
target_entity = kwargs["target_entity"]
return start_accept_reject_prompt(target_entity.game_state.value.menu_prompt_stack,
target_entity.game_state.value, self.text) | 2.171875 | 2 |
insights/parsers/tests/test_ntp_sources.py | mglantz/insights-core | 1 | 12793793 | import pytest
from insights.core.dr import SkipComponent
from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap
from insights.tests import context_wrap
chrony_output = """
210 Number of sources = 3
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
#* GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns
^? a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms
^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms
""".strip()
ntpq_leap_output = """
leap=00
""".strip()
ntpq_leap_output_2 = """
assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg,
leap=00
""".strip()
ntpd_output = """
remote refid st t when poll reach delay offset jitter
==============================================================================
*ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149 0.019
+ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05
""".strip()
ntpd_qn = """
remote refid st t when poll reach delay offset jitter
==============================================================================
172.16.58.3 .INIT. 16 u - 1024 0 0.000 0.000 0.000
"""
ntp_connection_issue = """
/usr/sbin/ntpq: read: Connection refused
""".strip()
def test_get_chrony_sources():
parser_result = ChronycSources(context_wrap(chrony_output))
assert parser_result.data[1].get("source") == "a.b.c"
assert parser_result.data[2].get("state") == "+"
assert parser_result.data[2].get("mode") == "^"
def test_get_ntpq_leap():
parser_result = NtpqLeap(context_wrap(ntpq_leap_output))
assert parser_result.leap == "00"
parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2))
assert parser_result.leap == "00"
with pytest.raises(SkipComponent) as e:
NtpqLeap(context_wrap(ntp_connection_issue))
assert "NTP service is down" in str(e)
def test_get_ntpd_sources():
parser_result = NtpqPn(context_wrap(ntpd_output))
assert parser_result.data[0].get("source") == "ntp103.cm4.tbsi"
assert parser_result.data[1].get("flag") == "+"
assert parser_result.data[1].get("source") == "ntp104.cm4.tbsi"
parser_result2 = NtpqPn(context_wrap(ntpd_qn))
assert parser_result2.data[0].get("source") == "172.16.58.3"
assert parser_result2.data[0].get("flag") == " "
with pytest.raises(SkipComponent) as e:
NtpqPn(context_wrap(ntp_connection_issue))
assert "NTP service is down" in str(e)
| 1.796875 | 2 |
zero_knowledge/verifier.py | Fritingo/hw | 0 | 12793794 | <gh_stars>0
import random as rd
def verifier(C, y, g, p):
e = rd.randint(1,100)
t = yield e
if (g**t == (y**e)*C):
accept = 1
else:
accept = 0
yield accept | 2.28125 | 2 |
setup.py | tarunbatra/password-validator-python | 11 | 12793795 | <filename>setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.rst", "r") as f:
long_description = f.read()
name = "password_validator"
version = "1.0"
setup(name=name,
version=version,
description="Validates password according to flexible and intuitive specifications",
long_description=long_description,
long_description_content_type='text/x-rst',
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/tarunbatra/password-validator-python",
packages=find_packages("src"),
package_dir={"": "src"},
      keywords="password, validation, schema",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"],
command_options={
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
'source_dir': ('setup.py', 'docs/source'),
'build_dir': ('setup.py', 'docs/build')}},
)
| 1.414063 | 1 |
src/ui/exif_view.py | jmacgrillen/perspective | 0 | 12793796 | #! /usr/bin/env python -*- coding: utf-8 -*-
"""
Name:
exif_view.py
    Description:
Display any EXIF data attached to the image.
Version:
1 - Initial release
Author:
J.MacGrillen <<EMAIL>>
Copyright:
Copyright (c) <NAME>. All rights reserved.
"""
import logging
from PyQt5.QtWidgets import QDockWidget, QVBoxLayout
from src.tools.exif_data import EXIFData
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
class EXIFView(QDockWidget):
    """
    EXIF viewer
    """
    v_layout: QVBoxLayout

    def __init__(self, pil_image, parent=None):
        # NOTE: constructor signature is an assumption; pil_image is whatever
        # image object EXIFData expects.
        super().__init__(parent)
        self.logger = logging.getLogger(__name__)
        self.pil_image = pil_image
        self.logger.debug("Trying to extract EXIF data...")
        self.exif_data = EXIFData(self.pil_image)

    def __init_subclass__(cls) -> None:
        return super().__init_subclass__()
if __name__ == "__main__":
pass
| 2.796875 | 3 |
setup.py | czajowaty/curry-bot | 3 | 12793797 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name='CurryBot',
author='<NAME>',
version='0.1dev',
description='Discord bot for Azure Dreams',
packages=find_packages(exclude=('tests', 'docs', 'data')),
license='MIT',
long_description=open('README.md').read(),
python_requires='>=3.6.0',
install_requires=['beautifulsoup4', 'discord.py', 'gspread', 'oauth2client', 'requests', 'srcomapi'],
)
| 1.179688 | 1 |
byol/datasets/cifar100.py | hongxin001/SSL-Backdoor | 18 | 12793798 | from torchvision.datasets import CIFAR100 as C100
import torchvision.transforms as T
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
def base_transform():
return T.Compose(
[T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]
)
class CIFAR100(BaseDataset):
def ds_train(self):
t = MultiSample(
aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples
)
return C100(root="./data", train=True, download=True, transform=t,)
def ds_clf(self):
t = base_transform()
return C100(root="./data", train=True, download=True, transform=t)
def ds_test(self):
t = base_transform()
return C100(root="./data", train=False, download=True, transform=t)
| 2.546875 | 3 |
declarative/overridable_object.py | jrollins/python-declarative | 6 | 12793799 | # -*- coding: utf-8 -*-
"""
"""
import types
from .properties import HasDeclaritiveAttributes
from .utilities.representations import SuperBase
class OverridableObject(HasDeclaritiveAttributes, SuperBase, object):
"""
"""
_overridable_object_save_kwargs = False
_overridable_object_kwargs = None
def _overridable_object_inject(self, **kwargs):
"""
"""
kwargs_unmatched = {}
for key, obj in list(kwargs.items()):
try:
parent_desc = getattr(self.__class__, key)
except AttributeError:
kwargs_unmatched[key] = obj
continue
if isinstance(
parent_desc, (
types.MethodType,
staticmethod,
classmethod
)
):
raise ValueError(
(
"Can only redefine non-method descriptors, {0} a method of class {1}"
).format(key, self.__class__.__name__)
)
try:
use_bd = parent_desc._force_boot_dict
except AttributeError:
use_bd = False
if not use_bd:
setattr(self, key, obj)
else:
self.__boot_dict__[key] = obj
return kwargs_unmatched
def __init__(self, **kwargs):
"""
"""
if self._overridable_object_save_kwargs:
self._overridable_object_kwargs = kwargs
kwargs_unmatched = self._overridable_object_inject(**kwargs)
if kwargs_unmatched:
raise ValueError(
(
"Can only redefine class-specified attributes, class {0} does not have elements {1}"
).format(self.__class__.__name__, list(kwargs_unmatched.keys()))
)
#now run the __mid_init__ before all of the declarative arguments trigger
self.__mid_init__()
super(OverridableObject, self).__init__()
#print("OO: ", self)
return
def __mid_init__(self):
"""
"""
return
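# Hedged usage sketch, in comments only (the declarative base classes above come
# from this package and their metaclass behaviour is not shown here, so the
# names below are purely illustrative):
#
#   class ExampleSettings(OverridableObject):
#       timeout = 30
#       retries = 3
#
#   ExampleSettings(timeout=5).timeout  -> 5           # overrides a class attribute
#   ExampleSettings(bogus=1)            -> ValueError  # not declared on the class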
| 2.40625 | 2 |
bp_gen/bitpattern.py | nicholas-fr/mezzanine | 3 | 12793800 | <reponame>nicholas-fr/mezzanine<gh_stars>1-10
# Generates a bit pattern containing frame number, total frames, frame rate, horizontal and vertical resolution
# Step 1: define bit pattern for 240x135 video
# Step 2: upscale as needed towards target resolution
# Note: intended for 480x270 video with 2x2 bit pattern "bit" size as the lowest resolution
def bp_create(bitpat_file_dir, frame_number, nof_frames, framerate, pix_per_ln, ln_per_frame):
import cv2
import numpy as np
from pathlib import Path
bits_per_ln = 96 # coded_bits_per_line (data payload, excluding calibration bits)
nof_data_ln = 2 # number_of_data_lines
nof_black_ln_bef = 2 # number_of_black_lines_before
nof_black_ln_aft = 1 # number_of_black_lines_after
nof_black_px_bef = 5 # number_of_black_pixels_before
nof_black_px_aft = 3 # number_of_black_pixels_after
y_dim = nof_black_ln_bef + 1 + nof_data_ln + nof_black_ln_aft # total lines in bit pattern,
# includes 1 calibration line
x_dim = nof_black_px_bef + 2 + bits_per_ln + nof_black_px_aft # total pixels per bit pattern line,
# includes 2 calibration bits
# (note: 1px = 1bit at this stage of the process)
vact_ref = 135.0 # make float, as target result must be float
hact_ref = 240.0 #
vinc = vact_ref / ln_per_frame
hinc = hact_ref / pix_per_ln
# Settings consist of: frame_number | nof_frames | framerate | pix_per_ln | ln_per_frame
# Each setting is followed by the number of bits used to encode it
settings = [(frame_number, 24), (nof_frames, 24), (int(framerate*1000), 17), (pix_per_ln, 13), (ln_per_frame, 13)]
ibp = np.zeros((y_dim, x_dim), dtype='uint8')
bitinppat = np.zeros((bits_per_ln*nof_data_ln,), dtype='uint8')
pnt = 0
for x in settings:
setting = x[0]
for i in range(x[1]):
bitinppat[pnt] = setting % 2
pnt = pnt + 1
setting = setting >> 1
ycur = nof_black_ln_bef
xcur = nof_black_px_bef
for i in range((bits_per_ln+2)):
ibp[ycur, xcur + i] = 1-(i % 2) # calibration line
for j in range(nof_data_ln): # calibration pixels per data line
for i in range(2):
ibp[ycur+1+j, xcur + i] = ((i+j) % 2)
for j in range(nof_data_ln):
for i in range(bits_per_ln):
ibp[ycur+1+j, xcur+2+i] = bitinppat[j*bits_per_ln + i]
xstart = 0
xstop = int(x_dim/hinc)
ystart = 0
ystop = int(y_dim/vinc)
i00 = np.zeros((ystop, xstop, 3), dtype='uint8')
print(str(xstop)+"x"+str(ystop), end='', flush=True)
for j in range(ystart, ystop): # ystart, ystop, xstart, xstop
for i in range(xstart, xstop):
ycur = int(j*vinc)
xcur = int(i*hinc)
i00[j, i, :] = ibp[ycur, xcur]*255.0
status = cv2.imwrite(str(Path(str(bitpat_file_dir)+'\\'+str(frame_number).zfill(5)+'.png')), i00)
print("| "+str(settings)+" | "+{True: 'saved', False: 'failed'}[status])
| 3.046875 | 3 |
src/aws_scatter_gather/util/trace.py | cbuschka/aws-scatter-gather | 2 | 12793801 | import time
import aws_scatter_gather.util.logger as logger
def trace(message, *args):
return Trace(message, *args)
def traced(f):
def wrapper(*args, **kwargs):
with trace("{} args={}, kwargs={}", f.__name__, [*args], {**kwargs}):
return f(*args, **kwargs)
return wrapper
class Trace(object):
def __init__(self, message, *args):
self.message = message.format(*args)
def __enter__(self):
self.start = time.time_ns()
logger.info("START \"%s\"...", str(self.message))
return self
def __exit__(self, exc_type, exc_value, tb):
self.end = time.time_ns()
self.duration_milis = int((self.end - self.start) / 1000 / 1000)
if exc_type is None:
logger.info("SUCCESS of \"%s\". Duration %d millis.", str(self.message), self.duration_milis)
else:
logger.info("FAILURE of \"%s\". Duration %d millis.", str(self.message), self.duration_milis,
exc_info=True)
async def __aenter__(self):
self.__enter__()
return self
async def __aexit__(self, exc_type, exc_value, tb):
self.__exit__(exc_type, exc_value, tb)
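# Minimal usage sketch (names are illustrative): both the decorator and the
# context manager log a START line, then SUCCESS or FAILURE with the elapsed
# time in milliseconds.
@traced
def _demo_step(n):
    return n * 2

def _demo_batch():
    with trace("batch {} of {}", 1, 3):
        return _demo_step(21)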
| 2.390625 | 2 |
slgnn/tests/test_dude_datasets.py | thomasly/slgnn | 2 | 12793802 | import unittest
from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude
from slgnn.config import FILTERED_PUBCHEM_FP_LEN
class TestDudeDatasets(unittest.TestCase):
def test_jak1_jak2_jak3(self):
jak = JAK1Dude()
data = jak[0]
self.assertEqual(data.x.size()[1], 6)
self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))
self.assertEqual(data.edge_index.size()[0], 2)
jak = JAK3Dude()
data = jak[0]
self.assertEqual(data.x.size()[1], 6)
self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))
self.assertEqual(data.edge_index.size()[0], 2)
jak = JAK2Dude()
data = jak[0]
self.assertEqual(data.x.size()[1], 6)
self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))
self.assertEqual(data.edge_index.size()[0], 2)
| 2.0625 | 2 |
users/models.py | patxxi/ClonGram | 1 | 12793803 | <gh_stars>1-10
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
biography = models.CharField(max_length=250, blank=True, null=True)
image = models.ImageField(upload_to="users/images", blank=True, null=True)
phone_number = models.CharField(max_length=15, blank=True, null=True)
| 2.296875 | 2 |
rpcpy/openapi.py | william-wambua/rpc.py | 152 | 12793804 | <reponame>william-wambua/rpc.py<gh_stars>100-1000
import functools
import inspect
import typing
import warnings
__all__ = [
"BaseModel",
"create_model",
"validate_arguments",
"set_type_model",
"is_typed_dict_type",
"parse_typed_dict",
"TEMPLATE",
]
Callable = typing.TypeVar("Callable", bound=typing.Callable)
try:
from pydantic import BaseModel, ValidationError, create_model
from pydantic import validate_arguments as pydantic_validate_arguments
# visit this issue
# https://github.com/samuelcolvin/pydantic/issues/1205
def validate_arguments(function: Callable) -> Callable:
function = pydantic_validate_arguments(function)
@functools.wraps(function)
def change_exception(*args, **kwargs):
try:
return function(*args, **kwargs)
except ValidationError as exception:
type_error = TypeError(
"Failed to pass pydantic's type verification, please output"
" `.more_info` of this exception to view detailed information."
)
type_error.more_info = exception
raise type_error
return change_exception # type: ignore
except ImportError:
def create_model(*args, **kwargs): # type: ignore
raise NotImplementedError("Need install `pydantic` from pypi.")
def validate_arguments(function: Callable) -> Callable:
return function
BaseModel = type("BaseModel", (), {}) # type: ignore
def set_type_model(func: Callable) -> Callable:
"""
try generate request body model from type hint and default value
"""
sig = inspect.signature(func)
field_definitions: typing.Dict[str, typing.Any] = {}
for name, parameter in sig.parameters.items():
if parameter.annotation == parameter.empty:
# raise ValueError(
# f"You must specify the type for the parameter {func.__name__}:{name}."
# )
return func # Maybe the type hint should be mandatory? I'm not sure.
if parameter.default == parameter.empty:
field_definitions[name] = (parameter.annotation, ...)
else:
field_definitions[name] = (parameter.annotation, parameter.default)
if field_definitions:
try:
body_model: typing.Type[BaseModel] = create_model(
func.__name__, **field_definitions
)
setattr(func, "__body_model__", body_model)
except NotImplementedError:
message = (
"If you wanna using type hint "
"to create OpenAPI docs or convert type, "
"please install `pydantic` from pypi."
)
warnings.warn(message, ImportWarning)
return func
def is_typed_dict_type(type_) -> bool:
return issubclass(type_, dict) and getattr(type_, "__annotations__", False)
def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]:
"""
parse `TypedDict` to generate `pydantic.BaseModel`
"""
annotations = {}
for name, field in typed_dict.__annotations__.items():
if is_typed_dict_type(field):
annotations[name] = (parse_typed_dict(field), ...)
else:
default_value = getattr(typed_dict, name, ...)
annotations[name] = (field, default_value)
return create_model(typed_dict.__name__, **annotations) # type: ignore
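# Hedged usage sketch (not part of rpc.py; assumes `pydantic` is installed so
# that create_model/validate_arguments above are the real implementations, and
# the class and function names below are illustrative only):
def _example_usage():
    class Point(dict):      # duck-typed "TypedDict": dict subclass with annotations
        x: int
        y: int = 0          # class attribute doubles as the default value

    PointModel = parse_typed_dict(Point)   # pydantic model: x required, y defaults to 0

    @validate_arguments
    def move(p: int, step: int = 1) -> int:
        return p + step

    return PointModel(x=1), move("2", 3)   # pydantic coerces "2" -> 2, so move returns 5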
TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<link type="text/css" rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/swagger-ui.css">
<title>OpenAPI Docs</title>
</head>
<body>
<div id="swagger-ui"></div>
<script src="https://cdn.jsdelivr.net/npm/[email protected]/swagger-ui-bundle.js"></script>
<script>
const ui = SwaggerUIBundle({
url: './get-openapi-docs',
dom_id: '#swagger-ui',
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIBundle.SwaggerUIStandalonePreset
],
layout: "BaseLayout",
deepLinking: true,
showExtensions: true,
showCommonExtensions: true
})
</script>
</body>
</html>
"""
| 2.484375 | 2 |
fyp/guardian/tests.py | Rishi-42/Guardain-1.0 | 0 | 12793805 | <filename>fyp/guardian/tests.py
from urllib import response
from django.test import TestCase
from selenium import webdriver
from account.forms import RegistrationForm
# class FunctionalTestCase(TestCase):
# def setUp(self):
# self.browser = webdriver.Firefox()
# def test(self):
# self.browser.get('http://localhost:8000')
# self.assertIn('Order ID', self.browser.page_source)
# # assert 'Pharmacy' in browser.page_source
# # assert browser.page_source.find('Pharmacy')
# def tearDown(self):
# self.browser.quit()
# class UnitTestCase(TestCase):
# def test_home_homepage_template(self):
# response = self.client.get('/')
# self.assertTemplateUsed(response, 'dashboardpharmacy.html')
# def test_home_status_code(self):
# response = self.client.get('/')
# self.assertEqual(response.status_code, 200)
# def test_register_form(self):
# form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password': '<PASSWORD>', 'term':'True','user_type': 'pharmacist'})
# self.assertTrue(form.is_valid())
| 2.265625 | 2 |
deskwork_detector.py | sgarbirodrigo/ml-sound-classifier | 118 | 12793806 | from realtime_predictor import *
emoji = {'Writing': '\U0001F4DD ', 'Scissors': '\u2701 ',
'Computer_keyboard': '\u2328 '}
def on_predicted_deskwork(ensembled_pred):
result = np.argmax(ensembled_pred)
label = conf.labels[result]
if label in ['Writing', 'Scissors', 'Computer_keyboard']:
p = ensembled_pred[result]
level = int(p*10) + 1
print(emoji[label] * level, label, p)
if __name__ == '__main__':
model = get_model(args.model_pb_graph)
# file mode
if args.input_file != '':
process_file(model, args.input_file, on_predicted_deskwork)
my_exit(model)
# device list display mode
if args.input < 0:
print_pyaudio_devices()
my_exit(model)
# normal: realtime mode
FORMAT = pyaudio.paInt16
CHANNELS = 1
audio = pyaudio.PyAudio()
stream = audio.open(
format=FORMAT,
channels=CHANNELS,
rate=conf.sampling_rate,
input=True,
input_device_index=args.input,
frames_per_buffer=conf.rt_chunk_samples,
start=False,
stream_callback=callback # uncomment for non_blocking
)
# main loop
stream.start_stream()
while stream.is_active():
main_process(model, on_predicted_deskwork)
time.sleep(0.001)
stream.stop_stream()
stream.close()
# finish
audio.terminate()
my_exit(model)
| 2.46875 | 2 |
hp/mechanism_hp.py | web8search/neutron-Neutron- | 6 | 12793807 | <reponame>web8search/neutron-Neutron-<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.plugins.ml2 import driver_api
from oslo_log import log as logging
from neutron.common import constants as n_const
from neutron.plugins.ml2.drivers.hp.common import tools
from neutron.plugins.ml2.drivers.hp.common import config
from neutron.plugins.ml2.drivers.hp.common import db
from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg
from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg
from neutron.plugins.ml2.drivers.hp import sync_helper
LOG = logging.getLogger(__name__)
class HPDriver(driver_api.MechanismDriver):
"""
Ml2 Mechanism driver for HP networking hardware.
    Automates VLAN configuration on HP switches.
"""
def __init__(self, rpc=None):
config.HPML2Config()
self.leaf_topology = config.HPML2Config.leaf_topology
self.spine_topology = config.HPML2Config.spine_topology
self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap
self.sync_lock = None
self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time)
self.username = cfg.CONF.ml2_hp.username
self.password = cfg.CONF.ml2_hp.password
self.url_schema = cfg.CONF.ml2_hp.schema.lower()
self.default_oem = cfg.CONF.ml2_hp.oem.lower()
self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower()
self.sync_helper = None
self.rpc_clients = {}
def initialize(self):
""" MechanismDriver will call it after __init__. """
if self.rpc_backend == 'netconf':
self._create_nc_clients()
elif self.rpc_backend == 'restful':
self._create_rest_clients()
LOG.info(_("leaf %s, spine %s, user %s, pass %s, url schema %s,"
"timeout %d, rpc backend %s"),
self.leaf_topology, self.spine_topology,
self.username, self.password, self.url_schema,
self.sync_timeout, self.rpc_backend)
# Create a thread.for sync configuration to physical device.
self.sync_helper = sync_helper.SyncHelper(self.leaf_topology,
self.spine_topology,
self.rpc_clients,
self.sync_timeout,
self.sync_overlap)
self.sync_lock = self.sync_helper.get_lock()
self.sync_helper.start()
def _create_rest_clients(self):
""" Create restful instances foreach leaf and spine device."""
for leaf in self.leaf_topology:
rest_client = restful_cfg.RestfulCfg(leaf['ip'],
self.username,
self.password)
self.rpc_clients.setdefault(leaf['ip'], rest_client)
for spine in self.spine_topology:
rest_client = restful_cfg.RestfulCfg(spine['ip'],
self.username,
self.password)
self.rpc_clients.setdefault(spine['ip'], rest_client)
def _create_nc_clients(self):
""" Create NETCONF instances for each leaf and spine device."""
for leaf in self.leaf_topology:
if leaf['oem'] == '':
leaf['oem'] = self.default_oem
nc_client = netconf_cfg.NetConfigClient(leaf['oem'],
leaf['ip'],
self.url_schema,
self.username,
self.password)
self.rpc_clients.setdefault(leaf['ip'], nc_client)
for spine in self.spine_topology:
if spine['oem'] == '':
spine['oem'] = self.default_oem
nc_client = netconf_cfg.NetConfigClient(spine['oem'],
spine['ip'],
self.url_schema,
self.username,
self.password)
self.rpc_clients.setdefault(spine['ip'], nc_client)
def _get_client(self, device_ip):
""" Return a RPC client instance specified by device IP. """
client = None
if self.rpc_clients is not None:
client = self.rpc_clients.get(device_ip, None)
if client is None:
LOG.warn(_("No such switch whose IP is %s in "
"the configuration file."),
str(device_ip))
return client
def create_network_precommit(self, context):
""" We don't care it."""
pass
def create_network_postcommit(self, context):
""" Just insert network information into database.
When the port is created, we do real operations
in our physical device.
"""
LOG.info(_("Create network postcommit begin."))
network = context.current
network_id = network['id']
tenant_id = network['tenant_id']
segments = context.network_segments
if not db.is_network_created(tenant_id, network_id):
LOG.info(_("Create network with id %s."), network_id)
# [{'segmentation_id': id, 'physical_network': value,
# 'id': id, 'network_type': gre | vlan | vxlan }]
segment_type = segments[0]['network_type']
segment_id = segments[0]['segmentation_id']
db.create_network(tenant_id, network_id, segment_id, segment_type)
LOG.info(_("Create network postcommit end."))
def update_network_precommit(self, context):
pass
def update_network_postcommit(self, context):
pass
def delete_network_precommit(self, context):
pass
def delete_network_postcommit(self, context):
""" Delete network information from database."""
LOG.info(_("Delete network begin."))
network = context.current
network_id = network['id']
tenant_id = network['tenant_id']
if db.is_network_created(tenant_id, network_id):
LOG.info(_("Delete network %s from database."), network_id)
db.delete_network(tenant_id, network_id)
LOG.info(_("Delete network end."))
def collect_create_config(self, network_id, host_id, vlan_id):
device_config_dict = {}
vlan_list = db.get_vlanlist_byhost(host_id)
if vlan_id not in vlan_list:
vlan_list.append(vlan_id)
host_list = db.get_host_list(network_id)
# Find which leaf device connects to the host_id.
leaf_need_configure = []
leaf_generator = tools.topology_generator(self.leaf_topology)
leaf_ip_ref = {}
for leaf_ip, topology in leaf_generator:
leaf_host = topology['host']
if leaf_host in host_list:
leaf_ip_ref.setdefault(leaf_ip, set([]))
leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host))
if leaf_host == host_id:
leaf_ip_ref[leaf_ip] |= set([vlan_id])
device_config_dict.setdefault(leaf_ip, {})
device_config_dict[leaf_ip].setdefault('port_vlan', [])
device_config_dict[leaf_ip]['vlan_create'] = vlan_list
device_config_dict[leaf_ip]['port_vlan'].\
append((topology['ports'], vlan_list))
leaf_need_configure.append(leaf_ip)
LOG.info(_("Starting collecting spine's configs with leaf %s."),
str(leaf_need_configure))
# Find which spine device connects to the leaf device
# which is configured above.
spine_generator = tools.topology_generator(self.spine_topology)
for spine_ip, topology in spine_generator:
leaf_ip = topology['leaf_ip']
if leaf_ip in leaf_need_configure:
spine_vlan_list = list(leaf_ip_ref[leaf_ip])
if spine_ip not in device_config_dict:
device_config_dict.setdefault(spine_ip, {})
device_config_dict[spine_ip].setdefault('port_vlan', [])
device_config_dict[spine_ip]['vlan_create'] = vlan_list
device_config_dict[spine_ip]['port_vlan'].\
append((topology['spine_ports'], spine_vlan_list))
if leaf_ip in device_config_dict:
device_config_dict[leaf_ip]['port_vlan'].\
append((topology['leaf_ports'], spine_vlan_list))
LOG.info(_("Collect device configuration: %s"), device_config_dict)
return device_config_dict
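    # Illustrative shape of the topology entries consumed above (inferred from
    # this driver's code, not from HP documentation -- treat the key names and
    # sample values as assumptions):
    #   leaf_topology  = [{'ip': '10.0.0.1', 'oem': 'hp',
    #                      'host': 'compute-1', 'ports': 'GE1/0/1'}]
    #   spine_topology = [{'ip': '10.0.1.1', 'oem': 'hp', 'leaf_ip': '10.0.0.1',
    #                      'spine_ports': 'GE1/0/10', 'leaf_ports': 'GE1/0/48'}]
    # collect_create_config() then returns, per device IP, the vlan_create list
    # and the (ports, vlan_list) trunk tuples that the RPC client applies.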
def create_port_precommit(self, context):
pass
def _create_vlan_network(self, network_id, host_id, vlan_id):
"""Do real configuration in our physical devices.
:param network_id. The uuid of network.
:param host_id. The host where the port created.
:param vlan_id. Segmentation ID
"""
device_config_list = self.collect_create_config(network_id,
host_id,
vlan_id)
# Execute configuration in physical devices.
for dev_ip in device_config_list:
vlan_list = device_config_list[dev_ip]['vlan_create']
port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan']
rpc_client = self._get_client(dev_ip)
if rpc_client is not None:
LOG.info(_("Begin create vlan network: device %s, "
"create vlan %s, port trunk list %s"),
dev_ip, vlan_list, port_vlan_tuple_list)
result = rpc_client.create_vlan_bulk(vlan_list)
if result is True:
result = rpc_client.port_trunk_bulk(port_vlan_tuple_list)
if result is True:
LOG.info(_("Create vlan config successful for"
" %s."), dev_ip)
LOG.info(_("End create vlan network"))
else:
LOG.warn(_("Failed to create vlan network"))
def create_port_postcommit(self, context):
"""Create network and port on physical device."""
LOG.info(_("Create port begin."))
# Here we only process virtual machine and DHCP server's port.
port = context.current
device_owner = port['device_owner']
if not device_owner.startswith('compute') and \
device_owner != n_const.DEVICE_OWNER_DHCP:
LOG.info(_("Ignore port owner %s when creating port."),
device_owner)
return
device_id = port['device_id']
host_id = context.host
port_id = port['id']
tenant_id = port['tenant_id']
network_id = port['network_id']
with self.sync_lock:
if db.is_vm_created(device_id, host_id,
port_id, network_id, tenant_id):
LOG.info(_("The port %s of virtual machine %s has "
"already inserted into the network %s."),
str(port_id), str(device_id), str(network_id))
return
LOG.info(_("Insert port %s's information into database."),
str(port_id))
db.create_vm(device_id, host_id, port_id, network_id, tenant_id)
# Get the count of port that created in the same network and host.
port_count = db.get_vm_count(network_id, host_id)
if port_count == 1:
segments = context.network.network_segments
segment_type = segments[0]['network_type']
if segment_type == 'vlan':
vlan_id = int(segments[0]['segmentation_id'])
self._create_vlan_network(network_id, host_id, vlan_id)
else:
LOG.info(_("Not supported network type %s"), segment_type)
else:
LOG.info(_("Physical switch has already configured. "
"There are %d VMs in network %s."),
port_count, network_id)
LOG.info(_("Create port end."))
def update_port_precommit(self, context):
pass
def update_port_postcommit(self, context):
"""Just process the migration of virtual machine."""
port = context.current
device_owner = port['device_owner']
LOG.info(_("Update port begin. Device owner is %s."), device_owner)
if not (device_owner.startswith('compute') or
device_owner == n_const.DEVICE_OWNER_DHCP):
LOG.info(_("Ignore port owner %s when update port."),
device_owner)
return
device_id = port['device_id']
port_id = port['id']
tenant_id = port['tenant_id']
network_id = port['network_id']
old_host_id = db.get_vm_host(device_id, port_id,
network_id, tenant_id)
if old_host_id is None or old_host_id == context.host:
LOG.info(_("update port postcommit: No changed."))
return
# Migration is happen.
LOG.info(_("Migration is begin."))
segments = context.network.network_segments
self.delete_port(old_host_id, port, segments)
self.create_port_postcommit(context)
LOG.info(_("Migration is end."))
def collect_delete_config(self, network_id, host_id, vlan_id):
vlan_list = db.get_vlanlist_byhost(host_id)
if vlan_id in vlan_list:
vlan_list.remove(vlan_id)
leaf_generator = tools.topology_generator(self.leaf_topology)
host_list = db.get_host_list(network_id)
LOG.info(_("Delete vlan host list %s"), host_list)
# It is the counter of host that connects to the same
# device specified by ip address.
leaf_ref_vlans = {}
leaf_ref_host = {}
delete_config = {}
for leaf_ip, topology in leaf_generator:
leaf_ref_vlans.setdefault(leaf_ip, set([]))
leaf_ref_host.setdefault(leaf_ip, False)
host = topology['host']
host_vlan = db.get_vlanlist_byhost(host)
if host in host_list:
leaf_ref_vlans[leaf_ip] |= set(host_vlan)
if host == host_id:
delete_config.setdefault(leaf_ip, {})
delete_config[leaf_ip].setdefault('port_vlan', [])
delete_config[leaf_ip]['port_vlan'].\
append((topology['ports'], vlan_list))
delete_config[leaf_ip]['vlan_del'] = []
if host in host_list:
host_list.remove(host)
else:
if len(set([vlan_id]) & set(host_vlan)) > 0:
leaf_ref_host[leaf_ip] = True
# If there is no host connects to leaf in the same network,
# we will remove the configuration in the spine device.
# And remove the vlan configuration in the leaf device.
for leaf_ip in leaf_ref_vlans:
if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config:
leaf_ref_vlans[leaf_ip] -= set([vlan_id])
delete_config[leaf_ip]['vlan_del'] = [vlan_id]
# Check which spine device connects to above leafs.
# We need remove this spine's configuration.
spine_generator = tools.topology_generator(self.spine_topology)
# This dict is used to count the host number in same network
# with leafs connected to spine.
spine_delete_score = {}
for spine_ip, topology in spine_generator:
leaf_ip = topology['leaf_ip']
if leaf_ip in leaf_ref_vlans:
spine_delete_score.setdefault(spine_ip, 0)
if leaf_ref_host[leaf_ip] is True:
spine_delete_score[spine_ip] += 1
if leaf_ip in delete_config:
vlan_list = list(leaf_ref_vlans[leaf_ip])
delete_config[spine_ip] = {}
delete_config[spine_ip].setdefault('port_vlan', [])
delete_config[spine_ip]['port_vlan'].\
append((topology['spine_ports'], vlan_list))
delete_config[spine_ip]['vlan_del'] = []
if len(delete_config[leaf_ip]['vlan_del']) != 0:
delete_config[leaf_ip]['port_vlan'].\
append((topology['leaf_ports'], vlan_list))
# Check does spine need to delete vlan.
for spine_ip in spine_delete_score:
if spine_delete_score[spine_ip] == 0 \
and spine_ip in delete_config:
delete_config[spine_ip]['vlan_del'] = [vlan_id]
LOG.info(_("Delete configuration : %s"), delete_config)
return delete_config
def delete_vlan_config(self, network_id, host_id, vlan_id):
"""Delete vlan configuration from physical devices."""
delete_config = self.collect_delete_config(network_id,
host_id,
vlan_id)
for dev_ip in delete_config:
rpc_client = self._get_client(dev_ip)
port_vlan_tuple_list = delete_config[dev_ip]['port_vlan']
vlan_del_list = delete_config[dev_ip]['vlan_del']
if rpc_client is not None:
if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True:
if rpc_client.delete_vlan_bulk(vlan_del_list) is True:
LOG.info(_("Delete vlan config %s success for %s."),
port_vlan_tuple_list, dev_ip)
else:
LOG.warn(_("Failed to delete vlan %s for %s."),
vlan_del_list, dev_ip)
else:
LOG.warn(_("Failed to port trunk %s for %s"),
port_vlan_tuple_list, dev_ip)
def delete_port_precommit(self, context):
pass
def delete_port(self, host_id, ports, segments):
with self.sync_lock:
network_id = ports['network_id']
device_id = ports['device_id']
port_id = ports['id']
tenant_id = ports['tenant_id']
if not db.is_vm_created(device_id, host_id,
port_id, network_id, tenant_id):
LOG.info(_("No such vm in database, ignore it"))
return
# Delete configuration in device
# only if it is the last vm of host in this network
vm_count = db.get_vm_count(network_id, host_id)
if vm_count == 1:
LOG.info(_("Delete physical port configuration: "
"All VMs of host %s in network %s is deleted. "),
host_id, network_id)
segment_type = segments[0]['network_type']
segment_id = segments[0]['segmentation_id']
if segment_type == 'vlan':
vlan_id = int(segment_id)
self.delete_vlan_config(network_id, host_id, vlan_id)
else:
LOG.info(_("Not supported network type %s."),
str(segment_type))
else:
LOG.info(_("The network %s still have %d vms, "
"ignore this operation."),
network_id, vm_count)
db.delete_vm(device_id, host_id, port_id, network_id, tenant_id)
def delete_port_postcommit(self, context):
"""Delete real configuration from our physical devices."""
LOG.info(_("Delete port post-commit begin."))
# Only process virtual machine device and DHCP port
port = context.current
device_owner = port['device_owner']
if not device_owner.startswith('compute') and\
device_owner != n_const.DEVICE_OWNER_DHCP:
LOG.info(_("Ignore port owner %s when deleting port."),
device_owner)
return
segments = context.network.network_segments
self.delete_port(context.host, port, segments)
LOG.info(_("Delete port post-commit end."))
| 1.492188 | 1 |
test/analytic_full_slab.py | pozulp/narrows | 5 | 12793808 | <reponame>pozulp/narrows<gh_stars>1-10
#!/usr/bin/env python3
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.special as sp
import sys
MY_DIR = os.path.dirname(__file__)
sys.path.append(f'{MY_DIR}/..')
from narrows import parse_input, create_mesh # noqa: E402
sys.path.append(f'{MY_DIR}')
from utility import show_or_save # noqa: E402
plt.style.use(f'{MY_DIR}/style.mplstyle')
def solution(z, src_mag, sigma_a, zstop):
EPS = 1e-25
return ((src_mag / sigma_a) -
(src_mag / (2 * sigma_a)) * (np.exp(-sigma_a * z) +
np.exp(sigma_a * (z - zstop))) +
(src_mag / 2) * (z * sp.exp1(sigma_a * z + EPS) +
(zstop - z) * sp.exp1(sigma_a * (zstop - z)
+ EPS)))
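# Closed form implemented above (a direct transcription, with S = src_mag,
# sig = sigma_a, L = zstop and E1 the exponential integral):
#   phi(z) = S/sig
#            - S/(2*sig) * (exp(-sig*z) + exp(sig*(z - L)))
#            + (S/2) * (z*E1(sig*z) + (L - z)*E1(sig*(L - z)))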
def plot_analytic_flux(show, problem):
z, src_mag, sigma_a, zstop = get_parameters_for(problem)
flux = solution(z, src_mag, sigma_a, zstop)
plt.plot(z, flux, label='analytic')
plt.legend()
plt.xlabel('z coordinate')
plt.ylabel(r'$\phi(z)$')
plt.title(f'{problem} flux')
show_or_save(show, problem, 'analytic_flux')
def get_parameters_for(problem):
deck = parse_input([f'{problem}.yaml'])
mesh = create_mesh(deck)
src_mag = deck.src['src1'].magnitude
sigma_a = deck.mat['mat1'].sigma_a
zstop = deck.reg['reg1'].end
return mesh.edge, src_mag, sigma_a, zstop
def parse_args(argv):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
description='Plot analytic flux for full_slab.yaml',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-s', '--show',
action='store_true',
help='show instead of save plot')
args = parser.parse_args(argv)
return args
def main(argv=None):
args = parse_args(argv)
plot_analytic_flux(args.show, f'{MY_DIR}/full_slab')
if __name__ == '__main__':
main()
| 2.234375 | 2 |
examples/wave_share_gps_locator_runner.py | cezaryzelisko/gps-tracker | 1 | 12793809 | <reponame>cezaryzelisko/gps-tracker<filename>examples/wave_share_gps_locator_runner.py
from gps_tracker.runner import main
from gps_tracker.serial_connection import SerialConnection
from gps_tracker.wave_share_config import WaveShareGPS
from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator
from gps_tracker.auth import Auth
from gps_tracker.device import Device
if __name__ == '__main__':
auth = Auth()
device = Device('WaveShare Device', auth)
with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn:
with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps:
enabled = ws_gps.wait_for_gps()
if enabled:
main(ws_gps, device)
else:
print('ERROR: unable to initialize GPS.')
| 2.46875 | 2 |
tests/utils.py | mypaceshun/pyfortune | 0 | 12793810 | <gh_stars>0
import os
from pyfortune.session import Session
def get_login_session():
secrets_path = os.environ.get('SECRETS', 'secrets')
with open(secrets_path, 'r') as f:
lines = f.readlines()
username = lines[0].strip()
password = lines[1].strip()
s = Session()
s.login(username, password)
return s
def get_session():
s = Session()
return s
| 2.296875 | 2 |
jewtrick/utils/settings.py | ZimnyCat/jewtrick-client | 8 | 12793811 | <filename>jewtrick/utils/settings.py
# -*- coding: utf-8 -*-
import utils.settingsHelper as helper
import utils.system as sys
booleanArray = {
"autoclick": "false",
"ping": "true",
"time": "false",
"requests-counter": "true"
}
numArray = {
"delay": 1,
"ping-delay": 5
}
def getBoolean(settingName):
    # validate the setting name
    helper.check(settingName)
    try:
        file = open("settings.txt", "r+")
        # look for the setting
        for word in file:
            nigga = settingName + " = "
            if word.startswith(nigga):
                file.close()
                if word == nigga + "true\n":
                    return True
                elif word == nigga + "false\n":
                    return False
                # the value is neither true nor false
                sys.crash("Setting \"" + settingName + "\" is not true or false",
                          "Check your settings.txt file")
        # the setting was not found in the file
        print("Could not find value \"" + settingName + "\"! Writing it...")
        file.write(settingName + " = " + booleanArray[settingName] + "\n")
        file.close()
        return getBoolean(settingName)
    except FileNotFoundError:
        # settings.txt does not exist
        helper.createSettingsFile()
        return getBoolean(settingName)
def getNum(settingName):
    # validate the setting name
    helper.check(settingName)
    try:
        file = open("settings.txt", "r+")
        # look for the setting
        for word in file:
            nigga = settingName + " = "
            if word.startswith(nigga):
                file.close()
                try:
                    return int(word.replace(nigga, ""))
                except ValueError:
                    # the value is not a number
                    sys.crash("Setting \"" + settingName + "\" is not a number")
        # the setting was not found in the file
        print("Could not find value \"" + settingName + "\"! Writing it...")
        file.write(settingName + " = " + str(numArray[settingName]) + "\n")
        file.close()
        return getNum(settingName)
    except FileNotFoundError:
        # settings.txt does not exist
        helper.createSettingsFile()
        return getNum(settingName)
| 2.578125 | 3 |
setup.py | gisce/esios | 7 | 12793812 | <reponame>gisce/esios
from setuptools import setup, find_packages
PACKAGES_DATA = {'esios': ['data/*.xsd']}
setup(
name='esios',
version='0.12.2',
packages=find_packages(),
url='https://github.com/gisce/esios',
license='MIT',
install_requires=['libsaas'],
author='GISCE-TI, S.L.',
author_email='<EMAIL>',
description='Interact with e.sios API',
package_data=PACKAGES_DATA,
)
| 1.3125 | 1 |
setup.py | swasun/RFOMT | 2 | 12793813 | <filename>setup.py<gh_stars>1-10
from setuptools import find_packages, setup
setup(
name='bolsonaro',
packages=find_packages(where="code", exclude=['doc', 'dev']),
package_dir={'': "code"},
version='0.1.0',
description='Bolsonaro project of QARMA non-permanents: deforesting random forest using OMP.',
author='QARMA team',
license='MIT',
)
| 1.375 | 1 |
mindhome_alpha/erpnext/patches/v13_0/healthcare_lab_module_rename_doctypes.py | Mindhome/field_service | 1 | 12793814 | <reponame>Mindhome/field_service<gh_stars>1-10
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'):
# rename child doctypes
doctypes = {
'Lab Test Groups': 'Lab Test Group Template',
'Normal Test Items': 'Normal Test Result',
'Sensitivity Test Items': 'Sensitivity Test Result',
'Special Test Items': 'Descriptive Test Result',
'Special Test Template': 'Descriptive Test Template'
}
frappe.reload_doc('healthcare', 'doctype', 'lab_test')
frappe.reload_doc('healthcare', 'doctype', 'lab_test_template')
for old_dt, new_dt in doctypes.items():
if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt):
frappe.rename_doc('DocType', old_dt, new_dt, force=True)
frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt))
frappe.delete_doc_if_exists('DocType', old_dt)
parent_fields = {
'Lab Test Group Template': 'lab_test_groups',
'Descriptive Test Template': 'descriptive_test_templates',
'Normal Test Result': 'normal_test_items',
'Sensitivity Test Result': 'sensitivity_test_items',
'Descriptive Test Result': 'descriptive_test_items'
}
for doctype, parentfield in parent_fields.items():
frappe.db.sql("""
UPDATE `tab{0}`
SET parentfield = %(parentfield)s
""".format(doctype), {'parentfield': parentfield})
# rename field
frappe.reload_doc('healthcare', 'doctype', 'lab_test')
if frappe.db.has_column('Lab Test', 'special_toggle'):
rename_field('Lab Test', 'special_toggle', 'descriptive_toggle')
if frappe.db.exists('DocType', 'Lab Test Group Template'):
# fix select field option
frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template')
frappe.db.sql("""
UPDATE `tabLab Test Group Template`
SET template_or_new_line = 'Add New Line'
WHERE template_or_new_line = 'Add new line'
""")
| 1.921875 | 2 |
test/unit/test_firewall_api_v1.py | IBM/networking-services-python-sdk | 1 | 12793815 | <filename>test/unit/test_firewall_api_v1.py
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import re
import responses
from ibm_cloud_networking_services.firewall_api_v1 import *
crn = 'testString'
zone_identifier = 'testString'
service = FirewallApiV1(
authenticator=NoAuthAuthenticator(),
crn=crn,
zone_identifier=zone_identifier
)
base_url = 'https://api.cis.cloud.ibm.com'
service.set_service_url(base_url)
##############################################################################
# Start of Service: SecurityLevelSetting
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for get_security_level_setting
#-----------------------------------------------------------------------------
class TestGetSecurityLevelSetting():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# get_security_level_setting()
#--------------------------------------------------------
@responses.activate
def test_get_security_level_setting_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')
mock_response = '{"result": {"id": "security_level", "value": "medium", "editable": true, "modified_on": "2014-01-01T05:20:00.12345Z"}, "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.get_security_level_setting()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_get_security_level_setting_value_error()
#--------------------------------------------------------
@responses.activate
def test_get_security_level_setting_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')
mock_response = '{"result": {"id": "security_level", "value": "medium", "editable": true, "modified_on": "2014-01-01T05:20:00.12345Z"}, "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Pass in all but one required param and check for a ValueError
req_param_dict = {
}
for param in req_param_dict.keys():
req_copy = {key:val if key is not param else None for (key,val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.get_security_level_setting(**req_copy)
#-----------------------------------------------------------------------------
# Test Class for set_security_level_setting
#-----------------------------------------------------------------------------
class TestSetSecurityLevelSetting():
# Preprocess the request URL to ensure the mock response will be found.
def preprocess_url(self, request_url: str):
if re.fullmatch('.*/+', request_url) is None:
return request_url
else:
return re.compile(request_url.rstrip('/') + '/+')
#--------------------------------------------------------
# set_security_level_setting()
#--------------------------------------------------------
@responses.activate
def test_set_security_level_setting_all_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')
mock_response = '{"result": {"id": "security_level", "value": "medium", "editable": true, "modified_on": "2014-01-01T05:20:00.12345Z"}, "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
value = 'under_attack'
# Invoke method
response = service.set_security_level_setting(
value=value,
headers={}
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['value'] == 'under_attack'
#--------------------------------------------------------
# test_set_security_level_setting_required_params()
#--------------------------------------------------------
@responses.activate
def test_set_security_level_setting_required_params(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')
mock_response = '{"result": {"id": "security_level", "value": "medium", "editable": true, "modified_on": "2014-01-01T05:20:00.12345Z"}, "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.set_security_level_setting()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_set_security_level_setting_value_error()
#--------------------------------------------------------
@responses.activate
def test_set_security_level_setting_value_error(self):
# Set up mock
url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')
mock_response = '{"result": {"id": "security_level", "value": "medium", "editable": true, "modified_on": "2014-01-01T05:20:00.12345Z"}, "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Pass in all but one required param and check for a ValueError
req_param_dict = {
}
for param in req_param_dict.keys():
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
with pytest.raises(ValueError):
service.set_security_level_setting(**req_copy)
# endregion
##############################################################################
# End of Service: SecurityLevelSetting
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for SecurityLevelSettingRespMessagesItem
#-----------------------------------------------------------------------------
class TestSecurityLevelSettingRespMessagesItem():
#--------------------------------------------------------
# Test serialization/deserialization for SecurityLevelSettingRespMessagesItem
#--------------------------------------------------------
def test_security_level_setting_resp_messages_item_serialization(self):
# Construct a json representation of a SecurityLevelSettingRespMessagesItem model
security_level_setting_resp_messages_item_model_json = {}
security_level_setting_resp_messages_item_model_json['status'] = 'OK'
# Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation
security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json)
assert security_level_setting_resp_messages_item_model != False
# Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation
security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__
security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict)
# Verify the model instances are equivalent
assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2
# Convert model instance back to dict and verify no loss of data
security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict()
assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json
#-----------------------------------------------------------------------------
# Test Class for SecurityLevelSettingRespResult
#-----------------------------------------------------------------------------
class TestSecurityLevelSettingRespResult():
#--------------------------------------------------------
# Test serialization/deserialization for SecurityLevelSettingRespResult
#--------------------------------------------------------
def test_security_level_setting_resp_result_serialization(self):
# Construct a json representation of a SecurityLevelSettingRespResult model
security_level_setting_resp_result_model_json = {}
security_level_setting_resp_result_model_json['id'] = 'security_level'
security_level_setting_resp_result_model_json['value'] = 'medium'
security_level_setting_resp_result_model_json['editable'] = True
security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z'
# Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation
security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json)
assert security_level_setting_resp_result_model != False
# Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation
security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__
security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict)
# Verify the model instances are equivalent
assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2
# Convert model instance back to dict and verify no loss of data
security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict()
assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json
#-----------------------------------------------------------------------------
# Test Class for ResultInfo
#-----------------------------------------------------------------------------
class TestResultInfo():
#--------------------------------------------------------
# Test serialization/deserialization for ResultInfo
#--------------------------------------------------------
def test_result_info_serialization(self):
# Construct a json representation of a ResultInfo model
result_info_model_json = {}
result_info_model_json['page'] = 1
result_info_model_json['per_page'] = 2
result_info_model_json['count'] = 1
result_info_model_json['total_count'] = 200
# Construct a model instance of ResultInfo by calling from_dict on the json representation
result_info_model = ResultInfo.from_dict(result_info_model_json)
assert result_info_model != False
# Construct a model instance of ResultInfo by calling from_dict on the json representation
result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__
result_info_model2 = ResultInfo(**result_info_model_dict)
# Verify the model instances are equivalent
assert result_info_model == result_info_model2
# Convert model instance back to dict and verify no loss of data
result_info_model_json2 = result_info_model.to_dict()
assert result_info_model_json2 == result_info_model_json
#-----------------------------------------------------------------------------
# Test Class for SecurityLevelSettingResp
#-----------------------------------------------------------------------------
class TestSecurityLevelSettingResp():
#--------------------------------------------------------
# Test serialization/deserialization for SecurityLevelSettingResp
#--------------------------------------------------------
def test_security_level_setting_resp_serialization(self):
# Construct dict forms of any model objects needed in order to build this model.
result_info_model = {} # ResultInfo
result_info_model['page'] = 1
result_info_model['per_page'] = 2
result_info_model['count'] = 1
result_info_model['total_count'] = 200
security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem
security_level_setting_resp_messages_item_model['status'] = 'OK'
security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult
security_level_setting_resp_result_model['id'] = 'security_level'
security_level_setting_resp_result_model['value'] = 'medium'
security_level_setting_resp_result_model['editable'] = True
security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z'
# Construct a json representation of a SecurityLevelSettingResp model
security_level_setting_resp_model_json = {}
security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model
security_level_setting_resp_model_json['result_info'] = result_info_model
security_level_setting_resp_model_json['success'] = True
security_level_setting_resp_model_json['errors'] = [['testString']]
security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model]
# Construct a model instance of SecurityLevelSettingResp by calling from_dict on the json representation
security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json)
assert security_level_setting_resp_model != False
# Construct a model instance of SecurityLevelSettingResp by calling from_dict on the json representation
security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__
security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict)
# Verify the model instances are equivalent
assert security_level_setting_resp_model == security_level_setting_resp_model2
# Convert model instance back to dict and verify no loss of data
security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict()
assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json
# endregion
##############################################################################
# End of Model Tests
##############################################################################
| 1.796875 | 2 |
tests/test_bobocep/test_setup/test_bobo_complex_event.py | r3w0p/bobocep | 5 | 12793816 | import unittest
from bobocep.rules.actions.no_action import NoAction
from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern
from bobocep.setup.bobo_complex_event import \
BoboComplexEvent
class TestBoboComplexEvent(unittest.TestCase):
def test_constructor(self):
name = "evdef_name"
pattern = BoboPattern()
action = NoAction()
evdef = BoboComplexEvent(name=name,
pattern=pattern,
action=action)
self.assertEqual(name, evdef.name)
self.assertEqual(pattern, evdef.pattern)
self.assertEqual(action, evdef.action)
def test_constructor_actions_is_none(self):
name = "evdef_name"
pattern = BoboPattern()
action = None
evdef = BoboComplexEvent(name=name,
pattern=pattern,
action=action)
self.assertEqual(name, evdef.name)
self.assertEqual(pattern, evdef.pattern)
self.assertIsNone(evdef.action)
| 2.71875 | 3 |
addressbook/models.py | Saviq/django-addressbook | 0 | 12793817 | <filename>addressbook/models.py
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
PROPERTY_LABELS = (
('home', _('home')),
('work', _('work')),
('other', _('other')),
)
IM_SERVICES = (
('google', _('Google Talk')),
('aim', _('AIM')),
('yahoo', _('Yahoo')),
('msn', _('MSN')),
('icq', _('ICQ')),
('jabber', _('Jabber')),
)
class PrimaryPropertyManager(models.Manager):
def primary(self):
try:
return self.get_queryset().get(is_primary=True)
except ObjectDoesNotExist:
return None
# Base classes
# Every contact property must inherit from either ContactProperty or
# PrimaryProperty
class ContactProperty(models.Model):
class Meta:
abstract = True
def save(self, *args, **kwargs):
self.contact.save()
models.Model.save(self, *args, **kwargs)
class PrimaryProperty(ContactProperty):
is_primary = models.BooleanField(_("primary"), default=False)
objects = PrimaryPropertyManager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
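        # Maintain the "exactly one primary per contact" invariant: saving an
        # instance flagged as primary demotes the current primary, and the first
        # instance saved for a contact becomes the primary automatically.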
update_primary = kwargs.pop('update_primary', True)
if update_primary:
try:
existing = self.__class__.objects.exclude(pk=self.id) \
.filter(contact=self.contact,
is_primary=True).get()
except ObjectDoesNotExist:
existing = None
if self.is_primary:
if existing is not None:
existing.is_primary = False
existing.save(update_primary=False)
elif existing is None:
self.is_primary = True
super(PrimaryProperty, self).save(*args, **kwargs)
# Mixin classes
# Abstracts out common fields and methods; models can implement this for
# themselves if different.
class LabeledProperty(models.Model):
label = models.CharField(_("label"), max_length=200, choices=PROPERTY_LABELS)
class Meta:
abstract = True
def __unicode__(self):
return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self))
class NamedProperty(models.Model):
name = models.CharField(_("name"), max_length=200)
class Meta:
abstract = True
def __unicode__(self):
return u'%s: %s' % (self.name, self.value)
class OptionalNamedProperty(models.Model):
name = models.CharField(_("name"), max_length=200, blank=True)
class Meta:
abstract = True
def __unicode__(self):
return u'%s%s' % (self.name and '%s: ' % self.name or "", self.value)
# Contact properties
class PrimaryPropertyDescriptor(object):
def __init__(self, collection_name):
self.collection_name = collection_name
def get_collection(self, instance):
return getattr(instance, self.collection_name)
def __get__(self, instance, owner):
if instance is None:
return self
return self.get_collection(instance).primary()
def __set__(self, instance, value):
value.is_primary = True
self.get_collection(instance).add(value)
def __delete__(self, instance):
self.get_collection(instance).primary().delete()
for obj in self.get_collection(instance).all():
obj.is_primary = True
return
class CustomField(ContactProperty, NamedProperty):
contact = models.ForeignKey('Contact', related_name="custom_fields")
value = models.TextField(_("value"))
def __unicode__(self):
return u'%s: %s' % (self.name, self.value)
class Date(ContactProperty, NamedProperty):
contact = models.ForeignKey('Contact', related_name="dates")
value = models.DateField(_("date"))
class Meta:
verbose_name = _("date")
verbose_name_plural = _("dates")
class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty):
contact = models.ForeignKey('Contact', related_name="email_addresses")
value = models.EmailField(_("address"))
class Meta:
verbose_name = _("email address")
verbose_name_plural = _("email addresses")
class IMAccount(PrimaryProperty):
contact = models.ForeignKey('Contact', related_name="im_accounts")
service = models.CharField(_("service"), max_length=30, choices=IM_SERVICES)
account = models.CharField(_("account"), help_text=_("user name or email address"), max_length=200)
class Meta:
verbose_name = _("IM account")
verbose_name_plural = _("IM accounts")
@property
def value(self):
return self.account
class Link(ContactProperty, NamedProperty):
contact = models.ForeignKey('Contact', related_name="links")
value = models.URLField(_('URL'), max_length=200, default='http://')
class Meta:
verbose_name = _("link")
verbose_name_plural = _("links")
def save(self, *args, **kwargs):
if self.value == 'http://':
return
super(Link, self).save(*args, **kwargs)
class Organization(PrimaryProperty):
contact = models.ForeignKey('Contact', related_name="organizations")
name = models.CharField(_("name"), max_length=200)
title = models.CharField(_("title"), max_length=200, blank=True)
class Meta:
verbose_name = _("organization")
verbose_name_plural = _("organizations")
def __unicode__(self):
return self.name
class PhoneNumber(PrimaryProperty, OptionalNamedProperty):
PHONE_NUM_LABELS = (
('landline', _('landline')),
('mobile', _('mobile')),
('fax', _('fax')),
)
contact = models.ForeignKey('Contact', related_name="phone_numbers")
label = models.CharField(_("label"), max_length=200, choices=PHONE_NUM_LABELS)
value = models.CharField(_('number'), max_length=100)
class Meta:
verbose_name = _("phone number")
verbose_name_plural = _("phone numbers")
def __unicode__(self):
return u'%s%s [%s]' % (self.name and "%s: " % self.name or "",
self.value, PhoneNumber.get_label_display(self))
class PostalAddress(PrimaryProperty, LabeledProperty):
contact = models.ForeignKey('Contact', related_name="postal_addresses")
address1 = models.CharField(_("address line 1"), max_length=127, blank=False)
address2 = models.CharField(_("address line 2"), max_length=127, blank=True)
city = models.CharField(_("city"), max_length=127, blank=True)
state = models.CharField(_("state/province/region"), max_length=127, blank=True)
country = models.CharField(_("country"), max_length=127)
postcode = models.CharField(_("postal code/zip code"), max_length=31, blank=True)
class Meta:
verbose_name = _("postal address")
verbose_name_plural = _("postal addresses")
@property
def value(self):
data = [self.address1, self.address2, self.city,
self.state, self.country, self.postcode]
return ", ".join([i for i in data if i])
class Contact(models.Model):
""" A person or company.
"""
name = models.CharField(max_length=200)
is_company = models.BooleanField(_("company"), default=False)
photo = models.ImageField(_("photo"), upload_to='var/addressbook/photos', blank=True)
notes = models.TextField(_("notes"), blank=True)
date_created = models.DateTimeField(auto_now_add=True, editable=False)
date_updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
verbose_name = _("contact")
verbose_name_plural = _("contacts")
ordering = ('name',)
def __unicode__(self):
return self.name
    # primary contact properties
email_address = PrimaryPropertyDescriptor('email_addresses')
im_account = PrimaryPropertyDescriptor('im_accounts')
company = PrimaryPropertyDescriptor('organizations')
phone_number = PrimaryPropertyDescriptor('phone_numbers')
postal_address = PrimaryPropertyDescriptor('postal_addresses')
@property
def address(self):
return self.postal_address
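# Illustrative usage of the primary-property descriptors above (names and
# values are hypothetical, not part of the original module):
#   contact = Contact.objects.create(name='Jane Doe')
#   EmailAddress.objects.create(contact=contact, label='work',
#                               value='jane@example.com', is_primary=True)
#   contact.email_address.value   # -> 'jane@example.com'
#   del contact.email_address     # removes the primary address; another one,
#                                 # if present, is flagged as primary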
class Group(models.Model):
name = models.CharField(max_length=200, unique=True)
description = models.TextField(_("description"), blank=True)
members = models.ManyToManyField(Contact, verbose_name=_("members"), blank=True)
class Meta:
verbose_name = _("group")
verbose_name_plural = _("groups")
@property
def member_list(self):
return ', '.join([str(c) for c in self.members.all()[:5]])
def __unicode__(self):
return self.name
| 2.1875 | 2 |
RunTCAV.py | zxy317/CIFAR-ZOO | 0 | 12793818 | import cav as cav
import model as model
import tcav as tcav
import utils as utils
import utils_plot as utils_plot # utils_plot requires matplotlib
import os
import torch
import activation_generator as act_gen
import tensorflow as tf
working_dir = './tcav_class_test'
activation_dir = working_dir + '/activations/'
cav_dir = working_dir + '/cavs/'
source_dir = "./data/"
bottlenecks = ['conv2']
utils.make_dir_if_not_exists(activation_dir)
utils.make_dir_if_not_exists(working_dir)
utils.make_dir_if_not_exists(cav_dir)
# this is a regularizer penalty parameter for linear classifier to get CAVs.
alphas = [0.1]
target = 'cat'
concepts = ["dotted", "striped", "zigzagged"]
random_counterpart = 'random500_1'
LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt'
mymodel = model.CNNWrapper(LABEL_PATH)
act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100)
tf.compat.v1.logging.set_verbosity(0)
num_random_exp = 30 # folders (random500_0, random500_1)
mytcav = tcav.TCAV(target,
concepts,
bottlenecks,
act_generator,
alphas,
cav_dir=cav_dir,
num_random_exp=num_random_exp)
results = mytcav.run()
utils_plot.plot_results(results, num_random_exp=num_random_exp) | 2.203125 | 2 |
job.py | DiOS-Analysis/Worker | 3 | 12793819 | <gh_stars>1-10
import os
import logging
import base64
import time
from enum import Enum
from store import AppStore, AppStoreException
from pilot import Pilot
logger = logging.getLogger('worker.'+__name__)
class JobExecutionError(Exception):
pass
class Job(object):
STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed'])
TYPE = Enum([u'run_app', u'install_app', u'exec_cmd'])
def __init__(self, backend, device, jobDict):
self.jobDict = jobDict
if not '_id' in jobDict:
raise JobExecutionError('No jobId present')
self.jobId = jobDict['_id']
self.device = device
self.backend = backend
def execute(self):
raise NotImplementedError
class InstallAppJob(Job):
APP_ARCHIVE_PATH='/tmp/apparchive/'
def __init__(self, backend, device, jobDict):
super(InstallAppJob, self).__init__(backend, device, jobDict)
self.appId = None
def _archive_app_binary(self, bundleId):
logger.debug('archiving %s' % bundleId)
try:
### add app binary to backend
self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True)
appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId)
logger.debug('archiving app %s to %s' % (bundleId, appPath))
self.backend.post_app_archive(self.appId, appPath)
os.remove(appPath) #delete app from disk
except Exception, e:
raise JobExecutionError('unable to archive app binary: %s' % str(e))
def _install_app(self, pilot):
''' try to install the app
will raise a JobExecutionError on failure
returns:
True if the app was just installed
False if the app was already installed before
'''
logger.debug('_install_app')
if not 'jobInfo' in self.jobDict:
raise JobExecutionError('no jobInfo given')
jobInfo = self.jobDict['jobInfo']
if not 'appType' in jobInfo:
raise JobExecutionError('no app type given')
if not 'bundleId' in jobInfo:
raise JobExecutionError('no bundleId given')
bundleId = jobInfo['bundleId']
version = None
if 'version' in jobInfo:
version = jobInfo['version']
#check app type
if 'AppStoreApp' == jobInfo['appType']:
logger.debug('installing appstore app %s' % bundleId)
# use device data due to better version data
installedApps = self.device.installed_apps()
# check if app already installed
alreadyInstalled = False
if bundleId in installedApps:
logger.debug('app %s is already installed' % bundleId)
# check for matching version number
if version:
installedVersion = installedApps[bundleId]['version']
if version != installedVersion:
raise JobExecutionError('wrong app version installed!')
# the app is already installed and versions are compatible
alreadyInstalled = True
# check the backend for already existing app
app = self.backend.get_app_bundleId(bundleId, version)
logger.debug('backend result for bundleId %s: %s' % (bundleId, app))
if app and '_id' in app:
self.appId = app['_id']
# case 1: already installed and registered with backend
if self.appId and alreadyInstalled:
                # app is installed and registered with backend
                logger.info('App is already installed and registered with backend <%s>' % self.appId)
return False
# case 2: install from backend
elif self.appId:
# install from backend
# dirty check for ipa-size < ~50MB
if app and 'fileSizeBytes' in app:
size = 0
try:
size = int(app['fileSizeBytes'])
except ValueError:
size = -1
if size > 0 or size < 40000000:
# actually install from backend
logger.info('installing app %s from backend (size: %s)' % (bundleId,size))
if not os.path.exists(self.APP_ARCHIVE_PATH):
os.makedirs(self.APP_ARCHIVE_PATH)
appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId)
logger.debug('fetch app %s from backend' % bundleId)
if self.backend.get_app_archive(self.appId, appPath):
logger.info('installing app %s via device handler' % bundleId)
self.device.install(appPath)
os.remove(appPath)
tries = 3
while tries > 0 and bundleId not in self.device.installed_apps():
tries = tries-1
time.sleep(60)
if bundleId in self.device.installed_apps():
return True
else:
                                logger.warning('installing the app via device handler failed! - Install via AppStore instead')
else:
logger.warning('unable to get app archive from backend. appId: <%s>' % self.appId)
else:
logger.info('skipping install from backend to avoid ideviceinstaller error (ipa to large)')
else:
logger.info('skipping install from backend to avoid ideviceinstaller error (unknown ipa size)')
# case 3: install from appstore
        # case 4: installed but unregistered
storeCountry = 'de'
if 'storeCountry' in jobInfo:
storeCountry = jobInfo['storeCountry']
## get appInfo
logger.debug('fetch appInfo from iTunesStore')
store = AppStore(storeCountry)
trackId = 0
appInfo = {}
try:
trackId = store.get_trackId_for_bundleId(bundleId)
appInfo = store.get_app_info(trackId)
except AppStoreException as e:
logger.error('unable to get appInfo: %s ', e)
raise JobExecutionError('unable to get appInfo: AppStoreException')
self.jobDict['appInfo'] = appInfo
logger.debug('using appInfo: %s' % str(appInfo))
## get account
accountId = ''
if alreadyInstalled:
# get account info from device
installedAppInfo = self.device.installed_apps()[bundleId]
if 'accountId' in installedAppInfo:
accountId = installedAppInfo['accountId']
else:
if 'accountId' in jobInfo:
accountId = jobInfo['accountId']
else:
for acc in self.device.accounts():
if acc['storeCountry'] == storeCountry:
accountId = acc['uniqueIdentifier']
if accountId == '':
raise JobExecutionError('unable to find a valid account identifier')
logger.debug('using account %s' % accountId)
# case 3 only
if not alreadyInstalled:
# install via appstore
logger.info('installing app %s via appstore' % bundleId)
if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}):
logger.error("App installation failed")
raise JobExecutionError("App installation failed")
## add app to backend
### the app data is currently taken from ideviceinstaller (via device.installed_apps)
### alternatively the pilot could be used to access the /applications rest api
appData = store.get_app_data(trackId)
appData['account'] = accountId
appData['name'] = appData['trackName']
self.appId = self.backend.post_app(appData)
# end install via appstore
return not alreadyInstalled
elif 'CydiaApp' == jobInfo['appType']:
logger.info('installing app %s via cydia' % bundleId)
pilot.install_cydia(bundleId)
return True
else:
raise JobExecutionError('invalid app type')
def execute(self):
logger.info("executing InstallAppJob %s on device %s" % (self.jobId, self.device))
# allow InstallAppJobs to exist/run without a corresponding backendJob
backendJobData = {}
if self.jobId:
backendJobData = self.backend.get_job(self.jobId)
## set job running
backendJobData['state'] = Job.STATE.RUNNING
self.backend.post_job(backendJobData)
pilot = Pilot(self.device.base_url())
result = True
try:
self.appJustInstalled = self._install_app(pilot)
if not self.appId:
raise JobExecutionError("No appId present")
jobInfo = self.jobDict['jobInfo']
bundleId = jobInfo['bundleId']
if self.device.ios_version()[0] > 8:
logger.debug("skipping app archiving since device is running iOS 9 or later")
else:
logger.debug("check if backend already has an app ipa")
if not self.backend.has_app_archive(self.appId):
self._archive_app_binary(bundleId)
backendJobData['state'] = Job.STATE.FINISHED
except JobExecutionError, e:
logger.error("Job execution failed: %s" % str(e))
backendJobData['state'] = Job.STATE.FAILED
result = False
## set job finished
if self.jobId:
self.backend.post_job(backendJobData)
return result
class RunAppJob(Job):
APP_ARCHIVE_PATH='/tmp/apparchive/'
def __init__(self, backend, device, jobDict):
super(RunAppJob, self).__init__(backend, device, jobDict)
self.appId = None
def _install_app(self, pilot):
''' try to install the app
returns:
True if the app was just installed
False if the app was already installed before
'''
logger.debug('_installApp')
installJobDict = {
'_id': False,
'jobInfo': self.jobDict['jobInfo']
}
installJob = InstallAppJob(self.backend, self.device, installJobDict)
logger.debug('executing InstallJob')
if not installJob.execute():
logger.debug('Unable to install app')
raise JobExecutionError('Unable to install app')
logger.debug('app is installed now')
self.appId = installJob.appId
return installJob.appJustInstalled
def _archive_app_binary(self, bundleId):
logger.debug('archiving %s' % bundleId)
try:
### add app binary to backend
self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True)
appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId)
logger.debug('archiving app %s to %s' % (bundleId, appPath))
self.backend.post_app_archive(self.appId, appPath)
os.remove(appPath) #delete app from disk
except Exception, e:
raise JobExecutionError('unable to archive app binary: %s' % str(e))
def _execute_app(self, pilot, bundleId, runId, executionStrategy=None):
''' execute the app '''
logger.debug('_execute_app')
taskInfo = {
'runId':runId,
'backendUrl':self.backend.baseUrl,
}
if executionStrategy:
taskInfo['executionStrategy'] = executionStrategy
pilot.run_auto_execution(bundleId, taskInfo=taskInfo)
def _save_run_results(self, runId, bundleId, uninstallApp=True):
logger.info("Saving apparchive to backend")
if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False):
appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa'
if os.path.exists(appPath):
f = open(appPath, 'rb')
appData = f.read()
f.close()
try:
appData = base64.b64encode(appData)
self.backend.post_result(runId, 'app_archive', appData)
except TypeError:
logger.error('Unable to encode app archive!')
#delete app archive from disk
os.remove(appPath)
if uninstallApp:
self.device.uninstall(bundleId)
def execute(self):
logger.info("executing RunAppJob %s on device %s" % (self.jobId, self.device))
backendJobData = self.backend.get_job(self.jobId)
## set job running
backendJobData['state'] = Job.STATE.RUNNING
self.backend.post_job(backendJobData)
pilot = Pilot(self.device.base_url())
try:
installDone = self._install_app(pilot)
if not self.appId:
raise JobExecutionError("No appId present")
jobInfo = self.jobDict['jobInfo']
bundleId = jobInfo['bundleId']
if self.device.ios_version()[0] > 8:
logger.debug("skipping app archiving since device is running iOS 9 or later")
else:
if not self.backend.has_app_archive(self.appId):
self._archive_app_binary(bundleId)
executionStrategy = None
if 'executionStrategy' in jobInfo:
executionStrategy = jobInfo['executionStrategy']
logger.debug('post_run')
## add run to backend
runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING)
logger.info('starting app pilot execution')
self._execute_app(pilot, bundleId, runId, executionStrategy)
if installDone:
logger.info("uninstalling app (%s)" % bundleId)
self.device.uninstall(bundleId)
# # save the results and install the app if not previously installed
# self._save_run_results(runId, bundleId, uninstallApp=installDone)
## set run finished
self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy)
except JobExecutionError, e:
logger.error("Job execution failed: %s" % str(e))
backendJobData['state'] = Job.STATE.FAILED
self.backend.post_job(backendJobData)
return False
## set job finished
backendJobData['state'] = Job.STATE.FINISHED
self.backend.post_job(backendJobData)
return True
class ExecuteCmdJob(Job):
def __init__(self, backend, device, jobDict):
super(ExecuteCmdJob, self).__init__(backend, device, jobDict)
if 'process' in jobDict:
self.process = jobDict['process']
if 'command' in jobDict:
self.command = jobDict['command']
def execute(self):
if self.process and self.execute:
pilot = Pilot(self.device.base_url())
pilot.inject(self.process, self.command)
else:
raise JobExecutionError("Process or command missing")
class JobFactory(object):
@classmethod
def job_from_dict(cls, jobDict, backend, device):
job = None
if 'type' in jobDict:
jobType = jobDict['type']
if jobType == Job.TYPE.RUN_APP:
job = RunAppJob(backend, device, jobDict)
elif jobType == Job.TYPE.INSTALL_APP:
job = InstallAppJob(backend, device, jobDict)
elif jobType == Job.TYPE.EXEC_CMD:
job = ExecuteCmdJob(backend, device, jobDict)
else:
logger.error('jobDict does not contain a type!')
if job:
logger.info('job created: %s' % str(job))
return job
| 2.328125 | 2 |
benchmarks/v3-app-note/run_benchmarks_pll_empirical.py | ayresdl/beagle-lib | 110 | 12793820 | <filename>benchmarks/v3-app-note/run_benchmarks_pll_empirical.py
#!/usr/bin/env python2.7
# <NAME>
import sys
import argparse
import subprocess
import re
from math import log, exp
# def gen_log_site_list(min, max, samples):
# log_range=(log(max) - log(min))
# samples_list = []
# for i in range(0, samples):
# samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i))))
# return samples_list
def main():
parser = argparse.ArgumentParser(description='generate synthetictest benchmarks')
parser.add_argument('synthetictest_path', help='path to synthetictest')
args = parser.parse_args()
file_list = ['59', '128', '354', '404']
rates = 4
precision_list = ['double']
states_list = [4]
# site_samples = 40
# sites_min = 100
# sites_max = 1000000
# sites_list = gen_log_site_list(sites_min, sites_max, site_samples)
rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu']
reps = 10
seed_list = range(1,11)
extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree']
throughput_re = re.compile('tree throughput total: (.*) M partials/second')
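    # Example of a synthetictest output line this regex is meant to match
    # (illustrative): "tree throughput total: 1234.56 M partials/second"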
debug_file = open('debug.txt', 'w')
header = 'iteration, precision, states, file, seed, resource, throughput'
print header
iteration = 0
for file in file_list:
for rsrc in rsrc_list:
for precision in precision_list:
for states in states_list:
for seed in seed_list:
out_string = str(iteration)
out_string += ', ' + str(precision)
out_string += ', ' + str(states)
out_string += ', ' + str(file)
out_string += ', ' + str(seed)
synthetictest_cmd = [args.synthetictest_path]
synthetictest_cmd.extend(['--alignmentdna', file + '.phy'])
synthetictest_cmd.extend(['--tree', file + '.tree'])
synthetictest_cmd.extend(['--states', str(states)])
synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)])
synthetictest_cmd.extend(['--seed', str(seed)])
throughput_re_index = 0
if rsrc == 'cpu':
synthetictest_cmd.extend(['--rsrc', '0', '--postorder'])
elif rsrc == 'cpu-threaded':
synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder'])
elif rsrc == 'pll':
synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder'])
elif rsrc == 'pll-repeats':
synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder'])
elif rsrc == 'gpu':
synthetictest_cmd.extend(['--rsrc', '1'])
elif rsrc == 'dual-gpu':
synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc'])
elif rsrc == 'quadruple-gpu':
synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc'])
synthetictest_cmd.extend(extra_args)
if precision == 'double':
synthetictest_cmd.extend(['--doubleprecision'])
try:
synthetictest_out = subprocess.check_output(synthetictest_cmd)
out_string += ', ' + rsrc
throughput = throughput_re.findall(synthetictest_out)
if throughput:
out_string += ', ' + throughput[throughput_re_index]
print out_string
                        except subprocess.CalledProcessError as e:
                            # Log the failed command and its captured output; the
                            # local synthetictest_out is unbound when check_output
                            # raises, so use the exception's output instead.
                            debug_file.write('ERROR\n')
                            debug_file.write('===============================================================\n')
                            debug_file.write(out_string + '\n')
                            debug_file.write(' '.join(synthetictest_cmd) + '\n')
                            debug_file.write(e.output)
iteration += 1
    debug_file.close()
    return 0
if __name__ == '__main__':
sys.exit(main())
| 2.21875 | 2 |
common_helper_yara/yara_scan.py | mistressofjellyfish/common_helper_yara | 0 | 12793821 | from subprocess import check_output, CalledProcessError, STDOUT
import sys
import re
import json
import logging
from .common import convert_external_variables
def scan(signature_path, file_path, external_variables={}, recursive=False):
'''
Scan files and return matches
:param signature_path: path to signature file
:type signature_path: string
:param file_path: files to scan
:type file_path: string
:return: dict
'''
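    # Usage sketch (paths are placeholders):
    #   matches = scan('/path/to/rules.yar', '/path/to/sample', recursive=True)
    # returns a dict keyed by rule name, e.g.
    #   {'rule_name': {'rule': 'rule_name', 'matches': True,
    #                  'strings': [(offset, '$tag', b'matched bytes')], 'meta': {...}}}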
variables = convert_external_variables(external_variables)
recursive = '-r' if recursive else ''
try:
scan_result = check_output("yara {} {} --print-meta --print-strings {} {}".format(variables, recursive, signature_path, file_path), shell=True, stderr=STDOUT)
except CalledProcessError as e:
logging.error("There seems to be an error in the rule file:\n{}".format(e.output.decode()))
return {}
try:
return _parse_yara_output(scan_result.decode())
except Exception as e:
logging.error('Could not parse yara result: {} {}'.format(sys.exc_info()[0].__name__, e))
return {}
def _parse_yara_output(output):
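    # The yara CLI output (with --print-meta --print-strings) is assumed to look
    # roughly like the following (illustrative):
    #   my_rule [author="me",hidden=false] /path/to/scanned/file
    #   0x1a:$some_string: matched text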
resulting_matches = dict()
match_blocks, rules = _split_output_in_rules_and_matches(output)
matches_regex = re.compile(r'((0x[a-f0-9]*):(\S+):\s(.+))+')
for index, rule in enumerate(rules):
for match in matches_regex.findall(match_blocks[index]):
_append_match_to_result(match, resulting_matches, rule)
return resulting_matches
def _split_output_in_rules_and_matches(output):
split_regex = re.compile(r'\n*.*\[.*\]\s\/.+\n*')
match_blocks = split_regex.split(output)
while '' in match_blocks:
match_blocks.remove('')
rule_regex = re.compile(r'(.*)\s\[(.*)\]\s([\.\.\/]|[\/]|[\.\/])(.+)')
rules = rule_regex.findall(output)
assert len(match_blocks) == len(rules)
return match_blocks, rules
def _append_match_to_result(match, resulting_matches, rule):
assert len(rule) == 4
rule_name, meta_string, _, _ = rule
assert len(match) == 4
_, offset, matched_tag, matched_string = match
meta_dict = _parse_meta_data(meta_string)
this_match = resulting_matches[rule_name] if rule_name in resulting_matches else dict(rule=rule_name, matches=True, strings=list(), meta=meta_dict)
this_match['strings'].append((int(offset, 16), matched_tag, matched_string.encode()))
resulting_matches[rule_name] = this_match
def _parse_meta_data(meta_data_string):
'''
Will be of form 'item0=lowercaseboolean0,item1="value1",item2=value2,..'
'''
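    # Illustrative example (assumed input): 'author="John Doe",reliable=true,score=42'
    # parses to {'author': 'John Doe', 'reliable': True, 'score': '42'}; only the
    # literals true/false are JSON-decoded, everything else is kept as a string.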
meta_data = dict()
for item in meta_data_string.split(','):
if '=' in item:
key, value = item.split('=', maxsplit=1)
value = json.loads(value) if value in ['true', 'false'] else value.strip('\"')
meta_data[key] = value
else:
logging.warning('Malformed meta string \'{}\''.format(meta_data_string))
return meta_data
| 2.609375 | 3 |
src/opihiexarata/astrometry/webclient.py | psmd-iberutaru/OpihiExarata | 0 | 12793822 | <reponame>psmd-iberutaru/OpihiExarata
import os
import urllib.parse
import urllib.request
import urllib.error
import random
import astropy.wcs as ap_wcs
import opihiexarata.library as library
import opihiexarata.library.error as error
import opihiexarata.library.hint as hint
# The base URL for the API which all other service URLs are derived from.
_DEFAULT_BASE_URL = "http://nova.astrometry.net/api/"
class AstrometryNetWebAPIEngine(hint.AstrometryEngine):
"""A python-based wrapper around the web API for astrometry.net.
This API does not have the full functionality of the default Python client
seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py.
The point of this class is to be simple enough to be understood by others and
be specialized for OpihiExarata.
Attributes
----------
_apikey : string
The API key used to log in.
original_upload_filename : string
The original filename that was used to upload the data.
session : string
The session ID of this API connection to astrometry.net
"""
# The default arguments for uploading files. In (key, value, type) form.
# Detailed is also their useage cases per
# http://astrometry.net/doc/net/api.html#submitting-a-url
_DEFAULT_URL_ARGUMENTS = [
# These parameters are for licensing and distribution terms.
("allow_commercial_use", "d", str),
("allow_modifications", "d", str),
# For visibility by the general public.
("publicly_visible", "y", str),
# Image scaling parameters, if provided, when known, helps the
# processing a little.
("scale_units", None, str),
("scale_type", None, str),
("scale_lower", None, float),
("scale_upper", None, float),
("scale_est", None, float),
("scale_err", None, float),
# These parameters allows for the establishment of an initial guess
# specified byt he centers, and its maximal deviation as specified
# by the radius parameter. (In degrees.)
("center_ra", None, float),
("center_dec", None, float),
("radius", None, float),
# Image properties, preprocessing it a little can help in its
# determination.
("parity", None, int),
("downsample_factor", None, int),
("positional_error", None, float),
("tweak_order", None, int),
("crpix_center", None, bool),
("invert", None, bool),
# These parameters are needed if being sent instead is an x,y list of
# source star positions.
("image_width", None, int),
("image_height", None, int),
("x", None, list),
("y", None, list),
("album", None, str),
]
def __init__(self, url=None, apikey: str = None, silent: bool = True) -> None:
"""The instantiation, connecting to the web API using the API key.
Parameters
----------
url : string, default = None
The base url which all other API URL links are derived from. This
should be used if the API is a self-hosted install or has a
different web source than nova.astrometry.net. Defaults to the
nova.astrometry.net api service.
apikey : string
The API key of the user.
silent : bool, default = True
Should there be printed messages as the processes are executed.
This is helpful for debugging or similar processes.
Returns
-------
None
"""
# Defining the URL.
self.ASTROMETRY_BASE_API_URL = (
str(url) if url is not None else _DEFAULT_BASE_URL
)
# Use the API key to log in a derive a session key.
self.session = None
session_key = self.__login(apikey=apikey)
self._apikey = apikey
self.session = session_key
# Placeholder variables.
self.original_upload_filename = str()
self._image_return_results = {}
return None
def __login(self, apikey: str) -> str:
"""The method to log into the API system.
Parameters
----------
apikey : string
The API key for the web API service.
Returns
-------
session_key : string
The session key for this login session.
"""
# The key.
args = {"apikey": apikey}
result = self._send_web_request(service="login", args=args)
session = result.get("session", False)
# Check if the session works and that the API key given is valid.
if not session:
raise error.WebRequestError(
"The provided API key did not provide a valid session."
)
else:
# The session should be fine.
session_key = session
return session_key
def __get_submission_id(self) -> str:
"""Extract the submission ID from the image upload results."""
image_results = self._image_return_results
self.__submission_id = image_results.get("subid", None)
return self.__submission_id
def __set_submission_id(self, sub_id) -> None:
"""Assign the submission ID, it should only be done once when the
image is obtained."""
if self.__submission_id is None:
self.__submission_id = sub_id
else:
raise error.ReadOnlyError(
"The submission ID has already been set by obtaining it from the API"
" service."
)
return None
def __del_submission_id(self) -> None:
"""Remove the current submission ID association."""
self.__submission_id = None
return None
__doc_submission_id = (
"When file upload or table upload is sent to the API, the submission ID is"
" saved here."
)
__submission_id = None
submission_id = property(
__get_submission_id,
__set_submission_id,
__del_submission_id,
__doc_submission_id,
)
def __get_job_id(self) -> str:
"""Extract the job ID from the image upload results. It may be the
case that there is not job yet associated with this submission.
"""
# If the job ID already has been obtained, then there is no reason to
# call the API again.
if self.__job_id is not None:
return self.__job_id
# Call the API to get the job ID.
try:
submission_results = self.get_submission_results(
submission_id=self.submission_id
)
except error.WebRequestError:
# Make a more helpful error message for what is going on.
if self.submission_id is None:
raise error.WebRequestError(
"There cannot be a job id without there being a submission for that"
" job to operate on."
)
else:
# What happened is unknown.
raise error.UndiscoveredError("Why the web request failed is unknown.")
else:
job_id_list = submission_results.get("jobs", [])
# If there are no jobs, then it is likely still in queue.
if len(job_id_list) == 0:
self.__job_id = None
else:
self.__job_id = job_id_list[-1]
return self.__job_id
raise error.LogicFlowError
return None
def __set_job_id(self, job_id) -> None:
"""Assign the job ID, it should only be done once when the
image is obtained."""
if self.__job_id is None:
self.__job_id = job_id
else:
raise error.ReadOnlyError(
"The job ID has already been set by obtaining it from the API service."
)
return None
def __del_job_id(self) -> None:
"""Remove the current job ID association."""
self.__job_id = None
return None
__doc_job_id = (
"When file upload or table upload is sent to the API, the job ID of the"
" submission is saved here."
)
__job_id = None
job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id)
def _generate_service_url(self, service: str) -> str:
"""Generate the correct URL for the desired service. Because astrometry.net
uses a convension, we can follow it to obtain the desired service URL.
Parameters
----------
service : str
            The service for which the API URL should be generated.
Returns
-------
url : str
The URL for the service.
"""
url = self.ASTROMETRY_BASE_API_URL + service
return url
def _generate_upload_args(self, **kwargs) -> dict:
"""Generate the arguments for sending a request. This constructs the
needed arguments, replacing the defaults with user provided arguments
where desired.
Parameters
----------
**kwargs : dict
Arguments which would override the defaults.
Returns
-------
args : dict
The arguments which can be used to send the request.
"""
args = {}
for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS:
if keydex in kwargs:
new_value = kwargs.pop(keydex)
new_value = typedex(new_value)
args.update({keydex: new_value})
elif defaultdex is not None:
args.update({keydex: defaultdex})
return args
def _send_web_request(
self, service: str, args: dict = {}, file_args: dict = None
) -> dict:
"""A wrapper function for sending a webrequest to the astrometry.net API
service. Returns the results as well.
Parameters
----------
service : string
The service which is being requested. The web URL is constructed
from this string.
args : dictionary, default = {}
The arguments being sent over the web request.
file_args : dictionary, default = None
If a file is being uploaded instead, special care must be taken to
sure it matches the upload specifications.
Returns
-------
results : dictionary
The results of the web request if it did not fail.
"""
# Obtain the session key derived when this class is instantiated and
# logged into. Use this session key for requests.
        # Work on a copy so that neither the caller's dictionary nor the shared
        # mutable default argument is modified in place.
        args = dict(args)
        if self.session is not None:
            args.update({"session": self.session})
# The API requires that the data format must be a JSON based datatype.
json_data = library.json.dictionary_to_json(dictionary=args)
# The URL which to send this request to, constructed from the service
# desired.
api_url = self._generate_service_url(service=service)
# If the request requires that a file be send, then it must be in the
# correct format. Namely, a multipart/form-data format.
if file_args is not None:
boundary_key = "".join([random.choice("0123456789") for __ in range(19)])
boundary = "==============={bkey}==".format(bkey=boundary_key)
headers = {
"Content-Type": 'multipart/form-data; boundary="{bd}"'.format(
bd=boundary
)
}
data_pre = str(
"--"
+ boundary
+ "\n"
+ "Content-Type: text/plain\r\n"
+ "MIME-Version: 1.0\r\n"
+ 'Content-disposition: form-data; name="request-json"\r\n'
+ "\r\n"
+ json_data
+ "\n"
+ "--"
+ boundary
+ "\n"
+ "Content-Type: application/octet-stream\r\n"
+ "MIME-Version: 1.0\r\n"
+ 'Content-disposition: form-data; name="file"; filename="{name}"'.format(
name=file_args["filename"]
)
+ "\r\n"
+ "\r\n"
)
data_post = "\n" + "--" + boundary + "--\n"
data = data_pre.encode() + file_args["data"] + data_post.encode()
else:
# Otherwise, the form should be standard encoded: x-www-form-encoded
headers = {}
data = {"request-json": json_data}
data = urllib.parse.urlencode(data)
data = data.encode("utf-8")
# Finally send the request.
request = urllib.request.Request(url=api_url, headers=headers, data=data)
# Processing the request.
try:
file = urllib.request.urlopen(
request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT
)
text = file.read()
result = library.json.json_to_dictionary(json_string=text)
# Check if the status of the request provided is a valid status.
status = result.get("status")
if status == "error":
error_message = result.get("errormessage", "(none)")
# Try to deduce what the error is.
if error_message == "bad apikey":
raise error.WebRequestError(
"The API key provided is not a valid key."
)
else:
raise error.WebRequestError(
"The server returned an error status message: \n {message}".format(
message=error_message
)
)
else:
return result
except urllib.error.HTTPError:
raise error.WebRequestError(
"The web request output cannot be properly processed. This is likely"
" from a bad web request."
)
# The logic should not flow beyond this point.
raise error.LogicFlowError
return None
def get_job_results(self, job_id: str = None) -> dict:
"""Get the results of a job sent to the API service.
Parameters
----------
job_id : str, default = None
The ID of the job that the results should be obtained from. If not
provided, the ID determined by the file upload is used.
Returns
-------
results : dict
The results of the astrometry.net job. They are, in general: (If
the job has not finished yet, None is returned.)
- Status : The status of the job.
- Calibration : Calibration of the image uploaded.
- Tags : Known tagged objects in the image, people inputted.
- Machine Tags : Ditto for tags, but only via machine inputs.
- Objects in field : Known objects in the image field.
- Annotations : Known objects in the field, with annotations.
- Info : A collection of most everything above.
"""
job_id = job_id if job_id is not None else self.job_id
# Get the result of the job.
service_string = "jobs/{id}".format(id=job_id)
try:
job_result = self._send_web_request(service=service_string)
except error.WebRequestError:
# This error is likely because the job is still in queue.
return None
# Check that the service was successful.
status = job_result.get("status", False)
if status != "success":
raise error.WebRequestError(
"The job result request failed, check that the job ID is correct or try"
" again later."
)
else:
results = {}
# For the status.
results["status"] = status
# For the calibrations.
service_string = "jobs/{id}/calibration".format(id=job_id)
results["calibration"] = self._send_web_request(service=service_string)
# For the tags.
service_string = "jobs/{id}/tags".format(id=job_id)
results["tags"] = self._send_web_request(service=service_string)
# For the machine tags.
service_string = "jobs/{id}/machine_tags".format(id=job_id)
results["machine_tags"] = self._send_web_request(service=service_string)
# For the objects in field.
service_string = "jobs/{id}/objects_in_field".format(id=job_id)
results["objects_in_field"] = self._send_web_request(service=service_string)
# For the annotations.
service_string = "jobs/{id}/annotations".format(id=job_id)
results["annotations"] = self._send_web_request(service=service_string)
# For the info.
service_string = "jobs/{id}/info".format(id=job_id)
results["info"] = self._send_web_request(service=service_string)
# All done.
return results
def get_job_status(self, job_id: str = None) -> str:
"""Get the status of a job specified by its ID.
Parameters
----------
job_id : str, default = None
The ID of the job that the results should be obtained from. If not
provided, the ID determined by the file upload is used.
Returns
-------
status : string
The status of the submission. If the job has not run yet, None is
returned instead.
"""
job_id = job_id if job_id is not None else self.job_id
# Get the result of the job.
service_string = "jobs/{id}".format(id=job_id)
status = None
try:
job_result = self._send_web_request(service=service_string)
except error.WebRequestError:
# This error is likely because the job is still in queue.
status = None
else:
# Check the job status.
status = job_result.get("status")
finally:
return status
# Should not get here.
raise error.LogicFlowError
return None
def get_submission_results(self, submission_id: str = None) -> dict:
"""Get the results of a submission specified by its ID.
Parameters
----------
submission_id : str
The ID of the submission. If it is not passed, the ID determined
by the file upload is used.
Returns
-------
result : dict
The result of the submission.
"""
submission_id = (
submission_id if submission_id is not None else self.submission_id
)
service_string = "submissions/{sub_id}".format(sub_id=submission_id)
result = self._send_web_request(service=service_string)
return result
def get_submission_status(self, submission_id: str = None) -> str:
"""Get the status of a submission specified by its ID.
Parameters
----------
submission_id : str, default = None
The ID of the submission. If it is not passed, the ID determined
by the file upload is used.
Returns
-------
status : string
The status of the submission.
"""
submission_id = (
submission_id if submission_id is not None else self.submission_id
)
results = self.get_submission_results(submission_id=submission_id)
status = results.get("status")
return status
def get_reference_star_pixel_correlation(
self, job_id: str = None, temp_filename: str = None, delete_after: bool = True
) -> hint.Table:
"""This obtains the table that correlates the location of reference
stars and their pixel locations. It is obtained from the fits corr file
that is downloaded into a temporary directory.
Parameters
----------
job_id : string, default = None
The ID of the job that the results should be obtained from. If not
provided, the ID determined by the file upload is used.
temp_filename : string, default = None
The filename that the downloaded correlation file will be
downloaded as. The path is going to still be in the temporary
directory.
delete_after : bool, default = True
Delete the file after downloading it to extract its information.
Returns
-------
correlation_table : Table
The table which details the correlation between the coordinates of
the stars and their pixel locations.
"""
job_id = job_id if job_id is not None else self.job_id
# Download the correlation file to read into a data table.
upload_filename = library.path.get_filename_without_extension(
pathname=self.original_upload_filename
)
fits_table_filename = (
temp_filename if temp_filename is not None else upload_filename + "_corr"
)
# The full path of the filename derived from saving it in a temporary
# directory.
corr_filename = library.path.merge_pathname(
filename=fits_table_filename, extension="fits"
)
corr_pathname = library.temporary.make_temporary_directory_path(
filename=corr_filename
)
# Save the correlation file.
self.download_result_file(
filename=corr_pathname, file_type="corr", job_id=job_id
)
# Load the data from the file.
__, correlation_table = library.fits.read_fits_table_file(
filename=corr_pathname, extension=1
)
# Delete the temporary file after loading it if desired.
if delete_after:
os.remove(corr_pathname)
return correlation_table
def get_wcs(
self, job_id: str = None, temp_filename: str = None, delete_after: bool = True
) -> hint.WCS:
"""This obtains the wcs header file and then computes World Coordinate
System solution from it. Because astrometry.net computes it for us,
we just extract it from the header file using Astropy.
Parameters
----------
job_id : string, default = None
The ID of the job that the results should be obtained from. If not
provided, the ID determined by the file upload is used.
temp_filename : string, default = None
The filename that the downloaded wcs file will be downloaded as.
The path is going to still be in the temporary directory.
delete_after : bool, default = True
Delete the file after downloading it to extract its information.
Returns
-------
wcs : Astropy WCS
The world coordinate solution class for the image provided.
"""
job_id = job_id if job_id is not None else self.job_id
# Download the correlation file to read into a data table.
upload_filename = library.path.get_filename_without_extension(
pathname=self.original_upload_filename
)
fits_table_filename = (
temp_filename if temp_filename is not None else upload_filename + "_wcs"
)
# The full path of the filename derived from saving it in a temporary
# directory.
corr_filename = library.path.merge_pathname(
filename=fits_table_filename, extension="fits"
)
corr_pathname = library.temporary.make_temporary_directory_path(
filename=corr_filename
)
# Save the correlation file.
self.download_result_file(
filename=corr_pathname, file_type="wcs", job_id=job_id
)
# Load the header from the file.
wcs_header = library.fits.read_fits_header(filename=corr_pathname)
wcs = ap_wcs.WCS(wcs_header)
# Delete the temporary file after loading it if desired.
if delete_after:
os.remove(corr_pathname)
return wcs
def upload_file(self, pathname: str, **kwargs) -> dict:
"""A wrapper to allow for the uploading of files or images to the API.
This also determines the submission ID and the job ID for the uploaded
image and saves it.
Parameters
----------
pathname : str
The pathname of the file to open. The filename is extracted and
used as well.
Returns
-------
results : dict
The results of the API call to upload the image.
"""
# When uploading a new file, the submission and job IDs will change.
# They must be reset because of their read-only nature.
del self.submission_id, self.job_id
# Save the file information.
self.original_upload_filename = pathname
args = self._generate_upload_args(**kwargs)
# Process the file upload.
file_args = None
try:
file = open(pathname, "rb")
filename = library.path.get_filename_with_extension(pathname=pathname)
file_args = {"filename": filename, "data": file.read()}
except IOError:
raise error.FileError("File does not exist: {path}".format(path=pathname))
# Extract the submission id. This allows for easier
# association between this class instance and the uploaded file.
upload_results = self._send_web_request("upload", args, file_args)
self._image_return_results = upload_results
return upload_results
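    # Typical usage sketch (illustrative; the API key and filename are placeholders):
    #   engine = AstrometryNetWebAPIEngine(apikey="YOUR_API_KEY")
    #   engine.upload_file("field_image.fits")
    #   while engine.get_job_status() != "success":
    #       time.sleep(10)          # poll until the plate solve finishes
    #   wcs = engine.get_wcs()      # astropy WCS for the uploaded image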
def download_result_file(
self, filename: str, file_type: str, job_id: str = None
) -> None:
"""Downloads fits data table files which correspond to the job id.
Parameters
----------
filename : str
The filename of the file when it is downloaded and saved to disk.
file_type : str
The type of file to be downloaded from astrometry.net. It should
be one of the following:
- `wcs`: The world coordinate data table file.
- `new_fits`, `new_image`: A new fits file, containing the
original image, annotations, and WCS header information.
- `rdls`: A table of reference stars nearby.
- `axy`: A table of the locations of stars detected in the
provided image.
- `corr`: A table of the correspondences between reference
stars' locations on the sky and in pixel space.
job_id : str, default = None
The ID of the job that the results should be obtained from. If not
provided, the ID determined by the file upload is used.
Returns
-------
None
"""
# Get the proper job ID.
job_id = job_id if job_id is not None else self.job_id
# Ensure that the type provided is a valid type which we can pull
# from the API service. Accommodating for capitalization.
file_type = str(file_type).lower()
valid_api_file_types = ("wcs", "new_fits", "rdls", "axy", "corr")
if file_type not in valid_api_file_types:
raise error.WebRequestError(
"The provided file type to be downloaded is not a valid type which can"
" be downloaded, it must be one of: {fty}".format(
fty=valid_api_file_types
)
)
# Construct the URL for the request. It is a little different from the
# normal API scheme so a new method is made.
def _construct_file_download_url(ftype: str, id: str) -> str:
"""Construct the file curl from the file type `ftype` and the
job id `id`."""
url = "http://nova.astrometry.net/{_type}_file/{_id}".format(
_type=ftype, _id=id
)
return url
file_download_url = _construct_file_download_url(ftype=file_type, id=job_id)
# Before downloading the file, check that the file actually exists.
if job_id is None:
raise error.WebRequestError("There is no job to download the file from.")
if library.http.get_http_status_code(url=file_download_url) != 200:
raise error.WebRequestError(
"The file download link is not giving an acceptable http status code."
" It is likely that the job is still processing and thus the data files"
" are not ready."
)
# Download the file.
library.http.download_file_from_url(
url=file_download_url, filename=filename, overwrite=True
)
return None
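# --- Editor's hedged example (not part of the original client) --------------
# A short, self-contained Astropy demo of how the WCS object returned by
# get_wcs above is typically used. The header values here are synthetic and
# purely illustrative; in real use the WCS comes from the downloaded wcs file.
if __name__ == "__main__":
    from astropy.wcs import WCS as _DemoWCS

    demo_wcs = _DemoWCS(naxis=2)
    demo_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    demo_wcs.wcs.crval = [150.0, 2.0]      # reference sky position (deg), illustrative
    demo_wcs.wcs.crpix = [512.0, 512.0]    # reference pixel, illustrative
    demo_wcs.wcs.cdelt = [-1e-4, 1e-4]     # ~0.36 arcsec pixels, illustrative
    ra, dec = demo_wcs.all_pix2world(100.0, 200.0, 0)
    print("pixel (100, 200) -> RA, Dec:", ra, dec)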
| 2.515625 | 3 |
docs/notebooks/examples/SED_emulator/ESB_functions.py | MCarmenCampos/XID_plus | 3 | 12793823 | from astropy.io import ascii, fits
from astropy.table import QTable, Table
import arviz as az
from astropy.coordinates import SkyCoord
from astropy import units as u
import os
import pymoc
from astropy import wcs
from astropy.table import vstack, hstack
import numpy as np
import xidplus
# # Applying XID+CIGALE to Extreme Starbursts
# In this notebook, we read in the data files and prepare them for fitting with XID+CIGALE, the SED prior model extension to XID+. Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of $> 10^{3}\mathrm{M_{\odot}yr^{-1}}$
# In[2]:
def process_prior(c,new_Table=None,
path_to_data=['../../../data/'],
field=['Lockman-SWIRE'],
path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'],
redshift_file=["/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits"],
redshift_prior=[0.1,2.0],
radius=6.0,
alt_model=False):
# Import required modules
# In[3]:
# In[4]:
# Set image and catalogue filenames
# In[5]:
#Folder containing maps
pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map
pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map
plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map
#output folder
output_folder='./'
# Load in images, noise maps, header info and WCS information
# In[6]:
#-----250-------------
hdulist = fits.open(pswfits)
im250phdu=hdulist[0].header
im250hdu=hdulist[1].header
im250=hdulist[1].data*1.0E3 #convert to mJy
nim250=hdulist[3].data*1.0E3 #convert to mJy
w_250 = wcs.WCS(hdulist[1].header)
pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
#-----350-------------
hdulist = fits.open(pmwfits)
im350phdu=hdulist[0].header
im350hdu=hdulist[1].header
im350=hdulist[1].data*1.0E3 #convert to mJy
nim350=hdulist[3].data*1.0E3 #convert to mJy
w_350 = wcs.WCS(hdulist[1].header)
pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
#-----500-------------
hdulist = fits.open(plwfits)
im500phdu=hdulist[0].header
im500hdu=hdulist[1].header
im500=hdulist[1].data*1.0E3 #convert to mJy
nim500=hdulist[3].data*1.0E3 #convert to mJy
w_500 = wcs.WCS(hdulist[1].header)
pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
# XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues so they cover the same area. It can also take in MOCs as selection functions to carry out additional cuts. Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position we are interested in. We will use a HEALPix order of 15 (the resolution: higher order means higher resolution)
moc=pymoc.util.catalog.catalog_to_moc(c,100,15)
# Load in catalogue you want to fit (and make any cuts). Here we use HELP's VO database and directly call it using PyVO
# In[10]:
import pyvo as vo
service = vo.dal.TAPService("https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap")
# In[11]:
resultset = service.search("SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',"+str(c.ra.deg[0])+", "+str(c.dec.deg[0])+", 0.028 ))")
# In[12]:
masterlist=resultset.table
def construct_prior(Table=None):
from astropy.coordinates import SkyCoord
#first use standard cut (i.e. not star and is detected in at least 3 opt/nir bands)
prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)]
#make skycoord from masterlist
catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec'])
#make skycoord from input table
c = SkyCoord(ra=Table['ra'], dec=Table['dec'])
#search around all of the new sources
idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec)
#for every new sources
for src in range(0,len(Table)):
#limit to matches around interested sources
ind = idxc == src
#if there are matches
if ind.sum() >0:
#choose the closest and check if it's in the prior list already
in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id']
#if it's not in the prior list
if in_prior.sum() <1:
print(in_prior.sum())
#add to appended sources
prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]])
return prior_list
# In[64]:
import astropy.units as u
#create table of candidate source
t = QTable([c.ra, c.dec], names=('ra', 'dec'))
#add candidate source to new sources table, create prior list
if new_Table is not None:
prior_list=construct_prior(vstack([t,new_Table]))
else:
prior_list = construct_prior(t)
if alt_model==True:
sep = 18
separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec
remove_ind = (separation > np.min(separation)) & (separation < sep)
prior_list.remove_rows(remove_ind)
# ## Get Redshift and Uncertainty
#
# <NAME> defines a median and a hierarchical bayes combination redshift. We need the uncertainty, so let's match via `help_id`
# In[26]:
photoz=Table.read(redshift_file[0])
# In[27]:
#help_id=np.empty((len(photoz)),dtype=np.dtype('U27'))
for i in range(0,len(photoz)):
photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8')
#photoz['help_id']=help_id
# In[28]:
from astropy.table import Column, MaskedColumn
prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list))
prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc'))
# In[29]:
photoz
# In[30]:
ii=0
for i in range(0,len(prior_list)):
ind=photoz['help_id'] == prior_list['help_id'][i]
try:
if photoz['z1_median'][ind]>0.0:
prior_list['redshift'][i]=photoz['z1_median'][ind]
prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])]))
#prior_list['redshift_unc'].mask[i]=False
#prior_list['redshift'].mask[i]=False
except ValueError:
None
# In[33]:
dist_matrix=np.zeros((len(prior_list),len(prior_list)))
from astropy.coordinates import SkyCoord
from astropy import units as u
for i in range(0,len(prior_list)):
for j in range(0,len(prior_list)):
if i>j:
coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs')
coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg)
dist_matrix[i,j] = coord1.separation(coord2).value
# In[35]:
ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0)
xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list)))
yy[ind]
# In[36]:
prior_list[yy[ind]]
# In[37]:
prior_list['redshift'].mask[yy[ind]]=True
# In[38]:
prior_list=prior_list[prior_list['redshift'].mask == False]
# In[39]:
prior_list
# XID+ is built around two python classes. A prior and posterior class. There should be a prior class for each map being fitted. It is initiated with a map, noise map, primary header and map header and can be set with a MOC. It also requires an input prior catalogue and point spread function.
#
# In[40]:
#---prior250--------
prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertainty map, wcs info and primary header
prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)
#---prior350--------
prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)
prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior350.prior_bkg(-5.0,5)
#---prior500--------
prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)
prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior500.prior_bkg(-5.0,5)
# Set PSF. For SPIRE, the PSF can be assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\mathrm{\mu m}$ respectively. Lets use the astropy module to construct a Gaussian PSF and assign it to the three XID+ prior classes.
# In[41]:
#pixsize array (size of pixels in arcseconds)
pixsize=np.array([pixsize250,pixsize350,pixsize500])
#point response function for the three bands
prfsize=np.array([18.15,25.15,36.3])
#use Gaussian2DKernel to create prf (requires stddev rather than fwhm, hence prfsize/2.355)
from astropy.convolution import Gaussian2DKernel
##---------fit using Gaussian beam-----------------------
prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)
prf250.normalize(mode='peak')
prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)
prf350.normalize(mode='peak')
prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)
prf500.normalize(mode='peak')
pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map
pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map
pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map
prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for grid (in pixel scale)
prior350.set_prf(prf350.array,pind350,pind350)
prior500.set_prf(prf500.array,pind500,pind500)
print('fitting '+ str(prior250.nsrc)+' sources \n')
print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels')
print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg))
# Before fitting, the prior classes need to take the PSF and calculate how much each source contributes to each pixel. This process provides what we call a pointing matrix. Let's calculate the pointing matrix for each prior class
# In[43]:
prior250.get_pointing_matrix()
prior350.get_pointing_matrix()
prior500.get_pointing_matrix()
# In[44]:
return [prior250,prior350,prior500],prior_list
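# --- Editor's hedged usage sketch (not part of the original notebook) -------
# Drive the prior construction from a single candidate position. The
# coordinates below are illustrative only; running this requires the
# hard-coded SPIRE maps and photo-z catalogue paths above to exist locally
# and the HELP VO TAP service to be reachable.
def _demo_process_prior():
    candidate = SkyCoord(ra=[161.0] * u.deg, dec=[58.0] * u.deg)
    (prior250, prior350, prior500), prior_list = process_prior(candidate, radius=6.0)
    return prior250.nsrc, len(prior_list)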
def getSEDs(data, src, nsamp=30,category='posterior'):
import subprocess
if category=='posterior':
d=data.posterior
else:
d=data.prior
subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False)
agn = d.agn.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
z = d.redshift.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
sfr = d.sfr.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
fin = open("/Volumes/pdh_storage/cigale/pcigale_orig.ini")
fout = open("/Volumes/pdh_storage/cigale/pcigale.ini", "wt")
for line in fin:
if 'redshift =' in line:
fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \n')
elif 'fracAGN =' in line:
fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \n')
else:
fout.write(line)
fin.close()
fout.close()
p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/')
p.wait()
SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits')
# set more appropriate units for dust
from astropy.constants import L_sun, M_sun
SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value
SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value
wavelengths = []
fluxes = []
for i in range(0, nsamp):
sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(SEDs[i * nsamp + (i)]['id']))
wavelengths.append(sed_plot['wavelength'] / 1E3)
fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu'])
from astropy.table import vstack, hstack
return hstack(wavelengths), hstack(fluxes)
| 1.632813 | 2 |
back/babar_server/apps.py | dryvenn/babar3 | 0 | 12793824 | from django.apps import AppConfig
class BabarServerConfig(AppConfig):
name = 'babar_server'
| 1.125 | 1 |
app/cronscript.py | Stienvdh/new-employee-onboarding | 1 | 12793825 |
"""Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import os, requests
def update_Admin(messageToAdmin):
s = requests.Session()
s.headers.update({
'Authorization': "Bearer " + os.environ['WEBEX_BOT_TOKEN']
})
WEBEX_BASE_URL = "https://webexapis.com"
url = f"{WEBEX_BASE_URL}/v1/messages"
data = {
"toPersonEmail": "<EMAIL>",
"text": messageToAdmin,
}
resp = s.post(url, json=data)
resp.raise_for_status()
url = "https://webexapis.com/v1/access_token"
payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN']
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
response = requests.request("POST", url, headers=headers, data=payload)
if response.status_code == 200 or response.status_code == 204 :
print(response.text)
access_token = response.json()['access_token']
refresh_token = response.json()['refresh_token']
print(access_token)
os.environ["WEBEX_ACCESS_TOKEN"] = access_token
os.environ["WEBEX_REFRESH_TOKEN"] = refresh_token
update_Admin('Hello, a new access token has been generated for the new-employee-onboarding integration. Access Token: ' + access_token + ' Refresh token: ' + refresh_token)
else :
update_Admin('Hello, the cronjob was not able to generate a new access token for the new-employee-onboarding integration. Here is the response code: ' +
str(response.status_code) + '.')
# sample response text below; access_token is the newly generated token
# {
# "access_token": "<KEY>",
# "expires_in": 1209599,
# "refresh_token": "OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901",
# "refresh_token_expires_in": 6435474
# }
print('*** Ran the cron script ***')
| 1.734375 | 2 |
fmri/experiment.py | mwaskom/pulses_experiment_code | 0 | 12793826 |
from __future__ import division
import os
import json
from glob import glob
import numpy as np
import pandas as pd
from scipy import stats
from scipy.spatial import distance
from psychopy.visual import TextStim, Rect
from visigoth.stimuli import Point, Points, PointCue, Pattern
from visigoth import AcquireFixation, AcquireTarget, flexible_values
from visigoth.ext.bunch import Bunch
def define_cmdline_params(self, parser):
"""Add extra parameters to be defined at runtime."""
parser.add_argument("--acceleration", default=1, type=float)
parser.add_argument("--blocks", default=1, type=int)
def create_stimuli(exp):
"""Initialize stimulus objects."""
# Fixation point
fix = Point(exp.win,
exp.p.fix_pos,
exp.p.fix_radius,
exp.p.fix_iti_color)
# Spatial cue
cue = PointCue(exp.win,
exp.p.cue_norm,
exp.p.cue_radius,
exp.p.cue_color)
# Saccade targets
targets = Points(exp.win,
exp.p.target_pos,
exp.p.target_radius,
exp.p.target_color)
# Average of multiple sinusoidal grating stimulus
pattern = Pattern(exp.win,
n=exp.p.stim_gratings,
elementTex=exp.p.stim_tex,
elementMask=exp.p.stim_mask,
sizes=exp.p.stim_size,
sfs=exp.p.stim_sf,
pos=(0, 0)
)
return locals()
def generate_trials(exp):
"""Yield trial and pulse train info."""
# TODO let us set random number generator somehow. Command line?
# Build the full experimental design
constraints = Bunch(exp.p.design_constraints)
all_trials, all_pulses = generate_block(constraints, exp.p)
for i in range(exp.p.blocks - 1):
trial_part, pulse_part = generate_block(constraints, exp.p)
trial_part["trial"] += len(all_trials)
pulse_part["trial"] += len(all_trials)
all_trials = all_trials.append(trial_part, ignore_index=True)
all_pulses = all_pulses.append(pulse_part, ignore_index=True)
# Adjust the timing of some components for training
all_trials["wait_pre_stim"] /= exp.p.acceleration
all_pulses["gap_dur"] /= exp.p.acceleration
# Add in name information that matches across tables
all_trials = all_trials.assign(
subject=exp.p.subject,
session=exp.p.session,
run=exp.p.run
)
all_pulses = all_pulses.assign(
subject=exp.p.subject,
session=exp.p.session,
run=exp.p.run
)
# Add in information that's not part of the saved design
gen_dist = all_trials["gen_dist"]
all_trials = all_trials.assign(
gen_mean=np.take(exp.p.dist_means, gen_dist),
gen_sd=np.take(exp.p.dist_sds, gen_dist),
target=np.take(exp.p.dist_targets, gen_dist),
wait_resp=exp.p.wait_resp,
wait_feedback=exp.p.wait_feedback,
)
all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur)
# Add in blank fields that will be filled in later
empty_cols = ["onset_fix", "offset_fix",
"onset_cue", "offset_cue",
"onset_targets", "onset_feedback",
"result", "response", "correct", "rt"]
all_trials = all_trials.assign(
fixbreaks=0,
responded=False,
**{col: np.nan for col in empty_cols}
)
all_pulses = all_pulses.assign(
occurred=False,
blink=False,
blink_pad=np.nan,
dropped_frames=np.nan,
pulse_onset=np.nan,
pulse_offset=np.nan,
)
# Add trial-level information computed from pulse-level table
all_trials = all_trials.set_index("trial", drop=False)
trial_pulses = all_pulses.groupby("trial")
pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum()
trial_duration = all_trials["wait_pre_stim"] + pulse_train_dur
start_time = (all_trials["wait_iti"].cumsum()
+ trial_duration.shift(1).fillna(0).cumsum())
all_trials = all_trials.assign(
trial_llr=trial_pulses.pulse_llr.sum(),
log_contrast_mean=trial_pulses.log_contrast.mean(),
pulse_train_dur=pulse_train_dur,
trial_duration=trial_duration,
start_time=start_time,
)
# Generate information for each trial
for trial, trial_info in all_trials.iterrows():
pulse_info = all_pulses.loc[all_pulses["trial"] == trial].copy()
yield trial_info, pulse_info
def generate_block(constraints, p, rng=None):
"""Generated a balanced set of trials, might be only part of a run."""
if rng is None:
rng = np.random.RandomState()
n_trials = constraints.trials_per_run
# --- Assign trial components
# Assign the stimulus to a side
stim_pos = np.repeat([0, 1], n_trials // 2)
while max_repeat(stim_pos) > constraints.max_stim_repeat:
stim_pos = rng.permutation(stim_pos)
# Assign the target to a side
gen_dist = np.repeat([0, 1], n_trials // 2)
while max_repeat(gen_dist) > constraints.max_dist_repeat:
gen_dist = rng.permutation(gen_dist)
# Assign pulse counts to each trial
count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1
count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1])
expected_count_dist = count_pmf * n_trials
count_error = np.inf
while count_error > constraints.sum_count_error:
pulse_count = flexible_values(p.pulse_count, n_trials, rng,
max=p.pulse_count_max).astype(int)
count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1)
count_error = np.sum(np.abs(count_dist[count_support]
- expected_count_dist))
# Assign initial ITI to each trial
total_iti = np.inf
while not_in_range(total_iti, constraints.iti_range):
wait_iti = flexible_values(p.wait_iti, n_trials, rng)
if p.skip_first_iti:
wait_iti[0] = 0
total_iti = wait_iti.sum()
# Use the first random sample if we're not being precise
# about the overall time of the run (i.e. in psychophys rig)
if not p.keep_on_time:
break
# --- Build the trial_info structure
trial = np.arange(1, n_trials + 1)
trial_info = pd.DataFrame(dict(
trial=trial,
gen_dist=gen_dist,
stim_pos=stim_pos,
pulse_count=pulse_count.astype(int),
wait_iti=wait_iti,
))
# --- Assign trial components
# Map from trial to pulse
trial = np.concatenate([
np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1)
])
pulse = np.concatenate([
np.arange(c) + 1 for c in pulse_count
])
n_pulses = pulse_count.sum()
# Assign gaps between pulses
run_duration = np.inf
while not_in_range(run_duration, constraints.run_range):
wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng)
gap_dur = flexible_values(p.pulse_gap, n_pulses, rng)
run_duration = np.sum([
wait_iti.sum(),
wait_pre_stim.sum(),
gap_dur.sum(),
p.pulse_dur * n_pulses,
])
# Use the first random sample if we're not being precise
# about the overall time of the run (i.e. in psychophys rig)
if not p.keep_on_time:
break
# Assign pulse intensities
max_contrast = np.log10(1 / np.sqrt(p.stim_gratings))
log_contrast = np.zeros(n_pulses)
pulse_dist = np.concatenate([
np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist)
])
llr_mean = np.inf
llr_sd = np.inf
expected_acc = np.inf
while (not_in_range(llr_mean, constraints.mean_range)
or not_in_range(llr_sd, constraints.sd_range)
or not_in_range(expected_acc, constraints.acc_range)):
for i in [0, 1]:
dist = "norm", p.dist_means[i], p.dist_sds[i]
rows = pulse_dist == i
n = rows.sum()
log_contrast[rows] = flexible_values(dist, n, rng,
max=max_contrast)
pulse_llr = compute_llr(log_contrast, p)
target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr)
llr_mean = target_llr.mean()
llr_sd = target_llr.std()
dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum()
dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count)
expected_acc = stats.norm(dv, dv_sd).sf(0).mean()
# --- Build the pulse_info structure
pulse_info = pd.DataFrame(dict(
trial=trial,
pulse=pulse,
gap_dur=gap_dur,
log_contrast=log_contrast,
contrast=10 ** log_contrast,
pulse_llr=pulse_llr,
))
# --- Update the trial_info structure
trial_info["wait_pre_stim"] = wait_pre_stim
trial_llr = (pulse_info
.groupby("trial")
.sum()
.loc[:, "pulse_llr"]
.rename("trial_llr"))
trial_info = trial_info.join(trial_llr, on="trial")
# TODO reorder the columns so they are more intuitively organized?
return trial_info, pulse_info
# --- Support functions for block generation
def not_in_range(val, limits):
"""False if val is outside of limits."""
return val < limits[0] or val > limits[1]
def max_repeat(s):
"""Maximumum number of times the same value repeats in sequence."""
s = pd.Series(s)
switch = s != s.shift(1)
return switch.groupby(switch.cumsum()).cumcount().max() + 1
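# Editor's hedged example: max_repeat measures the longest run of identical
# consecutive values, e.g. [0, 0, 1, 1, 1, 0] has a run of three 1s.
def _demo_max_repeat():
    assert max_repeat([0, 0, 1, 1, 1, 0]) == 3
    assert max_repeat(["a", "b", "a"]) == 1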
def trunc_geom_pmf(support, p):
"""Probability mass given truncated geometric distribution."""
a, b = min(support) - 1, max(support)
dist = stats.geom(p=p, loc=a)
return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a))
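# Editor's hedged example: over its full support the truncated geometric pmf
# should sum to one (up to floating point error).
def _demo_trunc_geom_pmf():
    support = np.arange(1, 6)
    pmf = trunc_geom_pmf(support, p=0.25)
    assert abs(pmf.sum() - 1.0) < 1e-6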
def compute_llr(c, p):
"""Signed LLR of pulse based on contrast and generating distributions."""
m0, m1 = p.dist_means
s0, s1 = p.dist_sds
d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1)
l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c))
llr = l1 - l0
return llr
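# Editor's hedged example: a contrast at the mean of the "high" generating
# distribution gives a positive signed LLR, and one at the "low" mean gives a
# negative LLR. The parameter values are illustrative, not the experiment's
# actual defaults.
def _demo_compute_llr():
    from types import SimpleNamespace
    p = SimpleNamespace(dist_means=(-1.1, -0.9), dist_sds=(0.2, 0.2))
    assert compute_llr(-0.9, p) > 0
    assert compute_llr(-1.1, p) < 0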
# --- Exeperiment execution
def run_trial(exp, info):
"""Function that executes what happens in each trial."""
t_info, p_info = info
# ~~~ Set trial-constant attributes of the stimuli
exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos]
exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos]
# ~~~ Inter-trial interval
exp.s.fix.color = exp.p.fix_iti_color
if exp.p.keep_on_time:
exp.wait_until(t_info["start_time"], draw="fix", check_abort=True)
else:
exp.wait_until(exp.iti_end, draw="fix", check_abort=True,
iti_duration=t_info.wait_iti)
# ~~~ Trial onset
t_info["onset_fix"] = exp.clock.getTime()
exp.s.fix.color = exp.p.fix_ready_color
if exp.p.enforce_fix:
res = exp.wait_until(AcquireFixation(exp),
timeout=exp.p.wait_fix,
draw="fix")
if res is None:
t_info["result"] = "nofix"
exp.sounds.nofix.play()
return t_info, p_info
for frame in exp.frame_range(seconds=exp.p.wait_start):
exp.check_fixation(allow_blinks=True)
exp.draw("fix")
# ~~~ Pre-stimulus period
exp.s.fix.color = exp.p.fix_trial_color
prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim,
yield_skipped=True)
for frame, skipped in prestim_frames:
if not exp.check_fixation(allow_blinks=True):
if exp.p.enforce_fix:
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
else:
t_info["fixbreaks"] += 1
flip_time = exp.draw(["fix", "cue", "targets"])
if not frame:
t_info["onset_targets"] = flip_time
t_info["onset_cue"] = flip_time
# ~~~ Stimulus period
for p, info in p_info.iterrows():
# Allow aborts in the middle of a trial
exp.check_abort()
# Update the pattern
exp.s.pattern.contrast = info.contrast
exp.s.pattern.randomize_phases()
# TODO commenting out until we get a good solution for saving these
# Currently it errors out (maybe because the info df isn't seeded?)
# p_info.loc[p, "phases"] = exp.s.pattern.array.phases
# Check if the eye is blinking and possibly wait a bit if so
blink_pad_start = exp.clock.getTime()
for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout):
if exp.check_fixation():
break
exp.draw(["fix", "cue", "targets"])
# TODO do we want to wait a smidge if they were blinking before
# showing the stimulus? How much vision do people have right when
# they come out of the blink (according to Eyelink?)
# TODO can we make life easier later by updating the gap duration
# information or are we just going to have to deal?
p_info.loc[p, "blink_pad"] = exp.clock.getTime() - blink_pad_start
# Show each frame of the stimulus
for frame in exp.frame_range(seconds=info.pulse_dur):
if not exp.check_fixation(allow_blinks=True):
if exp.p.enforce_fix:
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
else:
t_info["fixbreaks"] += 1
stims = ["fix", "cue", "targets", "pattern"]
flip_time = exp.draw(stims)
if not frame:
exp.tracker.send_message("pulse_onset")
p_info.loc[p, "occurred"] = True
p_info.loc[p, "pulse_onset"] = flip_time
blink = not exp.tracker.check_eye_open(new_sample=False)
p_info.loc[p, "blink"] |= blink
# This counter is reset at beginning of frame_range
# so it should correspond to frames dropped during the stim
p_info.loc[p, "dropped_frames"] = exp.win.nDroppedFrames
for frame in exp.frame_range(seconds=info.gap_dur):
if not exp.check_fixation(allow_blinks=True):
if exp.p.enforce_fix:
exp.sounds.fixbreak.play()
exp.flicker("fix")
t_info["result"] = "fixbreak"
t_info["offset_cue"] = exp.clock.getTime()
return t_info, p_info
else:
t_info["fixbreaks"] += 1
flip_time = exp.draw(["fix", "cue", "targets"])
# Record the time of first flip as the offset of the last pulse
if not frame:
p_info.loc[p, "pulse_offset"] = flip_time
# ~~~ Response period
# Collect the response
now = exp.clock.getTime()
t_info["offset_fix"] = now
t_info["offset_cue"] = now
response_handler = AcquireTarget(exp, t_info.target,
allow_retry=not exp.p.enforce_fix)
res = exp.wait_until(response_handler,
timeout=exp.p.wait_resp,
draw="targets")
if res is None:
t_info["result"] = "nochoice"
else:
t_info.update(pd.Series(res))
# Give feedback
t_info["onset_feedback"] = exp.clock.getTime()
exp.sounds[t_info.result].play()
exp.show_feedback("targets", t_info.result, t_info.response)
exp.wait_until(timeout=exp.p.wait_feedback, draw=["targets"])
exp.s.targets.color = exp.p.target_color
# Prepare for the inter-trial interval
exp.s.fix.color = exp.p.fix_iti_color
exp.draw("fix")
return t_info, p_info
def serialize_trial_info(exp, info):
"""Package trial information for the remote."""
t_info, _ = info
return t_info.to_json()
def compute_performance(self):
"""Compute run-wise performance information."""
# TODO Track fixation breaks here? Also in the remote?
if self.trial_data:
data = pd.DataFrame([t for t, _ in self.trial_data])
mean_acc = data["correct"].mean()
responses = data["responded"].sum()
return mean_acc, responses
else:
return None, None
def show_performance(exp, run_correct, run_trials):
"""Show the subject a report of their performance."""
lines = ["End of the run!"]
prior_trials = prior_correct = 0
output_dir = os.path.dirname(exp.output_stem)
prior_fnames = glob(os.path.join(output_dir, "*_trials.csv"))
if prior_fnames:
prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames])
prior_trials = len(prior_data)
if prior_trials:
prior_correct = prior_data["correct"].mean()
if run_correct is not None:
lines.extend([
"", "You got {:.0%} correct!".format(run_correct),
])
if (prior_trials + run_trials):
total_correct = np.average([prior_correct, run_correct],
weights=[prior_trials, run_trials])
lines.extend([
"", "You're at {:.0%} correct today!".format(total_correct),
])
n = len(lines)
height = .5
heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height
for line, y in zip(lines, heights):
TextStim(exp.win, line, pos=(0, y), height=height).draw()
exp.win.flip()
def save_data(exp):
"""Output data files to disk."""
if exp.trial_data and exp.p.save_data:
trial_data = [t_data for t_data, _ in exp.trial_data]
pulse_data = [p_data for _, p_data in exp.trial_data]
data = pd.DataFrame(trial_data)
out_data_fname = exp.output_stem + "_trials.csv"
data.to_csv(out_data_fname, index=False)
data = pd.concat(pulse_data)
out_data_fname = exp.output_stem + "_pulses.csv"
data.to_csv(out_data_fname, index=False)
out_json_fname = exp.output_stem + "_params.json"
with open(out_json_fname, "w") as fid:
json.dump(exp.p, fid, sort_keys=True, indent=4)
# ----------------------------------------------------------------------- #
# Demo-related code
# ----------------------------------------------------------------------- #
def demo_mode(exp):
exp.wait_until("space", draw="fix", check_abort=True)
exp.s.fix.color = exp.p.fix_trial_color
exp.wait_until("space", draw=["fix", "targets"], check_abort=True)
all_stims = ["fix", "targets", "cue", "pattern"]
exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means)
for pos in [0, 1]:
exp.s.cue.pos = exp.p.stim_pos[pos]
exp.s.pattern.pos = exp.p.stim_pos[pos]
exp.wait_until("space", draw=all_stims, check_abort=True)
for frame in exp.frame_range(seconds=1):
exp.draw(["fix", "targets", "cue"])
for frame in exp.frame_range(seconds=exp.p.pulse_dur):
exp.draw(all_stims)
exp.wait_until("space", draw=["fix", "targets", "cue"], check_abort=True)
exp.wait_until("space", draw=all_stims, check_abort=True)
exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1])
exp.wait_until("space", draw=all_stims, check_abort=True)
exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0])
exp.wait_until("space", draw=all_stims, check_abort=True)
exp.s["box_l"] = StimBox(exp, [-7, 0], 0)
exp.s["box_h"] = StimBox(exp, [+7, 0], 1)
exp.wait_until("space", draw=["fix", "box_h", "box_l"], check_abort=True)
exp.sounds["correct"].play()
exp.wait_until("space", draw="fix", check_abort=True)
exp.sounds["wrong"].play()
exp.wait_until("space", draw="fix", check_abort=True)
exp.sounds["fixbreak"].play()
exp.wait_until("space", draw="fix", check_abort=True)
def poisson_disc_sample(size, radius, candidates=100, rng=None):
"""Find positions using poisson-disc sampling."""
# TODO make more general and move into visigoth
# TODO currently assumes square array
# See http://bost.ocks.org/mike/algorithms/
if rng is None:
rng = np.random.RandomState()
uniform = rng.uniform
randint = rng.randint
# Start at a fixed point we know will work
start = 0, 0
samples = [start]
queue = [start]
while queue:
# Pick a sample to expand from
s_idx = randint(len(queue))
s_x, s_y = queue[s_idx]
for i in range(candidates):
# Generate a candidate from this sample
a = uniform(0, 2 * np.pi)
r = uniform(radius, 2 * radius)
x, y = s_x + r * np.cos(a), s_y + r * np.sin(a)
# Check the three conditions to accept the candidate
in_array = (np.abs(x) < size / 2) & (np.abs(y) < size / 2)
in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius)
if in_array and in_ring:
# Accept the candidate
samples.append((x, y))
queue.append((x, y))
break
if (i + 1) == candidates:
# We've exhausted the particular sample
queue.pop(s_idx)
return np.asarray(samples)
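# Editor's hedged example: sample an 8 x 8 square with minimum spacing 1 and
# check that no two accepted points violate the spacing constraint.
def _demo_poisson_disc_sample():
    pts = poisson_disc_sample(size=8, radius=1.0, rng=np.random.RandomState(0))
    pairwise = distance.squareform(distance.pdist(pts))
    pairwise[np.diag_indices_from(pairwise)] = np.inf
    assert pairwise.min() > 1.0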
class StimBox(object):
def __init__(self, exp, center, dist, size=8):
stim_sf = exp.p.stim_sf * 2
stim_size = exp.p.stim_size / 5
xy = poisson_disc_sample(size, stim_size)
xy[:, 0] += center[0]
xy[:, 1] += center[1]
self.box = Rect(exp.win,
size + stim_size, size + stim_size,
pos=center,
fillColor=exp.win.color,
lineColor="white")
self.patterns = patterns = []
for xy_i in xy:
pattern = Pattern(exp.win,
n=exp.p.stim_gratings,
elementTex=exp.p.stim_tex,
elementMask=exp.p.stim_mask,
sizes=stim_size,
sfs=stim_sf,
pos=xy_i)
patterns.append(pattern)
n = len(patterns)
qs = np.linspace(.05, .95, n)
m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist]
cs = 10 ** stats.norm.ppf(qs, m, s)
for pat, c in zip(patterns, cs):
pat.contrast = c
def draw(self):
self.box.draw()
for p in self.patterns:
p.draw()
| 1.96875 | 2 |
pi4home/components/binary_sensor/rdm6300.py | khzd/pi4home | 1 | 12793827 |
import voluptuous as vol
from pi4home.components import binary_sensor, rdm6300
import pi4home.config_validation as cv
from pi4home.const import CONF_NAME, CONF_UID
from pi4home.cpp_generator import get_variable
DEPENDENCIES = ['rdm6300']
CONF_RDM6300_ID = 'rdm6300_id'
RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor',
binary_sensor.BinarySensor)
PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({
cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor),
vol.Required(CONF_UID): cv.uint32_t,
cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component)
}))
def to_code(config):
for hub in get_variable(config[CONF_RDM6300_ID]):
yield
rhs = hub.make_card(config[CONF_NAME], config[CONF_UID])
binary_sensor.register_binary_sensor(rhs, config)
| 2.203125 | 2 |
aiocloudflare/api/accounts/storage/analytics/analytics.py | Stewart86/aioCloudflare | 2 | 12793828 |
from aiocloudflare.commons.auth import Auth
from .stored.stored import Stored
class Analytics(Auth):
_endpoint1 = "accounts"
_endpoint2 = "storage/analytics"
_endpoint3 = None
@property
def stored(self) -> Stored:
return Stored(self._config, self._session)
| 1.84375 | 2 |
Python/processors/process_xsum.py | Williamyzd/NonIntNLP | 1 | 12793829 |
from transformers import XLNetTokenizer
import glob, os
from multiprocessing import Pool
import random
import torch
# This function processes news articles gathered and preprocessed by the XSum data processor:
# https://github.com/EdinburghNLP/XSum
#
# The pre-processor generates a large number of files, each of which corresponds to a single article. The format of
# each file is text, with the following format:
# [XSUM]URL[XSUM]
# <URL where article originates from>
# [XSUM]INTRODUCTION[XSUM]
# <Summary of the article>
# [XSUM]RESTBODY[XSUM]
# <Article text>
JUNK_HEADER_TEXT = ["Share this with\n",
"Email\n",
"FaceBook\n",
"Facebook\n",
"Messenger\n",
"Twitter\n",
"Pinterest\n",
"WhatsApp\n",
"LinkedIn\n",
"Linkedin\n",
"Copy this link\n",
"These are external links and will open in a new window\n"]
# Processes the contents of an XSUM file and returns a dict: {'text', 'summary'}
def map_read_files(filepath):
with open(filepath, encoding="utf-8") as file:
content = file.read()
SUMMARY_INDEX = 4
TEXT_INDEX = 6
splitted = content.split("[XSUM]")
summary = splitted[SUMMARY_INDEX].strip()
text = splitted[TEXT_INDEX]
for junk in JUNK_HEADER_TEXT:
text = text.replace(junk, "").strip()
# Reject articles whose text or summary is too short; these tend to be very bad examples.
if len(text) < 1024:
return None
if len(summary) < 30:
return None
return {"summary": summary, "text": text}
tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# This is a map function for tokenizing news articles. It returns a dict:
# { 'text' { input_ids_as_tensor },
# 'target' { input_ids_as_tensor } }
def map_tokenize_news(processed):
text = processed["text"]
text_enc = tok.encode(
text, add_special_tokens=False, max_length=None, pad_to_max_length=False
)
title = processed["summary"]
# Encode the summary (the target sequence) separately from the article text.
title_enc = tok.encode(
title, add_special_tokens=False, max_length=None, pad_to_max_length=False
)
# Push resultants to a simple list and return it
return {
"text": torch.tensor(text_enc, dtype=torch.long),
"target": torch.tensor(title_enc, dtype=torch.long),
}
if __name__ == "__main__":
# Fetch the news.
folder = "C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads"
os.chdir(folder)
files = glob.glob("*.data")
output_folder = "/".join([folder, "outputs"])
p = Pool(20)
# Basic workflow:
# MAP: [list of files to process] => list_of_news
# MAP: [single list of shuffled news] => map_tokenize_news
# REDUCE: [tokenized results] => [single list of tokenized results]
print("Reading from files..")
all_texts = p.map(map_read_files, files)
all_texts = [m for m in all_texts if m is not None]
print("Tokenizing news..")
all_news = p.map(map_tokenize_news, all_texts)
print("Writing news to output file.")
random.shuffle(all_news)
val_news = all_news[0:2048]
test_news = all_news[2048:6144]
train_news = all_news[6144:]
if not os.path.exists(output_folder):
os.makedirs(output_folder)
torch.save(train_news, "/".join([output_folder, "train.pt"]))
torch.save(val_news, "/".join([output_folder, "val.pt"]))
torch.save(test_news, "/".join([output_folder, "test.pt"]))
| 3.078125 | 3 |
openeo/extra/spectral_indices/__init__.py | bontekasper/openeo-python-client | 75 | 12793830 | """
Easily calculate spectral indices (vegetation, water, urban etc.).
Supports the indices defined in the
`Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project
by `<NAME> <https://github.com/davemlz>`_.
.. versionadded:: 0.9.1
"""
from openeo.extra.spectral_indices.spectral_indices import *
| 1.445313 | 1 |
sipam/serializers/__init__.py | Selfnet/sipam | 2 | 12793831 |
from .assignment import AssignmentSerializer
from .cidr import CIDRSerializer, RecursiveCIDRSerializer
from .pool import PoolSerializer
from .label import LabelSerializer
| 1.3125 | 1 |
koyeb_nb2/__init__.py | ffreemt/koyeb-nb2 | 4 | 12793832 | """Init."""
__version__ = "0.1.0"
from .koyeb_nb2 import koyeb_nb2
# from .nb2chan import nb2chan
__all__ = ("koyeb_nb2",)
| 0.984375 | 1 |
medium/15-3Sum.py | Davidxswang/leetcode | 2 | 12793833 | """
https://leetcode.com/problems/3sum/
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note:
The solution set must not contain duplicate triplets.
Example:
Given array nums = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
"""
# time complexity: O(n^2), space complexity: O(1)
# This is such a classic question; I got the inspiration from @christopherwu0529 in the discussion area.
# The idea is to first sort nums in O(nlogn), then, for each number in the list, sweep two pointers over the numbers after it.
# To make it faster, early stop if the fixed number is larger than 0, because there will be no way we can make the sum equal to 0.
from typing import List


class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
nums.sort()
result = []
i = 0
while i < len(nums) - 2:
if nums[i] > 0:
break
if i == 0 or nums[i] != nums[i-1]:
low = i + 1
high = len(nums) - 1
while low < high:
if nums[i] + nums[low] + nums[high] == 0:
result.append([nums[i], nums[low], nums[high]])
while low < high and nums[low] == nums[low+1]:
low += 1
while low < high and nums[high] == nums[high-1]:
high -= 1
low += 1
high -= 1
elif nums[i] + nums[low] + nums[high] < 0:
low += 1
else:
high -= 1
i += 1
return result
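# Editor's hedged example: the test case from the problem statement above.
# Triplet ordering depends on the sort, so compare as sets.
if __name__ == "__main__":
    found = Solution().threeSum([-1, 0, 1, 2, -1, -4])
    assert {tuple(t) for t in found} == {(-1, -1, 2), (-1, 0, 1)}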
| 3.578125 | 4 |
mpl_interactions/ipyplot.py | redeboer/mpl-interactions | 67 | 12793834 | from .pyplot import interactive_axhline as axhline
from .pyplot import interactive_axvline as axvline
from .pyplot import interactive_hist as hist
from .pyplot import interactive_imshow as imshow
from .pyplot import interactive_plot as plot
from .pyplot import interactive_scatter as scatter
from .pyplot import interactive_title as title
from .pyplot import interactive_xlabel as xlabel
from .pyplot import interactive_ylabel as ylabel
| 1.203125 | 1 |
codes/algorithms/Harmony.py | pocokhc/metaheuristics | 1 | 12793835 | import math
import random
from ..algorithm_common import AlgorithmCommon as AC
from ..algorithm_common import IAlgorithm
class Harmony(IAlgorithm):
def __init__(self,
harmony_max,
bandwidth=0.1,
enable_bandwidth_rate=False,
select_rate=0.8,
change_rate=0.3,
):
self.harmony_max = harmony_max
self.bandwidth = bandwidth
self.enable_bandwidth_rate = enable_bandwidth_rate
self.select_rate = select_rate
self.change_rate = change_rate
def init(self, problem):
self.problem = problem
self.count = 0
self.harmonys = []
for _ in range(self.harmony_max):
self.harmonys.append(problem.create())
def getMaxElement(self):
self.harmonys.sort(key=lambda x: x.getScore())
return self.harmonys[-1]
def getElements(self):
return self.harmonys
def step(self):
# Create a new harmony
arr = []
for i in range(self.problem.size):
if random.random() < self.select_rate:
# Generate a brand-new note at random
arr.append(self.problem.randomVal())
continue
# Pick one existing harmony at random
h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray()
if random.random() < self.change_rate:
# 和音を変更
if self.enable_bandwidth_rate:
# Interpret bandwidth as a fraction of the value range
bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL)
else:
bandwidth = self.bandwidth
n = h_arr[i] + bandwidth * (random.random()*2-1)
arr.append(n)
else:
# Copy the note unchanged
arr.append(h_arr[i])
harmony = self.problem.create(arr)
self.count += 1
# Replace the worst harmony if the new one scores higher
self.harmonys.sort(key=lambda x: x.getScore())
if self.harmonys[0].getScore() < harmony.getScore():
self.harmonys[0] = harmony
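# --- Editor's hedged example (not part of the original package) -------------
# A minimal stand-in problem implementing only the attributes Harmony touches
# above (create, randomVal, size, MIN_VAL/MAX_VAL, and elements exposing
# getScore/getArray). It rewards points near the origin, so the best harmony
# should drift toward zero. The "_Sphere*" names are hypothetical.
class _SphereElement:
    def __init__(self, arr):
        self._arr = list(arr)

    def getArray(self):
        return self._arr

    def getScore(self):
        # Higher is better for Harmony, so negate the squared distance.
        return -sum(x * x for x in self._arr)


class _SphereProblem:
    MIN_VAL = -5.0
    MAX_VAL = 5.0
    size = 3

    def randomVal(self):
        return random.uniform(self.MIN_VAL, self.MAX_VAL)

    def create(self, arr=None):
        if arr is None:
            arr = [self.randomVal() for _ in range(self.size)]
        return _SphereElement(arr)


def _demo_harmony(steps=200):
    algo = Harmony(harmony_max=10)
    algo.init(_SphereProblem())
    for _ in range(steps):
        algo.step()
    return algo.getMaxElement().getScore()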
| 2.953125 | 3 |
autos/test/utils/test_date.py | hans-t/autos | 1 | 12793836 | import datetime
import unittest
import autos.utils.date as date
class TestDateRange(unittest.TestCase):
def test_returns_today_date_as_default(self):
actual = list(date.date_range())
expected = [datetime.date.today()]
self.assertEqual(actual, expected)
def test_returns_correct_range(self):
actual = list(date.date_range(
since=(datetime.date.today() - datetime.timedelta(days=3)),
until=(datetime.date.today() - datetime.timedelta(days=1)),
))
expected = [
(datetime.date.today() - datetime.timedelta(days=1)),
(datetime.date.today() - datetime.timedelta(days=2)),
(datetime.date.today() - datetime.timedelta(days=3)),
]
self.assertEqual(actual, expected)
class TestGetPastDate(unittest.TestCase):
def test_returns_today_date_by_default(self):
actual = date.get_past_date()
expected = (datetime.date.today() - datetime.timedelta(days=0))
self.assertEqual(actual, expected)
def test_returns_past_3_days_ago_date(self):
actual = date.get_past_date(days=3)
expected = datetime.date.today() - datetime.timedelta(days=3)
self.assertEqual(actual, expected)
def test_returns_past_5_weeks_ago_date(self):
actual = date.get_past_date(weeks=5)
expected = datetime.date.today() - datetime.timedelta(weeks=5)
self.assertEqual(actual, expected)
def test_returns_past_3_days_and_2_weeks_ago_date(self):
actual = date.get_past_date(days=3, weeks=2)
expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2)
self.assertEqual(actual, expected)
def test_returns_future_date_on_negative_input(self):
actual = date.get_past_date(days=-3, weeks=-2)
expected = datetime.date.today() + datetime.timedelta(days=3, weeks=2)
self.assertEqual(actual, expected)
| 2.9375 | 3 |
shawl/core/_base_collection.py | oiakinat/shawl | 3 | 12793837 | # -*- coding: utf-8 -*-
from typing import Iterator, List, Optional, Tuple
from selenium.common.exceptions import (
StaleElementReferenceException,
TimeoutException
)
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.expected_conditions import (
presence_of_all_elements_located,
visibility_of_all_elements_located,
visibility_of_any_elements_located
)
from selenium.webdriver.support.wait import WebDriverWait
from ..config import SHAWL_CONFIG as CONFIG
from ..exceptions import NoSuchElementsException
from ..utils._waits import wait_until
class BaseCollection:
"""
This class is the base for all PageElement collections.
It is a wrapper around a list of WebElement.
The `collection` property contains
the list of WebElement and loads it lazily.
It will wait for any WebElement to be present in the DOM for
`SHAWL_LAZY_LOAD_TIMEOUT` seconds.
Also, you can work with this class instance as with basic list.
For example::
base_collection = BaseCollection(driver, **{'css selector': 'div'})
for element in base_collection:
print(element.text)
first_element = base_collection[0]
assert len(base_collection) == 50
"""
def __init__(self,
driver: WebDriver,
repr_name: Optional[str] = None,
**locators):
self._driver: WebDriver = driver
self._selector: Tuple[str, str] = list(locators.items())[0]
self._collection: List[WebElement] = []
self._repr_name: str = repr_name or (f'{self.__class__.__name__}: '
f'{self._selector}')
def __str__(self) -> str:
return f'Selector: {self._selector}, Collection: {self._collection}'
def __repr__(self) -> str:
return self._repr_name
def __len__(self) -> int:
return len(self.collection)
def __iter__(self) -> Iterator[WebElement]:
return iter(self.collection)
def __getitem__(self, item) -> WebElement:
return self.collection[item]
def __bool__(self) -> bool:
return bool(self.collection)
def _load(self):
try:
self._collection = WebDriverWait(
self._driver,
CONFIG.lazy_load_timeout
).until(presence_of_all_elements_located(self._selector))
except TimeoutException as t_exc:
raise NoSuchElementsException(
'no such elements: '
'Unable to locate elements: '
'{"method":"%s","selector":"%s"}' % self._selector) from t_exc
def _return_locator(self, selector_type: str) -> str:
if self._selector[0] == selector_type:
return self._selector[1]
return ''
@property
def selector(self) -> Tuple[str, str]:
return self._selector
@property
def id(self) -> str:
# pylint: disable=invalid-name
return self._return_locator('id')
@property
def xpath(self) -> str:
return self._return_locator('xpath')
@property
def link_text(self) -> str:
return self._return_locator('link text')
@property
def partial_link_text(self) -> str:
return self._return_locator('partial link text')
@property
def name(self) -> str:
return self._return_locator('name')
@property
def tag_name(self) -> str:
return self._return_locator('tag name')
@property
def class_name(self) -> str:
return self._return_locator('class name')
@property
def css_selector(self) -> str:
return self._return_locator('css selector')
@property
def collection(self) -> List[WebElement]:
if not self._collection or not isinstance(self._collection, list):
self._load()
try:
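# Touching .location on every cached element forces a round trip to the
# driver, so stale references surface here and trigger a reload below.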
for e in self._collection:
isinstance(e.location, dict)
except StaleElementReferenceException:
self._load()
return self._collection
def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool:
"""
Check that at least one element from collection is visible
on a web page during 'wait' seconds.
Returns True if at least one element from collection is visible,
False otherwise
"""
return wait_until(self._driver,
wait,
visibility_of_any_elements_located(self._selector))
def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool:
"""
Check that all elements from collection are present on the DOM of
a page and visible during 'wait' seconds.
Returns True if all elements from collection are visible,
False otherwise
"""
return wait_until(self._driver,
wait,
visibility_of_all_elements_located(self._selector))
def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool:
"""
Check that at least one element from collection is present
on a web page during 'wait' seconds.
Returns True if at least one element from collection is present,
False otherwise
"""
return wait_until(self._driver,
wait,
presence_of_all_elements_located(self._selector))
__all__ = ['BaseCollection']
| 2.84375 | 3 |
examples/ReadDemo.py | Ellis0817/Introduction-to-Programming-Using-Python | 0 | 12793838 |
def main():
# Open file for input
infile = open("Presidents.txt", "r")
print("(1) Using read(): ")
print(infile.read())
infile.close() # Close the input file
# Open file for input
infile = open("Presidents.txt", "r")
print("\n(2) Using read(number): ")
s1 = infile.read(4)
print(s1)
s2 = infile.read(10)
print(repr(s2))
infile.close() # Close the input file
# Open file for input
infile = open("Presidents.txt", "r")
print("\n(3) Using readline(): ")
line1 = infile.readline()
line2 = infile.readline()
line3 = infile.readline()
line4 = infile.readline()
print(repr(line1))
print(repr(line2))
print(repr(line3))
print(repr(line4))
infile.close() # Close the input file
# Open file for input
infile = open("Presidents.txt", "r")
print("\n(4) Using readlines(): ")
print(infile.readlines())
infile.close() # Close the input file
main() # Call the main function
| 3.703125 | 4 |
octoprint_marlin_flasher/flasher/base_flasher.py | Renaud11232/Octoprint-Marlin-Flasher | 0 | 12793839 | from .flasher_error import FlasherError
import time
import flask
import requests
import tempfile
import os
import re
from threading import Thread
class BaseFlasher:
def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger):
self._settings = settings
self._printer = printer
self._plugin = plugin
self._plugin_manager = plugin_manager
self._identifier = identifier
self._logger = logger
self._firmware = None
self._firmware_version = None
self._firmware_author = None
self._firmware_upload_time = None
self._should_run_post_script = False
self._flash_status = None
def _background_run(self, target, args=None):
thread = Thread(target=target, args=args)
thread.start()
return thread
def _run_pre_flash_script(self):
pre_flash_script = self._settings.get_pre_flash_script()
if pre_flash_script:
self._logger.debug("Running pre-flash GCode script :")
self._logger.debug(pre_flash_script)
commands = [line.strip() for line in pre_flash_script.splitlines()]
self._printer.commands(commands)
else:
self._logger.debug("No pre-flash GCode script defined")
def _wait_pre_flash_delay(self):
self._logger.debug("Waiting pre-flash delay...")
time.sleep(self._settings.get_pre_flash_delay())
def _run_post_flash_script(self):
post_flash_script = self._settings.get_post_flash_script()
if post_flash_script:
self._logger.debug("Running post-flash script")
self._logger.debug(post_flash_script)
commands = [line.strip() for line in post_flash_script.splitlines()]
self._printer.commands(commands)
else:
self._logger.debug("No script defined")
def _wait_post_flash_delay(self):
self._logger.debug("Waiting post-flash delay...")
time.sleep(self._settings.get_post_flash_delay())
def _validate_firmware_file(self, file_path):
raise FlasherError("Unsupported function call.")
def handle_connected_event(self):
if self._should_run_post_script:
self._run_post_flash_script()
self._should_run_post_script = False
def check_setup_errors(self):
raise FlasherError("Unsupported function call.")
def upload(self):
self._logger.debug("Firmware uploaded by the user")
uploaded_file_path = flask.request.values["firmware_file." + self._settings.get_upload_path_suffix()]
errors = self._validate_firmware_file(uploaded_file_path)
if errors:
self._push_firmware_info()
return None, errors
result = self._handle_firmware_file(uploaded_file_path)
self._push_firmware_info()
return result
def download(self):
self._logger.debug("Downloading firmware...")
r = requests.get(flask.request.values["url"])
self._logger.debug("Saving downloaded firmware...")
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(r.content)
temp_path = temp.name
errors = self._validate_firmware_file(temp_path)
if errors:
self._push_firmware_info()
os.remove(temp_path)
return None, errors
result = self._handle_firmware_file(temp_path)
self._push_firmware_info()
self._logger.debug("Clearing downloaded firmware...")
os.remove(temp_path)
return result
def _handle_firmware_file(self, firmware_file_path):
raise FlasherError("Unsupported function call.")
def _find_firmware_info(self):
for root, dirs, files in os.walk(self._firmware):
for f in files:
if f == "Version.h":
self._logger.debug("Found Version.h, opening it...")
with open(os.path.join(root, f), "r") as version_file:
for line in version_file:
version = re.findall(r'#define +SHORT_BUILD_VERSION +"([^"]*)"', line)
if version:
self._firmware_version = version[0]
self._logger.debug("Found SHORT_BUILD_VERSION : %s" % self._firmware_version)
break
elif f == "Configuration.h":
self._logger.debug("Found Configuration.h, opening it...")
with open(os.path.join(root, f), "r") as configfile:
for line in configfile:
author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +"([^"]*)"', line)
if author:
self._firmware_author = author[0]
self._logger.debug("Found STRING_CONFIG_H_AUTHOR : %s" % self._firmware_author)
break
def _firmware_info_event_name(self):
raise FlasherError("Undefined function call")
def _push_firmware_info(self):
self._logger.debug("Sending firmware info through websocket")
self._plugin_manager.send_plugin_message(self._identifier, dict(
type=self._firmware_info_event_name(),
version=self._firmware_version,
author=self._firmware_author,
upload_time=self._firmware_upload_time.strftime("%d/%m/%Y, %H:%M:%S") if self._firmware_upload_time is not None else None,
firmware=self._firmware
))
def _push_flash_status(self, event_name):
if self._flash_status:
data = dict(
type=event_name
)
data.update(self._flash_status)
self._plugin_manager.send_plugin_message(self._identifier, data)
def send_initial_state(self):
self._push_firmware_info()
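# --- Editor's hedged example (not part of the original plugin) --------------
# The two regular expressions used by _find_firmware_info above, applied to
# representative Marlin header lines. Standalone and illustrative only.
def _demo_firmware_info_regexes():
    version_line = '#define SHORT_BUILD_VERSION "2.0.9"'
    author_line = '#define STRING_CONFIG_H_AUTHOR "(none, default config)"'
    assert re.findall(r'#define +SHORT_BUILD_VERSION +"([^"]*)"', version_line) == ["2.0.9"]
    assert re.findall(r'#define +STRING_CONFIG_H_AUTHOR +"([^"]*)"', author_line) == ["(none, default config)"]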
| 2.25 | 2 |
OpenGLCffi/GLX/EXT/SGIX/dmbuffer.py | cydenix/OpenGLCffi | 0 | 12793840 | from OpenGLCffi.GLX import params
@params(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer'])
def glXAssociateDMPbufferSGIX(dpy, pbuffer, params, dmbuffer):
pass
| 1.742188 | 2 |
python/src/main/python/pygw/statistics/field/time_range_statistic.py | MC-JY/geowave | 0 | 12793841 | #
# Copyright (c) 2013-2022 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.config import geowave_pkg
from ..statistic import FieldStatistic
from ..statistic_type import FieldStatisticType
from ...base.interval import IntervalTransformer
class TimeRangeStatistic(FieldStatistic):
"""
Tracks the time range of a temporal field.
"""
STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE)
def __init__(self, type_name=None, field_name=None, java_ref=None):
if java_ref is None:
if type_name is None and field_name is None:
java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic()
else:
java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic(type_name, field_name)
super().__init__(java_ref, IntervalTransformer())
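# Editor's note: the short usage sketch below is an addition for illustration and
# is not part of the original module; "my_type" and "timestamp" are hypothetical
# type/field names, it needs the same pygw/GeoWave runtime as the rest of this
# module, and registering the statistic is left to the data store's statistics API.
def _example_time_range_statistic():  # pragma: no cover - illustrative only
    # Track the time range observed in the "timestamp" field of the "my_type" type.
    return TimeRangeStatistic(type_name="my_type", field_name="timestamp")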
| 1.882813 | 2 |
app/routers/__init__.py | gmelodie/pystebin | 0 | 12793842 | # This empty file allows us to
# use app/ as a python package
| 1.148438 | 1 |
src/brusher/c_highlighter.py | CihatAltiparmak/C-EDITOR | 11 | 12793843 | <filename>src/brusher/c_highlighter.py
#!/usr/bin/env python
from PyQt5.QtCore import QFile, QRegExp, Qt
from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor
from syntaxer import Syntax_Patcher
class C_Highlighter(QSyntaxHighlighter):
def __init__(self, parent=None):
super(C_Highlighter, self).__init__(parent)
self.parent = parent
parcher = Syntax_Patcher()
keyword_patterns = parcher.c_parcher()
# -----------------> data types format
data_types_patterns = parcher.c_parcher()
self.data_types_format = QTextCharFormat()
self.data_types_format.setForeground(self.give_color(data_types_patterns["data_types"]["color"])) #yellow
self.data_types_format.setFontWeight(QFont.ExtraBold)
self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns["data_types"]["words"]]
#print("data_types_uzunlugu: ",len(data_types_patterns["data_types"]["words"]))
# -----------------> function format
func_format = QTextCharFormat()
func_format.setForeground(Qt.red)
func_format.setFontWeight(QFont.Bold)
self.highlight_rules.append((QRegExp("\\b[A-Za-z0-9_]+(?=\\()"), func_format))
# -----------------> keyword format
self.keyword_format = QTextCharFormat()
self.keyword_format.setForeground(self.give_color(keyword_patterns["keywords"]["color"])) #white
self.keyword_format.setFontWeight(QFont.Bold)
for pattern in keyword_patterns["keywords"]["words"]:
self.highlight_rules.append((QRegExp(pattern), self.keyword_format))
# -----------------> single line comment format
single_comment_format = QTextCharFormat()
single_comment_format.setForeground(QColor(0,0,0,100))
single_comment_format.setFontWeight(QFont.Courier)
self.highlight_rules.append((QRegExp("//[^\n]*"), single_comment_format))
# -----------------> multiline comment format
self.multi_comment_format = QTextCharFormat()
self.multi_comment_format.setForeground(Qt.cyan)
self.multi_comment_format.setFontWeight(QFont.Courier)
# -----------------> single line string format
str_format = QTextCharFormat()
str_format.setForeground(Qt.magenta)
str_format.setFontWeight(QFont.DemiBold)
self.highlight_rules.append((QRegExp("\".*\""), str_format))
self.commentStart = QRegExp("/\\*")
self.commentEnd = QRegExp("\\*/")
def highlightBlock(self, text):
for pattern, format_ in self.highlight_rules:
expression = QRegExp(pattern)
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format_)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStart.indexIn(text, startIndex)
while startIndex >= 0:
endIndex = self.commentEnd.indexIn(text, startIndex)
if endIndex == -1:
self.setCurrentBlockState(1)
commentLength = len(text) - startIndex
else:
commentLength = endIndex - startIndex + self.commentEnd.matchedLength()
self.setFormat(startIndex, commentLength, self.multi_comment_format)
startIndex = self.commentStart.indexIn(text, startIndex + commentLength)
def give_color(self, color):
if color == "red":
return Qt.red
if color == "blue":
return Qt.blue
if color == "green":
return Qt.green
if color == "yellow":
return Qt.yellow
if color == "white":
return Qt.white
if color == "magenta":
return Qt.magenta
if color == "cyan":
return Qt.cyan
def _reinit__(self):
self.__init__(parent = self.parent)
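# Editor's note: illustrative usage sketch added here, not part of the original
# file. It assumes a QApplication is already running and that the syntaxer module
# imported above is available; the editor widget used is a plain QPlainTextEdit.
def _example_attach_highlighter():  # pragma: no cover - illustrative only
    from PyQt5.QtWidgets import QPlainTextEdit
    editor = QPlainTextEdit()
    # QSyntaxHighlighter accepts a QTextDocument parent and re-highlights it on edits.
    highlighter = C_Highlighter(editor.document())
    return editor, highlighter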
| 2.3125 | 2 |
pyradar/Chapter06/optimum_binary_example.py | mberkanbicer/software | 1 | 12793844 | """
Project: RadarBook
File: optimum_binary_example.py
Created by: <NAME>
On: 10/11/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow
from numpy import arange, ceil
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.qt_compat import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class OptimumBinary(QMainWindow, Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
# Connect to the input boxes, when the user presses enter the form updates
self.number_of_pulses.returnPressed.connect(self._update_canvas)
self.target_type.currentIndexChanged.connect(self._update_canvas)
# Set up a figure for the plotting canvas
fig = Figure()
self.axes1 = fig.add_subplot(111)
self.my_canvas = FigureCanvas(fig)
# Add the canvas to the vertical layout
self.verticalLayout.addWidget(self.my_canvas)
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))
# Update the canvas for the first display
self._update_canvas()
def _update_canvas(self):
"""
Update the figure when the user changes an input value.
:return:
"""
# Get the parameters from the form
number_of_pulses = int(self.number_of_pulses.text())
# Get the selected target type from the form
target_type = self.target_type.currentText()
if target_type == 'Swerling 0':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 1':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 2':
alpha = 0.91
beta = -0.38
elif target_type == 'Swerling 3':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 4':
alpha = 0.873
beta = -0.27
# Calculate the optimum choice for M
np = arange(1, number_of_pulses+1)
m_optimum = [ceil(10.0 ** beta * n ** alpha) for n in np]
# Clear the axes for the updated plot
self.axes1.clear()
# Display the results
self.axes1.plot(np, m_optimum, '')
# Set the plot title and labels
self.axes1.set_title('Optimum M for Binary Integration', size=14)
self.axes1.set_xlabel('Number of Pulses', size=12)
self.axes1.set_ylabel('M', size=12)
# Set the tick label size
self.axes1.tick_params(labelsize=12)
# Turn on the grid
self.axes1.grid(linestyle=':', linewidth=0.5)
# Update the canvas
self.my_canvas.draw()
def start():
form = OptimumBinary() # Set the form
form.show() # Show the form
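# Editor's note (added comment): start() assumes a QApplication already exists,
# e.g. when this example is launched from a larger Qt application, whereas main()
# below creates and runs its own QApplication for standalone use.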
def main():
app = QApplication(sys.argv) # A new instance of QApplication
form = OptimumBinary() # Set the form
form.show() # Show the form
app.exec_() # Execute the app
if __name__ == '__main__':
main()
| 2.453125 | 2 |
tests/test_auth.py | initfve/notify-server | 1 | 12793845 | <reponame>initfve/notify-server
import httpx
import pytest
import respx
from app import auth
@respx.mock
@pytest.mark.parametrize(
"side_effect, expected",
[
(httpx.Response(200), True),
(httpx.Response(201), True),
(httpx.ConnectError, False),
(httpx.ConnectTimeout, False),
(httpx.Response(400), False),
(httpx.Response(403), False),
(httpx.Response(405), False),
(httpx.Response(500), False),
(httpx.Response(429), False),
],
)
def test_url_authenticate_user(side_effect, expected):
request = respx.post(
"https://auth.example.org/login",
)
request.side_effect = side_effect
result = auth.url_authenticate_user("john", "secret")
assert request.called
assert result is expected
| 2.171875 | 2 |
tests/scheduler/instrument_coordinator/components/test_zhinst.py | quantify-os/quantify-scheduler | 1 | 12793846 | <reponame>quantify-os/quantify-scheduler
# Repository: https://gitlab.com/quantify-os/quantify-scheduler
# Licensed according to the LICENCE file on the master branch
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=redefined-outer-name
from __future__ import annotations
from typing import Any, Dict, Tuple
from pathlib import Path
from unittest.mock import call
import numpy as np
import pytest
from zhinst import qcodes
from quantify_scheduler.backends.zhinst import helpers as zi_helpers
from quantify_scheduler.backends.zhinst import settings
from quantify_scheduler.backends.zhinst_backend import (
ZIAcquisitionConfig,
ZIDeviceConfig,
)
from quantify_scheduler.instrument_coordinator.components import zhinst
from quantify_scheduler.types import Schedule
@pytest.fixture
def make_hdawg(mocker):
def _make_hdawg(
name: str, serial: str
) -> zhinst.HDAWGInstrumentCoordinatorComponent:
mocker.patch("qcodes.instrument.Instrument.record_instance")
hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True)
hdawg.name = name
hdawg._serial = serial
hdawg.awgs = [None] * 4
for i in range(4):
hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True)
component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg)
mocker.patch.object(component.instrument_ref, "get_instr", return_value=hdawg)
return component
yield _make_hdawg
@pytest.fixture
def make_uhfqa(mocker):
def _make_uhfqa(
name: str, serial: str
) -> zhinst.HDAWGInstrumentCoordinatorComponent:
mocker.patch("qcodes.instrument.Instrument.record_instance")
uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True)
uhfqa.name = name
uhfqa._serial = serial
uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True)
# the quantum analyzer setup "qas"
uhfqa.qas = [None] * 1
uhfqa.qas[0] = mocker.create_autospec(None, instance=True)
component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa)
mocker.patch.object(component.instrument_ref, "get_instr", return_value=uhfqa)
return component
yield _make_uhfqa
def test_initialize_hdawg(make_hdawg):
make_hdawg("hdawg0", "dev1234")
def test_hdawg_start(mocker, make_hdawg):
# Arrange
hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg("hdawg0", "dev1234")
get_awg_spy = mocker.patch.object(hdawg, "get_awg", wraps=hdawg.get_awg)
hdawg.zi_settings = settings.ZISettings(
list(),
[
(0, mocker.Mock()),
(1, mocker.Mock()),
(2, mocker.Mock()),
(3, mocker.Mock()),
],
)
# Act
hdawg.start()
# Assert
assert get_awg_spy.call_args_list == [
call(3),
call(2),
call(1),
call(0),
]
for i in range(4):
hdawg.get_awg(i).run.assert_called()
def test_hdawg_stop(mocker, make_hdawg):
# Arrange
hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg("hdawg0", "dev1234")
get_awg_spy = mocker.patch.object(hdawg, "get_awg", wraps=hdawg.get_awg)
hdawg.zi_settings = settings.ZISettings(
list(),
[
(0, mocker.Mock()),
(1, mocker.Mock()),
(2, mocker.Mock()),
(3, mocker.Mock()),
],
)
# Act
hdawg.stop()
# Assert
assert get_awg_spy.call_args_list == [
call(0),
call(1),
call(2),
call(3),
]
for i in range(4):
hdawg.get_awg(i).stop.assert_called()
def test_hdawg_prepare(mocker, make_hdawg):
# Arrange
hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg("hdawg0", "dev1234")
config = ZIDeviceConfig(
"hdawg0", Schedule("test"), settings.ZISettingsBuilder(), None
)
serialize = mocker.patch.object(settings.ZISettings, "serialize")
apply = mocker.patch.object(settings.ZISettings, "apply")
mocker.patch("quantify_core.data.handling.get_datadir", return_value=".")
# Act
hdawg.prepare(config)
# Assert
hdawg_serialize_settings = settings.ZISerializeSettings(
f"ic_{hdawg.instrument.name}", hdawg.instrument._serial, hdawg.instrument._type
)
serialize.assert_called_with(Path("."), hdawg_serialize_settings)
apply.assert_called_with(hdawg.instrument)
def test_hdawg_retrieve_acquisition(make_hdawg):
# Arrange
hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg("hdawg0", "dev1234")
# Act
acq_result = hdawg.retrieve_acquisition()
# Assert
assert acq_result is None
def test_hdawg_wait_done(mocker, make_hdawg):
# Arrange
hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg("hdawg0", "dev1234")
get_awg_spy = mocker.patch.object(hdawg, "get_awg", wraps=hdawg.get_awg)
hdawg.zi_settings = settings.ZISettings(
list(),
[
(0, mocker.Mock()),
(1, mocker.Mock()),
(2, mocker.Mock()),
(3, mocker.Mock()),
],
)
timeout: int = 20
# Act
hdawg.wait_done(timeout)
# Assert
assert get_awg_spy.call_args_list == [
call(3),
call(2),
call(1),
call(0),
]
for i in range(4):
hdawg.get_awg(i).wait_done.assert_called_with(timeout)
def test_initialize_uhfqa(make_uhfqa):
make_uhfqa("uhfqa0", "dev1234")
def test_uhfqa_start(mocker, make_uhfqa):
# Arrange
uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa("uhfqa0", "dev1234")
uhfqa.zi_settings = settings.ZISettings(
list(),
[
(0, mocker.Mock()),
],
)
# Act
uhfqa.start()
# Assert
uhfqa.instrument.awg.run.assert_called()
def test_uhfqa_stop(mocker, make_uhfqa):
# Arrange
uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa("uhfqa0", "dev1234")
uhfqa.zi_settings = settings.ZISettings(
list(),
[
(0, mocker.Mock()),
],
)
# Act
uhfqa.stop()
# Assert
uhfqa.instrument.awg.stop.assert_called()
def test_uhfqa_prepare(mocker, make_uhfqa):
# Arrange
uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa("uhfqa0", "dev1234")
config = ZIDeviceConfig(
"hdawg0", Schedule("test"), settings.ZISettingsBuilder(), None
)
serialize = mocker.patch.object(settings.ZISettings, "serialize")
apply = mocker.patch.object(settings.ZISettings, "apply")
mocker.patch("quantify_core.data.handling.get_datadir", return_value=".")
mocker.patch.object(zi_helpers, "get_waves_directory", return_value=Path("waves/"))
mocker.patch.object(Path, "glob", return_value=["uhfqa0_awg0.csv"])
copy2 = mocker.patch("shutil.copy2")
# Act
uhfqa.prepare(config)
# Assert
uhfqa_serialize_settings = settings.ZISerializeSettings(
f"ic_{uhfqa.instrument.name}", uhfqa.instrument._serial, uhfqa.instrument._type
)
serialize.assert_called_with(Path("."), uhfqa_serialize_settings)
apply.assert_called_with(uhfqa.instrument)
copy2.assert_called_with("uhfqa0_awg0.csv", "waves")
def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa):
# Arrange
uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa("uhfqa0", "dev1234")
expected_data = np.ones(64)
def resolver(uhfqa): # pylint: disable=unused-argument
return expected_data
config = ZIDeviceConfig(
"hdawg0",
Schedule("test"),
settings.ZISettingsBuilder(),
ZIAcquisitionConfig(1, {0: resolver}),
)
mocker.patch.object(settings.ZISettings, "serialize")
mocker.patch.object(settings.ZISettings, "apply")
mocker.patch("quantify_core.data.handling.get_datadir", return_value=".")
mocker.patch.object(zi_helpers, "get_waves_directory", return_value=Path("waves/"))
mocker.patch.object(Path, "glob", return_value=[])
uhfqa.prepare(config)
# Act
acq_result = uhfqa.retrieve_acquisition()
expected_acq_result: Dict[Tuple[int, int], Any] = dict()
for i, value in enumerate(expected_data):
expected_acq_result[(0, i)] = (value, 0.0)
# Assert
assert acq_result is not None
assert (0, 2) in acq_result
for key in acq_result:
assert acq_result[key] == expected_acq_result[key]
def test_uhfqa_wait_done(mocker, make_uhfqa):
# Arrange
uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa("uhfqa0", "dev1234")
wait_done = mocker.patch.object(uhfqa.instrument.awg, "wait_done")
timeout: int = 20
# Act
uhfqa.wait_done(timeout)
# Assert
wait_done.assert_called_with(timeout)
| 1.851563 | 2 |
app/dashapp1/callbacks.py | rseed42/dash_on_flask | 258 | 12793847 | from datetime import datetime as dt
from dash.dependencies import Input
from dash.dependencies import Output
from dash.dependencies import State
from flask_login import current_user
import pandas_datareader as pdr
def register_callbacks(dashapp):
@dashapp.callback(
Output('my-graph', 'figure'),
Input('my-dropdown', 'value'),
State('user-store', 'data'))
def update_graph(selected_dropdown_value, data):
df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now())
return {
'data': [{
'x': df.index,
'y': df.Close
}],
'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}
}
@dashapp.callback(
Output('user-store', 'data'),
Input('my-dropdown', 'value'),
State('user-store', 'data'))
def cur_user(args, data):
if current_user.is_authenticated:
return current_user.username
@dashapp.callback(Output('username', 'children'), Input('user-store', 'data'))
def username(data):
if data is None:
return ''
else:
return f'Hello {data}'
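# Editor's note: illustrative wiring sketch added here, not part of the original
# module. It assumes a Flask server created elsewhere in the app factory; only the
# plain dash.Dash constructor is relied on.
def _example_wire_dashapp(flask_server=None):  # pragma: no cover - illustrative only
    import dash
    dashapp = dash.Dash(__name__, server=flask_server) if flask_server else dash.Dash(__name__)
    register_callbacks(dashapp)
    return dashapp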
| 2.421875 | 2 |
mainapp/models.py | Raistlin11123/inquiry-soso | 0 | 12793848 | <reponame>Raistlin11123/inquiry-soso<filename>mainapp/models.py<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
import datetime
now = datetime.datetime.now  # pass the callable so Django evaluates it on each save, not once at import time
"""
class Contact(models.Model):
name = models.CharField(max_length=50)
email = models.EmailField()
content = models.TextField()
def __str__(self):
return "Message de {}".format(self.name)
"""
class Clue(models.Model):
title = models.CharField(max_length=42, verbose_name='Titre')
url_img_optionnel = models.CharField(max_length=42, default='rien', verbose_name='url_img_optionnel')  # 'rien' ("nothing") when there is no image
paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True)
code = models.CharField(null=True, verbose_name="code", max_length=30)  # code used to access the clue
# optional entry question (and its answer)
optional_question = models.CharField(null=True, verbose_name="question optionelle", max_length=255, blank=True)
optional_answer = models.CharField(null=True, verbose_name="réponse optionelle", max_length=255, blank=True)
def __str__(self):
return "{}".format(self.title)
class UserClues(models.Model):
player = models.ForeignKey(User, on_delete=models.CASCADE)
clue = models.ForeignKey(Clue, on_delete=models.CASCADE)
date = models.DateTimeField(default=now, blank=True)
def __str__(self):
return "indice de {}".format(self.player.username)
# find a singular name other than UserClue (already used by Django) | 2.40625 | 2
model-ms/benchmark/make_fixture_models.py | ncoop57/deep_parking | 126 | 12793849 | <reponame>ncoop57/deep_parking<filename>model-ms/benchmark/make_fixture_models.py
import torchvision
from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats
data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats)
learner = cnn_learner(data, torchvision.models.resnet34)
learner.export()
data = (SegmentationItemList.from_folder('fixtures/segmentation/images')
.split_none()
.label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2])
.databunch()
.normalize(imagenet_stats))
learner = unet_learner(data, torchvision.models.resnet50)
learner.export('../export.pkl')
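# Editor's note: illustrative sketch added here, not part of the original script.
# It assumes the export calls above have already produced export.pkl files and
# uses fastai v1's load_learner, matching the fastai imports in this repository.
def _example_load_fixture_models():  # pragma: no cover - illustrative only
    from fastai.vision import load_learner
    classifier = load_learner('fixtures/classification')
    segmenter = load_learner('fixtures/segmentation')
    return classifier, segmenter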
| 1.992188 | 2 |
vnpy/app/algo_trading/algos/liquid_mining_algo.py | dos2004/vnpy | 0 | 12793850 | from collections import defaultdict
from decimal import Decimal
from _datetime import datetime, timedelta
from enum import Enum
import math
import random
import re
import requests
import time
from vnpy.app.algo_trading import AlgoTemplate
from vnpy.trader.utility import round_to
from vnpy.trader.constant import Direction, Status, OrderType
from vnpy.trader.object import AccountData, OrderData, TradeData, TickData
from vnpy.trader.engine import BaseEngine
class LiquidMiningAlgo(AlgoTemplate):
""""""
display_name = "交易所 流动性挖坑"
default_setting = {
"vt_symbol": "",
"price_offset": 0.05,
"price_offset_max": 0.1,
"volume": 2,
"max_volume_ratio": 0,
"interval": 3,
"min_order_level": 1,
"min_order_volume": 0,
"sell_max_volume": 0,
"buy_max_volume": 0,
"auto_trade_volume": 310,
"sell_max_ratio": 1,
"buy_max_ratio": 1,
"reward_ratio": 0.01,
"min_pos": 50000,
"max_pos": 50000,
}
variables = [
"pos",
"timer_count",
"vt_ask_orderid",
"vt_bid_orderid"
]
def __init__(
self,
algo_engine: BaseEngine,
algo_name: str,
setting: dict
):
""""""
super().__init__(algo_engine, algo_name, setting)
# Parameters
self.vt_symbol = setting["vt_symbol"]
self.price_offset = setting["price_offset"]
self.price_offset_max = setting["price_offset_max"]
self.volume = setting["volume"]
self.max_volume_ratio = setting.get("max_volume_ratio", 0)
assert 0 <= self.max_volume_ratio <= 1
self.interval = setting["interval"]
self.min_order_level = setting["min_order_level"]
self.min_order_volume = setting["min_order_volume"]
self.sell_max_volume = setting["sell_max_volume"]
self.buy_max_volume = setting["buy_max_volume"]
self.auto_trade_volume = setting["auto_trade_volume"]
self.sell_max_ratio = setting["sell_max_ratio"]
self.buy_max_ratio = setting["buy_max_ratio"]
self.reward_ratio = setting["reward_ratio"]
self.min_pos = setting["min_pos"]
self.max_pos = setting["max_pos"]
self.enable_ioc = setting.get("enable_ioc", False)
self.ioc_intervel = setting.get("ioc_interval", self.interval)
# validate setting
assert self.price_offset <= self.price_offset_max
assert 0 <= self.min_order_level <= 5
# Variables
self.pos = 0
self.timer_count = 0
self.vt_ask_orderid = ""
self.vt_ask_price = 0.0
self.vt_bid_orderid = ""
self.vt_bid_price = 0.0
self.origin_ask_price = 0.00000002
self.origin_bid_price = 0.00000001
self.last_ask_price = 0.00000002
self.last_bid_price = 0.00000001
self.last_ask_volume = 0.0
self.last_bid_volume = 0.0
self.total_ask_volume = 0.0
self.total_bid_volume = 0.0
self.ask_order_level = 0
self.bid_order_level = 0
self.last_tick = None
self._init_market_accounts(self.vt_symbol)
self.subscribe(self.vt_symbol)
self.put_parameters_event()
self.put_variables_event()
def _init_market_accounts(self, active_vt_symbol):
SYMBOL_SPLITTER = re.compile(r"^(\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$")
market_token_pair = active_vt_symbol.split('.')[0]
active_market = active_vt_symbol.split('.')[1]
if not market_token_pair or not active_market:
self.algo_engine.main_engine.write_log(f"ERROR: parse active_vt {active_vt_symbol} failed")
return False
token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper())
if not token_pair_match:
self.algo_engine.main_engine.write_log(f"ERROR: parse symbol {market_token_pair} failed")
return False
self.market_vt_tokens = [
f"{active_market}.{token_pair_match.group(1)}",
f"{active_market}.{token_pair_match.group(2)}"
]
self.current_balance = {}
self._update_current_balance()
def _update_current_balance(self):
for vt_token in self.market_vt_tokens:
user_account = self.algo_engine.main_engine.get_account(vt_token)
if type(user_account) is not AccountData:
return False
self.current_balance[vt_token] = user_account.balance
return True
def on_start(self):
""""""
random.seed(time.time())
self.write_log(f"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}")
self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick
self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume
assert self.pricetick > 0
def on_tick(self, tick: TickData):
""""""
self.last_tick = tick
market_price = (tick.ask_price_1 + tick.bid_price_1) / 2
if self.vt_ask_orderid != "":
self.ask_order_alive_tick += 1
# if time to kill
cancel_ask = False
if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel:
self.write_log(f"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消")
cancel_ask = True
if not cancel_ask:
total_ask_volume = 0
for num_level in range(1, 6):
ask_price = getattr(tick, f"ask_price_{num_level}")
if 0 < ask_price < self.last_ask_price:
total_ask_volume += getattr(tick, f"ask_volume_{num_level}")
# min_ask_price = getattr(tick, f"ask_price_{self.ask_order_level}") if self.ask_order_level > 0 else market_price
# vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick)
vt_ask_price = getattr(tick, f"ask_price_1")
if self.vt_ask_price < vt_ask_price:
cancel_ask = True
self.write_log(f"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消")
elif self.vt_ask_price > vt_ask_price:
cancel_ask = True
self.write_log(f"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消")
elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2):
cancel_ask = True
self.write_log(f"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化")
if cancel_ask:
self.cancel_order(self.vt_ask_orderid)
# self.ask_order_alive_tick = 0
if self.vt_bid_orderid != "":
self.bid_order_alive_tick += 1
# if time to kill
cancel_bid = False
if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel:
self.write_log(f"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消")
cancel_bid = True
if not cancel_bid:
total_bid_volume = 0
for num_level in range(1, 6):
bid_price = getattr(tick, f"bid_price_{num_level}")
if bid_price > self.last_bid_price:
total_bid_volume += getattr(tick, f"bid_volume_{num_level}")
# max_bid_price = getattr(tick, f"bid_price_{self.bid_order_level}") if self.bid_order_level > 0 else market_price
# vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick)
vt_bid_price = getattr(tick, f"bid_price_1")
if self.vt_bid_price > vt_bid_price:
cancel_bid = True
self.write_log(f"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消")
elif self.vt_bid_price < vt_bid_price:
cancel_bid = True
self.write_log(f"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消")
elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2):
cancel_bid = True
self.write_log(f"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化")
if cancel_bid:
self.cancel_order(self.vt_bid_orderid)
# self.bid_order_alive_tick = 0
def on_timer(self):
""""""
if not self.last_tick:
return
if self.pos < self.min_pos or self.pos > self.max_pos:
self.cancel_all()
self.write_log(f"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿")
return
self.timer_count += 1
if self.timer_count < self.interval:
self.put_variables_event()
return
self.timer_count = 0
self.write_log(f"当前余额 {self.current_balance}, 持仓 {self.pos}")
if not self._update_current_balance():
self.write_log(f"查询余额失败,上次余额: [{self.current_balance}]")
return
use_max_volume = self.max_volume_ratio > 0
max_volume_ratio = self.max_volume_ratio
market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2
if self.vt_ask_orderid == "":
self.ask_order_level = 0
for num_level in range(self.min_order_level, 0, -1):
ask_price = getattr(self.last_tick, f"ask_price_{num_level}")
if 0 < ask_price < market_price * (1 + self.reward_ratio * 0.99):
self.ask_order_level = num_level
break
if self.ask_order_level > 0:
total_ask_volume = 0
for num_level in range(1, self.ask_order_level + 1):
total_ask_volume += getattr(self.last_tick, f"ask_volume_{num_level}")
if total_ask_volume != self.last_ask_volume:
one_ask_price = getattr(self.last_tick, f"ask_price_1")
one_ask_volume = getattr(self.last_tick, f"ask_volume_1")
min_ask_price = getattr(self.last_tick, f"ask_price_{self.ask_order_level}") if self.ask_order_level > 0 else market_price
vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick)
if self.origin_ask_price == 0.00000002:
self.origin_ask_price = vt_ask_price
ask_condition0 = self.last_ask_price == 0.00000002
ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset))
ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max))
ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2))
self.write_log(f"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}")
if ask_condition0 or (ask_condition1 and ask_condition2):
self.last_ask_price = vt_ask_price
self.vt_ask_price = one_ask_price
self.total_ask_volume = total_ask_volume
max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio
if 0 < self.sell_max_volume < max_volume:
max_volume = self.sell_max_volume
min_volume = self.volume * total_ask_volume
if self.min_order_volume > 0 and min_volume < self.min_order_volume:
min_volume = self.min_order_volume
volume = min_volume if not use_max_volume else max_volume * max_volume_ratio
if volume >= max_volume:
volume = max_volume
self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick)
self.write_log(f"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}")
self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume)
self.ask_order_alive_tick = 0
elif ask_condition8 and one_ask_volume < self.auto_trade_volume:
self.write_log(f"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}")
self.buy(self.vt_symbol, one_ask_price, one_ask_volume)
else:
self.write_log(f"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量")
else:
self.write_log(f"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置")
if self.vt_bid_orderid == "":
self.bid_order_level = 0
for num_level in range(self.min_order_level, 0, -1):
bid_price = getattr(self.last_tick, f"bid_price_{num_level}")
if bid_price > market_price * (1 - self.reward_ratio * 0.99):
self.bid_order_level = num_level
break
if self.bid_order_level > 0:
total_bid_volume = 0
for num_level in range(1, self.bid_order_level + 1):
total_bid_volume += getattr(self.last_tick, f"bid_volume_{num_level}")
if total_bid_volume != self.last_bid_volume:
one_bid_price = getattr(self.last_tick, f"bid_price_1")
one_bid_volume = getattr(self.last_tick, f"bid_volume_1")
max_bid_price = getattr(self.last_tick, f"bid_price_{self.bid_order_level}") if self.bid_order_level > 0 else market_price
vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick)
if self.origin_bid_price == 0.00000001:
self.origin_bid_price = vt_bid_price
bid_condition0 = self.last_bid_price == 0.00000001
bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset))
bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max))
bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2))
self.write_log(f"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}")
if bid_condition0 or (bid_condition1 and bid_condition2):
self.last_bid_price = vt_bid_price
self.vt_bid_price = one_bid_price
self.total_bid_volume = total_bid_volume
max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price
if 0 < self.buy_max_volume < max_volume:
max_volume = self.buy_max_volume
min_volume = self.volume * total_bid_volume
if self.min_order_volume > 0 and min_volume < self.min_order_volume:
min_volume = self.min_order_volume
volume = min_volume if not use_max_volume else max_volume * max_volume_ratio
if volume >= max_volume:
volume = max_volume
self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick)
self.write_log(f"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}")
self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume)
self.bid_order_alive_tick = 0
elif bid_condition8 and one_bid_volume < self.auto_trade_volume:
self.write_log(f"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}")
self.sell(self.vt_symbol, one_bid_price, one_bid_volume)
else:
self.write_log(f"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量")
else:
self.write_log(f"---> 流动性挖矿买入下单失败,因为没有合适的下单位置")
self.put_variables_event()
def on_order(self, order: OrderData):
""""""
if order.vt_orderid == self.vt_ask_orderid:
if not order.is_active():
self.vt_ask_orderid = ""
self.vt_ask_price = 0.0
elif order.vt_orderid == self.vt_bid_orderid:
if not order.is_active():
self.vt_bid_orderid = ""
self.vt_bid_price = 0.0
self.put_variables_event()
def on_trade(self, trade: TradeData):
""""""
if trade.direction == Direction.SHORT:
self.write_log(f"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}")
self.pos -= trade.volume
elif trade.direction == Direction.LONG:
self.write_log(f"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}")
self.pos += trade.volume
self.put_variables_event()
def on_stop(self):
""""""
self.write_log("停止 流动性挖矿")
# self.write_log(f"账户状态:{self.algo_engine.main_engine.get_all_accounts()}")
time.sleep(5)
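# Editor's note: illustrative settings sketch added here, not part of the original
# strategy. Field names mirror default_setting above; the vt_symbol value is a
# hypothetical example and would be replaced with a real contract identifier.
EXAMPLE_LIQUID_MINING_SETTING = dict(
    LiquidMiningAlgo.default_setting,
    vt_symbol="BTCUSDT.BINANCE",
)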
| 2.1875 | 2 |