max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
tests/baseline_data.py | jakiee3y/luma.core | 114 | 11188716 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 <NAME> and contributors
# See LICENSE.rst for details.
def primitives(device, draw):
padding = 2
shape_width = 20
top = padding
bottom = device.height - padding - 1
draw.rectangle(device.bounding_box, outline="white", fill="black")
x = padding
draw.ellipse((x, top, x + shape_width, bottom), outline="red", fill="black")
x += shape_width + padding
draw.rectangle((x, top, x + shape_width, bottom), outline="blue", fill="black")
x += shape_width + padding
draw.polygon([(x, bottom), (x + shape_width / 2, top), (x + shape_width, bottom)], outline="green", fill="black")
x += shape_width + padding
draw.line((x, bottom, x + shape_width, top), fill="yellow")
draw.line((x, top, x + shape_width, bottom), fill="yellow")
x += shape_width + padding
draw.text((x, top), 'Hello', fill="cyan")
draw.text((x, top + 20), 'World!', fill="purple")
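# Illustrative usage sketch (comments only; an assumption on top of the original test data):
# from luma.core.render import canvas
# with canvas(device) as draw:
#     primitives(device, draw)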
|
examples/custom_op/train.py | jnclt/simple_tensorflow_serving | 771 | 11188743 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import (
signature_constants, signature_def_utils, tag_constants, utils)
from tensorflow.python.util import compat
def main():
# Load custom op
filename = os.path.join(os.path.dirname(__file__), "zero_out.so")
zero_out_module = tf.load_op_library(filename)
zero_out = zero_out_module.zero_out
# Prepare train data
train_data = np.ones((2, 2))
print("Input data: {}".format(train_data))
# Define the model
input = tf.placeholder(tf.int32, shape=(None, 2))
output = zero_out(input)
# Export the model
model_path = "model"
model_version = 1
model_signature = signature_def_utils.build_signature_def(
inputs={
"input": utils.build_tensor_info(input),
},
outputs={
"output": utils.build_tensor_info(output),
},
method_name=signature_constants.PREDICT_METHOD_NAME)
export_path = os.path.join(
compat.as_bytes(model_path), compat.as_bytes(str(model_version)))
legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
# Create session to run
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
output_data = sess.run(output, feed_dict={input: train_data})
print("Output data: {}".format(output_data))
builder = saved_model_builder.SavedModelBuilder(export_path)
builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
clear_devices=True,
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
model_signature,
},
legacy_init_op=legacy_init_op)
builder.save()
if __name__ == "__main__":
main()
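# Illustrative check (comments only; an assumption, not part of the original example):
# the exported SavedModel could be loaded back into a fresh TF1 session, e.g.
# with tf.Session(graph=tf.Graph()) as sess:
#     tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_path)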
|
inferpy/util/iterables.py | PGM-Lab/InferPy | 140 | 11188762 | <gh_stars>100-1000
def get_shape(x):
""" Get the shape of an element x. If it is an element with a shape attribute, return it. If it is a list with more than
one element, compute the shape by checking the len, and the shape of internal elements. In that case, the shape must
be consistent. Finally, in other case return () as shape.
Args:
x: The element to compute its shape
Raises:
class `ValueError`: list shape not consistent
Returns:
A tuple with the shape of `x`
"""
if isinstance(x, list) and len(x) > 0:
shapes = [get_shape(subx) for subx in x]
if any([s != shapes[0] for s in shapes[1:]]):
raise ValueError('Parameter dimension not consistent: {}'.format(x))
return (len(x), ) + shapes[0]
else:
if hasattr(x, '_shape_tuple'):
return x._shape_tuple() # method to return the shape as a tuple
elif hasattr(x, 'shape'):
return tuple(x.shape)
else:
return ()
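# Illustrative examples (comments only, not part of the original module):
# get_shape(3.0) -> ()                     (scalar without a shape attribute)
# get_shape([[1, 2], [3, 4]]) -> (2, 2)    (outer length 2, consistent inner shape (2,))
# get_shape([[1, 2], [3]]) raises ValueError (inconsistent inner shapes)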
def get_plate_size(variables, sample_dict):
# get the plate size by analyzing the sample_dict input
# check that all values in the dict whose name is a datamodel RV have the same length (this will be the plate size)
plate_shapes = [get_shape(v) for k, v in sample_dict.items()
if k in variables and variables[k].is_datamodel]
plate_sizes = [s[0] if len(s) > 0 else 1 for s in plate_shapes] # if the shape is (), it is just one element
if len(plate_sizes) == 0:
return 1
else:
plate_size = plate_sizes[0]
if any(plate_size != x for x in plate_sizes[1:]):
raise ValueError('The number of elements for each mapped variable must be the same.')
return plate_size
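# Illustrative example (comments only; the variable name "x" is an assumption):
# if "x" is a datamodel random variable and sample_dict == {"x": [[1.], [2.], [3.]]},
# its shape is (3, 1), so get_plate_size(variables, sample_dict) returns 3.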
|
python/fate_arch/federation/transfer_variable/_cleaner.py | hubert-he/FATE | 3,787 | 11188789 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from collections import deque
from fate_arch.abc import GarbageCollectionABC
from fate_arch.common.log import getLogger
LOGGER = getLogger()
class IterationGC(GarbageCollectionABC):
def __init__(self, capacity=2):
self._ashcan: typing.Deque[typing.List[typing.Tuple[typing.Any, str, dict]]] = deque()
self._last_tag: typing.Optional[str] = None
self._capacity = capacity
self._enable = True
def add_gc_action(self, tag: str, obj, method, args_dict):
if self._last_tag == tag:
self._ashcan[-1].append((obj, method, args_dict))
else:
self._ashcan.append([(obj, method, args_dict)])
self._last_tag = tag
def disable(self):
self._enable = False
def set_capacity(self, capacity):
self._capacity = capacity
def gc(self):
if not self._enable:
return
if len(self._ashcan) <= self._capacity:
return
self._safe_gc_call(self._ashcan.popleft())
def clean(self):
while self._ashcan:
self._safe_gc_call(self._ashcan.pop())
@staticmethod
def _safe_gc_call(actions: typing.List[typing.Tuple[typing.Any, str, dict]]):
for obj, method, args_dict in actions:
try:
LOGGER.debug(f"[CLEAN]deleting {obj}, {method}, {args_dict}")
getattr(obj, method)(**args_dict)
except Exception as e:
LOGGER.debug(f"[CLEAN]this could be ignore {e}")
|
test/library/packages/ProtobufProtocolSupport/endToEnd/oneofs/write.py | jhh67/chapel | 1,602 | 11188850 | import oneofs_pb2
messageObj = oneofs_pb2.Foo()
messageObj.co = oneofs_pb2.color.green
messageObj.name = b"chapel";
messageObj.mfield.a = 67
messageObj.ifield = 45
file = open("out", "wb")
file.write(messageObj.SerializeToString())
file.close()
|
python-sdk/nuscenes/scripts/export_instance_videos.py | bjajoh/nuscenes-devkit | 1,284 | 11188859 | <filename>python-sdk/nuscenes/scripts/export_instance_videos.py
# nuScenes dev-kit.
# Code written by <NAME>, 2020.
"""
Generate videos of nuScenes object instances.
See https://github.com/EricWiener/nuscenes-instance-videos for more detailed instructions.
Usage: python3 generate_videos.py --dataroot <path to data> --version <version> -o <output directory>
Note: You first need to generate 2D annotations with export_2d_annotations_as_json.py.
"""
import argparse
import json
import os
import pathlib
from collections import defaultdict
from shutil import rmtree
from typing import List, Tuple
import cv2
import numpy as np
from PIL import Image
from tqdm import tqdm
def convert_annotation_list_to_dict(annotation_list: List[dict],
categories: List[str] = None,
visibilities: List[str] = None) -> defaultdict:
"""
Save the list of sample_annotations in a format suitable for instance videos.
When saving the list of annotations to a dictionary, special attention must be paid to the
correct keys to use.
For example, you will have bounding boxes with the same instance_token and sample_annotation_token
because there are multiple cameras on the car, so you can have the same object appearing across
multiple sensors. Each sensor's data is identified with a sample_data_token.
{'attribute_tokens': ['58aa28b1c2a54dc88e169808c07331e3'], 'bbox_corners': [1370.3079971217335, 446.66394956158524, 1600.0, 607.4567037983365], 'category_name': 'vehicle.car', 'filename': 'samples/CAM_FRONT/n008-2018-08-27-11-48-51-0400__CAM_FRONT__1535385095912404.jpg', 'instance_token': '0f8696c5e7284236b29a806d3d6f3513', 'next': '<KEY>', 'num_lidar_pts': 4, 'num_radar_pts': 2, 'prev': '8291db1bc2704230867275bad5f42297', 'sample_annotation_token': 'ee04de72a30e4517a366ddad89d64fef', 'sample_data_token': '60ade2dececb46c69b114ce4c8a0bd3e', 'visibility_token': '1'}
{'attribute_tokens': ['58aa28b1c2a54dc88e169808c07331e3'], 'bbox_corners': [0.0, 446.3944232196225, 387.13952090477727, 618.0310593208171], 'category_name': 'vehicle.car', 'filename': 'samples/CAM_FRONT_RIGHT/n008-2018-08-27-11-48-51-0400__CAM_FRONT_RIGHT__1535385095920482.jpg', 'instance_token': '0f8696c5e7284236b29a806d3d6f3513', 'next': '<KEY>', 'num_lidar_pts': 4, 'num_radar_pts': 2, 'prev': '8291db1bc2704230867275bad5f42297', 'sample_annotation_token': 'ee04de72a30e4517a366ddad89d64fef', 'sample_data_token': '92d49452e5804d0a9724ab4161a26147', 'visibility_token': '1'}
A combination of [instance_token][sample_data_token] can be used to uniquely identify
the bounding boxes. You can enumerate through [instance_token][x] to find all the different
views of a single bounding box.
:param annotation_list: A list of annotations.
:param categories: The list of categories to filter annotations by.
:param visibilities: The list of visibilities to filter annotations by.
:return: A nested dict of annotations indexed by [instance_token][sample_token][camera_name].
"""
# Default arguments.
if visibilities is None:
visibilities = ['', '1', '2', '3', '4']
# Convert the list of instance to a dictionary that uses the
# instance_token -> sample_annotation_token -> camera
# to look up the instance.
bbox_2d_annotations = defaultdict(lambda: defaultdict(dict))
num_dups = 0
for instance in annotation_list:
instance_token = instance['instance_token']
# 3. `sample` - An annotated snapshot of a scene at a particular timestamp.
# This is identified by `sample_annotation_token`.
# 4. `sample_data` - Data collected from a particular sensor.
# sample_data refers to the picture captured by a single sensor at a single timestamp.
# sample_annotation_token refers to a single bounding box, which might exist in multiple
# sample_data (across the different cameras)
sample_token = instance['sample_annotation_token']
category = instance['category_name']
visibility = instance['visibility_token']
camera_name = extract_camera_key_from_filename(instance['filename'])
# Append additional information.
instance['camera_name'] = camera_name
instance['bbox_area'] = calculate_bb_area(instance['bbox_corners'])
if (categories is not None and category not in categories) or visibility not in visibilities:
continue
if instance_token in bbox_2d_annotations and sample_token in bbox_2d_annotations[instance_token] \
and camera_name in bbox_2d_annotations[instance_token][sample_token]:
num_dups += 1
print('Duplicate instance {}, sample {}, and camera {}'.format(
instance_token, sample_token, camera_name))
bbox_2d_annotations[instance_token][sample_token][camera_name] = instance
assert num_dups == 0, 'Error: Number of duplicates (should be zero)!'
return bbox_2d_annotations
def extract_camera_key_from_filename(filename: str) -> str:
"""
Extract the camera name from the filename.
:param filename: the name of the file where the samples image is stored.
Ex: 'samples/CAM_BACK/n015-2018-10-02-10-50-40+0800__CAM_BACK__1538448750037525.jpg',
:return: The camera name.
"""
camera_name = filename.split('/')[1]
# Validate the camera name is valid.
camera_names = ['CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT',
'CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT']
assert (camera_name in camera_names), "Invalid camera name: {} from path: {}".format(
camera_name, filename)
return camera_name
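# Illustrative example (comments only; the image file name is hypothetical):
# extract_camera_key_from_filename('samples/CAM_BACK/some_image.jpg') -> 'CAM_BACK'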
def calculate_bb_area(bounding_box: np.ndarray) -> float:
"""
Calculates area of a 2D bounding box.
:param bounding_box: np.array of length 4 (x min, y min, x max, y max).
:return: The area.
"""
x_min, y_min, x_max, y_max = bounding_box
return (x_max - x_min) * (y_max - y_min)
def get_most_visible_camera_annotation(camera_data_dict: dict) -> dict:
"""
Get the most visible camera's annotation.
:param camera_data_dict: Dictionary of form:
{
'CAM_BACK': {'attribute_tokens': ['<KEY>'],
'bbox_corners': [600.8315617945755,
426.38901275036744,
643.6756536789582,
476.66593163100237],
'category_name': 'vehicle.bus.rigid',
'filename': 'samples/CAM_BACK/n015-2018-10-02-10-50-40+0800__CAM_BACK__1538448750037525.jpg',
'instance_token': '<KEY>',
'next': 'ef90c2e525244b7d9eeb759837cf2277',
'num_lidar_pts': 0,
'num_radar_pts': 0,
'prev': '6628e81912584a72bd448a44931afb42',
'sample_annotation_token': '06b4886e79d2435c80bd23e7ac60c618',
'sample_data_token': '<KEY>',
'visibility_token': '4'},
'CAM_FRONT': ...
...
}
:return: The camera annotation with highest visibility.
"""
# Loop through all the camera views to find the best view of this instance
# Each of the cameras will have a corresponding bounding box and visibility.
# We want the largest bounding box and highest visibility.
best_visibility = ''
largest_area = -1
best_camera_token = None
for camera_token in camera_data_dict:
visibility = camera_data_dict[camera_token]['visibility_token']
bbox_area = camera_data_dict[camera_token]['bbox_area']
if visibility > best_visibility or (visibility == best_visibility and bbox_area > largest_area):
best_camera_token = camera_token
largest_area = bbox_area
best_visibility = visibility
if not best_camera_token:
print('Unable to find any good views for camera data dict: {}'.format(
camera_data_dict))
best_instance_data = camera_data_dict[best_camera_token]
return best_instance_data
def get_cropped_image_for_annotation(sample_data_annotation: dict,
dataroot: str,
output_size: Tuple[int, int]) -> np.ndarray:
"""
Crop the annotated region of a given image.
:param sample_data_annotation: Dict of form:
```
{'attribute_tokens': ['<KEY>'],
'bbox_corners': [600.8315617945755,
426.38901275036744,
643.6756536789582,
476.66593163100237],
'category_name': 'vehicle.bus.rigid',
'filename': 'samples/CAM_BACK/n015-2018-10-02-10-50-40+0800__CAM_BACK__1538448750037525.jpg',
'instance_token': '<KEY>',
'next': 'ef90c2e525244b7d9eeb759837cf2277',
'num_lidar_pts': 0,
'num_radar_pts': 0,
'prev': '<KEY>',
'sample_annotation_token': '06b4886e79d2435c80bd23e7ac60c618',
'sample_data_token': '<KEY>',
'visibility_token': '4'},
```
:param dataroot: The nuScenes dataroot.
:param output_size: A tuple for the image size.
:return: The cropped image.
"""
data_path = os.path.join(dataroot,
sample_data_annotation['filename'])
bbox = sample_data_annotation['bbox_corners']
im = Image.open(data_path)
im1 = im.crop(bbox)
im1 = im1.resize(output_size)
np_img = np.asarray(im1)
return np_img
def sort_sample_annotations_chronologically(instance_dict: dict) -> List[str]:
"""
Sort the sample_annotations chronologically.
:param instance_dict: Taken by indexing bbox_2d_annotations[instance_token]
:return: A list of chronologically sorted annotations.
Uses [sample_token][sample_annotation_token]['best_annotation'] to find the correct sequence.
"""
# Find the first sample token
first_sample_token = None
for sample_token in instance_dict:
if instance_dict[sample_token]['best_annotation']['prev'] == '':
first_sample_token = sample_token
break
if first_sample_token is None:
print("Unable to find a start token")
# Now iterate and find a list of the sample_tokens in order
sequential_sample_tokens = [first_sample_token]
while True:
try:
next_sample_token = instance_dict[sequential_sample_tokens[-1]]['best_annotation']['next']
except KeyError:
print("Unrecognized sample annotation token sequence: {}".format(sequential_sample_tokens))
break
if next_sample_token == '':
break
sequential_sample_tokens.append(next_sample_token)
return sequential_sample_tokens
def remove_bad_samples(instance_annotation: dict,
minimum_bb_area: float,
minimum_visibility: str,
image_area: int = 1600 * 900) -> List:
"""
Removes bad samples from an instance annotation's sample sequence
:param instance_annotation: an instance annotation
:param minimum_bb_area: The minimum fraction of a frame a bounding box must take up to be used (0, 1)
:param minimum_visibility: The minimum visibility a frame is allowed to have ('', '1', '2', '3', '4')
:param image_area: The area of an image frame. Defaults to 1600*900.
:return: A cleaned list of sample annotation tokens that meet requirements
"""
sample_token_sequence = instance_annotation['sample_annotation_sequence']
cleaned = []
for sample_token in sample_token_sequence:
area = instance_annotation[sample_token]['best_annotation']['bbox_area']
visibility = instance_annotation[sample_token]['best_annotation']['visibility_token']
if area / image_area > minimum_bb_area and visibility >= minimum_visibility:
cleaned.append(sample_token)
return cleaned
def main(version: str,
dataroot: str,
output: str,
object_categories: List[str],
fps: int,
output_size: Tuple[int, int],
minimum_frames: int,
minimum_bb_area: float,
visibility: str,
codec: str) -> None:
"""
Generates video sequences of nuScenes object instances over time.
Expects the data to be organized as:
```
"$dataroot"/
samples - Sensor data for keyframes.
sweeps - Sensor data for intermediate frames.
maps - Folder for all map files: rasterized .png images and vectorized .json files.
v1.0-* - JSON tables that include all the meta data and annotations.
Each split (trainval, test, mini) is provided in a separate folder.
Note that image_annotations.json should be inside this directory.
```
:param version: The nuScenes data version.
:param dataroot: The path to the data root directory.
:param output: The path to the output video directory.
:param object_categories: The categories to extract videos for.
:param fps: Frames per second to use for the video.
:param output_size: The output dimension to resize every cropped bounding box to. Defaults to (112, 112).
:param minimum_frames: The minimum number of frames an instance must have.
:param minimum_bb_area: The minimum fraction of a frame a bounding box must take up to be used (0, 1).
:param visibility: The minimum visibility a frame is allowed to have ('', '1', '2', '3', '4').
:param codec: Which codec to use to generate the video, e.g. MJPG or vp09.
Some data annotation platforms require vp09.
"""
print('=' * 20)
print('Generating video sequences:')
print('\t* Size: {}'.format(output_size))
print('\t* FPS: {}'.format(fps))
print('\t* Minimum frame count: {}'.format(minimum_frames))
print('\t* Minimum BB area: {}'.format(minimum_bb_area))
print('\t* Minimum visibility: {}'.format(visibility))
# ================================ Load image annotations. ========================================
image_annotations_file = os.path.join(dataroot, version, 'image_annotations.json')
if not os.path.exists(image_annotations_file):
raise Exception("Error: Missing image_annotations.json. "
"Please run the export_2d_annotations_as_json.py script.")
with open(image_annotations_file) as f:
# A list of dictionaries
bbox_2d_annotations_list = json.load(f)
# These can be indexed with [instance_token][sample_annotation_token][camera_name] -> data about the annotation
# You can use the sample_annotation_token with the nuScenes helper in order to get the sample tokens.
bbox_2d_annotations = convert_annotation_list_to_dict(
bbox_2d_annotations_list, categories=object_categories)
print('Number of unique vehicle instances: {}'.format(len(bbox_2d_annotations)))
# ==============================================================================================
# ===== For each instance and each sample annotation, find the best camera sensor to use. ======
# Get sorted sample annotation tokens per instance per camera.
for instance_token in bbox_2d_annotations:
for sample_annotation_token in bbox_2d_annotations[instance_token]:
bbox_2d_annotations[instance_token][sample_annotation_token][
'best_annotation'] = get_most_visible_camera_annotation(
bbox_2d_annotations[instance_token][sample_annotation_token])
# ==============================================================================================
# ====== For each instance, find the correct sequence of sample annotations. ====================
# Get sorted sample annotation tokens per instance per camera.
for instance_token in bbox_2d_annotations:
bbox_2d_annotations[instance_token]['sample_annotation_sequence'] = sort_sample_annotations_chronologically(
bbox_2d_annotations[instance_token])
# ==============================================================================================
# ====== Remove samples from sequence that don't meet requirements. ====================
for instance_token in bbox_2d_annotations:
bbox_2d_annotations[instance_token]['sample_annotation_sequence'] = remove_bad_samples(
bbox_2d_annotations[instance_token], minimum_bb_area, visibility)
# ==============================================================================================
# ====== Create videos for every instance. ======================================================
# Remove the directory if it already exists and create new one.
rmtree(output, ignore_errors=True)
pathlib.Path(output).mkdir(parents=True, exist_ok=True)
print("Creating videos and storing in '{}'...".format(output))
total_videos = 0
for instance_token in tqdm(bbox_2d_annotations):
sample_annotation_tokens = bbox_2d_annotations[instance_token]['sample_annotation_sequence']
if len(sample_annotation_tokens) < minimum_frames:
continue
# Define codec and file extension.
file_ext = 'mp4' if codec == 'vp09' else 'avi'
video_path = os.path.join(
output, '{}.{}'.format(instance_token, file_ext))
out = cv2.VideoWriter(
video_path, cv2.VideoWriter_fourcc(*codec), fps, output_size)
for sample_annotation_token in sample_annotation_tokens:
best_annotation = bbox_2d_annotations[instance_token][sample_annotation_token]['best_annotation']
cropped_img = get_cropped_image_for_annotation(
best_annotation, dataroot, output_size)
# Convert from PIL's RGB to cv2 BGR
out.write(cropped_img[:, :, ::-1])
out.release()
total_videos += 1
print('Created {} videos ({} did not meet requirements).'.format(
total_videos, len(bbox_2d_annotations) - total_videos))
# ==============================================================================================
print('=' * 20)
if __name__ == "__main__":
# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataroot", type=str, default='/data/sets/nuscenes',
help="The path to the root directory where the data is stored")
ap.add_argument("-v", "--version", type=str, default='v1.0-trainval',
help="The nuScenes data version")
ap.add_argument("-o", "--output", type=str, default="videos",
help="The output video directory")
ap.add_argument("-x", "--codec", type=str, default='MJPG',
help="Which codec to use to generate the video, e.g. MJPG or vp09. ")
ap.add_argument("-f", "--fps", type=int, default=2,
help="Frames per second for output video (use 2 to match speed of original data)")
ap.add_argument("-m", "--minimum_frames", type=int, default=9,
help="The minimum number of frames an instance must have")
ap.add_argument("-p", "--minimum_bb_area", type=float, default=0.01,
help="The minimum fraction of a frame a bounding box take up to be used (0, 1)")
ap.add_argument("--visibility", type=str, default='2',
help="The minimum visibility a frame is allowed to have ('', '1', '2', '3', '4')")
ap.add_argument("-s", "--size", type=int, default=(112, 112), nargs=2,
help="Size of the output video")
# Excludes bicycle and motorcycle by default.
vehicle_categories = ['vehicle.bus.bendy', 'vehicle.bus.rigid',
'vehicle.car', 'vehicle.construction', 'vehicle.trailer', 'vehicle.truck']
ap.add_argument("-c", "--categories", nargs='+',
help="The categories to extract videos for", required=False, default=vehicle_categories)
args = vars(ap.parse_args())
main(args['version'], args['dataroot'], args['output'], args['categories'],
args['fps'], tuple(args['size']), args['minimum_frames'], args['minimum_bb_area'], args["visibility"],
args['codec'])
|
tests/samples/method_and_prefix.py | spamegg1/snoop | 751 | 11188890 | <filename>tests/samples/method_and_prefix.py
from snoop.configuration import Config
snoop = Config(prefix='ZZZ').snoop
class Baz(object):
def __init__(self):
self.x = 2
@snoop(watch='self.x')
def square(self):
foo = 7
self.x **= 2
return self
def main():
baz = Baz()
baz.square()
|
pystiche/enc/models/alexnet.py | dooglewoogle/pystiche | 129 | 11189028 | <reponame>dooglewoogle/pystiche
from typing import Any, Dict, List, Tuple
from torch import nn
from torchvision.models import alexnet
from torchvision.models.alexnet import model_urls as TORCH_MODEL_URLS
from .utils import ModelMultiLayerEncoder, select_url
__all__ = ["AlexNetMultiLayerEncoder", "alexnet_multi_layer_encoder"]
MODEL_URLS = {"torch": TORCH_MODEL_URLS["alexnet"]}
def _make_description() -> str:
return r"""Multi-layer encoder based on :class:`~torchvision.models.AlexNet`.
The :class:`~torchvision.models.AlexNet` architecture was introduced by
Krizhevsky, Sutskever, and Hinton in :cite:`KSH2012`.
"""
def _make_docstring(body: str) -> str:
return f"{_make_description()}\n{body}"
class AlexNetMultiLayerEncoder(ModelMultiLayerEncoder):
__doc__ = _make_docstring(
r""" Args:
pretrained: If ``True``, loads builtin weights. Defaults to ``True``.
framework: Name of the framework that was used to train the builtin weights.
Defaults to ``"torch"``.
kwargs: Optional arguments of :class:`ModelMultiLayerEncoder` .
Raises:
RuntimeError: If ``pretrained`` and no weights are available for the
``framework``.
"""
)
def state_dict_url(self, framework: str) -> str:
return select_url(MODEL_URLS, framework)
def collect_modules(
self, inplace: bool
) -> Tuple[List[Tuple[str, nn.Module]], Dict[str, str]]:
model = alexnet(pretrained=False)
modules = []
state_dict_key_map = {}
block = 1
for idx, module in model.features.named_children():
if isinstance(module, nn.Conv2d):
name = f"conv{block}"
elif isinstance(module, nn.ReLU):
module = nn.ReLU(inplace=inplace)
name = f"relu{block}"
if block in (3, 4):
# in the third and fourth block the ReLU layer marks the end of the
# block
block += 1
else: # isinstance(module, nn.MaxPool2d):
name = f"pool{block}"
# each pooling layer marks the end of the current block
block += 1
modules.append((name, module))
state_dict_key_map.update(
{
f"features.{idx}.{key}": f"{name}.{key}"
for key in module.state_dict().keys()
}
)
return modules, state_dict_key_map
def alexnet_multi_layer_encoder(**kwargs: Any) -> AlexNetMultiLayerEncoder:
return AlexNetMultiLayerEncoder(**kwargs)
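# Illustrative usage sketch (comments only; keyword as documented in the class docstring above):
# encoder = alexnet_multi_layer_encoder(pretrained=False)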
alexnet_multi_layer_encoder.__doc__ = _make_docstring(
r""" Args:
kwargs: Optional arguments of :class:`AlexNetMultiLayerEncoder` .
"""
)
|
tests/unit/nn/test_rnn.py | ethan-asapp/flambe | 148 | 11189031 | import pytest
from flambe.nn import RNNEncoder
import torch
import mock
@pytest.mark.parametrize("rnn_type", ['LSTM', 'random', ''])
def test_invalid_type(rnn_type):
with pytest.raises(ValueError):
RNNEncoder(
input_size=10,
hidden_size=20,
rnn_type=rnn_type
)
def test_sru_kwargs():
rnn = RNNEncoder(
input_size=10,
hidden_size=20,
rnn_type='sru',
use_tanh=True
)
for i in rnn.rnn.rnn_lst:
assert i.activation == 'tanh'
def test_invalid_sru_kwargs():
with pytest.raises(ValueError):
_ = RNNEncoder(
input_size=10,
hidden_size=20,
rnn_type='sru',
use_tanh=True,
some_invalid_param=123
)
@pytest.mark.parametrize("rnn_type", ['lstm', 'gru', 'sru'])
def test_forward_pass(rnn_type):
input_size = 300
output_size = 10
seq_len = 20
batch_len = 32
rnn = RNNEncoder(
input_size=input_size,
hidden_size=output_size,
n_layers=4,
rnn_type=rnn_type)
input_t = torch.rand(batch_len, seq_len, input_size)
output, state = rnn(input_t)
assert output.shape == torch.Size((batch_len, seq_len, output_size))
@pytest.mark.parametrize("rnn_type", ['lstm', 'gru', 'sru'])
def test_transpose_on_forward_pass(rnn_type):
input_size = 300
output_size = 10
rnn = RNNEncoder(
input_size=input_size,
hidden_size=output_size,
n_layers=4,
rnn_type=rnn_type)
input_t = torch.rand(10, 10, input_size)
input_t.transpose = mock.Mock(side_effect=input_t.transpose)
output, state = rnn(input_t)
input_t.transpose.assert_called()
input_t.transpose.assert_called_with(0, 1)
|
xam/preprocessing/binning/base.py | topolphukhanh/xam | 357 | 11189037 | <reponame>topolphukhanh/xam
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.utils import check_array
class BaseBinner(BaseEstimator, TransformerMixin):
@property
def cut_points(self):
raise NotImplementedError
def transform(self, X, y=None):
"""Binarize X based on the fitted cut points."""
# scikit-learn checks
X = check_array(X)
if self.cut_points is None:
raise NotFittedError('Estimator not fitted, call `fit` before exploiting the model.')
if X.shape[1] != len(self.cut_points):
raise ValueError("Provided array's dimensions do not match with the ones from the "
"array `fit` was called on.")
binned = np.array([
np.digitize(x, self.cut_points[i])
if len(self.cut_points[i]) > 0
else np.zeros(x.shape)
for i, x in enumerate(X.T)
]).T
return binned
class BaseSupervisedBinner(BaseBinner):
def fit(self, X, y, **fit_params):
raise NotImplementedError
class BaseUnsupervisedBinner(BaseBinner):
def fit(self, X, y=None, **fit_params):
raise NotImplementedError
|
tests/cli/agent/build/build_test.py | bbhunter/ostorlab | 113 | 11189062 | <reponame>bbhunter/ostorlab
"""Tests for CLI agent build command."""
from pathlib import Path
import docker
import pytest
from click import testing
from ostorlab.cli import rootcli
def testAgentBuildCLI_whenRequiredOptionFileIsMissing_showMessage():
"""Test ostorlab agent build CLI command without the required file option. Should show help message, and confirm
the --file option is missing.
"""
runner = testing.CliRunner()
result = runner.invoke(rootcli.rootcli, ['agent', 'build'])
assert 'Usage: rootcli agent build [OPTIONS]' in result.output
assert 'Try \'rootcli agent build --help\' for help.' in result.output
assert 'Error: Missing option \'--file\' / \'-f\'.' in result.output
def _is_docker_image_present(image: str):
docker_sdk_client = docker.from_env()
try:
docker_sdk_client.images.get(image)
return True
except docker.errors.ImageNotFound:
return False
def testAgentBuildCLI_whenParentBuildRootPath_failShowErrorMessage():
"""Test ostorlab agent build CLI command : Case where the command is valid. The agent container should be built.
"""
dummy_def_yaml_file_path = Path(__file__).parent / 'assets/illegal_build_root_dummydef.yaml'
runner = testing.CliRunner()
result = runner.invoke(rootcli.rootcli, [
'agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'
])
assert 'ERROR: Invalid docker build path' in result.output
@pytest.mark.docker
@pytest.mark.parametrize('image_cleanup', ['dummy'], indirect=True)
def testAgentBuildCLI_whenCommandIsValid_buildCompletedAndNoRaiseImageNotFoundExcep(image_cleanup):
"""Test ostorlab agent build CLI command : Case where the command is valid. The agent container should be built.
"""
del image_cleanup
dummy_def_yaml_file_path = Path(__file__).parent / 'assets/dummydef.yaml'
runner = testing.CliRunner()
_ = runner.invoke(rootcli.rootcli, [
'agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'
])
assert _is_docker_image_present('agent_ostorlab_dummy_agent:v1.0.0') is True
@pytest.mark.docker
@pytest.mark.parametrize('image_cleanup', ['dummy'], indirect=True)
def testAgentBuildCLI_whenCommandIsValidAndImageAlreadyExists_showsMessageAndExists(image_cleanup):
"""Test ostorlab agent build CLI command : Case where the command is valid. The agent container should be built.
"""
del image_cleanup
dummy_def_yaml_file_path = Path(__file__).parent / 'assets/dummydef.yaml'
runner = testing.CliRunner()
_ = runner.invoke(rootcli.rootcli, ['agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'])
result = runner.invoke(rootcli.rootcli, ['agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'])
assert 'already exist' in result.output
assert result.exit_code == 0
@pytest.mark.docker
@pytest.mark.parametrize('image_cleanup', ['dummy'], indirect=True)
def testAgentBuildCLI_whenImageAlreadyExistsAndForceFlagPassed_buildCompletedAndNoRaiseImageNotFoundExcep(
image_cleanup):
"""Test ostorlab agent build CLI command : Case where the command is valid, the image exists and force flag is
passed. The agent container should be built.
"""
del image_cleanup
dummy_def_yaml_file_path = Path(__file__).parent / 'assets/dummydef.yaml'
runner = testing.CliRunner()
_ = runner.invoke(rootcli.rootcli, ['agent',
'build',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'])
result = runner.invoke(rootcli.rootcli, ['agent',
'build',
'--force',
f'--file={dummy_def_yaml_file_path}',
'--organization=ostorlab'])
assert 'already exist' not in result.output
assert result.exit_code == 0
|
Polar/polar_effectscatter.py | pyecharts/pyecharts_gallery | 759 | 11189064 | <reponame>pyecharts/pyecharts_gallery
import random
from pyecharts import options as opts
from pyecharts.charts import Polar
data = [(i, random.randint(1, 100)) for i in range(10)]
c = (
Polar()
.add(
"",
data,
type_="effectScatter",
effect_opts=opts.EffectOpts(scale=10, period=5),
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(title_opts=opts.TitleOpts(title="Polar-EffectScatter"))
.render("polar_effectscatter.html")
)
|
recipes/Python/138889_extract_email_addresses/recipe-138889.py | tdiprima/code | 2,023 | 11189081 | import re
def grab_email(files = []):
# if passed a list of text files, will return a list of
# email addresses found in the files, matched according to
# basic address conventions. Note: supports most possible
# names, but not all valid ones.
found = []
if files != None:
mailsrch = re.compile(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}')
for file in files:
for line in open(file,'r'):
found.extend(mailsrch.findall(line))
# remove duplicate elements
# borrowed from <NAME>' algorithm on ASPN Cookbook
u = {}
for item in found:
u[item] = 1
# return list of unique email addresses
return u.keys()
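# Illustrative usage sketch (comments only; file names are hypothetical):
# import glob
# addresses = grab_email(glob.glob('mail/*.txt'))
# print('\n'.join(addresses))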
|
examples/butteryfly_network.py | eric-downes/mesh-networking | 368 | 11189141 | # -*- coding: utf-8 -*-
# <NAME> 2016/10/08
# Butterfly Network
#
# Simulate a butterfly network where addresses can be used to determine routing paths.
# MIT 6.042J Mathematics for Computer Science: Lecture 9
# https://www.youtube.com/watch?v=bTyxpoi2dmM
import math
import time
from mesh.node import Node
from mesh.links import VirtualLink
from mesh.programs import Cache, BaseProgram
class ButterflySwitch(BaseProgram):
"""A switch that routes a packet coming in on any interface to all the other interfaces."""
def recv(self, packet, interface):
other_ifaces = set(self.node.interfaces) - {interface}
if packet and other_ifaces:
self.node.log("SWITCH ", (str(interface)+" >>>> <"+','.join(i.name for i in other_ifaces)+">").ljust(30), packet.decode())
self.node.send(packet, interfaces=other_ifaces)
def ask(type, question, fallback=None):
value = input(question)
if type == bool:
if fallback:
return not value[:1].lower() == "n"
else:
return value[:1].lower() == "y"
try:
return type(value)
except Exception:
return fallback
def print_grid(nodes):
for row in NODES:
output = ''
if row and row[-1].program.received:
output = ' : {}'.format(row[-1].program.received.pop())
print(' --- '.join(str(n).center(10) for n in row) + output)
if __name__ == "__main__":
num_rows = ask(int, "How many input nodes do you want? [8]:", 8)
num_cols = 2 + int(math.log(num_rows))
print('Creating Nodes ({}x{})...'.format(num_rows, num_cols))
IN_ADDRESSES = ['in:0b{0:b}'.format(a) for a in range(0, num_rows)]
OUT_ADDRESSES = ['out:0b{0:b}'.format(a) for a in range(0, num_rows)]
NODES = []
# make several rows of input nodes to output nodes
for row_idx in range(num_rows):
row = []
for col_idx in range(num_cols):
# add input node
if col_idx == 0:
addr = IN_ADDRESSES[row_idx]
Program = None
# add output node
elif col_idx == num_cols - 1:
addr = OUT_ADDRESSES[row_idx]
Program = Cache
# out middle node
else:
addr = 'row:{};col{}'.format(row_idx, col_idx)
Program = ButterflySwitch
row.append(Node(name=addr, mac_addr=addr, Program=Program))
NODES.append(row)
print('Creating Butterfly Links...')
# make the first links going directly across each row
for row_idx in range(num_rows):
for col_idx in range(num_cols - 1):
bridge = VirtualLink(name='{}<{}>{}'.format(col_idx, row_idx, col_idx + 1))
NODES[row_idx][col_idx].interfaces.append(bridge)
# node directly to the right
NODES[row_idx][col_idx + 1].interfaces.append(bridge)
bridge.start()
# TODO: finish diagonal linking algorithm
# give each node a second diagonal link, starting from right to left
for col_idx in reversed(range(1, num_cols)):
for row_idx in range(num_rows):
diagonal = VirtualLink(name='{}<{}>{}'.format(col_idx, row_idx, col_idx + 1))
NODES[row_idx][col_idx].interfaces.append(diagonal)
# node to the left and (up/down) to a different butterfly set
to_row = 1
NODES[to_row][col_idx - 1].interfaces.append(diagonal)
diagonal.start()
[n.start() for row in NODES for n in row]
print_grid(NODES)
print('Input the number of a node, followed by text to send')
print(' e.g. [$]: 0:hello world!')
dont_exit = True
try:
while dont_exit:
try:
in_id, in_text = str(input("#:text ")).split(':', 1)
except ValueError:
print('Input must be #:text')
continue
in_node = NODES[int(in_id)][0]
in_node.send(bytes(in_text, 'UTF-8'))
time.sleep(0.2)
# import ipdb; ipdb.set_trace()
print_grid(NODES)
except (KeyboardInterrupt, EOFError):
raise SystemExit(0)
|
ros/geometry/tf_conversions/src/tf_conversions/posemath.py | numberen/apollo-platform | 742 | 11189155 | # Copyright (c) 2010, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from geometry_msgs.msg import Pose, Point, Quaternion
from tf import transformations
import tf
import rospy
import numpy
from PyKDL import *
def fromTf(tf):
"""
:param tf: :class:`tf.Transformer` transform
:type tf: tuple (translation, quaternion)
:return: New :class:`PyKDL.Frame` object
Convert a pose returned by :meth:`tf.Transformer.lookupTransform` to a :class:`PyKDL.Frame`.
.. doctest::
>>> import rospy
>>> import tf
>>> import geometry_msgs.msg
>>> t = tf.Transformer(True, rospy.Duration(10.0))
>>> m = geometry_msgs.msg.TransformStamped()
>>> m.header.frame_id = 'THISFRAME'
>>> m.child_frame_id = 'CHILD'
>>> m.transform.translation.x = 668.5
>>> m.transform.rotation.w = 1.0
>>> t.setTransform(m)
>>> t.lookupTransform('THISFRAME', 'CHILD', rospy.Time(0))
((668.5, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))
>>> import tf_conversions.posemath as pm
>>> p = pm.fromTf(t.lookupTransform('THISFRAME', 'CHILD', rospy.Time(0)))
>>> print pm.toMsg(p * p)
position:
x: 1337.0
y: 0.0
z: 0.0
orientation:
x: 0.0
y: 0.0
z: 0.0
w: 1.0
"""
position, quaternion = tf
x, y, z = position
Qx, Qy, Qz, Qw = quaternion
return Frame(Rotation.Quaternion(Qx, Qy, Qz, Qw),
Vector(x, y, z))
def toTf(f):
"""
:param f: input pose
:type f: :class:`PyKDL.Frame`
Return a tuple (position, quaternion) for the pose.
"""
return ((f.p[0], f.p[1], f.p[2]), f.M.GetQuaternion())
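# Illustrative round trip (comments only): for
# f = Frame(Rotation.Quaternion(0, 0, 0, 1), Vector(1, 2, 3)),
# toTf(f) returns ((1.0, 2.0, 3.0), (0.0, 0.0, 0.0, 1.0)) and fromTf(toTf(f))
# reconstructs an equivalent Frame.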
# to and from pose message
def fromMsg(p):
"""
:param p: input pose
:type p: :class:`geometry_msgs.msg.Pose`
:return: New :class:`PyKDL.Frame` object
Convert a pose represented as a ROS Pose message to a :class:`PyKDL.Frame`.
"""
return Frame(Rotation.Quaternion(p.orientation.x,
p.orientation.y,
p.orientation.z,
p.orientation.w),
Vector(p.position.x, p.position.y, p.position.z))
def toMsg(f):
"""
:param f: input pose
:type f: :class:`PyKDL.Frame`
Return a ROS Pose message for the Frame f.
"""
p = Pose()
p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w = f.M.GetQuaternion()
p.position.x = f.p[0]
p.position.y = f.p[1]
p.position.z = f.p[2]
return p
# to and from matrix
def fromMatrix(m):
"""
:param m: input 4x4 matrix
:type m: :func:`numpy.array`
:return: New :class:`PyKDL.Frame` object
Convert a pose represented as a 4x4 numpy array to a :class:`PyKDL.Frame`.
"""
return Frame(Rotation(m[0,0], m[0,1], m[0,2],
m[1,0], m[1,1], m[1,2],
m[2,0], m[2,1], m[2,2]),
Vector(m[0,3], m[1, 3], m[2, 3]))
def toMatrix(f):
"""
:param f: input pose
:type f: :class:`PyKDL.Frame`
Return a numpy 4x4 array for the Frame F.
"""
return numpy.array([[f.M[0,0], f.M[0,1], f.M[0,2], f.p[0]],
[f.M[1,0], f.M[1,1], f.M[1,2], f.p[1]],
[f.M[2,0], f.M[2,1], f.M[2,2], f.p[2]],
[0,0,0,1]])
# from camera parameters
def fromCameraParams(cv, rvec, tvec):
"""
:param cv: OpenCV module
:param rvec: A Rodrigues rotation vector - see :func:`Rodrigues2`
:type rvec: 3x1 :class:`CvMat`
:param tvec: A translation vector
:type tvec: 3x1 :class:`CvMat`
:return: New :class:`PyKDL.Frame` object
For use with :func:`FindExtrinsicCameraParams2`::
import cv
import tf_conversions.posemath as pm
...
rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
tvec = cv.CreateMat(3, 1, cv.CV_32FC1)
cv.FindExtrinsicCameraParams2(model, corners, intrinsic_matrix, kc, rvec, tvec)
pose = pm.fromCameraParams(cv, rvec, tvec)
"""
m = numpy.array([ [ 0, 0, 0, tvec[0,0] ],
[ 0, 0, 0, tvec[1,0] ],
[ 0, 0, 0, tvec[2,0] ],
[ 0, 0, 0, 1.0 ] ], dtype = numpy.float32)
cv.Rodrigues2(rvec, m[:3,:3])
return fromMatrix(m)
|
utils/util.py | xyupeng/ContrastiveCrop | 148 | 11189204 | import math
import numpy as np
import torch
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class TrackMeter(object):
"""Compute and store values"""
def __init__(self):
self.reset()
def reset(self):
self.data = []
self.sum = 0
self.avg = 0
self.max_val = float('-inf')
self.max_idx = -1
def update(self, val, idx=None):
self.data.append(val)
self.sum += val
self.avg = self.sum / len(self.data)
if val > self.max_val:
self.max_val = val
self.max_idx = idx if idx else len(self.data)
def last(self, k):
assert 0 < k <= len(self.data)
return sum(self.data[-k:]) / k
def set_seed(seed=42):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def update_ema(model, model_ema, m=0.999):
for param, param_ema in zip(model.parameters(), model_ema.parameters()):
param_ema.data = param_ema.data * m + param.data * (1. - m)
# BN running_mean and running_var are buffers
for buf, buf_ema in zip(model.buffers(), model_ema.buffers()):
buf_ema.data = buf.data # buf_ema = buf is wrong. should not share memory
def interleave(x, batch_size):
# x.shape[0] == batch_size * num_batches
s = list(x.shape)
return x.reshape([-1, batch_size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
def de_interleave(x, batch_size):
s = list(x.shape)
return x.reshape([batch_size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
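# Illustrative example (comments only): with batch_size = 2 and
# x = torch.arange(6), i.e. tensor([0, 1, 2, 3, 4, 5]) holding 3 batches of size 2,
# interleave(x, 2) gives tensor([0, 2, 4, 1, 3, 5]) and
# de_interleave(interleave(x, 2), 2) restores the original order.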
def count_params(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def _get_lr(cfg, step):
lr = cfg.lr
if cfg.type == 'Cosine': # Cosine Anneal
start_step = cfg.get('start_step', 1)
eta_min = lr * cfg.decay_rate
lr = eta_min + (lr - eta_min) * (1 + math.cos(math.pi * (step - start_step) / cfg.steps)) / 2
elif cfg.type == 'MultiStep': # MultiStep
num_steps = np.sum(step > np.asarray(cfg.decay_steps))
lr = lr * (cfg.decay_rate ** num_steps)
else:
raise NotImplementedError(cfg.type)
return lr
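# Illustrative arithmetic (comments only; the cfg values are assumptions): for
# cfg.type == 'Cosine' with lr = 0.1, decay_rate = 0.01, steps = 100, start_step = 1,
# eta_min = 0.001 and at step = 51 the cosine term is cos(pi * 50 / 100) = 0, so
# lr = 0.001 + (0.1 - 0.001) * (1 + 0) / 2 = 0.0505.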
def adjust_learning_rate(cfg, optimizer, step, batch_idx=0, num_batches=100):
start_step = cfg.get('start_step', 1)
if step < cfg.get('warmup_steps', 0) + start_step:
warmup_to = _get_lr(cfg, cfg.warmup_steps + 1)
p = (step - start_step + batch_idx / num_batches) / cfg.warmup_steps
lr = cfg.warmup_from + p * (warmup_to - cfg.warmup_from)
else:
lr = _get_lr(cfg, step)
# update optimizer lr
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_lr_simsiam(cfg, optimizer, step):
init_lr = cfg.lr
lr = _get_lr(cfg, step)
for param_group in optimizer.param_groups:
if 'fix_lr' in param_group and param_group['fix_lr']:
param_group['lr'] = init_lr
else:
param_group['lr'] = lr
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
# millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
# if millis > 0 and i <= 2:
# f += str(millis) + 'ms'
# i += 1
if f == '':
f = '0ms'
return f
|
django/contrib/gis/forms/__init__.py | pomarec/django | 285 | 11189217 | <filename>django/contrib/gis/forms/__init__.py
from django.forms import *
from .fields import (GeometryField, GeometryCollectionField, PointField,
MultiPointField, LineStringField, MultiLineStringField, PolygonField,
MultiPolygonField)
from .widgets import BaseGeometryWidget, OpenLayersWidget, OSMWidget
|
alipay/aop/api/domain/InstCashPoolAccountMappingVO.py | antopen/alipay-sdk-python-all | 213 | 11189291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InstAccountDTO import InstAccountDTO
from alipay.aop.api.domain.InstAccountDTO import InstAccountDTO
class InstCashPoolAccountMappingVO(object):
def __init__(self):
self._cash_pool_id = None
self._inst_account = None
self._main_account = None
self._operator = None
self._parent_inst_account = None
@property
def cash_pool_id(self):
return self._cash_pool_id
@cash_pool_id.setter
def cash_pool_id(self, value):
self._cash_pool_id = value
@property
def inst_account(self):
return self._inst_account
@inst_account.setter
def inst_account(self, value):
if isinstance(value, InstAccountDTO):
self._inst_account = value
else:
self._inst_account = InstAccountDTO.from_alipay_dict(value)
@property
def main_account(self):
return self._main_account
@main_account.setter
def main_account(self, value):
self._main_account = value
@property
def operator(self):
return self._operator
@operator.setter
def operator(self, value):
self._operator = value
@property
def parent_inst_account(self):
return self._parent_inst_account
@parent_inst_account.setter
def parent_inst_account(self, value):
if isinstance(value, InstAccountDTO):
self._parent_inst_account = value
else:
self._parent_inst_account = InstAccountDTO.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.cash_pool_id:
if hasattr(self.cash_pool_id, 'to_alipay_dict'):
params['cash_pool_id'] = self.cash_pool_id.to_alipay_dict()
else:
params['cash_pool_id'] = self.cash_pool_id
if self.inst_account:
if hasattr(self.inst_account, 'to_alipay_dict'):
params['inst_account'] = self.inst_account.to_alipay_dict()
else:
params['inst_account'] = self.inst_account
if self.main_account:
if hasattr(self.main_account, 'to_alipay_dict'):
params['main_account'] = self.main_account.to_alipay_dict()
else:
params['main_account'] = self.main_account
if self.operator:
if hasattr(self.operator, 'to_alipay_dict'):
params['operator'] = self.operator.to_alipay_dict()
else:
params['operator'] = self.operator
if self.parent_inst_account:
if hasattr(self.parent_inst_account, 'to_alipay_dict'):
params['parent_inst_account'] = self.parent_inst_account.to_alipay_dict()
else:
params['parent_inst_account'] = self.parent_inst_account
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InstCashPoolAccountMappingVO()
if 'cash_pool_id' in d:
o.cash_pool_id = d['cash_pool_id']
if 'inst_account' in d:
o.inst_account = d['inst_account']
if 'main_account' in d:
o.main_account = d['main_account']
if 'operator' in d:
o.operator = d['operator']
if 'parent_inst_account' in d:
o.parent_inst_account = d['parent_inst_account']
return o
|
modeling/fuzzware_modeling/model_detection.py | felkal/fuzzware | 106 | 11189297 | import angr
import claripy
from itertools import chain
from .angr_utils import is_ast_mmio_address, contains_var, all_states, has_conditional_statements, state_vars_out_of_scope, state_contains_tracked_mmio_path_constraints, state_variables_involved_in_loop, in_scope_register_values
MAX_SET_MODEL_VAL_NUM = 16
# ======= Passthrough Model =======
def check_is_passthrough_model(state, mmio_constrains_path, returned, vars_dead):
""" Compute Passthrough model
Check whether a state represents a passthrough access
"""
if mmio_constrains_path:
print("[PASSTHROUGH] [-] Path is constrained")
return False
if not vars_dead:
print("[PASSTHROUGH] [-] Vars not dead")
return False
if state.liveness.tracked_vars:
print("[PASSTHROUGH] [*] Checking vars in path")
print("[PASSTHROUGH] [*] Vars: {}".format(state.liveness.tracked_vars))
mmio_write_actions = [ev for ev in state.history.actions
if isinstance(ev, angr.state_plugins.sim_action.SimActionData)
and ev.action == "write" and is_ast_mmio_address(state, ev.addr)]
all_vars_written = True
for var in state.liveness.tracked_vars:
all_vars_written = all_vars_written and any([contains_var(action.data, var) for action in mmio_write_actions])
if all_vars_written:
print("[PASSTHROUGH] [+] All MMIO vars written to mmio location")
return True
print("[PASSTHROUGH] [-] Default case")
return False
# ======= Constant Model =======
def check_is_constant_model(states):
""" Compute Constant model prerequesites
Check whether an MMIO output can be set to a constant value.
Two situations can lead there:
1. MMIO is used as a guarding status value for a busy while(MMIO_VAL){} loop
2. MMIO value is not used at all and no conditional statements are present in the executed code until function return
"""
if any([not state_vars_out_of_scope(state) for state in states]):
print("[CONST] [-] vars not dead")
return False
else:
paths_constrained = [state_contains_tracked_mmio_path_constraints(state) for state in states]
if not all(paths_constrained):
if any(paths_constrained):
print("[CONST] [-] Vars are dead and some some paths are constrained, but not all. We might be shadowed")
return False
known_unconditional_bbls = set()
for state in states:
if has_conditional_statements(state, known_unconditional_bbls):
print("[CONST] [-] Vars are dead but path is not constrained by MMIO for all states and we found conditionals. We might be shadowed")
return False
# There are no conditional statements, we are not shadowed and the variable is indeed not used
print("[CONST] [+] Variable is not used and no conditional statements may be shadowing us")
return True
print("[CONST] got {} states, #tracked_mmio_vars: {}".format(len(states), list(map(lambda s: len(s.liveness.tracked_vars), states))))
# First collect a representative last and prev-to-last constraint, making sure they are the same amongst states along the way
reference_var = None
normalized_last_constraint = None
normalized_prev_to_last_constraint = None
for state in sorted(states, key=lambda s:len(s.liveness.tracked_vars)):
last_var = state.liveness.tracked_vars[-1]
if reference_var is None:
reference_var = claripy.BVS("ref", last_var.size())
last_tracked_var_constraints = [state.solver.simplify(state.liveness.base_snapshot.unconstrain(guard)) for guard in state.history.jump_guards if contains_var(guard, last_var)]
# We are looking for a busy loop that gets exited by a single jump on the last variable meeting a condition
if len(last_tracked_var_constraints) != 1:
print("[CONST] [-] More than one constraint on last variable, assuming non-constant")
return False
# We also need all states to have the same (presumably inevitable) exit condition
curr_last_constraint = last_tracked_var_constraints[0].replace(last_var, reference_var)
if normalized_last_constraint is None:
normalized_last_constraint = curr_last_constraint
elif not claripy.is_true(normalized_last_constraint == curr_last_constraint):
print("[CONST] [-] Encountered different exit conditions amongst states ('{}' != '{}')".format(normalized_last_constraint, curr_last_constraint))
return False
if len(state.liveness.tracked_vars) == 1:
continue
pre_to_last_vars = state.liveness.tracked_vars[:-1]
# Next up we make sure that all previous-to-last constraints are the same in nature
prev_to_last_tracked_var_constraints = []
for var in pre_to_last_vars:
prev_to_last_tracked_var_constraints += [state.solver.simplify(guard).replace(var, reference_var) for guard in state.history.jump_guards if contains_var(guard, var)]
for constraint in prev_to_last_tracked_var_constraints:
if normalized_prev_to_last_constraint is None:
normalized_prev_to_last_constraint = constraint
elif not claripy.is_true(normalized_prev_to_last_constraint == constraint):
print("[CONST] [-] Encountered different previous-to-last constraint amongst states")
if normalized_prev_to_last_constraint is None:
print("[CONST] [-] We have no previous constraint to compare exit condition against")
return False
# Now check that all previous conditions are exactly Not(exit condition)
if not claripy.is_true(state.solver.simplify(
normalized_last_constraint == claripy.Not(normalized_prev_to_last_constraint)
)):
print("[CONST] [-] Not(prev-to-last constraint) != last constraint")
return False
print("[CONST] [+] All checks done")
return True
# ======= Set Model =======
def check_and_gen_set_model(states):
""" Compute Set model
Idea: Model representing access to status register which is used in conditional execution, if-statements or a switch/case construct
Goal: Find exhaustive set of values triggering all paths, knowing that values outside the set do not contribute additional behavior
"""
# Check set model preconditions
for state in states:
# If not all variables are out of scope, we don't know whether they are still going to be checked later
if not state_vars_out_of_scope(state):
print("[SET Model] [-] some states have live variables ({})".format(state))
return None
if state_variables_involved_in_loop(state):
print("[SET Model] [-] variable used in loop")
return None
# Collect variables
variables = set()
for state in states:
for var in state.liveness.tracked_vars:
variables.add(var)
# For every variable, collect and process constraints per state
vals = None
for var in variables:
# Collect constraints for variable per state (position in guards corresponds to index of state)
guards = []
for state in states:
curr_guards = [guard for guard in state.history.jump_guards if guard.symbolic and contains_var(guard, var)]
guards.append(claripy.And(*curr_guards))
if any(map(lambda guard: any(map(lambda state_restore_reg_bitvec: contains_var(guard, state_restore_reg_bitvec), state.liveness.base_snapshot.all_initial_bitvecs)), guards)):
print("[SET Model] [-] detected state-defined register in relevant jump guard, not assigning a set model")
vals = None
break
# Combine constraints on variable for each other state
constraints = []
for i in range(len(states)):
own_jumpguard = guards[i]
curr_constraint = own_jumpguard
# For the current state, make all constraints not collide with other state's constraints
for j in range(len(guards)):
# Skip our own constraints
if j != i:
other_jumpguard = guards[j]
curr_constraint = claripy.And(curr_constraint, claripy.Or(
# a) either own_jumpguard implies other_jumpguard
own_jumpguard == claripy.Or(own_jumpguard, other_jumpguard)
,
# b) or we need to find a value which does not take other path
claripy.Not(other_jumpguard)
))
# Add the variable's combined constraints for the current state
constraints.append(curr_constraint)
# After collecting constraints
curr_vals = set()
for i in range(len(states)):
curr_vals.add(states[i].solver.min(var, extra_constraints=[constraints[i]]))
if vals is None:
vals = curr_vals
elif vals != curr_vals:
print("[SET Model] [-] got ambiguous sets")
return None
if vals is None:
print("[SET Model] [-] could not find values")
return None
print("[SET Model]: [+] Got vals: {}".format(vals))
# For single-valued sets, apply constant model
if len(vals) == 1:
return None
else:
return sorted(vals)
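# Illustrative sketch (added for clarity, not part of the original analysis code):
# for firmware that branches on a status register, e.g.
#   status = read_mmio(STATUS_REG)
#   if status == 0:      handle_idle()
#   elif status & 0x2:   handle_rx()
#   else:                handle_error()
# check_and_gen_set_model() would aim to return a small sorted value set such as
# [0, 1, 2] -- one representative (minimal) value per feasible path -- since any
# other register value only re-triggers one of these paths. The register name and
# handlers above are hypothetical.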
def min_bitmask(state, ast, var):
ast = state.liveness.base_snapshot.unconstrain(ast)
simplified_ast = state.solver.simplify(ast)
num_bits = var.size()
mask = claripy.BVV((1 << num_bits) - 1, num_bits)
for i in range(num_bits):
# flip bit to 0 and retry
mask &= ~(1 << i)
replacement_var = var & mask
replaced_ast = state.solver.simplify(simplified_ast.replace(var, replacement_var))
if not state.solver.is_true(state.solver.simplify(simplified_ast == replaced_ast)):
mask |= (1 << i)
return state.solver.eval(mask)
# ======= Bitextract Model =======
def compute_bitextract_mask(state):
""" Compute Bitextract model
"""
write_actions = [ action for action in state.history.actions if
isinstance(action, angr.state_plugins.sim_action.SimActionData)
and action.action == 'write' and action.type == 'mem'
# and (print(action),print(action.actual_value) or True)
and action.actual_value is not None
and state.solver.symbolic(action.actual_value) ]
masks = {}
# Look at all writes
for action in write_actions:
for var in state.liveness.tracked_vars:
if contains_var(action.actual_value, var):
if var not in masks:
masks[var] = set()
masks[var].add(min_bitmask(state, action.actual_value, var))
break
# Look at all jump guards
for guard in state.history.jump_guards:
for var in state.liveness.tracked_vars:
if contains_var(guard, var):
if var not in masks:
masks[var] = set()
masks[var].add(min_bitmask(state, guard, var))
break
# Look at all in-scope registers
for regval in in_scope_register_values(state):
for var in state.liveness.tracked_vars:
if contains_var(regval, var):
if var not in masks:
masks[var] = set()
masks[var].add(min_bitmask(state, regval, var))
return masks
# ======= Config Map Creation =======
def bitmask_to_byte_shift_config(bitmask):
if bitmask == 0:
return 0xffffffff, 0
min_bit, max_bit = -1, -1
for i in range(32):
if bitmask & 1:
if min_bit == -1:
min_bit = i
max_bit = i
bitmask >>= 1
min_byte, max_byte = min_bit // 8, max_bit // 8
shift = min_byte * 8
size = max_byte - min_byte + 1
return size, shift
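# Worked example (added for illustration, not in the original file): a mask that
# only covers byte 1, e.g. 0x0000ff00, has min_bit=8 and max_bit=15, hence
# min_byte == max_byte == 1, yielding size=1 and shift=8, i.e. a single-byte
# access shifted left by 8 bits. A zero mask keeps the passthrough-style
# (0xffffffff, 0) pair returned above.
#   bitmask_to_byte_shift_config(0x0000ff00)  # -> (1, 8)
#   bitmask_to_byte_shift_config(0x00ffff00)  # -> (2, 8): bytes 1-2, 16-bit access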
def create_model_config_map_errored(pc):
return {'errored': {'0x{:08x}'.format(pc): 'TBD'}}
def hamming_weight(val):
res = 0
while val:
if val & 1:
res += 1
val = val >> 1
return res
def create_model_config_map(pc, representative_state, is_passthrough, is_constant, bitmask, set_vals):
mmio_addr, mmio_access_size = representative_state.liveness.base_snapshot.mmio_addr, representative_state.liveness.base_snapshot.mmio_access_size
result = {}
pc &= (~0x1)
entry_name = "pc_{:08x}_mmio_{:08x}".format(pc, mmio_addr)
config_entry_map = {}
config_entry_map['addr'] = mmio_addr
config_entry_map['pc'] = pc
config_entry_map['access_size'] = mmio_access_size
model_type = "unmodeled"
if is_passthrough:
model_type = "passthrough"
config_entry_map['init_val'] = 0
elif is_constant:
assert(representative_state is not None)
model_type = "constant"
config_entry_map['val'] = representative_state.solver.min(representative_state.liveness.tracked_vars[-1])
elif set_vals is not None and len(set_vals) <= MAX_SET_MODEL_VAL_NUM:
model_type = "set"
config_entry_map['vals'] = set_vals
elif bitmask != 0:
# Only assign this if no completely replacing model was identified
byte_size, left_shift = bitmask_to_byte_shift_config(bitmask)
# Only assign bit mask if it actually reduces the access size
if hamming_weight(bitmask) < mmio_access_size * 8:
model_type = "bitextract"
config_entry_map['size'] = byte_size
config_entry_map['left_shift'] = left_shift
config_entry_map['mask'] = bitmask
result[model_type] = {}
result[model_type][entry_name]=config_entry_map
return result
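# Sketch of the resulting config map (illustrative addresses, not taken from the
# original source): a constant-model hit produces a nested dict along the lines of
#   {
#       "constant": {
#           "pc_08001234_mmio_40021000": {
#               "addr": 0x40021000, "pc": 0x08001234,
#               "access_size": 4, "val": 0,
#           }
#       }
#   }
# The outer key is the detected model type; "bitextract" entries instead carry
# 'size', 'left_shift' and 'mask', and "set" entries carry 'vals'.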
def detect_model(pc, simulation, is_timed_out=False, pre_fork_state=None):
if is_timed_out:
states = [pre_fork_state]
else:
states = simulation.found
bitmask = 0
is_constant = False
is_passthrough = False
model_config_map = None
set_vals = None
state = None
tracked_mmio_constrains_any_path = False
if is_timed_out:
"""
For the timeout case, we only have a parent state before the first split.
In this case, only the following models may possibly apply:
- bitextract (a mask/extraction has already been applied)
- passthrough (value has already been discarded without applying path constraints)
"""
state = states[0]
returned = state.liveness.returned
all_vars_out_of_scope = state_vars_out_of_scope(state)
constrains_path = state_contains_tracked_mmio_path_constraints(state)
tracked_mmio_constrains_any_path |= constrains_path
# 1. Check for passthrough model
if all_vars_out_of_scope:
is_passthrough = check_is_passthrough_model(state, constrains_path, returned, all_vars_out_of_scope)
else:
print("[PASSTHROUGH] [-] Not all vars out of scope")
# 2. Check for bitextract model
min_masks = compute_bitextract_mask(state)
print("Got minimal mask set: {}".format(min_masks)) # list(map(hex, min_masks)))
if min_masks:
for var, masks in min_masks.items():
for mask in masks:
bitmask |= mask
print("State: {}\nReturned: {}\nVars dead: {}\nIs config reg: {}\nbitmask: {:x}".format(state, returned, all_vars_out_of_scope, is_passthrough, bitmask))
elif simulation.found:
states = simulation.found
is_passthrough = True
for state in states:
returned = state.liveness.returned
all_vars_out_of_scope = state_vars_out_of_scope(state)
constrains_path = state_contains_tracked_mmio_path_constraints(state)
tracked_mmio_constrains_any_path |= constrains_path
if is_passthrough:
curr_is_passthrough = check_is_passthrough_model(state, constrains_path, returned, all_vars_out_of_scope)
is_passthrough = is_passthrough and curr_is_passthrough
min_masks = compute_bitextract_mask(state)
print("Got minimal mask set: {}".format(min_masks)) # list(map(hex, min_masks)))
if min_masks:
for var, masks in min_masks.items():
for mask in masks:
bitmask |= mask
print("State: {}\nReturned: {}\nVars dead: {}\nIs config reg: {}\nbitmask: {:x}".format(state, returned, all_vars_out_of_scope, curr_is_passthrough, bitmask))
set_vals = check_and_gen_set_model(states)
is_constant = check_is_constant_model(states)
# We treat the config model in a special way here and ignore deep calls
if not tracked_mmio_constrains_any_path and not is_passthrough and all(map(lambda state: state.globals['config_write_performed'], all_states(simulation))):
print("[PASSTHROUGH] [WARNING] Assigning low-confidence config model")
is_passthrough = True
model_config_map = create_model_config_map(pc, state, is_passthrough, is_constant, bitmask, set_vals)
return model_config_map, is_passthrough, is_constant, bitmask, set_vals |
configs/search_config.py | MarcAntoineAlex/DenseNAS-1 | 305 | 11189303 |
from tools.collections import AttrDict
__C = AttrDict()
search_cfg = __C
__C.search_params=AttrDict()
__C.search_params.arch_update_epoch=10
__C.search_params.val_start_epoch=120
__C.search_params.sample_policy='prob' # prob uniform
__C.search_params.weight_sample_num=1
__C.search_params.softmax_temp=1.
__C.search_params.adjoin_connect_nums = []
__C.search_params.net_scale = AttrDict()
__C.search_params.net_scale.chs = []
__C.search_params.net_scale.fm_sizes = []
__C.search_params.net_scale.stage = []
__C.search_params.net_scale.num_layers = []
__C.search_params.PRIMITIVES_stack = [
'mbconv_k3_t3',
'mbconv_k3_t6',
'mbconv_k5_t3',
'mbconv_k5_t6',
'mbconv_k7_t3',
'mbconv_k7_t6',
'skip_connect',
]
__C.search_params.PRIMITIVES_head = [
'mbconv_k3_t3',
'mbconv_k3_t6',
'mbconv_k5_t3',
'mbconv_k5_t6',
'mbconv_k7_t3',
'mbconv_k7_t6',
]
__C.optim=AttrDict()
__C.optim.init_dim=16
__C.optim.head_dim=16
__C.optim.last_dim=1984
__C.optim.weight=AttrDict()
__C.optim.weight.init_lr=0.1
__C.optim.weight.min_lr=1e-4
__C.optim.weight.lr_decay_type='cosine'
__C.optim.weight.momentum=0.9
__C.optim.weight.weight_decay=4e-5
__C.optim.arch=AttrDict()
__C.optim.arch.alpha_lr=3e-4
__C.optim.arch.beta_lr=3e-4
__C.optim.arch.weight_decay=1e-3
__C.optim.if_sub_obj=True
__C.optim.sub_obj=AttrDict()
__C.optim.sub_obj.type='latency' # latency / flops
__C.optim.sub_obj.skip_reg=False
__C.optim.sub_obj.log_base=15.0
__C.optim.sub_obj.sub_loss_factor=0.1
__C.optim.sub_obj.latency_list_path=''
|
src/genie/libs/parser/iosxe/tests/ShowIpMsdpPeer/cli/equal/device_output_2_expected.py | balmasea/genieparser | 204 | 11189328 | expected_output = {
"vrf": {
"VRF1": {
"peer": {
"10.1.100.2": {
"peer_as": 1,
"session_state": "Up",
"resets": "0",
"connect_source": "Loopback0",
"connect_source_address": "10.1.100.1",
"elapsed_time": "00:15:38",
"statistics": {
"queue": {"size_in": 0, "size_out": 0},
"sent": {
"data_message": 17,
"sa_message": 8,
"sa_response": 0,
"data_packets": 1,
},
"received": {
"data_message": 15,
"sa_message": 0,
"sa_request": 0,
"data_packets": 0,
},
"error": {"rpf_failure": 0},
"established_transitions": 1,
"output_msg_discarded": 0,
},
"conn_count_cleared": "00:15:38",
"sa_filter": {
"in": {
"(S,G)": {"filter": "none", "route_map": "none"},
"RP": {"filter": "none", "route_map": "none"},
},
"out": {
"(S,G)": {"filter": "none", "route_map": "none"},
"RP": {"filter": "none", "route_map": "none"},
},
},
"sa_request": {"input_filter": "none"},
"ttl_threshold": 0,
"sa_learned_from": 0,
"signature_protection": False,
}
}
}
}
}
|
extraPackages/matplotlib-3.0.3/examples/lines_bars_and_markers/simple_plot.py | dolboBobo/python3_ios | 130 | 11189334 |
"""
===========
Simple Plot
===========
Create a simple plot.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions and methods is shown in this example:
matplotlib.axes.Axes.plot
matplotlib.pyplot.plot
matplotlib.pyplot.subplots
matplotlib.figure.Figure.savefig
|
ByteNet/translator.py | paarthneekhara/byteNet-tensorflow | 345 | 11189339 | import tensorflow as tf
import ops
class ByteNet_Translator:
def __init__(self, options):
self.options = options
embedding_channels = 2 * options['residual_channels']
self.w_source_embedding = tf.get_variable('w_source_embedding',
[options['source_vocab_size'], embedding_channels],
initializer=tf.truncated_normal_initializer(stddev=0.02))
self.w_target_embedding = tf.get_variable('w_target_embedding',
[options['target_vocab_size'], embedding_channels],
initializer=tf.truncated_normal_initializer(stddev=0.02))
def build_model(self):
options = self.options
self.source_sentence = tf.placeholder('int32',
[None, None], name = 'source_sentence')
self.target_sentence = tf.placeholder('int32',
[None, None], name = 'target_sentence')
target_1 = self.target_sentence[:,0:-1]
target_2 = self.target_sentence[:,1:]
source_embedding = tf.nn.embedding_lookup(self.w_source_embedding,
self.source_sentence, name = "source_embedding")
target_1_embedding = tf.nn.embedding_lookup(self.w_target_embedding,
target_1, name = "target_1_embedding")
curr_input = source_embedding
for layer_no, dilation in enumerate(options['encoder_dilations']):
curr_input = ops.byetenet_residual_block(curr_input, dilation,
layer_no, options['residual_channels'],
options['encoder_filter_width'], causal = False, train = True)
encoder_output = curr_input
combined_embedding = target_1_embedding + encoder_output
curr_input = combined_embedding
for layer_no, dilation in enumerate(options['decoder_dilations']):
curr_input = ops.byetenet_residual_block(curr_input, dilation,
layer_no, options['residual_channels'],
options['decoder_filter_width'], causal = True, train = True)
logits = ops.conv1d(tf.nn.relu(curr_input),
options['target_vocab_size'], name = 'logits')
print "logits", logits
logits_flat = tf.reshape(logits, [-1, options['target_vocab_size']])
target_flat = tf.reshape(target_2, [-1])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels = target_flat, logits = logits_flat)
self.loss = tf.reduce_mean(loss)
self.arg_max_prediction = tf.argmax(logits_flat, 1)
tf.summary.scalar('loss', self.loss)
def build_translator(self, reuse = False):
if reuse:
tf.get_variable_scope().reuse_variables()
options = self.options
self.t_source_sentence = tf.placeholder('int32',
[None, None], name = 'source_sentence')
self.t_target_sentence = tf.placeholder('int32',
[None, None], name = 'target_sentence')
source_embedding = tf.nn.embedding_lookup(self.w_source_embedding,
self.t_source_sentence, name = "source_embedding")
target_embedding = tf.nn.embedding_lookup(self.w_target_embedding,
self.t_target_sentence, name = "target_embedding")
curr_input = source_embedding
for layer_no, dilation in enumerate(options['encoder_dilations']):
curr_input = ops.byetenet_residual_block(curr_input, dilation,
layer_no, options['residual_channels'],
options['encoder_filter_width'], causal = False, train = False)
encoder_output = curr_input[:,0:tf.shape(self.t_target_sentence)[1],:]
combined_embedding = target_embedding + encoder_output
curr_input = combined_embedding
for layer_no, dilation in enumerate(options['decoder_dilations']):
curr_input = ops.byetenet_residual_block(curr_input, dilation,
layer_no, options['residual_channels'],
options['decoder_filter_width'], causal = True, train = False)
logits = ops.conv1d(tf.nn.relu(curr_input),
options['target_vocab_size'], name = 'logits')
logits_flat = tf.reshape(logits, [-1, options['target_vocab_size']])
probs_flat = tf.nn.softmax(logits_flat)
self.t_probs = tf.reshape(probs_flat,
[-1, tf.shape(logits)[1], options['target_vocab_size']])
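# Hypothetical usage sketch (not part of the original file; assumes the TF1-style
# session API that the graph above is built with): after build_translator(), the
# per-step target distribution could be queried roughly like
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       probs = sess.run(model.t_probs, feed_dict={
#           model.t_source_sentence: source_batch,   # int32 [batch, src_len]
#           model.t_target_sentence: target_prefix,  # int32 [batch, decoded_so_far]
#       })
# probs then has shape [batch, decoded_so_far, target_vocab_size]; greedy decoding
# would append argmax(probs[:, -1, :]) to target_prefix and repeat.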
def main():
options = {
'source_vocab_size' : 250,
'target_vocab_size' : 250,
'residual_channels' : 512,
'encoder_dilations' : [ 1,2,4,8,16,
1,2,4,8,16
],
'decoder_dilations' : [ 1,2,4,8,16,
1,2,4,8,16
],
'encoder_filter_width' : 3,
'decoder_filter_width' : 3
}
md = ByteNet_Translator(options)
md.build_model()
md.build_translator(reuse = True)
if __name__ == '__main__':
main() |
bindings/nodejs/nan/lib/sdb.gyp | XVilka/sdb | 123 | 11189351 | {
"targets": [
{
"target_name": "libsdb",
"type": "static_library",
"include_dirs": [
"sdb/src"
],
"sources": [
"sdb/src/sdb.c",
"sdb/src/fmt.c",
"sdb/src/array.c",
"sdb/src/base64.c",
"sdb/src/buffer.c",
"sdb/src/cdb.c",
"sdb/src/cdb_make.c",
"sdb/src/disk.c",
"sdb/src/ht.c",
"sdb/src/journal.c",
"sdb/src/json.c",
"sdb/src/lock.c",
"sdb/src/ls.c",
"sdb/src/match.c",
"sdb/src/ns.c",
"sdb/src/num.c",
"sdb/src/query.c",
"sdb/src/sdb_version.h",
"sdb/src/util.c",
]
}
]
}
|
ZeroingArrays_2016/TestSizes.py | riverar/blogstuff | 323 | 11189355 |
"""
Test compilation of char array[BUF_SIZE] = {} versus char array[BUF_SIZE] = {0}
"""
import os
import subprocess
import sys
terse = False
if len(sys.argv) > 1:
if sys.argv[1].lower() == "--terse":
terse = True
else:
print "--terse is the only option to this program"
test_code = r'''// Test of code generation
#include <stdio.h>
void ZeroArray1()
{
char buffer[BUF_SIZE] = { 0 };
printf("Don't optimize away my empty buffer.%s\n", buffer);
}
void ZeroArray2()
{
char buffer[BUF_SIZE] = {};
printf("Don't optimize away my empty buffer.%s\n", buffer);
}
'''
open("TestCode.cpp", "w").write(test_code)
os.environ["INCLUDE"] = r"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\INCLUDE;C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\ATLMFC\INCLUDE;C:\Program Files (x86)\Windows Kits\10\include\10.0.10586.0\ucrt;C:\Program Files (x86)\Windows Kits\NETFXSDK\4.6.1\include\um;C:\Program Files (x86)\Windows Kits\10\include\10.0.10586.0\shared;C:\Program Files (x86)\Windows Kits\10\include\10.0.10586.0\um;C:\Program Files (x86)\Windows Kits\10\include\10.0.10586.0\winrt;C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\INCLUDE;C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\ATLMFC\INCLUDE;C:\Program Files (x86)\Microsoft SDKs\Windows\v7.0A\include"
startpath = os.environ["PATH"]
for bitness in [32, 64]:
for options in ["/O1 /Oy-", "/O2 /Oy-", "/O2"]:
if bitness == 64:
options = options.replace(" /Oy-", "")
print "%d-bit with %s:" % (bitness, options),
if not terse:
print
if bitness == 32:
os.environ["PATH"] = r"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\BIN;" + startpath
else:
os.environ["PATH"] = r"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\BIN\AMD64;" + startpath
total = 0
count = 0
last_diff = None
total_sizes = [(0,0)]
for buf_size in range(1,65):
# Using /O2 instead of /O1 changes the results
# On 32-bit /O2 buids /Oy- changes the results
# /Oi seems to make no difference
command = "cl /nologo /c TestCode.cpp %s /FAcs /DBUF_SIZE=%d" % (options, buf_size)
subprocess.check_output(command)
lastline = ""
size1 = None
size2 = None
SSE_used = False
SSE_used_for1 = None
SSE_used_for2 = None
for line in open("TestCode.cod").readlines():
if line.count(" PROC") > 0:
SSE_used = False
if line.lower().count("xmm") > 0:
SSE_used = True
if lastline.count("ret") == 1:
if line.startswith("?ZeroArray1@@YAXXZ"):
size1 = int(lastline.strip().split("\t")[0], 16) + 1
SSE_used_for1 = SSE_used
SSE_used = False
if line.startswith("?ZeroArray2@@YAXXZ"):
size2 = int(lastline.strip().split("\t")[0], 16) + 1
SSE_used_for2 = SSE_used
SSE_used = False
lastline = line
last_total = total_sizes[-1]
total_sizes.append((last_total[0] + size1, last_total[1] + size2))
if size1 != size2:
last_diff = buf_size
if not terse:
print "%2d: %2d -> %2d: %+3d bytes" % (buf_size, size1, size2, size2 - size1),
print "%4.1f%%" % ((size1 - size2) * 100.0 / size1),
if SSE_used_for1:
print " SSE used for = {0};",
if SSE_used_for2:
print " SSE used for = {};",
print
total += size2 - size1
count += 1
print "Average saving from 1 to %d is %1.3f bytes," % (last_diff, total * -1.0 / last_diff),
last_diff_totals = total_sizes[last_diff]
print "%1.2f%%" % ((last_diff_totals[0] - last_diff_totals[1]) * 100.0 / last_diff_totals[0])
if not terse:
print
if not terse:
print
|
yt/units/unit_object.py | Xarthisius/yt | 360 | 11189362 |
from unyt.unit_object import *
|
node_modules/python-shell/test/python/exit-code.py | brenocg29/TP1RedesInteligentes | 1,869 | 11189386 |
import sys
exit_code = int(sys.argv[1]) if len(sys.argv) > 1 else 0
sys.exit(exit_code)
|
chainer_chemistry/dataset/splitters/stratified_splitter.py | pfnet/chainerchem | 184 | 11189410 |
import numpy
import pandas
from chainer_chemistry.dataset.splitters.base_splitter import BaseSplitter
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
def _approximate_mode(class_counts, n_draws):
"""Referred scikit-learn, https://git.io/fPMmB"""
n_class = len(class_counts)
continuous = class_counts * n_draws / class_counts.sum()
floored = numpy.floor(continuous)
assert n_draws // n_class == floored.sum() // n_class
n_remainder = int(n_draws - floored.sum())
remainder = continuous - floored
inds = numpy.argsort(remainder)[::-1]
inds = inds[:n_remainder]
floored[inds] += 1
assert n_draws == floored.sum()
return floored.astype(int)
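# Worked example (illustrative, not in the original file): with
# class_counts = numpy.array([4, 3, 3]) and n_draws = 5, `continuous` is
# [2.0, 1.5, 1.5] and `floored` is [2, 1, 1]; the single remaining draw goes to
# one of the classes with the largest fractional remainder, giving [2, 1, 2],
# so the 5 draws approximately preserve the 4:3:3 class ratio.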
class StratifiedSplitter(BaseSplitter):
"""Class for doing stratified data splits."""
def _split(self, dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1,
labels=None, **kwargs):
numpy.testing.assert_almost_equal(frac_train + frac_valid + frac_test,
1.)
seed = kwargs.get('seed', None)
label_axis = kwargs.get('label_axis', -1)
task_index = kwargs.get('task_index', 0)
n_bin = kwargs.get('n_bin', 10)
task_type = kwargs.get('task_type', 'auto')
if task_type not in ['classification', 'regression', 'auto']:
raise ValueError("{} is invalid. Please use 'classification', "
"'regression' or 'auto'".format(task_type))
rng = numpy.random.RandomState(seed)
if isinstance(labels, list):
labels = numpy.array(labels)
elif labels is None:
if not isinstance(dataset, NumpyTupleDataset):
raise ValueError("Please assign label dataset.")
labels = dataset.features[:, label_axis]
if labels.ndim == 1:
labels = labels
else:
labels = labels[:, task_index]
if task_type == 'auto':
if labels.dtype.kind == 'i':
task_type = 'classification'
elif labels.dtype.kind == 'f':
task_type = 'regression'
else:
raise ValueError
if task_type == 'classification':
classes, labels = numpy.unique(labels, return_inverse=True)
elif task_type == 'regression':
classes = numpy.arange(n_bin)
labels = pandas.qcut(labels, n_bin, labels=False)
else:
raise ValueError
n_classes = classes.shape[0]
n_total_valid = int(numpy.floor(frac_valid * len(dataset)))
n_total_test = int(numpy.floor(frac_test * len(dataset)))
class_counts = numpy.bincount(labels)
class_indices = numpy.split(numpy.argsort(labels,
kind='mergesort'),
numpy.cumsum(class_counts)[:-1])
# n_total_train is the remainder: n - n_total_valid - n_total_test
n_valid_samples = _approximate_mode(class_counts, n_total_valid)
class_counts = class_counts - n_valid_samples
n_test_samples = _approximate_mode(class_counts, n_total_test)
train_index = []
valid_index = []
test_index = []
for i in range(n_classes):
n_valid = n_valid_samples[i]
n_test = n_test_samples[i]
perm = rng.permutation(len(class_indices[i]))
class_perm_index = class_indices[i][perm]
class_valid_index = class_perm_index[:n_valid]
class_test_index = class_perm_index[n_valid:n_valid+n_test]
class_train_index = class_perm_index[n_valid+n_test:]
train_index.extend(class_train_index)
valid_index.extend(class_valid_index)
test_index.extend(class_test_index)
assert n_total_valid == len(valid_index)
assert n_total_test == len(test_index)
return numpy.array(train_index), numpy.array(valid_index),\
numpy.array(test_index),
def train_valid_test_split(self, dataset, labels=None, label_axis=-1,
task_index=0, frac_train=0.8, frac_valid=0.1,
frac_test=0.1, converter=None,
return_index=True, seed=None, task_type='auto',
n_bin=10, **kwargs):
"""Split dataset into train, valid and test set.
Split indices are generated by stratified splitting of labels.
Args:
dataset(NumpyTupleDataset, numpy.ndarray):
Dataset.
labels(numpy.ndarray):
Target label. If `None`, this function assumes that dataset is
an instance of `NumpyTupleDataset`.
label_axis(int):
Dataset feature axis in NumpyTupleDataset.
task_index(int):
Target task index in dataset for stratification.
seed (int):
Random seed.
frac_train(float):
Fraction of dataset put into training data.
frac_valid(float):
Fraction of dataset put into validation data.
frac_test(float):
Fraction of dataset put into test data.
return_index(bool):
If `True`, this function returns only indices. If `False`, this
function returns the split dataset.
Returns:
SplittedDataset(tuple):
split dataset or indices
.. admonition:: Example
>>> from chainer_chemistry.datasets import NumpyTupleDataset
>>> from chainer_chemistry.dataset.splitters import StratifiedSplitter # NOQA
>>>
>>> a = numpy.random.random((10, 10))
>>> b = numpy.random.random((10, 8))
>>> c = numpy.random.random((10, 1))
>>> d = NumpyTupleDataset(a, b, c)
>>> splitter = StratifiedSplitter()
>>> train, valid, test = splitter.train_valid_test_split(
... d, return_index=False)
>>> print(len(train), len(valid), len(test))
8 1 1
"""
return super(StratifiedSplitter, self)\
.train_valid_test_split(dataset, frac_train, frac_valid, frac_test,
converter, return_index, seed=seed,
label_axis=label_axis, task_type=task_type,
task_index=task_index, n_bin=n_bin,
labels=labels, **kwargs)
def train_valid_split(self, dataset, labels=None, label_axis=-1,
task_index=0, frac_train=0.9, frac_valid=0.1,
converter=None, return_index=True, seed=None,
task_type='auto', n_bin=10, **kwargs):
"""Split dataset into train and valid set.
Split indices are generated by stratified splitting of labels.
Args:
dataset(NumpyTupleDataset, numpy.ndarray):
Dataset.
labels(numpy.ndarray):
Target label. If `None`, this function assumes that dataset is
an instance of `NumpyTupleDataset`.
label_axis(int):
Dataset feature axis in NumpyTupleDataset.
task_index(int):
Target task index in dataset for stratification.
seed (int):
Random seed.
frac_train(float):
Fraction of dataset put into training data.
frac_valid(float):
Fraction of dataset put into validation data.
return_index(bool):
If `True`, this function returns only indices. If `False`, this
function returns the split dataset.
Returns:
SplittedDataset(tuple):
split dataset or indices
.. admonition:: Example
>>> from chainer_chemistry.datasets import NumpyTupleDataset
>>> from chainer_chemistry.dataset.splitters \
>>> import StratifiedSplitter
>>> a = numpy.random.random((10, 10))
>>> b = numpy.random.random((10, 8))
>>> c = numpy.random.random((10, 1))
>>> d = NumpyTupleDataset(a, b, c)
>>> splitter = StratifiedSplitter()
>>> train, valid = splitter.train_valid_split(
... d, return_index=False)
>>> print(len(train), len(valid))
9 1
"""
return super(StratifiedSplitter, self)\
.train_valid_split(dataset, frac_train, frac_valid, converter,
return_index, seed=seed, label_axis=label_axis,
task_type=task_type, task_index=task_index,
n_bin=n_bin, labels=labels, **kwargs)
|
scripts/data/jura2bbtxt.py | wuzzh/master_thesis_code | 206 | 11189427 | """
Script for translating the dataset from J<NAME> (jura) to BBTXT format.
Since this dataset contains only cars all objects will be assigned label 1.
A BBTXT file is formatted like this:
filename label confidence xmin ymin xmax ymax
filename label confidence xmin ymin xmax ymax
filename label confidence xmin ymin xmax ymax
...
The label txt files in the jura dataset have the following form:
filename:car_ID left top right bottom bumper_center;car_ID left top right bottom bumper_center....
filename:car_ID left top right bottom bumper_center;car_ID left top right bottom bumper_center....
...
----------------------------------------------------------------------------------------------------
python jura2bbtxt.py path_labels path_images outfile.bbtxt
----------------------------------------------------------------------------------------------------
"""
__date__ = '12/06/2016'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import argparse
import os
####################################################################################################
# DEFINITIONS #
####################################################################################################
####################################################################################################
# FUNCTIONS #
####################################################################################################
def translate_file(path_file, path_images, outfile):
"""
Translates a single TXT file with jura labels and appends its output to the given BBTXT file.
Input:
path_file: Path to the TXT file to be translated
path_images: Path to the folder, which contains "Toyota" and "LiborNovak" folders
outfile: File handle of the open output BBTXT file
"""
with open(path_file, 'r') as infile:
for line in infile:
line = line.rstrip('\n')
# Get the image file path
data = line.split(':')
path_image = os.path.join(path_images, data[0])
if not os.path.isfile(path_image):
print('WARNING: Image "%s" does not exist!'%(path_image))
# Get the annotations
annotations = data[1].split(';')
for annotation in annotations:
if annotation != '':
# Get the numbers
coords = annotation.split(' ')
# All annotations are cars -> put 1 for class. For confidence we put 1 - just
# to have something
line_out = path_image + ' 1 1 '
# Bounding box
line_out += coords[1] + ' ' + coords[2] + ' ' + coords[3] + ' ' + coords[4] + '\n'
outfile.write(line_out)
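# Illustrative input/output sketch (hypothetical file name and coordinates):
# a jura label line such as
#   imgs/0001.png:3 100 50 220 170 160;7 10 20 60 80 35
# is emitted as two BBTXT lines, one per annotation, with label 1 (car) and
# confidence 1:
#   <path_images>/imgs/0001.png 1 1 100 50 220 170
#   <path_images>/imgs/0001.png 1 1 10 20 60 80
# The trailing bumper_center value (160 / 35 above) is intentionally dropped.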
def translate_files(path_labels, path_images, outfile):
"""
Runs the translation of <NAME>ny's label format into the BBTXT format. Translates all TXT
files in the path_labels folder into a single BBTXT file.
Input:
path_labels: Path to the folder with label files to be translated
path_images: Path to the folder, which contains "Toyota" and "LiborNovak" folders
outfile: File handle of the open output BBTXT file
"""
print('-- TRANSLATING JIRI TREFNY\'S ANNOTATION TO BBTXT')
# Get the content of the path_labels directory
txt_names = [f for f in os.listdir(path_labels) if os.path.splitext(f)[1] == '.txt']
for filename in txt_names:
print('-- Processing: ' + filename)
translate_file(os.path.join(path_labels, filename), path_images, outfile)
print('-- TRANSLATION DONE')
####################################################################################################
# MAIN #
####################################################################################################
def check_path(path, is_folder=False):
"""
Checks if the given path exists.
Input:
path: Path to be checked
is_folder: True if the checked path is a folder
Returns:
True if the given path exists
"""
if not os.path.exists(path) or (not is_folder and not os.path.isfile(path)):
print('ERROR: Path "%s" does not exist!'%(path))
return False
return True
def parse_arguments():
"""
Parse input options of the script.
"""
parser = argparse.ArgumentParser(description='Convert KITTI label files into BBTXT.')
parser.add_argument('path_labels', metavar='path_labels', type=str,
help='Path to the folder with label files to be translated (all .txt files'\
' from this folder will be loaded)')
parser.add_argument('path_images', metavar='path_images', type=str,
help='Path to the folder, which contains "Toyota" and "LiborNovak" folders')
parser.add_argument('outfile', metavar='path_outfile', type=argparse.FileType('w'),
help='Path to the output BBTXT file (including the extension)')
args = parser.parse_args()
if not check_path(args.path_labels, True) or not check_path(args.path_images, True):
parser.print_help()
exit(1)
return args
def main():
args = parse_arguments()
translate_files(args.path_labels, args.path_images, args.outfile)
args.outfile.close()
if __name__ == '__main__':
main()
|
examples/numbersonly.py | shmuelamar/cbox | 164 | 11189456 | #!/usr/bin/env python3
import cbox
@cbox.stream()
def numbersonly(line):
"""returns the lines containing only numbers. bad lines reported to stderr.
if any bad line is detected, exits with exitcode 2.
"""
if not line.isnumeric():
raise ValueError('{} is not a number'.format(line))
return line
if __name__ == '__main__':
cbox.main(numbersonly)
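# Hypothetical invocation sketch (assuming cbox's default line-streaming CLI
# behaves as the docstring above describes):
#   $ printf '123\nabc\n42\n' | python numbersonly.py
#   123
#   42
# The "abc is not a number" message is written to stderr and, because at least
# one line failed, the script exits with the non-zero code mentioned above (2).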
|
posthog/migrations/0194_set_property_type_for_time.py | dorucioclea/posthog | 7,409 | 11189480 |
# Generated by Django 3.2.5 on 2021-12-22 09:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("posthog", "0193_auto_20211222_0912"),
]
operations = [
migrations.RunSQL(
sql="""
update posthog_propertydefinition
set property_type = 'DateTime', property_type_format='unix_timestamp'
where name = '$time'
""",
reverse_sql="""
update posthog_propertydefinition
set property_type = null, property_type_format=null
where name = '$time'
""",
)
]
|
texthero/representation.py | gagandeepreehal/texthero | 2,559 | 11189502 |
"""
Map words into vectors using different algorithms such as
TF-IDF, word2vec or GloVe.
"""
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, NMF
from sklearn.cluster import KMeans, DBSCAN, MeanShift
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize as sklearn_normalize
from scipy.sparse import coo_matrix
from typing import Optional, Union, Any
from texthero._types import (
TextSeries,
TokenSeries,
VectorSeries,
DataFrame,
InputSeries,
)
from texthero import preprocessing
import logging
import warnings
# from texthero import pandas_ as pd_
"""
Helper
"""
# Warning message for not-tokenized inputs
_not_tokenized_warning_message = (
"It seems like the given Pandas Series s is not tokenized. This"
" function will tokenize it automatically using hero.tokenize(s)"
" first. You should consider tokenizing it yourself first with"
" hero.tokenize(s) in the future."
)
"""
Vectorization
"""
@InputSeries([TokenSeries, TextSeries])
def count(
s: Union[TokenSeries, TextSeries],
max_features: Optional[int] = None,
min_df=1,
max_df=1.0,
binary=False,
) -> pd.DataFrame:
"""
Represent a text-based Pandas Series using count.
Rows of the returned DataFrame represent documents whereas
columns are terms. The value in a document-term cell is
the number of times the term appears in that document. The output is sparse.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before count is calculated.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default=None
Maximum number of features to keep. Will keep all features if set to
None.
min_df : float in range [0.0, 1.0] or int, optional, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents,
integer absolute counts.
max_df : float in range [0.0, 1.0] or int, optional, default=1.0
Ignore terms that have a document frequency (number of documents they
appear in) frequency strictly higher than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
binary : bool, optional, default=False
If True, all non zero counts are set to 1.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Sentence one", "Sentence two"]).pipe(hero.tokenize)
>>> hero.count(s) # doctest: +SKIP
Sentence one two
0 1 1 0
1 1 0 1
See Also
--------
TODO add tutorial link
"""
# TODO. Can be rewritten without sklearn.
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tf = CountVectorizer(
max_features=max_features,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
min_df=min_df,
max_df=max_df,
binary=binary,
)
tf_vectors_csr = tf.fit_transform(s)
return pd.DataFrame.sparse.from_spmatrix(
tf_vectors_csr, s.index, tf.get_feature_names()
)
def term_frequency(
s: pd.Series, max_features: Optional[int] = None, min_df=1, max_df=1.0,
) -> pd.DataFrame:
"""
Represent a text-based Pandas Series using Term Frequency.
Rows of the returned DataFrame represent documents whereas columns are
terms. The value in the cell document-term is the frequency of the
term in this document. The output is sparse.
TODO add tutorial link
The input Series should already be tokenized. If not, it will
be tokenized before term_frequency is calculated.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default=None
Maximum number of features to keep. Will keep all features if set to
None.
min_df : float in range [0.0, 1.0] or int, optional, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents,
integer absolute counts.
max_df : float in range [0.0, 1.0] or int, optional, default=1.0
Ignore terms that have a document frequency (number of documents they
appear in) frequency strictly higher than the given threshold.
If float, the parameter represents a proportion of documents, integer
absolute counts.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Text Text of doc one", "Text of of doc two", "Aha hi bnd one"]).pipe(hero.tokenize)
>>> hero.term_frequency(s) # doctest: +SKIP
term_frequency
Aha Text bnd doc hi of one two
0 0.00 0.4 0.00 0.2 0.00 0.2 0.20 0.0
1 0.00 0.2 0.00 0.2 0.00 0.4 0.00 0.2
2 0.25 0.0 0.25 0.0 0.25 0.0 0.25 0.0
See Also
--------
TODO add tutorial link
"""
# Term frequency is just the word counts for each document
# with each document divided by the number of terms in the
# document. That's just l1 normalization!
s_term_frequency = s.pipe(
count, max_features=max_features, min_df=min_df, max_df=max_df
).pipe(normalize, norm="l1")
return s_term_frequency
def tfidf(s: pd.Series, max_features=None, min_df=1, max_df=1.0,) -> pd.DataFrame:
"""
Represent a text-based Pandas Series using TF-IDF.
Rows of the returned DataFrame represent documents whereas columns are
terms. The value in the cell document-term is the tfidf-value of the
term in this document. The output is sparse.
*Term Frequency - Inverse Document Frequency (TF-IDF)* is a formula to
calculate the _relative importance_ of the words in a document, taking
into account the words' occurences in other documents. It consists of
two parts:
The *term frequency (tf)* tells us how frequently a term is present
in a document, so tf(document d, term t) = number of times t appears
in d.
The *inverse document frequency (idf)* measures how _important_ or
_characteristic_ a term is among the whole corpus (i.e. among all
documents). Thus, idf(term t) = log((1 + number of documents) /
(1 + number of documents where t is present)) + 1.
Finally, tf-idf(document d, term t) = tf(d, t) * idf(t).
Different from the `sklearn-implementation of tfidf
<https://scikit-learn.org/stable/modules/generated/sklearn.feature_
extraction.text.TfidfVectorizer.html>`, this function does *not*
normalize the output in any way, so the result is exactly what you
get applying the formula described above.
The input Series should already be tokenized. If not, it will
be tokenized before tfidf is calculated.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default=None
If not None, only the max_features most frequent tokens are used.
min_df : float in range [0.0, 1.0] or int, optional, default=1
When building the vocabulary ignore terms that have a document
frequency (number of documents they appear in) strictly
lower than the given threshold.
If float, the parameter represents a proportion of documents,
integer absolute counts.
max_df : float in range [0.0, 1.0] or int, default=1.0
Ignore terms that have a document frequency (number of documents they
appear in) frequency strictly higher than the given threshold.
This arguments basically permits to remove corpus-specific stop
words. If float, the parameter represents a proportion of documents,
integer absolute counts.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Hi Bye", "Test Bye Bye"]).pipe(hero.tokenize)
>>> hero.tfidf(s) # doctest: +SKIP
Bye Hi Test
0 1.0 1.405465 0.000000
1 2.0 0.000000 1.405465
See Also
--------
`TF-IDF on Wikipedia <https://en.wikipedia.org/wiki/Tf-idf>`_
TODO add tutorial link
"""
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tfidf = TfidfVectorizer(
use_idf=True,
max_features=max_features,
min_df=min_df,
max_df=max_df,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
norm=None, # Disable l1/l2 normalization.
)
tfidf_vectors_csr = tfidf.fit_transform(s)
return pd.DataFrame.sparse.from_spmatrix(
tfidf_vectors_csr, s.index, tfidf.get_feature_names()
)
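# Sanity-check sketch for the doctest above (added for illustration): with the
# two documents ["Hi", "Bye"] and ["Test", "Bye", "Bye"],
#   idf("Bye") = log((1 + 2) / (1 + 2)) + 1 = 1.0
#   idf("Hi")  = log((1 + 2) / (1 + 1)) + 1 ~= 1.405465
# so tfidf("Bye", doc 1) = 2 * 1.0 = 2.0 and tfidf("Hi", doc 0) = 1 * 1.405465,
# matching the values shown in the example.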
"""
Dimensionality reduction
"""
def pca(
input_matrix: Union[pd.Series, pd.DataFrame], n_components=2, random_state=None
) -> pd.Series:
"""
Perform principal component analysis on the given input.
Principal Component Analysis (PCA) is a statistical method that is
used to reveal where the variance in a dataset comes from. For
textual data, one could for example first represent a Series of
documents using :meth:`texthero.representation.tfidf` to get a vector
representation of each document. Then, PCA can generate new vectors
from the tfidf representation that showcase the differences among
the documents most strongly in fewer dimensions.
For example, the tfidf vectors will have length 100 if hero.tfidf was
called on a large corpus with max_features=100. Visualizing 100
dimensions is hard! Using PCA with n_components=3, every document will
now get a vector of length 3, and the vectors will be chosen so that
the document differences are easily visible. The corpus can now be
visualized in 3D and we can get a good first view of the data!
In general, *pca* should be called after the text has already been
represented to a matrix form.
PCA cannot directly handle sparse input, so when calling pca on a
sparse DataFrame, the input has to be expanded which can lead to
memory problems with big datasets.
Parameters
----------
input_matrix : Pandas Series (VectorSeries) or DataFrame
n_components : int or str, optional, default=2
Number of components to keep (dimensionality of output vectors).
If n_components is not set or None, all components are kept.
If set to "mle", the number of components is
automatically estimated.
random_state : int, optional, default=None
Pass an int for reproducible results across multiple function calls.
Returns
-------
Pandas Series with the vector calculated by PCA for the document in
every cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football is great",
... "Hi, I'm Texthero, who are you? Tell me!"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.tfidf)
>>> # Attention, your results might differ due to
>>> # the randomness in PCA!
>>> hero.pca(s) # doctest: +SKIP
document
0 [1.5713577608669735, 1.1102230246251565e-16]
1 [-1.5713577608669729, 1.1102230246251568e-16]
dtype: object
See also
--------
`PCA on Wikipedia
<https://en.wikipedia.org/wiki/Principal_component_analysis>`_
"""
# Default n_components=2 to enable users to easily plot the results.
pca = PCA(n_components=n_components, random_state=random_state, copy=False)
if isinstance(input_matrix, pd.DataFrame):
values = input_matrix.values
else:
values = list(input_matrix)
return pd.Series(list(pca.fit_transform(values)), index=input_matrix.index)
def nmf(
input_matrix: Union[pd.Series, pd.DataFrame], n_components=2, random_state=None
) -> pd.Series:
"""
Performs non-negative matrix factorization on the given input.
Non-Negative Matrix Factorization (NMF) is often used in
natural language processing to find clusters of similar
texts (e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; see the example below).
Given a document-term matrix (so in
texthero usually a Series after applying
:meth:`texthero.representation.tfidf` or some other first
representation function that assigns a scalar (a weight) to each
word), NMF will find n_components many topics (clusters) and
calculate a vector for each document that places it correctly among
the topics.
NMF can directly handle sparse input, so when calling nmf on a
sparse DataFrame, the advantage of sparseness is kept.
Parameters
----------
input_matrix : Pandas Series (VectorSeries) or DataFrame
n_components : int, optional, default=2
Number of components to keep (dimensionality of output vectors).
If n_components is not set or None, all components are kept.
random_state : int, optional, default=None
Pass an int for reproducible results across multiple function calls.
Returns
-------
Pandas Series with the vector calculated by NMF for the document in
every cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer",
... "Music, Violin, Orchestra", "Football, Music"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(
... hero.term_frequency
... )
>>> hero.nmf(s) # doctest: +SKIP
0 [0.9080190347553924, 0.0]
1 [0.0, 0.771931061231598]
2 [0.3725409073202516, 0.31656880119331093]
dtype: object
>>> # As we can see, the third document, which
>>> # is a mix of sports and music, is placed
>>> # between the two axes (the topics) while
>>> # the other documents are placed right on
>>> # one topic axis each.
See also
--------
`NMF on Wikipedia
<https://en.wikipedia.org/wiki/Non-negative_matrix_factorization>`_
"""
# Default n_components=2 to enable users to easily plot the results.
nmf = NMF(n_components=n_components, init="random", random_state=random_state,)
if isinstance(input_matrix, pd.DataFrame):
input_matrix_coo = input_matrix.sparse.to_coo()
input_matrix_for_vectorization = input_matrix_coo.astype("float64")
else:
input_matrix_for_vectorization = list(input_matrix)
return pd.Series(
list(nmf.fit_transform(input_matrix_for_vectorization)),
index=input_matrix.index,
)
def tsne(
input_matrix: Union[pd.Series, pd.DataFrame],
n_components=2,
perplexity=30.0,
learning_rate=200.0,
n_iter=1000,
random_state=None,
n_jobs=-1,
) -> VectorSeries:
"""
Performs t-Distributed Stochastic Neighbor Embedding on the given
input.
t-distributed Stochastic Neighbor Embedding (t-SNE) is
a machine learning algorithm used to visualize high-dimensional data
in fewer dimensions. In natural language processing, the
high-dimensional data is usually a document-term matrix (so in
texthero usually a Series after applying
:meth:`texthero.representation.tfidf` or some other first
representation function that assigns a scalar (a weight) to each word)
that is hard to visualize as there might be many terms. With t-SNE,
every document gets a new, low-dimensional (n_components entries)
vector in such a way that the differences / similarities between
documents are preserved.
T-SNE can directly handle sparse input, so when calling tsne on a
sparse DataFrame, the advantage of sparseness is kept.
Parameters
----------
input_matrix : Pandas Series (VectorSeries) or DataFrame
n_components : int, optional, default=2
Number of components to keep (dimensionality of output vectors).
If n_components is not set or None, all components are kept.
perplexity : float, optional, default=30
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. Different values can result in significanlty
different results.
learning_rate : float, optional, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional, default=1000
Maximum number of iterations for the optimization. Should be at
least 250.
random_state : int, optional, default=None
Determines the random number generator. Pass an int for reproducible
results across multiple function calls.
n_jobs : int, optional, default=-1
The number of parallel jobs to run for neighbors search.
``-1`` means using all processors.
Returns
-------
Pandas Series with the vector calculated by t-SNE for the document in
every cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer",
... "Music, Violin, Orchestra", "Football, Music"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.term_frequency)
>>> hero.tsne(s, random_state=42) # doctest: +SKIP
0 [-18.833383560180664, -276.800537109375]
1 [-210.60179138183594, 143.00535583496094]
2 [-478.27984619140625, -232.97410583496094]
dtype: object
See also
--------
`t-SNE on Wikipedia <https://en.wikipedia.org/wiki/T-distributed_
stochastic_neighbor_embedding>`_
"""
# Default n_components=2 to enable users to easily plot the results.
tsne = TSNE(
n_components=n_components,
perplexity=perplexity,
learning_rate=learning_rate,
n_iter=n_iter,
random_state=random_state,
n_jobs=n_jobs,
)
if isinstance(input_matrix, pd.DataFrame):
input_matrix_coo = input_matrix.sparse.to_coo()
input_matrix_for_vectorization = input_matrix_coo.astype("float64")
else:
input_matrix_for_vectorization = list(input_matrix)
return pd.Series(
list(tsne.fit_transform(input_matrix_for_vectorization)),
index=input_matrix.index,
)
"""
Clustering
"""
@InputSeries([VectorSeries, DataFrame])
def kmeans(
input_matrix: Union[pd.Series, pd.DataFrame],
n_clusters=5,
n_init=10,
max_iter=300,
random_state=None,
algorithm="auto",
) -> VectorSeries:
"""
Performs K-means clustering algorithm on the given input.
K-means clustering is used in natural language processing
to separate texts into k clusters (groups)
(e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; the K-means algorithm uses this
to separate them into two clusters).
Given a document-term matrix (so in
texthero usually a Series after applying
:meth:`texthero.representation.tfidf` or some other first
representation function that assigns a scalar (a weight) to each
word), K-means will find k topics (clusters) and assign a topic to
each document.
Kmeans can directly handle sparse input, so when calling kmeans on a
sparse DataFrame, the advantage of sparseness is kept.
Parameters
----------
input_matrix: Pandas Series (VectorSeries) or DataFrame
n_clusters: int, optional, default=5
The number of clusters to separate the data into.
n_init : int, optional, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, optional, default=300
Maximum number of iterations of the k-means algorithm for a
single run.
random_state : int, optional, default=None
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
algorithm : {"auto", "full", "elkan"}, optional, default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient on data with well-defined
clusters, by using the triangle inequality. However it's more memory
intensive.
Returns
-------
Pandas Series with the cluster the document was assigned to in each
cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer",
... "music, violin, orchestra",
... "football, fun, sports", "music, fun, guitar"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(
... hero.term_frequency
... )
>>> hero.kmeans(s, n_clusters=2, random_state=42) # doctest: +SKIP
0 1
1 0
2 1
3 0
dtype: category
Categories (2, int64): [0, 1]
>>> # As we can see, the documents are correctly
>>> # separated into topics / clusters by the algorithm.
See also
--------
`kmeans on Wikipedia
<https://en.wikipedia.org/wiki/K-means_clustering>`_
"""
if isinstance(input_matrix, pd.DataFrame):
input_matrix_coo = input_matrix.sparse.to_coo()
input_matrix_for_vectorization = input_matrix_coo.astype("float64")
else:
input_matrix_for_vectorization = list(input_matrix)
kmeans = KMeans(
n_clusters=n_clusters,
n_init=n_init,
max_iter=max_iter,
random_state=random_state,
copy_x=True,
algorithm=algorithm,
).fit(input_matrix_for_vectorization)
return pd.Series(
kmeans.predict(input_matrix_for_vectorization), index=input_matrix.index
).astype("category")
@InputSeries([VectorSeries, DataFrame])
def dbscan(
input_matrix: Union[pd.Series, pd.DataFrame],
eps=0.5,
min_samples=5,
metric="euclidean",
metric_params=None,
leaf_size=30,
n_jobs=-1,
) -> VectorSeries:
"""
Perform DBSCAN clustering on the given input.
Density-based spatial clustering of applications with noise (DBSCAN)
is used in natural language processing
to separate texts into clusters (groups)
(e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; the DBSCAN algorithm uses this
to separate them into clusters). It chooses the
number of clusters on its own.
Given a document-term matrix (so in
texthero usually a Series after applying
:meth:`texthero.representation.tfidf` or some other first
representation function that assigns a scalar (a weight) to each
word), DBSCAN will find topics (clusters) and assign a topic to
each document.
DBSCAN can directly handle sparse input, so when calling dbscan on a
sparse DataFrame, the advantage of sparseness is kept.
Parameters
----------
input_matrix: Pandas Series (VectorSeries) or DataFrame
eps : float, optional, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data
set and distance function.
min_samples : int, optional, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string or callable, optional, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. Use
`sorted(sklearn.neighbors.VALID_METRICS['brute'])`
to see valid options.
metric_params : dict, optional, default=None
Additional keyword arguments for the metric function.
leaf_size : int, optional, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
n_jobs : int, optional, default=-1
The number of parallel jobs to run.
``-1`` means using all processors.
Returns
-------
Pandas Series with the cluster the document was assigned to in each
cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Football, Sports, Soccer",
... "music, violin, orchestra",
... "football, fun, sports", "music, enjoy, guitar"])
>>> s = s.pipe(hero.clean).pipe(hero.tokenize).pipe(hero.tfidf)
>>> hero.dbscan(s, min_samples=1, eps=4)
0 0
1 1
2 0
3 1
dtype: category
Categories (2, int64): [0, 1]
>>> # As we can see, the documents are correctly
>>> # separated into topics / clusters by the algorithm
>>> # and we didn't even have to say how many topics there are!
See also
--------
`DBSCAN on Wikipedia <https://en.wikipedia.org/wiki/DBSCAN>`_
"""
if isinstance(input_matrix, pd.DataFrame):
input_matrix_coo = input_matrix.sparse.to_coo()
input_matrix_for_vectorization = input_matrix_coo.astype("float64")
else:
input_matrix_for_vectorization = list(input_matrix)
return pd.Series(
DBSCAN(
eps=eps,
min_samples=min_samples,
metric=metric,
metric_params=metric_params,
leaf_size=leaf_size,
n_jobs=n_jobs,
).fit_predict(input_matrix_for_vectorization),
index=input_matrix.index,
).astype("category")
@InputSeries([VectorSeries, DataFrame])
def meanshift(
input_matrix: Union[pd.Series, pd.DataFrame],
bandwidth=None,
bin_seeding=False,
min_bin_freq=1,
cluster_all=True,
n_jobs=-1,
max_iter=300,
) -> VectorSeries:
"""
Perform mean shift clustering on the given input.
Mean shift clustering
is used in natural language processing
to separate texts into clusters (groups)
(e.g. some texts in a corpus might be about sports
and some about music, so they will differ in the usage
of technical terms; the mean shift algorithm uses this
to separate them into clusters). It chooses the
number of clusters on its own.
Given a document-term matrix (so in
texthero usually a Series after applying
:meth:`texthero.representation.tfidf` or some other first
representation function that assigns a scalar (a weight) to each
word), mean shift will find topics (clusters) and assign a topic
to each document.
    Meanshift cannot directly handle sparse input, so when calling
    meanshift on a sparse DataFrame, the input has to be expanded,
    which can lead to memory problems with big datasets.
Parameters
----------
input_matrix: Pandas Series (VectorSeries) or DataFrame
bandwidth : float, optional, default=None
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated.
Estimating takes time at least quadratic in the number of samples
(i.e. documents). For large datasets, it’s wise to set the
bandwidth to a small value.
bin_seeding : bool, optional, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will
speed up the algorithm because fewer seeds will be initialized.
min_bin_freq : int, optional, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, optional, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int, optional, default=-1
The number of jobs to use for the computation.
``-1`` means using all processors
max_iter : int, optional, default=300
        Maximum number of iterations, per seed point, before the clustering
        operation terminates (for that seed point), if it has not converged
        yet.
Returns
-------
Pandas Series with the cluster the document was assigned to in each
cell.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]])
>>> hero.meanshift(s, bandwidth=2)
0 1
1 1
2 1
3 0
4 0
5 0
dtype: category
Categories (2, int64): [0, 1]
See also
--------
`Mean-Shift on Wikipedia <https://en.wikipedia.org/wiki/Mean_shift>`_
"""
if isinstance(input_matrix, pd.DataFrame):
vectors = input_matrix.values
else:
vectors = list(input_matrix)
return pd.Series(
MeanShift(
bandwidth=bandwidth,
bin_seeding=bin_seeding,
min_bin_freq=min_bin_freq,
cluster_all=cluster_all,
n_jobs=n_jobs,
max_iter=max_iter,
).fit_predict(vectors),
index=input_matrix.index,
).astype("category")
"""
Topic modelling
"""
# TODO.
"""
Normalization.
"""
def normalize(input_matrix: Union[pd.DataFrame, pd.Series], norm="l2") -> pd.Series:
"""
Normalize every cell in a Pandas Series.
Input can be VectorSeries or DataFrames. For sparse DataFrames,
the sparseness is kept.
Parameters
----------
input_matrix: Pandas Series (VectorSeries) or DataFrame
norm: str, optional, default="l2"
One of "l1", "l2", or "max". The norm that is used.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> col = ["a","b","c", "d"]
>>> s = pd.DataFrame([[1, 2, 3, 4],[4, 2, 7, 5],[2, 2, 3, 5],[1, 2, 9, 8]],
... columns=col).astype("Sparse")
>>> hero.normalize(s, norm="max") # doctest: +SKIP
a b c d
0 0.250000 0.500000 0.75 1.000000
1 0.571429 0.285714 1.00 0.714286
2 0.400000 0.400000 0.60 1.000000
3 0.111111 0.222222 1.00 0.888889
See Also
--------
DataFrame link TODO add link to tutorial
`Norm on Wikipedia
<https://en.wikipedia.org/wiki/Norm_(mathematics)>`_
"""
isDataFrame = isinstance(input_matrix, pd.DataFrame)
if isDataFrame:
input_matrix_coo = input_matrix.sparse.to_coo()
input_matrix_for_vectorization = input_matrix_coo.astype("float64")
else:
input_matrix_for_vectorization = list(input_matrix)
result = sklearn_normalize(
input_matrix_for_vectorization, norm=norm
) # Can handle sparse input.
if isDataFrame:
return pd.DataFrame.sparse.from_spmatrix(
result, input_matrix.index, input_matrix.columns
)
else:
return pd.Series(list(result), index=input_matrix.index)
|
PhysicsTools/RecoAlgos/python/TrackFullCloneSelector_cfi.py | ckamtsikis/cmssw | 852 | 11189516 | import FWCore.ParameterSet.Config as cms
trackFullCloneSelector = cms.EDFilter("TrackFullCloneSelector",
copyExtras = cms.untracked.bool(False), ## copies also extras and rechits on RECO
src = cms.InputTag("ctfWithMaterialTracks"),
cut = cms.string('(numberOfValidHits >= 8) & (normalizedChi2 < 5)'),
# don't set this to true on AOD!
copyTrajectories = cms.untracked.bool(False)
)
|
third_party/tests/Rsd/Processor/Tools/KanataConverter/RSD_Event.py | little-blue/Surelog | 670 | 11189530 | # -*- coding: utf-8 -*-
class RSD_Event(object):
""" Parser event types """
INIT = 0
STAGE_BEGIN = 1
STAGE_END = 2
STALL_BEGIN = 3
STALL_END = 4
RETIRE = 5
FLUSH = 6
LABEL = 7
|
tests/zeus/api/resources/test_build_file_coverage.py | conrad-kronos/zeus | 221 | 11189549 | <reponame>conrad-kronos/zeus
# flake8 is breaking with no empty lines up here NOQA
def test_build_file_coverage_list(
client,
default_login,
default_repo,
default_build,
default_filecoverage,
default_repo_access,
):
resp = client.get(
"/api/repos/{}/builds/{}/file-coverage".format(
default_repo.get_full_name(), default_build.number
)
)
assert resp.status_code == 200
data = resp.json()
assert len(data) == 1
assert data[0]["filename"] == str(default_filecoverage.filename)
def test_build_file_coverage_list_filter_diff_only(
client,
default_login,
default_repo,
default_build,
default_filecoverage,
default_repo_access,
):
resp = client.get(
"/api/repos/{}/builds/{}/file-coverage?diff_only=true".format(
default_repo.get_full_name(), default_build.number
)
)
assert resp.status_code == 200
data = resp.json()
assert len(data) == 0
resp = client.get(
"/api/repos/{}/builds/{}/file-coverage?diff_only=false".format(
default_repo.get_full_name(), default_build.number
)
)
data = resp.json()
assert len(data) == 1
assert data[0]["filename"] == str(default_filecoverage.filename)
|
utils/tensor_utils_test.py | garyxcheng/federated | 330 | 11189553 | <reponame>garyxcheng/federated<gh_stars>100-1000
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from utils import tensor_utils
class TensorUtilsTest(tf.test.TestCase):
def expect_ok_graph_mode(self, structure):
with tf.Graph().as_default():
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
with self.session() as sess:
result, error = sess.run((result, error))
try:
tf.nest.map_structure(np.testing.assert_allclose, result, structure)
except AssertionError:
self.fail('Expected to get input {} back, but instead got {}'.format(
structure, result))
self.assertEqual(error, 0)
def expect_ok_eager_mode(self, structure):
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
try:
tf.nest.map_structure(np.testing.assert_allclose, result, structure)
except AssertionError:
self.fail('Expected to get input {} back, but instead got {}'.format(
structure, result))
self.assertEqual(error, 0)
def expect_zeros_graph_mode(self, structure, expected):
with tf.Graph().as_default():
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
with self.session() as sess:
result, error = sess.run((result, error))
try:
tf.nest.map_structure(np.testing.assert_allclose, result, expected)
except AssertionError:
self.fail('Expected to get zeros, but instead got {}'.format(result))
self.assertEqual(error, 1)
def expect_zeros_eager_mode(self, structure, expected):
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
try:
tf.nest.map_structure(np.testing.assert_allclose, result, expected)
except AssertionError:
self.fail('Expected to get zeros, but instead got {}'.format(result))
self.assertEqual(error, 1)
def test_zero_all_if_any_non_finite_graph_mode(self):
tf.config.experimental_run_functions_eagerly(False)
self.expect_ok_graph_mode([])
self.expect_ok_graph_mode([(), {}])
self.expect_ok_graph_mode(1.1)
self.expect_ok_graph_mode([1.0, 0.0])
self.expect_ok_graph_mode([1.0, 2.0, {'a': 0.0, 'b': -3.0}])
self.expect_zeros_graph_mode(np.inf, 0.0)
self.expect_zeros_graph_mode((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))
self.expect_zeros_graph_mode((1.0, (2.0, {
'a': 3.0,
'b': [[np.inf], [np.nan]]
})), (0.0, (0.0, {
'a': 0.0,
'b': [[0.0], [0.0]]
})))
def test_zero_all_if_any_non_finite_eager_mode(self):
tf.config.experimental_run_functions_eagerly(True)
self.expect_ok_eager_mode([])
self.expect_ok_eager_mode([(), {}])
self.expect_ok_eager_mode(1.1)
self.expect_ok_eager_mode([1.0, 0.0])
self.expect_ok_eager_mode([1.0, 2.0, {'a': 0.0, 'b': -3.0}])
self.expect_zeros_eager_mode(np.inf, 0.0)
self.expect_zeros_eager_mode((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))
self.expect_zeros_eager_mode((1.0, (2.0, {
'a': 3.0,
'b': [[np.inf], [np.nan]]
})), (0.0, (0.0, {
'a': 0.0,
'b': [[0.0], [0.0]]
})))
if __name__ == '__main__':
tf.test.main()
|
fexm/evalscripts/eval_seeds.py | fgsect/fexm | 105 | 11189557 | import subprocess
import sys
import matplotlib.pyplot as plt
import os
import pandas as pd
def get_file_bucket(file):
file_bucket = " ".join(str(subprocess.check_output("file {0}".format(file),
shell=True).strip()).split(":")[1].split(",")[0].strip().split(
" ")[:2])
if file_bucket[-1] == "'":
file_bucket = file_bucket[:-1]
return file_bucket
def main(seeds_dir: str):
df = pd.DataFrame([], columns=["id", "filetype", "size_bucket"])
filetypes = []
file_buckets = []
count = 0
for file_dir in os.listdir(seeds_dir):
filedir_full_path = os.path.join(seeds_dir, file_dir)
if not os.path.isdir(filedir_full_path):
continue
if not os.listdir(filedir_full_path):
continue
# for file in os.listdir(filedir_full_path):
# print("Pre-Processing file {0}".format(file))
# file_full_path = os.path.join(filedir_full_path,file)
# file_bucket = get_file_bucket(file_full_path)
# file_buckets.append(file_bucket)
filetypes.append(file_dir.split("_")[0])
plot_df = pd.DataFrame([], columns=["<1KB", "<=500KB", "<=1000KB", ">1000KB"], index=filetypes)
plot_bucket_dict = {}
for filetype in filetypes:
plot_df.loc[filetype]["<1KB"] = 0
plot_df.loc[filetype]["<=500KB"] = 0
plot_df.loc[filetype]["<=1000KB"] = 0
plot_df.loc[filetype][">1000KB"] = 0
for file_dir in os.listdir(seeds_dir):
filedir_full_path = os.path.join(seeds_dir, file_dir)
if not os.path.isdir(filedir_full_path):
continue
if not os.listdir(filedir_full_path):
continue
for file in os.listdir(filedir_full_path):
print("Processing file {0}".format(file))
file_full_path = os.path.join(filedir_full_path, file)
bucket = ""
size = os.path.getsize(file_full_path)
if size <= 1 * 1000:
bucket = "<1KB"
elif size <= 500 * 1000:
bucket = "<=500KB"
elif size <= 1000 * 1000:
bucket = "<=1000KB"
else:
bucket = ">1000KB"
df.loc[count] = [file, file_dir.split("_")[0], bucket]
count += 1
plot_df.loc[file_dir.split("_")[0]][bucket] += 1
file_bucket = get_file_bucket(file_full_path)
if not plot_bucket_dict.get(file_bucket):
plot_bucket_dict[file_bucket] = {"<1KB": 0, "<=500KB": 0, "<=1000KB": 0, ">1000KB": 0}
plot_bucket_dict[file_bucket][bucket] = 1
elif not plot_bucket_dict.get(file_bucket).get(bucket):
plot_bucket_dict[file_bucket][bucket] = 1
else:
plot_bucket_dict[file_bucket][bucket] += 1
# plot_bucket_df.loc[get_file_bucket(file_full_path)bucket] +=1
plot_bucket_df = pd.DataFrame([], columns=["<1KB", "<=500KB", "<=1000KB", ">1000KB"], index=plot_bucket_dict.keys())
for file_bucket in plot_bucket_dict.keys():
plot_bucket_df.loc[file_bucket, "<1KB"] = plot_bucket_dict[file_bucket]["<1KB"]
plot_bucket_df.loc[file_bucket, "<=500KB"] = plot_bucket_dict[file_bucket]["<=500KB"]
plot_bucket_df.loc[file_bucket, "<=1000KB"] = plot_bucket_dict[file_bucket]["<=1000KB"]
plot_bucket_df.loc[file_bucket, ">1000KB"] = plot_bucket_dict[file_bucket][">1000KB"]
plot_bucket_df.plot.barh(stacked=True, figsize=(80, 80))
plot_bucket_df.to_csv("bucket_seeds_eval.csv")
plot_df.to_csv("fileendings_seeds_eval.csv")
plt.show()
if __name__ == "__main__":
main(sys.argv[1])
|
teuthology/openstack/test/test_config.py | varshar16/teuthology | 117 | 11189564 | from teuthology.config import config
class TestOpenStack(object):
def setup(self):
self.openstack_config = config['openstack']
def test_config_clone(self):
assert 'clone' in self.openstack_config
def test_config_user_data(self):
os_type = 'rhel'
os_version = '7.0'
template_path = self.openstack_config['user-data'].format(
os_type=os_type,
os_version=os_version)
assert os_type in template_path
assert os_version in template_path
def test_config_ip(self):
assert 'ip' in self.openstack_config
def test_config_machine(self):
assert 'machine' in self.openstack_config
machine_config = self.openstack_config['machine']
assert 'disk' in machine_config
assert 'ram' in machine_config
assert 'cpus' in machine_config
def test_config_volumes(self):
assert 'volumes' in self.openstack_config
volumes_config = self.openstack_config['volumes']
assert 'count' in volumes_config
assert 'size' in volumes_config
|
playground/pythonic-doom-fire/pythonic-doom-fire/example/doom_fire_pygame.py | jpalvesl/doom-fire-algorithm | 1,227 | 11189566 | <reponame>jpalvesl/doom-fire-algorithm
from os.path import dirname, abspath, join
import pygame
import sys
doom_fire_pygame_dir = dirname(abspath(__file__))
sys.path.append(join(doom_fire_pygame_dir, '..'))
from doom_fire import DoomFire
class DoomFirePygame(DoomFire):
def render(self, ctx):
ps = self.pixel_size
for i in range(self.height):
for j in range(self.width):
pixel_index = i * self.width + j
color_intensity = self.pixels_array[pixel_index]
color = self.color_palette.get_color(color_intensity)
pixel_rect = (j * ps, i * ps, ps, ps)
pygame.draw.rect(ctx, color, pixel_rect, 0)
|
pyjobs/core/managers.py | Mdslino/PyJobs | 132 | 11189591 | from datetime import datetime, timedelta
from django.db import models
class PublicQuerySet(models.QuerySet):
def public(self):
return self.filter(public=True)
def premium(self):
return self.public().filter(premium=True)
def not_premium(self):
return self.public().filter(premium=False)
def created_in_the_last(self, days, premium=False):
if premium:
return self.filter(
premium_at__gt=datetime.today() - timedelta(days=days),
premium_at__lte=datetime.today(),
)
return self.filter(
created_at__gt=datetime.today() - timedelta(days=days),
created_at__lte=datetime.today(),
)
def search(self, term):
if not term:
return self
params = (
models.Q(title__icontains=term)
| models.Q(workplace__icontains=term)
| models.Q(description__icontains=term)
| models.Q(requirements__icontains=term)
)
return self.filter(params)
class ProfilingQuerySet(models.QuerySet):
def grade(self, skills, job_skills):
if not skills or not job_skills:
return 0
intersect = set(skills) & set(job_skills)
return (len(intersect) / len(job_skills)) * 100
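    # Example (illustrative skill lists): with skills=['python', 'django'] and
    # job_skills=['python', 'sql'] the intersection is {'python'}, so grade()
    # returns 1 / 2 * 100 = 50.0.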
|
tools/live_plot.py | francescodelduchetto/gazr | 174 | 11189610 | <filename>tools/live_plot.py
#! /usr/bin/env python
import sys
import time
from numpy import arange
import matplotlib.pyplot as plt
PLOT_WIDTH=100
plt.axis([0, PLOT_WIDTH, -90, 90])
plt.ion()
pitch = [0] * PLOT_WIDTH
yaw = [0] * PLOT_WIDTH
roll = [0] * PLOT_WIDTH
pitch_graph, yaw_graph, roll_graph = plt.plot(pitch, 'r', yaw, 'g', roll, 'b')
plt.legend([pitch_graph, yaw_graph, roll_graph], ['Pitch', 'Yaw', 'Roll'])
plt.show()
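# Input sketch (values here are made up): each stdin line is expected to be a
# Python dict literal such as {"face_0": {"pitch": 185.0, "yaw": 178.0, "roll": 2.0}};
# pitch and yaw are plotted with 180 subtracted, roll is plotted as-is.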
while True:
line = sys.stdin.readline()
data = eval(line)
if "face_0" in data:
pitch.append(data["face_0"]["pitch"]-180)
del pitch[0]
pitch_graph.set_data(arange(0, len(pitch)), pitch)
yaw.append(data["face_0"]["yaw"]-180)
del yaw[0]
yaw_graph.set_data(arange(0, len(yaw)), yaw)
roll.append(data["face_0"]["roll"])
del roll[0]
roll_graph.set_data(arange(0, len(roll)), roll)
plt.xlim([max(0,len(pitch) - PLOT_WIDTH), max(0,len(pitch) - PLOT_WIDTH) + PLOT_WIDTH])
plt.draw()
plt.pause(0.05)
else:
pitch.append(0)
yaw.append(0)
roll.append(0)
|
easyargs/decorators.py | carlwgeorge/easyargs | 160 | 11189662 | from . import parsers
import functools
import inspect
def make_easy_args(obj=None, auto_call=True):
def decorate(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
parser = parsers.create_base_parser(f)
if inspect.isfunction(f):
parsers.function_parser(f, parser)
else:
klass_instance = f()
parsers.class_parser(klass_instance, parser)
if auto_call:
return parsers.handle_parser(parser)
return parser
return decorated
if obj is not None:
return decorate(obj)
return decorate
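# Rough usage sketch (the decorated function and its arguments are hypothetical):
#
#     @make_easy_args
#     def main(filename, verbose=False):
#         ...
#
#     main()  # builds an argparse parser from the signature and dispatches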
|
neuralmonkey/attention/__init__.py | Simon-Will/neuralmonkey | 446 | 11189664 | from .feed_forward import Attention
from .coverage import CoverageAttention
from .scaled_dot_product import ScaledDotProdAttention
|
causallib/contrib/shared_sparsity_selection/__init__.py | liranszlak/causallib | 350 | 11189694 | from .shared_sparsity_selection import SharedSparsityConfounderSelection
|
tests/r/test_strike_nb.py | hajime9652/observations | 199 | 11189710 | <reponame>hajime9652/observations<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.strike_nb import strike_nb
def test_strike_nb():
"""Test module strike_nb.py by downloading
strike_nb.csv and testing shape of
extracted data has 108 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = strike_nb(test_path)
try:
assert x_train.shape == (108, 3)
except:
shutil.rmtree(test_path)
    raise
|
StackApp/env/lib/python2.7/site-packages/flask_api/exceptions.py | jonathanmusila/StackOverflow-Lite | 555 | 11189751 | <reponame>jonathanmusila/StackOverflow-Lite<filename>StackApp/env/lib/python2.7/site-packages/flask_api/exceptions.py
from __future__ import unicode_literals
from flask_api import status
class APIException(Exception):
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
detail = ''
def __init__(self, detail=None):
if detail is not None:
self.detail = detail
def __str__(self):
return self.detail
class ParseError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
detail = 'Malformed request.'
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
detail = 'Incorrect authentication credentials.'
class NotAuthenticated(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
detail = 'Authentication credentials were not provided.'
class PermissionDenied(APIException):
status_code = status.HTTP_403_FORBIDDEN
detail = 'You do not have permission to perform this action.'
class NotFound(APIException):
status_code = status.HTTP_404_NOT_FOUND
detail = 'This resource does not exist.'
# class MethodNotAllowed(APIException):
# status_code = status.HTTP_405_METHOD_NOT_ALLOWED
# detail = 'Request method "%s" not allowed.'
# def __init__(self, method, detail=None):
# self.detail = (detail or self.detail) % method
class NotAcceptable(APIException):
status_code = status.HTTP_406_NOT_ACCEPTABLE
detail = 'Could not satisfy the request Accept header.'
class UnsupportedMediaType(APIException):
status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
detail = 'Unsupported media type in the request Content-Type header.'
class Throttled(APIException):
status_code = status.HTTP_429_TOO_MANY_REQUESTS
detail = 'Request was throttled.'
# def __init__(self, wait=None, detail=None):
# if wait is None:
# self.detail = detail or self.detail
# self.wait = None
# else:
# format = (detail or self.detail) + ' ' + self.extra_detail
# self.detail = format % (wait, wait != 1 and 's' or '')
# self.wait = math.ceil(wait)
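# Usage sketch (view code is hypothetical): raising one of these inside a
# Flask API view, e.g. ``raise NotFound('No such record.')``, aborts the
# request with the matching status code (404 here), and the detail string is
# what gets reported back to the client.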
|
rpmvenv/extensions/core.py | oleynikandrey/rpmvenv | 150 | 11189834 | """Extension which accounts for the core RPM metadata fields."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from confpy.api import Configuration
from confpy.api import Namespace
from confpy.api import StringOption
from confpy.api import ListOption
from . import interface
cfg = Configuration(
core=Namespace(
description='Common core RPM metadata fields.',
name=StringOption(
description='The name of the RPM file which is generated.',
required=True,
),
version=StringOption(
description='The RPM version to build.',
required=True,
),
release=StringOption(
description=(
'The release number for the RPM. Default is 1. '
'Supports strings to let free usage of, for example, %{?dist}.'
),
default='1',
),
summary=StringOption(
description='The short package summary.',
required=False,
),
group=StringOption(
description='The RPM package group in which this package belongs.',
required=False,
),
license=StringOption(
description='The license under which the package is distributed.',
required=False,
),
url=StringOption(
description='The URL of the package source.',
required=False,
),
source=StringOption(
description='The path to the package source.',
required=False,
),
buildroot=StringOption(
description='The name of the buildroot directory to use.',
default=(
'%(mktemp -ud %{_tmppath}/%{SOURCE0}-%{version}'
'-%{release}-XXXXXX)'
),
),
buildarch=StringOption(
description='The build architecture to use.',
required=False
),
requires=ListOption(
option=StringOption(),
default=(),
description='Dependencies',
required=False
),
conflicts=ListOption(
option=StringOption(),
default=(),
description='Conflicts',
required=False
),
obsoletes=ListOption(
option=StringOption(),
default=(),
description='Obsoletes',
required=False
),
provides=ListOption(
option=StringOption(),
default=(),
description='Virtual package',
required=False
),
),
)
class Extension(interface.Extension):
"""Common core RPM metadata fields."""
name = 'core'
description = 'Complete the common core RPM metadata fields.'
version = '1.0.0'
requirements = {}
@staticmethod
def generate(config, spec):
"""Generate the core RPM package metadata."""
name = config.core.name
version = config.core.version
release = config.core.release
summary = config.core.summary
group = config.core.group
license = config.core.license
url = config.core.url
source = config.core.source
buildroot = config.core.buildroot
buildarch = config.core.buildarch
requires = tuple(config.core.requires)
conflicts = tuple(config.core.conflicts)
obsoletes = tuple(config.core.obsoletes)
provides = tuple(config.core.provides)
spec.tags['Name'] = name
spec.tags['Version'] = version
spec.tags['Release'] = release
spec.tags['BuildRoot'] = buildroot
if requires:
spec.tags['Requires'] = ', '.join(requires)
if conflicts:
spec.tags['Conflicts'] = ', '.join(conflicts)
if obsoletes:
spec.tags['Obsoletes'] = ', '.join(obsoletes)
if provides:
spec.tags['Provides'] = ', '.join(provides)
if buildarch:
spec.tags["BuildArch"] = buildarch
if summary:
spec.tags['Summary'] = summary
if group:
spec.tags['Group'] = group
if license:
spec.tags['License'] = license
if url:
spec.tags['Url'] = url
if source:
spec.tags['Source0'] = source
spec.blocks.prep.append('rm -rf %{buildroot}/*')
spec.blocks.clean.append('rm -rf %{buildroot}')
return spec
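# Illustration (package name and version are hypothetical): with
# core.name='example-pkg', core.version='1.2.3' and defaults elsewhere,
# generate() sets spec.tags['Name']='example-pkg', ['Version']='1.2.3',
# ['Release']='1' plus the default BuildRoot, and appends the rm -rf lines
# to the %prep and %clean blocks.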
|
topik/fileio/base_output.py | ContinuumIO/topik | 104 | 11189838 | <filename>topik/fileio/base_output.py<gh_stars>100-1000
from abc import ABCMeta, abstractmethod
from six import with_metaclass
import jsonpickle
from ._registry import registered_outputs
class OutputInterface(with_metaclass(ABCMeta)):
def __init__(self, *args, **kwargs):
super(OutputInterface, self).__init__( *args, **kwargs)
# should be an iterable with each member having (id, text)
self.corpus = None
# should be a dictionary-like structure, with string ids for tokenizer used and parameters
# passed and dictionaries mapping doc id to list of tokens
self.tokenized_corpora = None
# should be a dictionary-like structure, with string ids for vectorizer used and parameters
# passed and dictionaries mapping doc id to list of tokens
self.vectorized_corpora = None
# should be a dictionary-like structure, with string ids for model used and parameters passed
# and dictionaries mapping doc id to list of tokens
self.modeled_corpora = None
def save(self, filename, saved_data=None):
"""Persist this object to disk somehow.
You can save your data in any number of files in any format, but at a minimum, you need one json file that
describes enough to bootstrap the loading process. Namely, you must have a key called 'class' so that upon
loading the output, the correct class can be instantiated and used to load any other data. You don't have
to implement anything for saved_data, but it is stored as a key next to 'class'.
"""
with open(filename, "w") as f:
f.write(jsonpickle.encode({"class": self.__class__.__name__, "saved_data": saved_data}, f))
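        # For illustration: a hypothetical subclass named JsonFileOutput would
        # write something along the lines of
        # {"class": "JsonFileOutput", "saved_data": {...}}; load_output() below
        # reads the "class" key back to pick the matching entry from
        # registered_outputs.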
def synchronize(self, max_wait, field):
"""By default, operations are synchronous and no additional wait is
necessary. Data sources that are asynchronous (ElasticSearch) may
use this function to wait for "eventual consistency" """
pass
@abstractmethod
def get_filtered_data(self, field_to_get, filter=""):
raise NotImplementedError
def close(self):
pass
def load_output(filename):
with open(filename) as f:
output_details = jsonpickle.decode(f.read())
return registered_outputs[output_details['class']](**output_details["saved_data"])
|
tests/test_application.py | william-wambua/rpc.py | 152 | 11189858 | import asyncio
import json
import sys
import time
from typing import AsyncGenerator, Generator
import httpx
import pytest
from rpcpy.application import RPC, AsgiRPC, WsgiRPC
from rpcpy.serializers import SERIALIZER_NAMES, SERIALIZER_TYPES
from rpcpy.typing import TypedDict
def test_wsgirpc():
rpc = RPC()
assert isinstance(rpc, WsgiRPC)
@rpc.register
def sayhi(name: str) -> str:
return f"hi {name}"
with pytest.raises(
TypeError, match="WSGI mode can only register synchronization functions."
):
@rpc.register
async def async_sayhi(name: str) -> str:
return f"hi {name}"
@rpc.register
def sayhi_without_type_hint(name):
return f"hi {name}"
with httpx.Client(app=rpc, base_url="http://testServer/") as client:
assert client.get("/openapi-docs").status_code == 405
assert client.post("/sayhi", data={"name": "Aber"}).status_code == 415
assert client.post("/sayhi", json={"name": "Aber"}).status_code == 200
assert (
client.post("/sayhi_without_type_hint", json={"name": "Aber"})
).status_code == 200
assert client.post("/sayhi", data=json.dumps({"name": "Aber"})).status_code == 415
assert (
client.post(
"/sayhi",
content=json.dumps({"name": "Aber"}).encode("utf8"),
headers={"serializer": "application/json"},
).status_code
== 415
)
assert (
client.post(
"/sayhi",
content=json.dumps({"name": "Aber"}).encode("utf8"),
headers={"content-type": "", "serializer": "json"},
).status_code
== 200
)
@pytest.mark.asyncio
async def test_asgirpc():
rpc = RPC(mode="ASGI")
assert isinstance(rpc, AsgiRPC)
@rpc.register
async def sayhi(name: str) -> str:
return f"hi {name}"
@rpc.register
async def sayhi_without_type_hint(name):
return f"hi {name}"
with pytest.raises(
TypeError, match="ASGI mode can only register asynchronous functions."
):
@rpc.register
def sync_sayhi(name: str) -> str:
return f"hi {name}"
async with httpx.AsyncClient(app=rpc, base_url="http://testServer/") as client:
assert (await client.get("/openapi-docs")).status_code == 405
assert (await client.post("/sayhi", data={"name": "Aber"})).status_code == 415
assert (await client.post("/sayhi", json={"name": "Aber"})).status_code == 200
assert (
await client.post("/sayhi_without_type_hint", json={"name": "Aber"})
).status_code == 200
assert (
await client.post(
"/sayhi",
content=json.dumps({"name": "Aber"}).encode("utf8"),
headers={"serializer": "application/json"},
)
).status_code == 415
assert (
await client.post(
"/sayhi",
content=json.dumps({"name": "Aber"}).encode("utf8"),
headers={"content-type": "", "serializer": "json"},
)
).status_code == 200
@pytest.mark.skipif("pydantic" in sys.modules, reason="Installed pydantic")
def test_wsgi_openapi_without_pydantic():
rpc = RPC(openapi={"title": "Title", "description": "Description", "version": "v1"})
@rpc.register
def sayhi(name: str) -> str:
"""
say hi with name
"""
return f"hi {name}"
with pytest.raises(NotImplementedError):
rpc.get_openapi_docs()
@pytest.mark.skipif("pydantic" in sys.modules, reason="Installed pydantic")
@pytest.mark.asyncio
async def test_asgi_openapi_without_pydantic():
rpc = RPC(
mode="ASGI",
openapi={"title": "Title", "description": "Description", "version": "v1"},
)
@rpc.register
async def sayhi(name: str) -> str:
"""
say hi with name
"""
return f"hi {name}"
with pytest.raises(NotImplementedError):
rpc.get_openapi_docs()
@pytest.mark.skipif("pydantic" not in sys.modules, reason="Missing pydantic")
def test_wsgi_openapi():
rpc = RPC(openapi={"title": "Title", "description": "Description", "version": "v1"})
@rpc.register
def sayhi(name: str = "Aber") -> str:
"""
say hi with name
"""
return f"hi {name}"
class DNSRecord(TypedDict):
record: str
ttl: int
class DNS(TypedDict):
dns_type: str
host: str
result: DNSRecord
@rpc.register
def query_dns(dns_type: str, host: str) -> DNS:
return {"dns_type": dns_type, "host": host, "result": {"record": "", "ttl": 0}}
@rpc.register
def timestamp() -> Generator[int, None, None]:
while True:
yield int(time.time())
time.sleep(1)
assert rpc.get_openapi_docs() == OPENAPI_DOCS
with httpx.Client(app=rpc, base_url="http://testServer/") as client:
assert client.get("/openapi-docs").status_code == 200
assert client.get("/get-openapi-docs").status_code == 200
@pytest.mark.skipif("pydantic" not in sys.modules, reason="Missing pydantic")
@pytest.mark.asyncio
async def test_asgi_openapi():
rpc = RPC(
mode="ASGI",
openapi={"title": "Title", "description": "Description", "version": "v1"},
)
@rpc.register
async def sayhi(name: str = "Aber") -> str:
"""
say hi with name
"""
return f"hi {name}"
DNSRecord = TypedDict("DNSRecord", {"record": str, "ttl": int})
DNS = TypedDict("DNS", {"dns_type": str, "host": str, "result": DNSRecord})
@rpc.register
async def query_dns(dns_type: str, host: str) -> DNS:
return {"dns_type": dns_type, "host": host, "result": {"record": "", "ttl": 0}}
@rpc.register
async def timestamp() -> AsyncGenerator[int, None]:
while True:
yield int(time.time())
await asyncio.sleep(1)
assert rpc.get_openapi_docs() == OPENAPI_DOCS
async with httpx.AsyncClient(app=rpc, base_url="http://testServer/") as client:
assert (await client.get("/openapi-docs")).status_code == 200
assert (await client.get("/get-openapi-docs")).status_code == 200
DEFAULT_PARAMETERS = [
{
"name": "content-type",
"in": "header",
"description": "At least one of serializer and content-type must be used"
" so that the server can know which serializer is used to parse the data.",
"required": True,
"schema": {
"type": "string",
"enum": [serializer_type for serializer_type in SERIALIZER_TYPES],
},
},
{
"name": "serializer",
"in": "header",
"description": "At least one of serializer and content-type must be used"
" so that the server can know which serializer is used to parse the data.",
"required": True,
"schema": {
"type": "string",
"enum": [serializer_name for serializer_name in SERIALIZER_NAMES],
},
},
]
OPENAPI_DOCS = {
"openapi": "3.0.0",
"info": {"title": "Title", "description": "Description", "version": "v1"},
"paths": {
"/sayhi": {
"post": {
"summary": "say hi with name",
"parameters": DEFAULT_PARAMETERS,
"requestBody": {
"required": True,
"content": {
serializer_type: {
"schema": {
"type": "object",
"properties": {
"name": {
"default": "Aber",
"title": "Name",
"type": "string",
}
},
}
}
for serializer_type in SERIALIZER_TYPES
},
},
"responses": {
200: {
"content": {"application/json": {"schema": {"type": "string"}}},
"headers": {
"serializer": {
"schema": {
"type": "string",
"enum": ["json"],
},
"description": "Serializer Name",
}
},
}
},
}
},
"/query_dns": {
"post": {
"parameters": DEFAULT_PARAMETERS,
"requestBody": {
"required": True,
"content": {
serializer_type: {
"schema": {
"type": "object",
"properties": {
"dns_type": {
"title": "Dns Type",
"type": "string",
},
"host": {
"title": "Host",
"type": "string",
},
},
"required": ["dns_type", "host"],
}
}
for serializer_type in SERIALIZER_TYPES
},
},
"responses": {
200: {
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"dns_type": {
"title": "Dns Type",
"type": "string",
},
"host": {
"title": "Host",
"type": "string",
},
"result": {"$ref": "#/definitions/DNSRecord"},
},
"required": ["dns_type", "host", "result"],
}
}
},
"headers": {
"serializer": {
"schema": {
"type": "string",
"enum": ["json"],
},
"description": "Serializer Name",
}
},
}
},
}
},
"/timestamp": {
"post": {
"parameters": DEFAULT_PARAMETERS,
"responses": {
200: {
"content": {"text/event-stream": {"schema": {"type": "integer"}}},
"headers": {
"serializer": {
"schema": {"type": "string", "enum": ["json"]},
"description": "Serializer Name",
}
},
}
},
}
},
},
"definitions": {
"DNSRecord": {
"title": "DNSRecord",
"type": "object",
"properties": {
"record": {"title": "Record", "type": "string"},
"ttl": {"title": "Ttl", "type": "integer"},
},
"required": ["record", "ttl"],
}
},
}
|
tools/bin/pythonSrc/pychecker-0.8.18/pychecker2/tests/nested.py | YangHao666666/hawq | 450 | 11189867 | <filename>tools/bin/pythonSrc/pychecker-0.8.18/pychecker2/tests/nested.py
class N1:
class N2:
x = 1
class N3:
pass
|
preprocessing/compute_distances.py | enviromachinebeast/head2head | 206 | 11189870 | import cv2
import os
import numpy as np
import argparse
import collections
import torch
import itertools
from tqdm import tqdm
from preprocessing import transform
from reconstruction import NMFCRenderer
IMG_EXTENSIONS = ['.png']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def get_image_paths_dict(dir):
# Returns dict: {name: [path1, path2, ...], ...}
image_files = {}
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
basename = os.path.basename(root)
for fname in fnames:
if is_image_file(fname) and basename in ['real', 'fake']:
path = os.path.join(root, fname)
seq_name = os.path.basename(root).split('_')[0]
if seq_name not in image_files:
image_files[seq_name] = [path]
else:
image_files[seq_name].append(path)
# Sort paths for each sequence
for k, v in image_files.items():
image_files[k] = sorted(v)
# Return directory sorted for keys (identity names)
return collections.OrderedDict(sorted(image_files.items()))
def paths_exist(image_pths):
return all([os.path.exists(image_path) for image_path in image_pths])
def print_args(parser, args):
message = ''
message += '----------------- Arguments ---------------\n'
for k, v in sorted(vars(args).items()):
comment = ''
default = parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '-------------------------------------------'
print(message)
def l1_dist(v1, v2):
return np.abs(v1 - v2).sum()
def euler_dist(e1, e2):
d0 = abs(e1[0]-e2[0])
if d0 > 180:
d0 = 360 - d0
d1 = abs(e1[1]-e2[1])
if d1 > 180:
d1 = 360 - d1
d2 = abs(e1[2]-e2[2])
if d2 > 180:
d2 = 360 - d2
return (d0 + d1 + d2) / 3
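# Example (illustrative angles): euler_dist((350, 0, 0), (10, 0, 0)) wraps the
# first component to 360 - 340 = 20 degrees, so the result is (20 + 0 + 0) / 3,
# i.e. about 6.67.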
def get_within_distances(lst):
pairs = itertools.combinations(lst, 2)
max = 0
min = np.float('inf')
avg = []
for pair in pairs:
dst = l1_dist(pair[0], pair[1])
if dst < min:
min = dst
if dst > max:
max = dst
avg.append(dst)
avg = np.mean(avg)
return min, max, avg
def compute_distance_of_average_identities(ident_list1, ident_list2):
avg_ident1, avg_ident2 = np.mean(ident_list1, axis=0), np.mean(ident_list2, axis=0)
return l1_dist(avg_ident1, avg_ident2)
def compute_average_expesion_distance(expr_list1, expr_list2):
return np.mean([l1_dist(expr1, expr2) \
for expr1, expr2 in zip(expr_list1, expr_list2)])
def compute_average_rotation_distance(cam_list1, cam_list2):
# Rotation parameters to Euler angles.
angles_list1 = [transform.matrix2angle(cam[1]) for cam in cam_list1]
angles_list2 = [transform.matrix2angle(cam[1]) for cam in cam_list2]
return np.mean([euler_dist(ang1, ang2) \
for ang1, ang2 in zip(angles_list1, angles_list2)])
def main():
print('Computation of L1 distance between average identity coeffs (DAI-L1)\n')
print('Computation of average L1 distance between expression coeffs (AED-L1)\n')
print('Computation of average L1 distance between rotation parameters (ARD-L1)\n')
parser = argparse.ArgumentParser()
parser.add_argument('--results_dir', type=str, default='results/head2head_obama/latest_epoch/videos_test/obama',
help='Path to the results directory.')
parser.add_argument('--gpu_id', type=int, default='0', help='Negative value to use CPU, or greater equal than zero for GPU id.')
args = parser.parse_args()
# Figure out the device
args.gpu_id = int(args.gpu_id)
if args.gpu_id < 0:
args.gpu_id = -1
elif torch.cuda.is_available():
if args.gpu_id >= torch.cuda.device_count():
args.gpu_id = 0
else:
print('GPU device not available. Exit.')
exit(0)
# Print Arguments
print_args(parser, args)
# Create the directory of image paths.
images_dict = get_image_paths_dict(args.results_dir)
# Make sure we have two folders, one with real and one withs fake frames.
assert 'real' in images_dict and 'fake' in images_dict and \
len(images_dict.keys()) == 2, 'Results directory has wrong structure'
# Initialize the NMFC renderer.
renderer = NMFCRenderer(args)
# Iterate through the images_dict
identities_dict = {}
expressions_dict = {}
camera_dict = {}
for name, image_pths in images_dict.items():
if paths_exist(image_pths):
success, reconstruction_output = renderer.reconstruct(image_pths)
if success:
identities_dict[name] = reconstruction_output[1]
expressions_dict[name] = reconstruction_output[2]
camera_dict[name] = reconstruction_output[0]
else:
print('Reconstruction on %s failed.' % name)
break
# If the two expression sequences have been computed, find average L1 dist.
if len(identities_dict.keys()) == 2:
# Identity
dai_L1 = compute_distance_of_average_identities(identities_dict['real'],
identities_dict['fake'])
# Distance Between Average Identities (DAI-L1)
print('(L1) distance between average identities from real and fake sequences (DAI-L1): %0.4f' % (dai_L1))
#dsts_real = get_within_distances(identities_dict['real'])
#print('Within real sequence min %0.4f, max %0.4f, mean %0.4f' % dsts_real)
#dsts_fake = get_within_distances(identities_dict['fake'])
#print('Within fake sequence min %0.4f, max %0.4f, mean %0.4f' % dsts_fake)
# Expression
aed_L1 = compute_average_expesion_distance(expressions_dict['real'],
expressions_dict['fake'])
# Average Expression Distance (AED-L1)
print('Average expression (L1) distance between real and fake sequences (AED-L1): %0.4f' % (aed_L1))
# Pose
ard_L1 = compute_average_rotation_distance(camera_dict['real'],
camera_dict['fake'])
# Average Rotation Parameters Distance (ARD-L1)
print('Average rotation (L1) distance between real and fake sequences (ARD-L1): %0.4f' % (ard_L1))
# Clean
renderer.clear()
if __name__=='__main__':
main()
|
codingame/easy/Chuck Norris.py | DazEB2/SimplePyScripts | 117 | 11189880 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
message = input()
print(message, file=sys.stderr)
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
def Chuck_Norris_encode(text):
last_c = text[0]
count = 1
result = '00 ' if last_c == '0' else '0 '
for i in range(len(text))[1:]:
c = text[i]
if c != last_c:
result += '0' * count + ' '
count = 1
result += '00 ' if c == '0' else '0 '
else:
count += 1
last_c = c
result += '0' * count
return result
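# Worked example (illustrative): 'C' is 67 = 0b1000011, so the 7-bit string
# '1000011' encodes to '0 0 00 0000 0 00' -- a leading '0' block marks a run of
# 1-bits, '00' marks a run of 0-bits, and the zeros that follow give the run
# length.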
# The binary string must contain 7 characters, but bin() may return a
# shorter string with the leading zeros stripped
text = ''.join([bin(ord(c))[2:].rjust(7, '0') for c in message])
print(Chuck_Norris_encode(text))
|
johnny/utils.py | bennylope/johnny-cache | 124 | 11189919 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extra johnny utilities."""
from johnny.cache import get_backend, local, patch, unpatch
from johnny.decorators import wraps, available_attrs
__all__ = ["celery_enable_all", "celery_task_wrapper", "johnny_task_wrapper"]
def prerun_handler(*args, **kwargs):
"""Celery pre-run handler. Enables johnny-cache."""
patch()
def postrun_handler(*args, **kwargs):
"""Celery postrun handler. Unpatches and clears the localstore."""
unpatch()
local.clear()
def celery_enable_all():
"""Enable johnny-cache in all celery tasks, clearing the local-store
after each task."""
from celery.signals import task_prerun, task_postrun, task_failure
task_prerun.connect(prerun_handler)
task_postrun.connect(postrun_handler)
# Also have to cleanup on failure.
task_failure.connect(postrun_handler)
def celery_task_wrapper(f):
"""
Provides a task wrapper for celery that sets up cache and ensures
that the local store is cleared after completion
"""
from celery.utils import fun_takes_kwargs
@wraps(f, assigned=available_attrs(f))
def newf(*args, **kwargs):
backend = get_backend()
was_patched = backend._patched
get_backend().patch()
# since this function takes all keyword arguments,
# we will pass only the ones the function below accepts,
# just as celery does
supported_keys = fun_takes_kwargs(f, kwargs)
new_kwargs = dict((key, val) for key, val in kwargs.items()
if key in supported_keys)
try:
ret = f(*args, **new_kwargs)
finally:
local.clear()
if not was_patched:
get_backend().unpatch()
return ret
return newf
# backwards compatible alias
johnny_task_wrapper = celery_task_wrapper
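# Rough usage sketch (the task body is hypothetical):
#
#     @task
#     @celery_task_wrapper
#     def rebuild_index():
#         ...
#
# The wrapper patches the cache backend for the duration of the task, clears
# the thread-local store afterwards, and unpatches only if johnny was not
# already patched before the task ran.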
|
tests/ut/python/privacy/sup_privacy/test_model_train.py | hboshnak/mindarmour | 139 | 11189936 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Suppress Privacy model test.
"""
import pytest
import numpy as np
from mindspore import nn
from mindspore import context
from mindspore.train.callback import ModelCheckpoint
from mindspore.train.callback import CheckpointConfig
from mindspore.train.callback import LossMonitor
from mindspore.nn.metrics import Accuracy
import mindspore.dataset as ds
from mindarmour.privacy.sup_privacy import SuppressModel
from mindarmour.privacy.sup_privacy import SuppressMasker
from mindarmour.privacy.sup_privacy import SuppressPrivacyFactory
from mindarmour.privacy.sup_privacy import MaskLayerDes
from tests.ut.python.utils.mock_net import Net as LeNet5
def dataset_generator():
"""mock training data."""
batches = 10
batch_size = 32
data = np.random.random((batches*batch_size, 1, 32, 32)).astype(
np.float32)
label = np.random.randint(0, 10, batches*batch_size).astype(np.int32)
for i in range(batches):
yield data[i*batch_size:(i + 1)*batch_size],\
label[i*batch_size:(i + 1)*batch_size]
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_suppress_model_with_pynative_mode():
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
networks_l5 = LeNet5()
epochs = 5
batch_num = 10
mask_times = 10
lr = 0.01
masklayers_lenet5 = []
masklayers_lenet5.append(MaskLayerDes("conv1.weight", 0, False, False, -1))
suppress_ctrl_instance = SuppressPrivacyFactory().create(networks_l5,
masklayers_lenet5,
policy="local_train",
end_epoch=epochs,
batch_num=batch_num,
start_epoch=1,
mask_times=mask_times,
lr=lr,
sparse_end=0.50,
sparse_start=0.0)
net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_opt = nn.SGD(networks_l5.trainable_params(), lr)
model_instance = SuppressModel(
network=networks_l5,
loss_fn=net_loss,
optimizer=net_opt,
metrics={"Accuracy": Accuracy()})
model_instance.link_suppress_ctrl(suppress_ctrl_instance)
suppress_masker = SuppressMasker(model=model_instance, suppress_ctrl=suppress_ctrl_instance)
config_ck = CheckpointConfig(save_checkpoint_steps=batch_num, keep_checkpoint_max=10)
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
directory="./trained_ckpt_file/",
config=config_ck)
ds_train = ds.GeneratorDataset(dataset_generator, ['data', 'label'])
model_instance.train(epochs, ds_train, callbacks=[ckpoint_cb, LossMonitor(), suppress_masker],
dataset_sink_mode=False)
|
test/python/test_sparse.py | LinjianMa/ctf | 108 | 11190071 | <reponame>LinjianMa/ctf
#!/usr/bin/env python
import unittest
import numpy
import ctf
import os
import sys
def allclose(a, b):
return abs(ctf.to_nparray(a) - ctf.to_nparray(b)).sum() < 1e-14
class KnowValues(unittest.TestCase):
def test_einsum_hadamard(self):
n = 11
a1 = ctf.tensor((n,n,n), sp=1)
b1 = ctf.tensor((n,n,n), sp=1)
c1 = ctf.tensor((n,n,n))
a1.fill_sp_random(0., 1., 0.1)
b1.fill_sp_random(0., 1., 0.1)
c1.fill_sp_random(0., 1., 0.1)
d1 = ctf.einsum('ijk,jkl->ijkl', a1, b1)
e1 = numpy.einsum('ijk,jkl->ijkl', ctf.to_nparray(a1), ctf.to_nparray(b1))
self.assertTrue(allclose(d1,e1))
d2 = ctf.einsum('ijk,jkl->ijkl', a1, c1)
e2 = numpy.einsum('ijk,jkl->ijkl', ctf.to_nparray(a1), ctf.to_nparray(c1))
self.assertTrue(allclose(d2,e2))
def test_scaled_expression(self):
n = 5
a_sp = ctf.tensor((n,n,n), sp=1)
a_dn = ctf.tensor((n,n,n), sp=0)
a_sp.fill_sp_random(0., 1., 0.1)
a_dn += a_sp
b_sp = ctf.tensor((n,n,n), sp=1)
b_dn = ctf.tensor((n,n,n), sp=0)
b_sp.fill_sp_random(0., 1., 0.1)
b_dn += b_sp
c_sp = ctf.tensor((n,n,n), sp=1)
c_dn = ctf.tensor((n,n,n), sp=0)
c_sp.fill_sp_random(0., 1., 0.1)
c_dn += c_sp
a_np = ctf.to_nparray(a_dn)
b_np = ctf.to_nparray(b_dn)
c_np = ctf.to_nparray(c_dn)
c_sp.i("ijk") << 2.3*a_sp.i("ijl")*b_sp.i("kjl") + 7*c_sp.i("ijk") - a_sp.i("ijk") - 1. * a_sp.i("ijk") - 2 * b_sp.i("ijk")
c_dn.i("ijk") << 2.3*a_dn.i("ijl")*b_dn.i("kjl") + 7*c_dn.i("ijk") - a_dn.i("ijk") - 1. * a_dn.i("ijk") - 2 * b_dn.i("ijk")
c_np += 2.3*numpy.einsum("ijl,kjl->ijk",a_np,b_np) + 7*c_np - a_np - 1. * a_np - 2 * b_np
self.assertTrue(allclose(c_np,c_dn))
self.assertTrue(allclose(c_np,c_sp))
def test_complex(self):
a0 = numpy.arange(27.).reshape(3,3,3)
b0 = numpy.arange(27.).reshape(3,3,3)
a1 = ctf.astensor(a0)
b1 = ctf.astensor(b0)
.2*b1.i("ijk") << .7*a1.i("kij")
b0 = .2*b0 + .7*a0.transpose([1,2,0])
self.assertTrue(allclose(b0,b1))
def test_sample(self):
A = ctf.tensor((4,3,5))
nrm = A.norm2()
A.sample(.5)
nrm2 = A.norm2()
A.sample(.3)
nrm3 = A.norm2()
self.assertTrue(nrm2<=nrm)
self.assertTrue(nrm3<=nrm2)
def test_sparse_SY(self):
A = ctf.tensor((4,4),sym=[ctf.SYM.SY,ctf.SYM.NS])
AA = ctf.tensor((3,3,3),sym=[ctf.SYM.NS,ctf.SYM.SY,ctf.SYM.NS])
B = ctf.tensor((4,4,4,4),sym=[ctf.SYM.NS,ctf.SYM.NS,ctf.SYM.SY,ctf.SYM.NS])
C = ctf.tensor((4,4,4,4),sym=[ctf.SYM.SY,ctf.SYM.NS,ctf.SYM.NS,ctf.SYM.NS])
D = ctf.tensor((4,4,4,4),sym=[ctf.SYM.SY,ctf.SYM.NS,ctf.SYM.SY,ctf.SYM.NS])
E = ctf.tensor((4,4,4,4),sym=[ctf.SYM.SY,ctf.SYM.SY,ctf.SYM.SY,ctf.SYM.NS])
for X in [A,AA,B,C,D,E]:
X.fill_random(1.,1.)
Y = X.sparsify(0.)
#print("TEST")
#print(X.shape,X.sym)
#print(X)
#print("norms are",ctf.vecnorm(X),ctf.vecnorm(Y))
self.assertTrue(allclose(X,Y))
self.assertTrue(allclose(X-Y,0.))
self.assertTrue(allclose(ctf.vecnorm(X),ctf.vecnorm(Y)))
def run_tests():
numpy.random.seed(5330);
wrld = ctf.comm()
if ctf.comm().rank() != 0:
result = unittest.TextTestRunner(stream = open(os.devnull, 'w')).run(unittest.TestSuite(unittest.TestLoader().loadTestsFromTestCase(KnowValues)))
else:
print("Tests for sparse functionality")
result = unittest.TextTestRunner().run(unittest.TestSuite(unittest.TestLoader().loadTestsFromTestCase(KnowValues)))
return result
if __name__ == "__main__":
result = run_tests()
ctf.MPI_Stop()
sys.exit(not result)
|
code/doubanUtils.py | zefengdaguo/douban_crawler | 116 | 11190101 | <filename>code/doubanUtils.py
import requests
import csv, os, os.path, re
from functools import reduce
from bs4 import BeautifulSoup
from time import localtime,strftime,perf_counter,strptime
user_agent_list = ["Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36 Edg/84.0.522.63",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
]
def getAgent(n=3):
return user_agent_list[n]
def hasNextPage(soup):
try:
NextPage=soup.find(class_='next').link.get('href')
return NextPage
except:
return False
def nextPageLink(sess,soup,page,head=""):
NextPage=soup.find(class_='next').link.get('href')
req=sess.get(head + NextPage)
print(f'第{page}页:',req.status_code)
return BeautifulSoup(req.text,'html.parser')
# file name
def fn(name):
return name.replace('\\','-').replace('/','-')\
.replace(':','-').replace('*','-').replace('"','“')\
.replace('<','《').replace('>','》').replace('|','-').replace('?','?')
# page control
def pageControl(limit=50):
beg=eval(input('请输入你要爬取的起始页码(比如1):'))
end=eval(input('请输入终止页码(建议一次爬取{}页以下):'.format(limit)))
return beg, end
def timebar(scale,start,p):
a='※'*round(p*scale)
b='.'*(scale-round(p*scale))
dur=(perf_counter()-start)/60
print("\r{:^3.0f}%[{}->{}]已运行{:.2f}分钟"\
.format(p*100,a,b,dur),end=' ')
def noco(txt):
if len(txt)==0: return '...'
return txt.replace(',','、').replace(',','、').replace('\n',' ')
def getFormatTime():
return strftime("%Y-%m-%d %H-%M-%S", localtime())
def string2Time(s):
return strptime(s, '%Y-%m-%d %H-%M-%S')
def fileTimeCompare(fn1, fn2):
fn1 = fn1.replace(".csv","").split('-',1)[1][:-6]
fn2 = fn2.replace(".csv","").split('-',1)[1][:-6]
return string2Time(fn1) > string2Time(fn2)
def getLastBackUpItem(douId,Type):
    # Get the previous backup file
matchFiles = []
    # File name pattern
fnMatch = r"iiid-\d{4}-\d{2}-\d{2} \d{2}-\d{2}-\d{2}tttypeplus.csv"\
.replace('iiid',douId).replace('tttype',Type)
for _, _, files in os.walk("."):
for file in files:
# print(file)
if re.match(fnMatch,file):
matchFiles.append(file)
    ## Get the name of the latest movie file
if len(matchFiles) != 0:
latest = reduce(lambda x,y: x if fileTimeCompare(x,y) else y,\
matchFiles)
with open(latest, 'r', encoding='utf-8_sig') as f:
reader = csv.DictReader(f)
            # Get the id of the movie in the first row
try:
row = reader.__next__()
return row['subjectId']
except:
return None
else:
return None
def getCookie(raw_cookies):
cookies={}
for line in raw_cookies.split(';'):
key,value=line.split('=',1)
cookies[key]=value
return cookies
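# Example (illustrative cookie string): getCookie("bid=abc; ck=xyz") returns
# {'bid': 'abc', ' ck': 'xyz'} -- splitting on ';' alone keeps the leading
# space in every key after the first one.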
def getYear(raw):
yearRex = r'([1|2][9|0]\d{2})'
res = re.match(yearRex,raw)
try:
return res.group(1)
except:
return ''
def getShortComments(comments):
res = ''
for com in comments:
        # First get the reviewer's user name
user = com.find(class_="comment-info").get_text(strip=True).replace('\xa0','').replace('\n','')
res += user
res += ':'
short = com.find(class_="short").get_text(strip=True).replace('\xa0','').replace('\n','')
res += short
res += '; | '
    return res.replace("看过"," ")
|
tools/Ubertooth/host/python/extcap/btle-extcap.py | Charmve/BLE-Security-Att-Def | 149 | 11190106 | <reponame>Charmve/BLE-Security-Att-Def<gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2013 <NAME>
#
# This file is part of Project Ubertooth.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
import getopt
import re
import sys
from subprocess import Popen, PIPE
def main():
try:
opts, args = getopt.getopt(
sys.argv[1:], "h",
[
"help",
"list-interfaces",
"list-dlts",
"config",
"capture",
"interface=",
"fifo=",
"channel=",
])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
interface = ''
fifo = None
channel = "37"
do_list_dlts = False
do_config = False
do_capture = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o == "--list-interfaces":
list_interfaces()
exit(0)
elif o == "--list-dlts":
do_list_dlts = True
elif o == "--config":
do_config = True
elif o == "--capture":
do_capture = True
elif o == "--interface":
interface = a
elif o == "--fifo":
fifo = a
elif o == "--channel":
channel = a
else:
assert False, "unhandled option"
# every operation down here depends on having an interface
m = re.match('ubertooth(\d+)', interface)
if not m:
exit(1)
interface = m.group(1)
if do_list_dlts:
list_dlts()
elif do_config:
config()
elif do_capture:
if fifo is None:
print("Must specify fifo!")
exit(1)
capture(interface, fifo, channel)
def usage():
print("Usage: %s <--list-interfaces | --list-dlts | --config | --capture>" % sys.argv[0])
def list_interfaces():
proc = Popen(['ubertooth-util', '-s'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out = proc.communicate()[0]
lines = out.split('\n')
interfaces = []
for line in lines:
p = line.split()
if len(p) == 0 or p[0] == 'ubertooth-util':
break
if p[0] == 'Serial':
interfaces.append(p[2])
elif re.match('[0-9a-f]+', p[0]):
interfaces.append(p[0])
for i in range(len(interfaces)):
print("interface {value=ubertooth%d}{display=Ubertooth One %s}" % (i, interfaces[i]))
def list_dlts():
print("dlt {number=147}{name=USER0}{display=Bluetooth Low Energy}")
def config():
args = []
args.append((0, '--channel', 'Advertising Channel', 'selector'))
values = []
values.append((0, "37", "37", "true"))
values.append((0, "38", "38", "false"))
values.append((0, "39", "39", "false"))
for arg in args:
print("arg {number=%d}{call=%s}{display=%s}{type=%s}" % arg)
for value in values:
print("value {arg=%d}{value=%s}{display=%s}{default=%s}" % value)
def capture(interface, fifo, channel):
p = Popen([
"ubertooth-btle", "-f",
"-U%s" % interface,
"-c", fifo,
"-A", channel,
])
p.wait()
if __name__ == "__main__":
main()
|
build/python-env/lib/python2.7/site-packages/nose/exc.py | rfraposa/hadoopbeat | 5,079 | 11190128 | """Exceptions for marking tests as skipped or deprecated.
This module exists to provide backwards compatibility with previous
versions of nose where skipped and deprecated tests were core
functionality, rather than being provided by plugins. It may be
removed in a future release.
"""
from nose.plugins.skip import SkipTest
from nose.plugins.deprecated import DeprecatedTest
|
ahmia/views_search.py | keyboardcowboy42/ahmia | 176 | 11190173 | <gh_stars>100-1000
"""
Views
Full text search views.
YaCy back-end connections.
"""
import time
import urllib2 # URL encode
import simplejson as json
import urllib3 # HTTP conncetions
from django.conf import settings # For the back-end connection settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import redirect
from django.template import Context, loader
from django.views.decorators.http import require_GET, require_http_methods
from lxml import etree # To handle the XML answers from the YaCy
import ahmia.view_help_functions as helpers # My view_help_functions.py
from ahmia.models import HiddenWebsite, HiddenWebsitePopularity
from haystack.query import SearchQuerySet
@require_http_methods(["GET", "POST"])
def proxy(request):
"""Proxy connection to """
full_url = request.get_full_path()
http = urllib3.PoolManager()
url = settings.PROXY_BASE_URL + full_url.replace("/elasticsearch/", "")
content_type = {'Content-Type':request.META.get('CONTENT_TYPE')}
response = http.request(request.method, url, headers=content_type, body=request.body)
r_type = response.getheader('content-type')
r_data = response.data
r_status = response.status
return HttpResponse(content=r_data, content_type=r_type, status=r_status)
@require_GET
def solrapi(request):
"""Solr API to search domain. Returns a list of domains."""
query = request.GET.get('q', '')
domain_list = []
# Query format is key=value
# /search/API?q=key=value
# Examples: title=alert, server_header=openresty, text=ahmia, h1=Hidden Service, h2=Hidden Service
# Query key=value must be shorter than 120 chars
if query and "=" in query and len(query) < 120 and len(query.split("=")) == 2:
query = query.split("=")
key = query[0]
value = query[1].replace(" ", "+")
http = urllib3.PoolManager()
url = settings.SOLR_ADDRESS + "/select/?q=" + key + "%3A" + value + "&fl=domain"
url = url + '&start=0&rows=200&indent=on&group.field=domain&wt=python&group=true&group.limit=100'
response = http.request('GET', url)
# If Solr answer is 200 OK then build a domain list
if response.status == 200:
obj_data = eval(response.data) # Answer string to object
groups = obj_data["grouped"]["domain"]["groups"]
for group in groups:
domains = group["doclist"]["docs"]
for domain in domains:
domain_str = domain["domain"]
if 28 < len(domain_str) < 32: # Seems to be onion domain
domain_list.append(domain_str+"\n")
domain_list = sorted(set(domain_list)) # Sort the domains
return StreamingHttpResponse(domain_list, content_type="text/plain")
@require_GET
def autocomplete(request):
"""Autocomplete function to full text Haystack based search."""
sqs = SearchQuerySet().autocomplete(text=request.GET.get('q', ''))[:5]
suggestions = [result.title for result in sqs]
# Make sure you return a JSON object, not a bare list.
# Otherwise, you could be vulnerable to an XSS attack.
the_data = json.dumps({
'results': suggestions
})
return HttpResponse(the_data, content_type='application/json')
@require_GET
def default(request):
"""The default page."""
return redirect('/search/')
@require_GET
def search_page(request):
"""The default full text search page."""
query_string = request.GET.get('q', '')
search_time = ""
if query_string:
start = time.time()
if ".onion" in request.get_host():
show_tor2web_links = False
else:
show_tor2web_links = True
search_results = query(query_string, show_tor2web_links)
end = time.time()
search_time = end - start
search_time = round(search_time, 2)
else:
search_results = ""
onions = HiddenWebsite.objects.all()
template = loader.get_template('full_text_search.html')
content = Context({'search_results': search_results,
'search_time': search_time,
'count_banned': onions.filter(banned=True, online=True).count(),
'count_online': onions.filter(banned=False, online=True).count()})
return HttpResponse(template.render(content))
@require_GET
def yacy_connection(request, query_string):
"""Direct YaCy search wrapper."""
url = request.get_full_path()
if url == "/yacysearch.html":
url = ""
http = urllib3.PoolManager()
url = settings.YACY[:-1] + url
response = http.request('GET', url)
r_type = response.getheader('content-type')
r_data = response.data
r_status = response.status
return HttpResponse(content=r_data, content_type=r_type, status=r_status)
@require_GET
def yacy_static(request, query_string):
url = request.get_full_path()
return redirect('/static/yacy'+url)
@require_GET
def find(request, query_string):
"""XSLT based search view. For special use."""
if not query_string and 's' in request.GET:
query_string = request.GET.get('s')
xml = str(get_query(query_string))
xml = xml.replace("/yacysearch.xsl", "/static/xslt/yacysearch.xsl")
xml = xml.replace("<rss", "<lol")
xml = xml.replace("</rss>", "</lol>")
return HttpResponse(xml, content_type="application/xml")
def get_query(query_string):
"""Wrapper to YaCy installation."""
#<yacy_host>/yacysearch.rss?query=<search_string>&size=<max_hits>
try:
query_string = urllib2.quote(query_string.encode("utf8"))
url = settings.YACY + "yacysearch.rss?query=" + query_string
http = urllib3.PoolManager()
response = http.request('GET', url)
data = response.data
return data
except Exception as error:
print error
def query(query_string, show_tor2web_links=True):
"""Build HTML answer from the answer of the YaCy back-end."""
try:
xml = get_query(query_string)
root = etree.fromstring(xml)
html_answer = build_html_answer(root, show_tor2web_links)
if not html_answer:
html_answer = '<li class="hs_site"><h3>No search results</h3></li>'
return html_answer
except Exception as error:
print error
return '<li class="hs_site"><h3>No search results</h3></li>'
def build_html_answer(root, show_tor2web_links):
"""Builds HTML answer from the XML."""
results = []
for element in root.iter("item"):
link = element.find("link").text or ""
# HTML escape the link (href attribute)
link = helpers.html_escape(link)
# Show link on title if there is no title
title = element.find("title").text or link
redirect_link = "/redirect?redirect_url=" + link
description = element.find("description").text or ""
pub_date = element.find("pubDate").text or ""
answer = '<h3><a href="' + link + '">' + title + '</a></h3>'
answer = answer + '<div class="infotext"><p class="links">'
answer = answer + 'Direct link: <a href="' + redirect_link + '">'
answer = answer + link + '</a></p>'
if show_tor2web_links:
tor2web_link = link.replace('.onion/', '.tor2web.fi/')
redirect_tor2web_link = "/redirect?redirect_url=" + tor2web_link
answer = answer + '<p class="links"> Access without Tor Browser: '
answer = answer + '<a href="'
answer = answer + redirect_tor2web_link + '">' + tor2web_link
answer = answer + '</a></p>'
answer = answer + description
answer = answer + '<p class="urlinfo">' + pub_date + '</p></div>'
answer = '<li class="hs_site">' + answer + '</li>'
# Calculate the place of the result
namespaces = {'yacy': 'http://www.yacy.net/'}
host = element.find("yacy:host", namespaces=namespaces).text or ""
add_result(answer, host, results)
html_answer = sort_results(results)
return html_answer
def add_result(answer, host, results):
"""Add new search result and get the stats about it."""
if host:
onion_id = host.replace(".onion", "")
tor2web, backlinks, clicks = get_popularity(onion_id)
if tor2web > 0 or backlinks > 0 or clicks > 0:
results.append(Popularity(host, answer, tor2web, backlinks, clicks))
else:
results.append(Popularity(host, answer, 1, 1, 1))
def get_popularity(onion):
"""Calculate the popularity of an onion page."""
try:
hs = HiddenWebsite.objects.get(id=onion)
except ObjectDoesNotExist:
return 1, 1, 1
if hs.banned:
return 0, 0, 0
try:
pop = HiddenWebsitePopularity.objects.get(about=hs)
clicks = pop.clicks
public_backlinks = pop.public_backlinks
tor2web = pop.tor2web
return tor2web, public_backlinks, clicks
except ObjectDoesNotExist:
return 1, 1, 1
def sort_results(p_tuples):
"""Sort the results according to stats."""
# Scaling the number of backlinks
p_by_backlinks = sorted(p_tuples, key=lambda popularity: popularity.backlinks, reverse=True)
for index, p_info in enumerate(p_by_backlinks):
p_info.backlinks = 1 / (float(index) + 1)
# Scaling the number of clicks
p_by_clicks = sorted(p_by_backlinks, key=lambda popularity: popularity.clicks, reverse=True)
for index, p_info in enumerate(p_by_clicks):
p_info.clicks = 1 / (float(index) + 1)
# Scaling the number of Tor2web
p_by_tor2web = sorted(p_by_clicks, key=lambda popularity: popularity.tor2web, reverse=True)
for index, p_info in enumerate(p_by_tor2web):
p_info.tor2web = 1 / (float(index) + 1)
p_by_sum = sorted(p_by_tor2web, key=lambda popularity: popularity.sum(), reverse=True)
html_answer = ""
for p_info in p_by_sum:
html_answer = html_answer + p_info.content
return html_answer
class Popularity(object):
"""Popularity by Tor2web visits, backlinks and clicks."""
def __init__(self, url, content, tor2web, backlinks, clicks):
self.url = url
self.content = content
self.tor2web = float(tor2web)
self.backlinks = float(backlinks)
self.clicks = float(clicks)
def func(self):
"""Print the sum function."""
print "2.0*%f + 3.0*%f + 1.0*%f" % self.tor2web, self.backlinks, self.clicks
def sum(self):
"""Calculate the popularity."""
#The model can be very simple (sum)
#What are the proper coefficients?
sum_function = 2.0*self.tor2web + 3.0*self.backlinks + 1.0*self.clicks
return sum_function
def __repr__(self):
return repr((self.url, self.tor2web, self.backlinks, self.clicks, self.sum))
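# Hypothetical ranking sketch (not part of the original module; the onion names
# and counts are made up): sort_results() rescales each metric to 1/rank before
# applying the weighted sum defined in Popularity.sum().
def _example_ranking():
    results = [Popularity("aaa.onion", '<li class="hs_site">a</li>', 1, 1, 1),
               Popularity("bbb.onion", '<li class="hs_site">b</li>', 5, 2, 9)]
    return sort_results(results)  # HTML for bbb.onion comes first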
|
tests/unit/core/addresscodec/test_main_test_cases.py | SubCODERS/xrpl-py | 216 | 11190207 | test_cases = [
[
"<KEY>",
None,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
1,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
14,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
11747,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
None,
"<KEY>gBttTxAZwMcuJd4xteQHyt",
"TVVrSWtmQQssgVcmoMBcFQZKKf56QscyWLKnUyiuZW8ALU4",
],
[
"<KEY>",
None,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
58,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
23480,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
11747,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
None,
"XVLhHMPHU98es4dbozjVtdWzVrDjtV5fdx1mHp98tDMoQXb",
"<KEY>",
],
[
"<KEY>",
0,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
1,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
2,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
32,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
276,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
65591,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
16781933,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
4294967294,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
4294967295,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
None,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
0,
"<KEY>",
"<KEY>",
],
[
"<KEY>",
13371337,
"<KEY>",
"<KEY>",
],
]
|
static/scripts/tools/gis_update_location_tree.py | whanderley/eden | 205 | 11190213 | #!/usr/bin/python
# This is a script to update the Location Tree in the Database
# Needs to be run in the web2py environment
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/gis_update_location_tree.py
s3db.gis_location
gis.update_location_tree()
db.commit()
|
opendeep/models/single_layer/restricted_boltzmann_machine.py | vitruvianscience/OpenDeep | 252 | 11190217 | <filename>opendeep/models/single_layer/restricted_boltzmann_machine.py
"""
This module provides the RBM. http://deeplearning.net/tutorial/rbm.html
Boltzmann Machines (BMs) are a particular form of energy-based model which
contain hidden variables. Restricted Boltzmann Machines further restrict BMs
to those without visible-visible and hidden-hidden connections.
Also see:
https://www.cs.toronto.edu/~hinton/absps/guideTR.pdf
for optimization tips and tricks.
"""
# standard libraries
import logging
# third party libraries
import theano
import theano.tensor as T
import theano.sandbox.rng_mrg as RNG_MRG
# internal references
from opendeep.utils.decorators import inherit_docs
from opendeep.models.model import Model
from opendeep.utils.weights import get_weights, get_bias
from opendeep.utils.activation import get_activation_function, is_binary
log = logging.getLogger(__name__)
@inherit_docs
class RBM(Model):
"""
This is a probabilistic, energy-based model.
Basic binary implementation from:
http://deeplearning.net/tutorial/rnnrbm.html
and
http://deeplearning.net/tutorial/rbm.html
.. todo::
Implement non-binary support for visible and hiddens (this means changing sampling method).
"""
def __init__(self, inputs=None, hiddens=None, params=None, outdir='outputs/rbm/',
visible_activation='sigmoid', hidden_activation='sigmoid',
weights_init='uniform', weights_mean=0, weights_std=5e-3, weights_interval='glorot',
bias_init=0.0,
mrg=RNG_MRG.MRG_RandomStreams(1),
k=15):
"""
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
Parameters
----------
inputs : List of [tuple(shape, `Theano.TensorType`)]
The dimensionality of the inputs for this model, and the routing information for the model
            to accept inputs from elsewhere. The `inputs` variables are expected to be of the form (timesteps, batch, data).
`shape` will be a monad tuple representing known
sizes for each dimension in the `Theano.TensorType`. The length of `shape` should be equal to number of
dimensions in `Theano.TensorType`, where the shape element is an integer representing the size for its
dimension, or None if the shape isn't known. For example, if you have a matrix with unknown batch size
but fixed feature size of 784, `shape` would be: (None, 784). The full form of `inputs` would be:
[((None, 784), <TensorType(float32, matrix)>)].
hiddens : int or Tuple of (shape, `Theano.TensorType`)
Int for the number of hidden units to use, or a tuple of shape, expression to route the starting
hidden values from elsewhere.
params : Dict(string_name: theano SharedVariable), optional
A dictionary of model parameters (shared theano variables) that you should use when constructing
this model (instead of initializing your own shared variables). This parameter is useful when you want to
have two versions of the model that use the same parameters - such as siamese networks or pretraining some
weights.
outdir : str
The directory you want outputs (parameters, images, etc.) to save to. If None, nothing will
be saved.
visible_activation : str or callable
The nonlinear (or linear) visible activation to perform after the dot product from hiddens -> visible layer.
This activation function should be appropriate for the input unit types, i.e. 'sigmoid' for binary inputs.
See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass
your own function to be used as long as it is callable.
hidden_activation : str or callable
The nonlinear (or linear) hidden activation to perform after the dot product from visible -> hiddens layer.
See opendeep.utils.activation for a list of available activation functions. Alternatively, you can pass
your own function to be used as long as it is callable.
weights_init : str
Determines the method for initializing model weights. See opendeep.utils.nnet for options.
weights_interval : str or float
If Uniform `weights_init`, the +- interval to use. See opendeep.utils.nnet for options.
weights_mean : float
If Gaussian `weights_init`, the mean value to use.
weights_std : float
If Gaussian `weights_init`, the standard deviation to use.
bias_init : float
The initial value to use for the bias parameter. Most often, the default of 0.0 is preferred.
mrg : random
A random number generator that is used when sampling. The RBM is a probabilistic model, so it relies a lot
on sampling. I recommend using Theano's sandbox.rng_mrg.MRG_RandomStreams.
k : int
The k number of steps used for CD-k or PCD-k with Gibbs sampling. Basically, the number of samples
generated from the model to train against reconstructing the original input.
"""
# init Model to combine the defaults and config dictionaries with the initial parameters.
initial_parameters = locals().copy()
initial_parameters.pop('self')
super(RBM, self).__init__(**initial_parameters)
##################
# specifications #
##################
if len(self.inputs) > 1:
raise NotImplementedError("Expected 1 input to RBM, found %d. Please merge inputs before passing "
"to the model!" % len(self.inputs))
# self.inputs is a list of all the input expressions (we enforce only 1, so self.inputs[0] is the input)
input_shape, self.input = self.inputs[0]
if isinstance(input_shape, int):
self.input_size = ((None,) * (self.input.ndim - 1)) + (input_shape,)
else:
self.input_size = input_shape
assert self.input_size is not None, "Need to specify the shape for the last dimension of the input!"
# our output space is the same as the input space
self.output_size = self.input_size
# grab hiddens
# have only 1 hiddens
assert len(self.hiddens) == 1, "Expected 1 `hiddens` param, found %d" % len(self.hiddens)
self.hiddens = self.hiddens[0]
if isinstance(self.hiddens, int):
hidden_size = self.hiddens
hiddens_init = None
elif isinstance(self.hiddens, tuple):
hidden_shape, hiddens_init = self.hiddens
if isinstance(hidden_shape, int):
hidden_size = hidden_shape
else:
hidden_size = hidden_shape[-1]
else:
raise AssertionError("Hiddens need to be an int or tuple of (shape, theano_expression), found %s" %
type(self.hiddens))
# other specifications
# visible activation function!
self.visible_activation_func = get_activation_function(visible_activation)
# make sure the sampling functions are appropriate for the activation functions.
if is_binary(self.visible_activation_func):
self.visible_sampling = mrg.binomial
else:
# TODO: implement non-binary activation
log.error("Non-binary visible activation not supported yet!")
raise NotImplementedError("Non-binary visible activation not supported yet!")
# hidden activation function!
self.hidden_activation_func = get_activation_function(hidden_activation)
# make sure the sampling functions are appropriate for the activation functions.
if is_binary(self.hidden_activation_func):
self.hidden_sampling = mrg.binomial
else:
# TODO: implement non-binary activation
log.error("Non-binary hidden activation not supported yet!")
raise NotImplementedError("Non-binary hidden activation not supported yet!")
####################################################
# parameters - make sure to deal with params_hook! #
####################################################
self.W = self.params.get(
"W",
get_weights(weights_init=weights_init,
shape=(self.input_size[-1], hidden_size),
name="W",
rng=mrg,
# if gaussian
mean=weights_mean,
std=weights_std,
# if uniform
interval=weights_interval)
)
self.b_v = self.params.get(
"b_v",
get_bias(shape=self.input_size[-1], name="b_v", init_values=bias_init)
)
self.b_h = self.params.get(
"b_h",
get_bias(shape=hidden_size, name="b_h", init_values=bias_init)
)
# Finally have the parameters
self.params = {"W": self.W, "b_v": self.b_v, "b_h": self.b_h}
###############
# computation #
###############
# initialize from visibles if we aren't generating from some hiddens
if hiddens_init is None:
[_, v_chain, _, h_chain], self.updates = theano.scan(fn=self._gibbs_step_vhv,
outputs_info=[None, self.input, None, None],
n_steps=k)
# initialize from hiddens
else:
[_, v_chain, _, h_chain], self.updates = theano.scan(fn=self._gibbs_step_hvh,
outputs_info=[None, None, None, hiddens_init],
n_steps=k)
self.v_sample = v_chain[-1]
self.h_sample = h_chain[-1]
mean_v, _, _, _ = self._gibbs_step_vhv(self.v_sample)
# the free-energy cost function!
# consider v_sample constant when computing gradients on the cost function
# this actually keeps v_sample from being considered in the gradient, to set gradient to 0 instead,
# use theano.gradient.zero_grad
v_sample_constant = theano.gradient.disconnected_grad(self.v_sample)
# v_sample_constant = v_sample
self.cost = (self.free_energy(self.input) - self.free_energy(v_sample_constant)) / self.input.shape[0]
log.debug("Initialized an RBM shape %s",
str((self.input_size, hidden_size)))
def _gibbs_step_vhv(self, v):
"""
Single step in the Gibbs chain computing visible -> hidden -> visible.
"""
# compute the hiddens and sample
mean_h = self.hidden_activation_func(T.dot(v, self.W) + self.b_h)
h = self.hidden_sampling(size=mean_h.shape, n=1, p=mean_h,
dtype=theano.config.floatX)
# compute the visibles and sample
mean_v = self.visible_activation_func(T.dot(h, self.W.T) + self.b_v)
v = self.visible_sampling(size=mean_v.shape, n=1, p=mean_v,
dtype=theano.config.floatX)
return mean_v, v, mean_h, h
def _gibbs_step_hvh(self, h):
"""
Single step in the Gibbs chain computing hidden -> visible -> hidden (for generative application).
"""
# compute the visibles and sample
mean_v = self.visible_activation_func(T.dot(h, self.W.T) + self.b_v)
v = self.visible_sampling(size=mean_v.shape, n=1, p=mean_v,
dtype=theano.config.floatX)
# compute the hiddens and sample
mean_h = self.hidden_activation_func(T.dot(v, self.W) + self.b_h)
h = self.hidden_sampling(size=mean_h.shape, n=1, p=mean_h,
dtype=theano.config.floatX)
return mean_v, v, mean_h, h
def free_energy(self, v):
"""
The free-energy equation used for contrastive-divergence.
Parameters
----------
v : tensor
The theano tensor representing the visible layer input.
Returns
-------
theano expression
The free energy calculation given the input tensor.
"""
# vbias_term = -T.dot(v, self.b_v)
# hidden_term = -T.sum(
# T.log(1 + T.exp(T.dot(v, self.W) + self.b_h)),
# axis=1
# )
vbias_term = -(v * self.b_v).sum()
hidden_term = -T.log(1 + T.exp(T.dot(v, self.W) + self.b_h)).sum()
return vbias_term + hidden_term
####################
# Model functions! #
####################
def get_inputs(self):
return self.input
def get_hiddens(self):
return self.h_sample
def get_outputs(self):
return self.v_sample
def generate(self, initial=None):
log.exception("Generate not implemented yet for the RBM! Feel free to contribute :)")
raise NotImplementedError("Generate not implemented yet for the RBM! Feel free to contribute :)")
def get_loss(self):
return self.cost
def get_updates(self):
return self.updates
def get_params(self):
return self.params
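# Hypothetical construction sketch (not part of the original module): a small
# binary RBM over 784-dimensional inputs; the symbolic input 'x', the hidden
# size and k are assumptions chosen for illustration.
def _example_rbm():
    x = T.matrix('x')
    rbm = RBM(inputs=[((None, 784), x)], hiddens=256, k=1, outdir=None)
    return rbm.get_loss(), rbm.get_updates()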
|
bin/chunks.py | cwickham/merely-useful.github.io | 190 | 11190238 | #!/usr/bin/env python
'''
chunks.py [source_file...]
Check that all chunks are either naked (no language spec) or have a recognizable
language and a label. If no source files are given, reads from standard input.
'''
import sys
import re
from util import LANGUAGES, read_all_files, report
MARKER = '```'
RICH_MARKER = re.compile(r'^```\{(.+?)\s+([^=,]+)(,\s*.+=.+)*\s*}$')
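# Illustrative match (the language and label values are assumptions, not taken
# from the original file): a rich chunk header carries a language and a label,
# while a bare ``` fence is treated as a naked chunk.
def _example_rich_marker():
    match = RICH_MARKER.search("```{python read-data, echo=FALSE}")
    return None if match is None else (match.group(1), match.group(2))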
def main(source_files):
'''Main driver.'''
chunks = read_all_files(source_files, get_chunks)
chunks = {c for c in chunks if bad_chunk(c)}
report('Bad Chunks', chunks)
def get_chunks(filename, reader):
'''Extract chunk headers.'''
result = set()
in_chunk = False
for (i, line) in enumerate(reader):
if line.startswith(MARKER):
marker = line.rstrip()
if in_chunk:
assert marker == MARKER, \
f'Badly-formed end of chunk {filename}:{i}'
in_chunk = False
else:
result.add((filename, i+1, marker))
in_chunk = True
return result
def bad_chunk(chunk):
'''Is this a badly formed chunk?'''
filename, line_number, marker = chunk
# Naked chunk (plain text).
if marker == MARKER:
return False
# Doesn't match pattern for rich chunk header.
match = RICH_MARKER.search(marker)
if not match:
return True
# Unknown language.
language, label = match.group(1), match.group(2)
if language not in LANGUAGES:
return True
# No reason to reject.
return False
if __name__ == '__main__':
main(sys.argv[1:])
|
mlens/parallel/tests/test_a_learner_full.py | mehrdad-shokri/mlens | 760 | 11190260 | """"ML-ENSEMBLE
Testing suite for Learner and Transformer
"""
from mlens.testing import Data, EstimatorContainer, get_learner, run_learner
def test_predict():
"""[Parallel | Learner | Full | No Proba | No Prep] test fit and predict"""
args = get_learner('predict', 'full', False, False)
run_learner(*args)
def test_transform():
"""[Parallel | Learner | Full | No Proba | No Prep] test fit and transform"""
args = get_learner('transform', 'full', False, False)
run_learner(*args)
def test_predict_prep():
"""[Parallel | Learner | Full | No Proba | Prep] test fit and predict"""
args = get_learner('predict', 'full', False, True)
run_learner(*args)
def test_transform_prep():
"""[Parallel | Learner | Full | No Proba | Prep] test fit and transform"""
args = get_learner('transform', 'full', False, True)
run_learner(*args)
def test_predict_proba():
"""[Parallel | Learner | Full | Proba | No Prep] test fit and predict"""
args = get_learner('predict', 'full', True, False)
run_learner(*args)
def test_transform_proba():
"""[Parallel | Learner | Full | Proba | No Prep] test fit and transform"""
args = get_learner('transform', 'full', True, False)
run_learner(*args)
def test_predict_prep_proba():
"""[Parallel | Learner | Full | Proba | No Prep] test predict"""
args = get_learner('predict', 'full', True, True)
run_learner(*args)
def test_transform_prep_proba():
"""[Parallel | Learner | Full | Proba | Prep] test transform"""
args = get_learner('transform', 'full', True, True)
run_learner(*args)
|
Python3/687.py | rakhi2001/ecom7 | 854 | 11190267 | __________________________________________________________________________________________________
sample 336 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def helper(self, node):
cur_path=0
if node.left:
n, val = self.helper(node.left)
if val==node.val:
cur_path = n+1
if self.longest_path<cur_path:
self.longest_path = cur_path
if node.right:
n, val = self.helper(node.right)
if val==node.val:
cur_path2 = n+1
if self.longest_path<(cur_path+cur_path2):
self.longest_path = (cur_path+cur_path2)
cur_path = max(cur_path, cur_path2)
return cur_path, node.val
def longestUnivaluePath(self, root: TreeNode) -> int:
self.longest_path = 0
if root:
self.helper(root)
return self.longest_path
__________________________________________________________________________________________________
sample 17088 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def longestUnivaluePath(self, root: TreeNode) -> int:
longestPath = [float('-inf')]
def longestCommonPath(root, val):
if not root:
return 0
left = right = 0
found = False
if root.val == val:
found = True
left = longestCommonPath(root.left, val)
right = longestCommonPath(root.right, val)
else:
left = longestCommonPath(root.left, root.val)
right = longestCommonPath(root.right, root.val)
if left+right+1 > longestPath[0]:
longestPath[0] = left+right+1
return max(left+1, right+1) if found else 0
if not root:
return 0
longestCommonPath(root, root.val)
return longestPath[0]-1
__________________________________________________________________________________________________
|
piwheels/monitor/sense/renderers.py | jgillis/piwheels | 120 | 11190281 | #!/usr/bin/env python
# The piwheels project
# Copyright (c) 2017 <NAME> <https://github.com/bennuttall>
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implements the screen rendering classes for the Sense HAT monitor.
.. autoclass:: Renderer
:members:
.. autoclass:: MainRenderer
:members:
.. autoclass:: MasterRenderer
:members:
.. autoclass:: SlaveRenderer
:members:
.. autoclass:: QuitRenderer
:members:
.. autofunction:: bounce
"""
import signal
import subprocess
from datetime import datetime, timedelta, timezone
from itertools import cycle, chain
from threading import main_thread
from contextlib import contextmanager
import numpy as np
from pisense import array, draw_text
from colorzero import Color, Lightness, ease_out
from piwheels.format import format_size
from piwheels.transport import NoData
from .states import SlaveList, MasterState, SlaveState
from . import controls
UTC = timezone.utc
def bounce(it):
# bounce('ABC') -> A B C C B A A B C ...
return cycle(chain(it, reversed(it)))
class Renderer:
"""
Base class for all renderers. A renderer acts as an iterator, yielding
images (or arrays, or anything that can be passed to one of the screen
methods). Renderers also have a :meth:`move` method which is used by the
owning task to pass along joystick events to the renderer.
The base implementation provides a simple joystick-based navigation
implementation, and limits its coordinates to a specified boundary.
"""
def __init__(self):
self._limits = (0, 0, 7, 7)
self._position = (0, 0)
@property
def position(self):
return self._position
@position.setter
def position(self, value):
x, y = value
min_x, min_y, max_x, max_y = self.limits
x = max(min_x, min(max_x, x))
y = max(min_y, min(max_y, y))
self._position = x, y
@property
def limits(self):
return self._limits
@limits.setter
def limits(self, value):
if value != self._limits:
self._limits = value
# Re-calculate position for new limits
self.position = self.position
def __iter__(self):
pass
def move(self, event, task):
if event.pressed:
x, y = self.position
try:
dx, dy = {
'up': (0, -1),
'down': (0, 1),
'left': (-1, 0),
'right': (1, 0),
}[event.direction]
except KeyError:
pass
else:
self.position = x + dx, y + dy
nx, ny = self.position
return nx - x, ny - y
return (0, 0)
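# Minimal illustrative subclass (an assumption, not part of piwheels): it reuses
# the base-class navigation and simply yields a solid frame forever.
class _ExampleRenderer(Renderer):
    def __iter__(self):
        while True:
            yield array(Color('green'))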
class HelpRenderer(Renderer):
"""
The :class:`HelpRenderer` is responsible for rendering help notes for
the graphs at the top of the main page. It consists of eight small
horizontally arranged blocks at the top of the screen. Each can be
individually selected to display a scrolling description below.
"""
def __init__(self):
super().__init__()
self.offset = None
self.text = None
self.limits = (0, 0, 7, 0)
self.position = (0, 0)
self._update_text()
@property
def position(self):
return super().position
@position.setter
def position(self, value):
with self.watch_selection():
# Fugly super-call for property setters...
super(HelpRenderer, self.__class__).position.fset(self, value)
@contextmanager
def watch_selection(self):
before = self.position
yield
after = self.position
if before != after:
self._update_text()
def _update_text(self):
label = [
'Last Seen',
'Builds Queue/Build Time',
'Disk Used',
'Swap Used',
'Mem Used',
'CPU Temperature',
'Load Avg',
'Builds Done/Clock Skew',
][self.position[0]]
self.text = array(
draw_text(label,
font='small.pil',
foreground=Color('white'),
background=Color('black'),
padding=(8, 3, 8, 0)))
self.offset = iter(cycle(range(self.text.shape[1] - 8)))
def move(self, event, task):
if event.direction == 'down':
task.renderers['main'].position = self.position[0], 3
task.switch_to(task.renderers['main'], transition='draw')
return (0, 0)
else:
return super().move(event, task)
def __iter__(self):
buf = array(Color('black'))
grad = list(Color('darkblue').gradient(Color('white'), steps=15))
pulse = iter(bounce(range(len(grad))))
while True:
offset = next(self.offset)
buf[:, :] = self.text[:, offset:offset + 8]
buf[:3, :] = Color('darkblue')
x, y = self.position
buf[:3, x] = grad[next(pulse)]
yield buf
class MainRenderer(Renderer):
"""
The :class:`MainRenderer` is responsible for rendering the main screen in
the application (the first screen shown on start).
It consists of eight small horizontally arranged bar charts at the top of
the screen. These indicate, in order: time since last ping, disk used, swap
used, memory used, SoC temperature, load average, queue size, and inverted
build rate.
The status of each slave is depicted as a single pixel below these three
rows.
"""
def __init__(self):
super().__init__()
self.slaves = SlaveList()
self.controls = None
self.connected = False
self.limits = (0, 3, 7, 7)
self.position = (0, 3)
self._make_stats(self.selected)
@staticmethod
def _slave_coords(index):
return (index // 5, 3 + index % 5)
@staticmethod
def _slave_index(x, y):
return (x * 5) + (y - 3)
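    # Worked example (illustrative): slave index 7 sits at column 1, row 5,
    # since _slave_coords(7) == (7 // 5, 3 + 7 % 5) == (1, 5), and
    # _slave_index(1, 5) == 1 * 5 + (5 - 3) == 7.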
@property
def selected(self):
try:
return self.slaves[self._slave_index(*self.position)]
except IndexError:
return None
@property
def position(self):
return super().position
@position.setter
def position(self, value):
with self.watch_selection():
# ... and again
super(MainRenderer, self.__class__).position.fset(self, value)
@contextmanager
def watch_selection(self):
before = self.selected
yield
after = self.selected
if before != after:
self._make_stats(after)
def _make_stats(self, state):
if isinstance(state, MasterState):
self.controls = [
controls.LastSeen(),
controls.BuildsQueue(),
controls.Disk(),
controls.Swap(),
controls.Mem(),
controls.CPUTemp(),
controls.LoadAvg(),
controls.BuildsDone(),
]
elif isinstance(state, SlaveState):
self.controls = [
controls.LastSeen(),
controls.BuildTime(),
controls.Disk(),
controls.Swap(),
controls.Mem(),
controls.CPUTemp(),
controls.LoadAvg(),
controls.ClockSkew(),
]
else:
assert state is None
self.controls = [controls.Placeholder()] * 8
self._refresh_stats(state)
def _refresh_stats(self, state):
for control in self.controls:
control.update(state)
def message(self, msg, data):
if msg in ('HELLO', 'STATS'):
slave_id = None
timestamp = datetime.now(tz=UTC)
if msg == 'HELLO':
self.connected = True
elif msg == 'SLAVE':
slave_id, timestamp, msg, data = data
with self.watch_selection():
self.slaves.message(slave_id, timestamp, msg, data)
if self.selected is not None and self.selected.slave_id == slave_id:
self._refresh_stats(self.selected)
def move(self, event, task):
if not self.connected:
return (0, 0)
elif event.direction == 'up' and self.position[1] == 3:
task.renderers['help'].position = self.position[0], 0
task.switch_to(task.renderers['help'], transition='draw')
return (0, 0)
elif event.direction == 'down' and self.position[1] == 7:
task.switch_to(task.renderers['quit'], transition='slide',
direction='up', duration=0.5)
return (0, 0)
else:
delta = super().move(event, task)
if event.direction == 'enter' and self.selected is not None:
if isinstance(self.selected, MasterState):
task.switch_to(MasterRenderer(self.selected),
transition='zoom', direction='in',
center=self.position, duration=0.5)
else:
task.switch_to(SlaveRenderer(self.selected),
transition='zoom', direction='in',
center=self.position, duration=0.5)
return delta
def _render_stats(self, buf):
for x, stat in enumerate(self.controls):
if isinstance(stat, controls.LastSeen):
# Always ensure last-seen is updated as its value can change
# regardless of message arrival (and *should* change if message
# arrival has stopped)
stat.update(self.selected)
# Scale the value to a range of 2, with an offset of 1
# to ensure that the status line is never black
value = (stat.value * 2) + 1
buf[0:3, x] = [
stat.color if y < int(value) else
stat.color * Lightness(value - int(value)) if y < value else
Color('black')
for y in range(3)
][::-1]
def _render_slaves(self, buf, pulse):
for index, slave in enumerate(self.slaves):
x, y = self._slave_coords(index)
if 0 <= x < 8 and 0 <= y < 8:
buf[y, x] = slave.color
x, y = self.position
base = Color(*buf[y, x])
grad = list(base.gradient(Color('white'), steps=15))
buf[y, x] = grad[pulse]
def __iter__(self):
waiting = array(
draw_text('Waiting for connection', padding=(8, 0, 8, 1)))
for offset in cycle(range(waiting.shape[1] - 8)):
if self.connected:
break
yield waiting[:, offset:offset + 8]
buf = array(Color('black'))
pulse = iter(bounce(range(10)))
while True:
x, y = self.position
with self.watch_selection():
self.slaves.prune()
buf[:] = Color('black')
self._render_stats(buf)
self._render_slaves(buf, next(pulse))
yield buf
class NodeRenderer(Renderer):
"""
The :class:`NodeRenderer` is used to render the full-screen status of the
master or slave nodes (when selected from the main menu). Various screens
are included, detailing the node's statistics, and providing rudimentary
control actions. This is effectively an abstract base class, with
:class:`SlaveRenderer` and :class:`MasterRenderer` filling in the
:attr:`stats` dictionary.
"""
def __init__(self, node):
super().__init__()
self.updated = datetime(1970, 1, 1, tzinfo=UTC)
self.node = node
self.text = None
self.offset = None
self.graph = None
self._mode = 'text'
self.limits = (0, 0, 7, 2)
self.position = (0, 1)
self.controls = {}
@property
def selected(self):
try:
return self.controls[self.position]
except KeyError:
return None
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
assert value in {'text', 'graph'}
if self._mode != value:
self._mode = value
{
'text': self._update_text,
'graph': self._update_graph,
}[value]()
def move(self, event, task):
if event.pressed and event.direction == 'enter':
self._run_actions(self.selected.activate(), task)
return (0, 0)
delta = super().move(event, task)
if delta != (0, 0):
if not isinstance(self.selected, controls.HistoryStat):
self._mode = 'text'
{
'text': self._update_text,
'graph': self._update_graph,
}[self.mode]()
return delta
def _run_actions(self, actions, task):
data = self.node.slave_id
if data is None:
data = NoData
for action in actions:
if action == 'SWITCH':
self.mode = {
'text': 'graph',
'graph': 'text',
}[self.mode]
elif action == 'BACK':
task.switch_to(
task.renderers['main'], transition='zoom',
direction='out', duration=0.5,
center=task.renderers['main'].position)
break
else:
task.send_control(action, data)
def _update_text(self, *, restart=True):
self.text = array(
draw_text(self.selected.label,
font='small.pil',
padding=(8, 3, 8, 0)))
if restart or self.offset is None:
last = 0
else:
# Ensure the text doesn't "skip" while we're rendering it by
# starting the offset cycle at the current position of the offset
# cycle (unless it's out of range)
last = next(self.offset)
if last >= self.text.shape[1] - 8:
last = 0
self.offset = iter(cycle(chain(
range(last, self.text.shape[1] - 8), range(last)
)))
def _update_graph(self):
self.graph = array(shape=(5, 8))
for x, stat in zip(reversed(range(8)), self.selected.history()):
if stat is None:
continue
# Scale the value to the vertical size
value = stat.value * self.graph.shape[0]
for y in range(5):
self.graph[4 - y, x] = (
stat.color if y < int(value) else
stat.color * Lightness(value - int(value)) if y < value else
Color('black')
)
def _update_stats(self):
if self.updated < self.node.last_seen:
for control in self.controls.values():
control.update(self.node)
self._update_text(restart=False)
self._update_graph()
self.updated = self.node.last_seen
def _render_stats(self, buf, pulse):
for (x, y), stat in self.controls.items():
buf[y, x] = stat.color
x, y = self.position
base = Color(*buf[y, x])
grad = list(base.gradient(Color('white'), steps=15))
buf[y, x] = grad[pulse]
def _render_text(self, buf):
offset = next(self.offset)
buf += self.text[:, offset:offset + 8]
def _render_graph(self, buf):
buf[3:8, :] += self.graph
def __iter__(self):
buf = array(Color('black'))
pulse = iter(bounce(range(10)))
render_mode = {
'text': self._render_text,
'graph': self._render_graph,
}
while True:
x, y = self.position
buf[:] = Color('black')
self._update_stats()
self._render_stats(buf, next(pulse))
render_mode[self.mode](buf)
yield buf.clip(0, 1)
class MasterRenderer(NodeRenderer):
"""
The :class:`MasterRenderer` is used to render the full-screen status of the
master node (when selected from the main menu). Various screens are
included, detailing the master's statistics, and providing rudimentary
control actions.
"""
def __init__(self, master):
super().__init__(master)
self.controls = {
(x, y): item
for y, row in enumerate([
[
controls.Pause(),
controls.Halt(),
controls.Resume(),
controls.StopSlaves(),
controls.KillSlaves(),
controls.StopMaster(),
controls.Placeholder(),
controls.Placeholder(),
],
[
controls.Activity(),
controls.Host(),
controls.Board(),
controls.Serial(),
controls.OS(),
controls.UpTime(),
controls.Placeholder(),
controls.Placeholder(),
],
[
controls.LastSeen(),
controls.BuildsQueue(),
controls.Disk(),
controls.Swap(),
controls.Mem(),
controls.CPUTemp(),
controls.LoadAvg(),
controls.BuildsDone(),
],
])
for x, item in enumerate(row)
}
def _update_stats(self):
self.controls[0, 1].update(self.node)
self.controls[0, 2].update(self.node)
super()._update_stats()
class SlaveRenderer(NodeRenderer):
"""
The :class:`SlaveRenderer` is used to render the full-screen status of
a build slave (when selected from the main menu). Various screens are
included, detailing the slave's statistics, and providing rudimentary
control actions.
"""
def __init__(self, slave):
super().__init__(slave)
self.controls = {
(x, y): item
for y, row in enumerate([
[
controls.Skip(),
controls.Pause(),
controls.Halt(),
controls.Resume(),
controls.StopSlave(),
controls.KillSlave(),
controls.Placeholder(),
controls.Placeholder(),
],
[
controls.Activity(),
controls.Host(),
controls.Board(),
controls.Serial(),
controls.OS(),
controls.UpTime(),
controls.ABI(),
controls.Placeholder(),
],
[
controls.LastSeen(),
controls.BuildTime(),
controls.Disk(),
controls.Swap(),
controls.Mem(),
controls.CPUTemp(),
controls.LoadAvg(),
controls.ClockSkew(),
],
])
for x, item in enumerate(row)
}
def _update_stats(self):
self.controls[0, 2].update(self.node)
super()._update_stats()
class QuitRenderer(Renderer):
"""
The :class:`QuitRenderer` is responsible for rendering the Quit? and
Terminate? options which are "below" the main screen.
"""
def __init__(self):
super().__init__()
self.limits = (0, 0, 2, 0)
self.text = None
self.update_text()
def move(self, event, task):
x, y = self.position
if event.direction == 'enter':
if x == 1:
subprocess.call(['sudo', '-n', 'reboot'])
elif x == 2:
subprocess.call(['sudo', '-n', 'poweroff'])
signal.pthread_kill(main_thread().ident, signal.SIGINT)
delta = super().move(event, task)
if event.direction == 'up':
task.switch_to(task.renderers['main'], transition='slide',
direction='down', duration=0.5)
elif delta != (0, 0):
self.update_text()
return delta
def update_text(self):
x, y = self.position
text = {
0: 'Quit?',
1: 'Reboot?',
2: 'Off?',
}[x]
self.text = array(
draw_text(text, foreground=Color('red'), padding=(8, 0, 8, 1)))
self.offset = iter(cycle(range(self.text.shape[1] - 8)))
def __iter__(self):
buf = array(Color('black'))
while True:
offset = next(self.offset)
yield self.text[:, offset:offset + 8]
|
test_autolens/point/test_point_source.py | Jammy2211/AutoLens | 114 | 11190309 | <reponame>Jammy2211/AutoLens
from os import path
import shutil
import os
import numpy as np
import autolens as al
def test__point_dataset_structures_as_dict():
point_dataset_0 = al.PointDataset(
name="source_1",
positions=al.Grid2DIrregular([[1.0, 1.0]]),
positions_noise_map=al.ValuesIrregular([1.0]),
)
point_dict = al.PointDict(point_dataset_list=[point_dataset_0])
assert point_dict["source_1"].name == "source_1"
assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
assert point_dict["source_1"].fluxes == None
assert point_dict["source_1"].fluxes_noise_map == None
point_dataset_1 = al.PointDataset(
name="source_2",
positions=al.Grid2DIrregular([[1.0, 1.0]]),
positions_noise_map=al.ValuesIrregular([1.0]),
fluxes=al.ValuesIrregular([2.0, 3.0]),
fluxes_noise_map=al.ValuesIrregular([4.0, 5.0]),
)
point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])
assert point_dict["source_1"].name == "source_1"
assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
assert point_dict["source_1"].fluxes == None
assert point_dict["source_1"].fluxes_noise_map == None
assert point_dict["source_2"].name == "source_2"
assert point_dict["source_2"].positions.in_list == [(1.0, 1.0)]
assert point_dict["source_2"].positions_noise_map.in_list == [1.0]
assert point_dict["source_2"].fluxes.in_list == [2.0, 3.0]
assert point_dict["source_2"].fluxes_noise_map.in_list == [4.0, 5.0]
assert (point_dict.positions_list[0] == np.array([1.0, 1.0])).all()
assert (point_dict.positions_list[1] == np.array([1.0, 1.0])).all()
def test__inputs_are_other_python_types__converted_correctly():
point_dataset_0 = al.PointDataset(
name="source_1", positions=[[1.0, 1.0]], positions_noise_map=[1.0]
)
point_dict = al.PointDict(point_dataset_list=[point_dataset_0])
assert point_dict["source_1"].name == "source_1"
assert point_dict["source_1"].positions.in_list == [(1.0, 1.0)]
assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
assert point_dict["source_1"].fluxes == None
assert point_dict["source_1"].fluxes_noise_map == None
point_dataset_0 = al.PointDataset(
name="source_1",
positions=[(1.0, 1.0), (2.0, 2.0)],
positions_noise_map=[1.0],
fluxes=[2.0],
fluxes_noise_map=[3.0],
)
point_dict = al.PointDict(point_dataset_list=[point_dataset_0])
assert point_dict["source_1"].name == "source_1"
assert point_dict["source_1"].positions.in_list == [(1.0, 1.0), (2.0, 2.0)]
assert point_dict["source_1"].positions_noise_map.in_list == [1.0]
assert point_dict["source_1"].fluxes.in_list == [2.0]
assert point_dict["source_1"].fluxes_noise_map.in_list == [3.0]
def test__from_json_and_output_to_json():
point_dataset_0 = al.PointDataset(
name="source_1",
positions=al.Grid2DIrregular([[1.0, 1.0]]),
positions_noise_map=al.ValuesIrregular([1.0]),
)
point_dataset_1 = al.PointDataset(
name="source_2",
positions=al.Grid2DIrregular([[1.0, 1.0]]),
positions_noise_map=al.ValuesIrregular([1.0]),
fluxes=al.ValuesIrregular([2.0, 3.0]),
fluxes_noise_map=al.ValuesIrregular([4.0, 5.0]),
)
point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])
dir_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "files")
if path.exists(dir_path):
shutil.rmtree(dir_path)
os.makedirs(dir_path)
file_path = path.join(dir_path, "point_dict.json")
point_dict.output_to_json(file_path=file_path, overwrite=True)
point_dict_via_json = al.PointDict.from_json(file_path=file_path)
assert point_dict_via_json["source_1"].name == "source_1"
assert point_dict_via_json["source_1"].positions.in_list == [(1.0, 1.0)]
assert point_dict_via_json["source_1"].positions_noise_map.in_list == [1.0]
assert point_dict_via_json["source_1"].fluxes == None
assert point_dict_via_json["source_1"].fluxes_noise_map == None
assert point_dict_via_json["source_2"].name == "source_2"
assert point_dict_via_json["source_2"].positions.in_list == [(1.0, 1.0)]
assert point_dict_via_json["source_2"].positions_noise_map.in_list == [1.0]
assert point_dict_via_json["source_2"].fluxes.in_list == [2.0, 3.0]
assert point_dict_via_json["source_2"].fluxes_noise_map.in_list == [4.0, 5.0]
|
alipay/aop/api/domain/AlipayCommerceOperationPoiVendingUploadModel.py | antopen/alipay-sdk-python-all | 213 | 11190336 | <filename>alipay/aop/api/domain/AlipayCommerceOperationPoiVendingUploadModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BusinessHoursDesc import BusinessHoursDesc
class AlipayCommerceOperationPoiVendingUploadModel(object):
def __init__(self):
self._address_desc = None
self._business_hours_desc = None
self._category_code = None
self._contact_number = None
self._enabled = None
self._entity_code = None
self._entity_name = None
self._ext_infos = None
self._latitude = None
self._longitude = None
self._upload_time = None
@property
def address_desc(self):
return self._address_desc
@address_desc.setter
def address_desc(self, value):
self._address_desc = value
@property
def business_hours_desc(self):
return self._business_hours_desc
@business_hours_desc.setter
def business_hours_desc(self, value):
if isinstance(value, list):
self._business_hours_desc = list()
for i in value:
if isinstance(i, BusinessHoursDesc):
self._business_hours_desc.append(i)
else:
self._business_hours_desc.append(BusinessHoursDesc.from_alipay_dict(i))
@property
def category_code(self):
return self._category_code
@category_code.setter
def category_code(self, value):
self._category_code = value
@property
def contact_number(self):
return self._contact_number
@contact_number.setter
def contact_number(self, value):
self._contact_number = value
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def entity_code(self):
return self._entity_code
@entity_code.setter
def entity_code(self, value):
self._entity_code = value
@property
def entity_name(self):
return self._entity_name
@entity_name.setter
def entity_name(self, value):
self._entity_name = value
@property
def ext_infos(self):
return self._ext_infos
@ext_infos.setter
def ext_infos(self, value):
self._ext_infos = value
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
self._latitude = value
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
self._longitude = value
@property
def upload_time(self):
return self._upload_time
@upload_time.setter
def upload_time(self, value):
self._upload_time = value
def to_alipay_dict(self):
params = dict()
if self.address_desc:
if hasattr(self.address_desc, 'to_alipay_dict'):
params['address_desc'] = self.address_desc.to_alipay_dict()
else:
params['address_desc'] = self.address_desc
if self.business_hours_desc:
if isinstance(self.business_hours_desc, list):
for i in range(0, len(self.business_hours_desc)):
element = self.business_hours_desc[i]
if hasattr(element, 'to_alipay_dict'):
self.business_hours_desc[i] = element.to_alipay_dict()
if hasattr(self.business_hours_desc, 'to_alipay_dict'):
params['business_hours_desc'] = self.business_hours_desc.to_alipay_dict()
else:
params['business_hours_desc'] = self.business_hours_desc
if self.category_code:
if hasattr(self.category_code, 'to_alipay_dict'):
params['category_code'] = self.category_code.to_alipay_dict()
else:
params['category_code'] = self.category_code
if self.contact_number:
if hasattr(self.contact_number, 'to_alipay_dict'):
params['contact_number'] = self.contact_number.to_alipay_dict()
else:
params['contact_number'] = self.contact_number
if self.enabled:
if hasattr(self.enabled, 'to_alipay_dict'):
params['enabled'] = self.enabled.to_alipay_dict()
else:
params['enabled'] = self.enabled
if self.entity_code:
if hasattr(self.entity_code, 'to_alipay_dict'):
params['entity_code'] = self.entity_code.to_alipay_dict()
else:
params['entity_code'] = self.entity_code
if self.entity_name:
if hasattr(self.entity_name, 'to_alipay_dict'):
params['entity_name'] = self.entity_name.to_alipay_dict()
else:
params['entity_name'] = self.entity_name
if self.ext_infos:
if hasattr(self.ext_infos, 'to_alipay_dict'):
params['ext_infos'] = self.ext_infos.to_alipay_dict()
else:
params['ext_infos'] = self.ext_infos
if self.latitude:
if hasattr(self.latitude, 'to_alipay_dict'):
params['latitude'] = self.latitude.to_alipay_dict()
else:
params['latitude'] = self.latitude
if self.longitude:
if hasattr(self.longitude, 'to_alipay_dict'):
params['longitude'] = self.longitude.to_alipay_dict()
else:
params['longitude'] = self.longitude
if self.upload_time:
if hasattr(self.upload_time, 'to_alipay_dict'):
params['upload_time'] = self.upload_time.to_alipay_dict()
else:
params['upload_time'] = self.upload_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceOperationPoiVendingUploadModel()
if 'address_desc' in d:
o.address_desc = d['address_desc']
if 'business_hours_desc' in d:
o.business_hours_desc = d['business_hours_desc']
if 'category_code' in d:
o.category_code = d['category_code']
if 'contact_number' in d:
o.contact_number = d['contact_number']
if 'enabled' in d:
o.enabled = d['enabled']
if 'entity_code' in d:
o.entity_code = d['entity_code']
if 'entity_name' in d:
o.entity_name = d['entity_name']
if 'ext_infos' in d:
o.ext_infos = d['ext_infos']
if 'latitude' in d:
o.latitude = d['latitude']
if 'longitude' in d:
o.longitude = d['longitude']
if 'upload_time' in d:
o.upload_time = d['upload_time']
return o
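# Hypothetical round-trip sketch (not part of the SDK; every field value below
# is made up): the generated setters feed to_alipay_dict(), whose output can be
# parsed back with from_alipay_dict().
def _example_round_trip():
    model = AlipayCommerceOperationPoiVendingUploadModel()
    model.entity_code = "POI-0001"
    model.entity_name = "Demo vending machine"
    model.longitude = "116.397128"
    model.latitude = "39.916527"
    return AlipayCommerceOperationPoiVendingUploadModel.from_alipay_dict(
        model.to_alipay_dict())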
|
jasmin/protocols/cli/configs.py | paradiseng/jasmin | 750 | 11190386 | """
Config file handler for 'jcli' section in jasmin.cfg
"""
import os
import logging
import binascii
from jasmin.config import ConfigFile
ROOT_PATH = os.getenv('ROOT_PATH', '/')
LOG_PATH = os.getenv('LOG_PATH', '%s/var/log/jasmin/' % ROOT_PATH)
class JCliConfig(ConfigFile):
"""Config handler for 'jcli' section"""
def __init__(self, config_file=None):
ConfigFile.__init__(self, config_file)
self.bind = self._get('jcli', 'bind', '127.0.0.1')
self.port = self._getint('jcli', 'port', 8990)
self.authentication = self._getbool('jcli', 'authentication', True)
self.admin_username = self._get('jcli', 'admin_username', 'jcliadmin')
self.admin_password = binascii.unhexlify(self._get('jcli', 'admin_password',
'<PASSWORD>'))
self.log_level = logging.getLevelName(self._get('jcli', 'log_level', 'INFO'))
self.log_file = self._get('jcli', 'log_file', '%s/jcli.log' % LOG_PATH)
self.log_rotate = self._get('jcli', 'log_rotate', 'W6')
self.log_format = self._get(
'jcli', 'log_format', '%(asctime)s %(levelname)-8s %(process)d %(message)s')
self.log_date_format = self._get('jcli', 'log_date_format', '%Y-%m-%d %H:%M:%S')
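# Illustrative jasmin.cfg fragment (an assumption for illustration; the values
# simply mirror the defaults read above, and admin_password holds the hex
# encoding of the raw password):
#
#   [jcli]
#   bind           = 127.0.0.1
#   port           = 8990
#   authentication = True
#   admin_username = jcliadmin
#   log_level      = INFO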
|
elliot/recommender/latent_factor_models/SVDpp/__init__.py | gategill/elliot | 175 | 11190409 | from .svdpp import SVDpp |
scripts/release_test/tests/simpixel.py | rec/leds | 253 | 11190476 | import common
FEATURES = 'browser',
PROJECTS = 'sim-strip.yml', 'sim-matrix.yml', 'sim-cube.yml', 'sim-circle.yml',
def run():
common.test_prompt('simpixel')
common.run_project(*PROJECTS, flag='-s')
|
tests/core/permissions/test_models.py | jeanmask/opps | 159 | 11190477 | <reponame>jeanmask/opps
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from opps.channels.models import Channel
from opps.core.permissions.models import Permission, PermissionGroup
User = get_user_model()
class PermissionModelTest(TestCase):
def test_create(self):
user = User.objects.create(username='user')
instance = Permission.objects.create(user=user)
self.assertTrue(instance)
def test_empty_get_by_user(self):
user = User.objects.create(username='another')
result = Permission.get_by_user(user)
self.assertEqual(len(result['sites_id']), 0)
self.assertEqual(len(result['all_sites_id']), 0)
self.assertEqual(len(result['channels_id']), 0)
self.assertEqual(len(result['channels_sites_id']), 0)
def test_get_by_user_with_user_permission(self):
user = User.objects.create(username='john_doe')
site = Site.objects.all()[0]
channel = Channel.objects.create(
name='Home',
slug='home',
site=site,
user=user
)
permission = Permission.objects.create(user=user)
permission.channel.add(channel)
permission.save()
result = Permission.get_by_user(user)
self.assertTrue(site.pk in result['all_sites_id'])
self.assertTrue(channel.pk in result['channels_id'])
def test_get_by_user_with_group_permission(self):
group = Group.objects.create(name='programmers')
user = User.objects.create(username='john_doe')
user.groups.add(group)
site = Site.objects.all()[0]
channel = Channel.objects.create(
name='Home',
slug='home',
site=site,
user=user
)
permission = PermissionGroup.objects.create(group=group)
permission.channel.add(channel)
permission.save()
result = Permission.get_by_user(user)
self.assertTrue(site.pk in result['all_sites_id'])
self.assertTrue(channel.pk in result['channels_id'])
|
scale/ingest/views.py | kaydoh/scale | 121 | 11190507 | """Defines the views for the RESTful ingest and Strike services"""
from __future__ import unicode_literals
import datetime
import logging
import json
import rest_framework.status as status
from rest_framework.renderers import JSONRenderer
from django.http import JsonResponse
from django.http.response import Http404
import django.utils.timezone as timezone
from rest_framework.generics import GenericAPIView, ListAPIView, ListCreateAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from job.models import Job
import util.rest as rest_util
from ingest.models import Ingest, Scan, Strike
from ingest.scan.configuration.exceptions import InvalidScanConfiguration
from ingest.scan.configuration.json.configuration_v6 import ScanConfigurationV6
from ingest.serializers import (IngestDetailsSerializerV6, IngestSerializerV6,
IngestStatusSerializerV6,
ScanSerializerV6, ScanDetailsSerializerV6,
StrikeSerializerV6, StrikeDetailsSerializerV6)
from ingest.strike.configuration.exceptions import InvalidStrikeConfiguration
from ingest.strike.configuration.json.configuration_v6 import StrikeConfigurationV6
#from queue.models import Queue
from queue.messages.requeue_jobs import create_requeue_jobs_messages, QueuedJob
from messaging.manager import CommandMessageManager
from util.rest import BadParameter
from util.rest import title_to_name
logger = logging.getLogger(__name__)
class IngestsView(ListAPIView):
"""This view is the endpoint for retrieving the list of all ingests."""
queryset = Ingest.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API"""
if self.request.version == 'v6':
return IngestSerializerV6
elif self.request.version == 'v7':
return IngestSerializerV6
def list(self, request):
"""Determine api version and call specific method
        :param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self.list_impl(request)
elif request.version == 'v7':
return self.list_impl(request)
raise Http404()
def list_impl(self, request):
"""Retrieves the list of all ingests and returns it in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
started = rest_util.parse_timestamp(request, 'started', required=False)
ended = rest_util.parse_timestamp(request, 'ended', required=False)
rest_util.check_time_range(started, ended)
ingest_statuses = rest_util.parse_string_list(request, 'status', required=False)
strike_ids = rest_util.parse_int_list(request, 'strike_id', required=False)
scan_ids = rest_util.parse_int_list(request, 'scan_id', required=False)
file_name = rest_util.parse_string(request, 'file_name', required=False)
order = rest_util.parse_string_list(request, 'order', required=False)
ingests = Ingest.objects.get_ingests(started=started, ended=ended,
statuses=ingest_statuses,
scan_ids=scan_ids,
strike_ids=strike_ids,
file_name=file_name,
order=order)
page = self.paginate_queryset(ingests)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
class IngestDetailsView(RetrieveAPIView):
"""This view is the endpoint for retrieving/updating details of an ingest."""
queryset = Ingest.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API"""
if self.request.version == 'v6':
return IngestDetailsSerializerV6
elif self.request.version == 'v7':
return IngestDetailsSerializerV6
def retrieve(self, request, ingest_id=None, file_name=None):
"""Determine api version and call specific method
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:param ingest_id: The id of the ingest
:type ingest_id: int encoded as a str
:param file_name: The name of the ingest
:type file_name: string
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6' or request.version == 'v7':
return self.retrieve_v6(request, ingest_id)
raise Http404()
def retrieve_v6(self, request, ingest_id):
"""Retrieves the details for an ingest and return them in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:param ingest_id: The id of the ingest
:type ingest_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
try:
is_staff = False
if request.user:
is_staff = request.user.is_staff
ingest = Ingest.objects.get_details(ingest_id, is_staff)
except Ingest.DoesNotExist:
raise Http404
serializer = self.get_serializer(ingest)
return Response(serializer.data)
class IngestsStatusView(ListAPIView):
"""This view is the endpoint for retrieving summarized ingest status."""
queryset = Ingest.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API"""
if self.request.version == 'v6':
return IngestStatusSerializerV6
elif self.request.version == 'v7':
return IngestStatusSerializerV6
def list(self, request):
"""Determine api version and call specific method
        :param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6' or request.version == 'v7':
return self.list_impl(request)
raise Http404()
def list_impl(self, request):
"""Retrieves the ingest status information and returns it in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
started = rest_util.parse_timestamp(request, 'started', required=False)
ended = rest_util.parse_timestamp(request, 'ended', required=False)
rest_util.check_time_range(started, ended, max_duration=datetime.timedelta(days=31))
use_ingest_time = rest_util.parse_bool(request, 'use_ingest_time', default_value=False)
ingests = Ingest.objects.get_status(started, ended, use_ingest_time)
page = self.paginate_queryset(ingests)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
class ScansProcessView(GenericAPIView):
"""This view is the endpoint for launching a scan execution to ingest"""
queryset = Scan.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API. """
if self.request.version == 'v6':
return ScanDetailsSerializerV6
elif self.request.version == 'v7':
return ScanDetailsSerializerV6
def post(self, request, scan_id=None):
"""Launches a scan to ingest from an existing scan model instance
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:param scan_id: ID for Scan record to pull configuration from
:type scan_id: int
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self._post_v6(request, scan_id)
elif request.version == 'v7':
return self._post_v6(request, scan_id)
raise Http404()
def _post_v6(self, request, scan_id=None):
"""Launches a scan to ingest from an existing scan model instance
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:param scan_id: ID for Scan record to pull configuration from
:type scan_id: int
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
ingest = rest_util.parse_bool(request, 'ingest', default_value=False)
try:
scan = Scan.objects.queue_scan(scan_id, dry_run=not ingest)
except Scan.DoesNotExist:
raise Http404
serializer = self.get_serializer(scan)
return Response(serializer.data, status=status.HTTP_201_CREATED)
class ScansView(ListCreateAPIView):
"""This view is the endpoint for retrieving the list of all Scan process."""
queryset = Scan.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API. """
if self.request.version == 'v6':
return ScanSerializerV6
elif self.request.version == 'v7':
return ScanSerializerV6
def list(self, request):
"""Retrieves the list of all Scan process and returns it in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self._list_v6(request)
elif request.version == 'v7':
return self._list_v6(request)
raise Http404()
def _list_v6(self, request):
"""Retrieves the list of all Scan process and returns it in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
started = rest_util.parse_timestamp(request, 'started', required=False)
ended = rest_util.parse_timestamp(request, 'ended', required=False)
rest_util.check_time_range(started, ended)
names = rest_util.parse_string_list(request, 'name', required=False)
order = rest_util.parse_string_list(request, 'order', required=False)
scans = Scan.objects.get_scans(started, ended, names, order)
page = self.paginate_queryset(scans)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
def create(self, request):
"""Creates a new Scan process and returns a link to the detail URL
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self._create_v6(request)
elif request.version == 'v7':
return self._create_v6(request)
raise Http404()
def _create_v6(self, request):
"""Creates a new Scan process and returns a link to the detail URL
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
title = rest_util.parse_string(request, 'title', required=True)
name = title_to_name(self.queryset, title)
description = rest_util.parse_string(request, 'description', required=False)
configuration = rest_util.parse_dict(request, 'configuration')
try:
config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()
except InvalidScanConfiguration as ex:
raise BadParameter('Scan configuration invalid: %s' % unicode(ex))
try:
scan = Scan.objects.create_scan(name, title, description, config)
except InvalidScanConfiguration as ex:
raise BadParameter('Scan configuration invalid: %s' % unicode(ex))
serializer = ScanDetailsSerializerV6(scan)
scan_url = reverse('scans_details_view', args=[scan.id], request=request)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=scan_url))
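# Illustrative POST body for creating a Scan (the field names mirror the
# parse_* calls in _create_v6 above; the configuration value is only a
# placeholder and must be a valid Scan configuration v6 document):
# {
#     "title": "Nightly ingest scan",
#     "description": "optional free text",
#     "configuration": {...}
# }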
class CancelScansView(GenericAPIView):
"""This view is the endpoint for canceling a scan in progress."""
queryset = Scan.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API. """
if self.request.version == 'v6':
return ScanSerializerV6
elif self.request.version == 'v7':
return ScanSerializerV6
def post(self, request, scan_id):
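        """Determine api version and call specific method
        :param request: the HTTP POST request
        :type request: :class:`rest_framework.request.Request`
        :param scan_id: The ID of the Scan process
        :type scan_id: int encoded as a str
        :rtype: :class:`rest_framework.response.Response`
        :returns: the HTTP response to send back to the user
        """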
try:
if self.request.version == 'v6' or self.request.version == 'v7':
return self._cancel_v6(request, scan_id)
else:
raise Http404
except Scan.DoesNotExist:
raise Http404
def _cancel_v6(self, request, scan_id):
"""Cancels a scan job
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:param scan_id: The ID of the Scan process
:type scan_id: int encoded as a str
:returns: The HTTP response to send back to the user
:rtype: :class:`rest_framework.response.Response`
"""
canceled_ids = Scan.objects.cancel_scan(scan_id)
resp_dict = {'id': scan_id, 'canceled_jobs': canceled_ids}
return JsonResponse(resp_dict, status=status.HTTP_202_ACCEPTED)
# return Response(resp_dict, status=status.HTTP_202_ACCEPTED)
# return Response(JSONRenderer().render(canceled_ids), status=status.HTTP_202_ACCEPTED)
class ScansDetailsView(GenericAPIView):
"""This view is the endpoint for retrieving/updating details of a Scan process."""
queryset = Scan.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API. """
if self.request.version == 'v6':
return ScanDetailsSerializerV6
elif self.request.version == 'v7':
return ScanDetailsSerializerV6
def get(self, request, scan_id):
"""Retrieves the details for a Scan process and return them in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:param scan_id: The ID of the Scan process
:type scan_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self._get_v6(request, scan_id)
elif request.version == 'v7':
return self._get_v6(request, scan_id)
raise Http404()
def _get_v6(self, request, scan_id):
"""Retrieves the details for a Scan process and return them in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:param scan_id: The ID of the Scan process
:type scan_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
try:
scan = Scan.objects.get_details(scan_id)
except Scan.DoesNotExist:
raise Http404
serializer = self.get_serializer(scan)
return Response(serializer.data)
def patch(self, request, scan_id):
"""Edits an existing Scan process and returns the updated details
        :param request: the HTTP PATCH request
:type request: :class:`rest_framework.request.Request`
:param scan_id: The ID of the Scan process
:type scan_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self._patch_v6(request, scan_id)
elif request.version == 'v7':
return self._patch_v6(request, scan_id)
raise Http404()
def _patch_v6(self, request, scan_id):
"""Edits an existing Scan process and returns the updated details
        :param request: the HTTP PATCH request
:type request: :class:`rest_framework.request.Request`
:param scan_id: The ID of the Scan process
:type scan_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
title = rest_util.parse_string(request, 'title', required=False)
description = rest_util.parse_string(request, 'description', required=False)
configuration = rest_util.parse_dict(request, 'configuration', required=False)
config = None
try:
if configuration:
config = ScanConfigurationV6(configuration, do_validate=True).get_configuration()
except InvalidScanConfiguration as ex:
raise BadParameter('Scan configuration invalid: %s' % unicode(ex))
try:
Scan.objects.edit_scan(scan_id, title, description, config)
except Scan.DoesNotExist:
raise Http404
except InvalidScanConfiguration as ex:
logger.exception('Unable to edit Scan process: %s', scan_id)
raise BadParameter(unicode(ex))
return Response(status=status.HTTP_204_NO_CONTENT)
class ScansValidationView(APIView):
"""This view is the endpoint for validating a new Scan process before attempting to actually create it"""
queryset = Scan.objects.all()
def post(self, request):
"""Validates a new Scan process and returns any warnings discovered
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self._post_v6(request)
elif request.version == 'v7':
return self._post_v6(request)
raise Http404()
def _post_v6(self, request):
"""Validates a new Scan process and returns any warnings discovered
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
configuration = rest_util.parse_dict(request, 'configuration')
# Validate the Scan configuration
validation = Scan.objects.validate_scan_v6(configuration=configuration)
resp_dict = {'is_valid': validation.is_valid, 'errors': [e.to_dict() for e in validation.errors],
'warnings': [w.to_dict() for w in validation.warnings]}
return Response(resp_dict)
class StrikesView(ListCreateAPIView):
"""This view is the endpoint for retrieving the list of all Strike process."""
queryset = Strike.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API"""
if self.request.version == 'v6':
return StrikeSerializerV6
elif self.request.version == 'v7':
return StrikeSerializerV6
def list(self, request):
"""Determine api version and call specific method
        :param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self.list_impl(request)
elif request.version == 'v7':
return self.list_impl(request)
raise Http404()
def list_impl(self, request):
"""Retrieves the list of all Strike process and returns it in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
started = rest_util.parse_timestamp(request, 'started', required=False)
ended = rest_util.parse_timestamp(request, 'ended', required=False)
rest_util.check_time_range(started, ended)
names = rest_util.parse_string_list(request, 'name', required=False)
order = rest_util.parse_string_list(request, 'order', required=False)
strikes = Strike.objects.get_strikes(started, ended, names, order)
page = self.paginate_queryset(strikes)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
def create(self, request):
"""Determine api version and call specific method
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self.create_impl_v6(request)
elif request.version == 'v7':
return self.create_impl_v6(request)
raise Http404()
def create_impl_v6(self, request):
"""Creates a new Strike process and returns a link to the detail URL
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
title = rest_util.parse_string(request, 'title', required=True)
name = title_to_name(self.queryset, title)
description = rest_util.parse_string(request, 'description', required=False)
configuration = rest_util.parse_dict(request, 'configuration')
config = None
try:
if configuration:
config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()
except InvalidStrikeConfiguration as ex:
raise BadParameter('Strike configuration invalid: %s' % unicode(ex))
try:
strike = Strike.objects.create_strike(name, title, description, config)
except InvalidStrikeConfiguration as ex:
raise BadParameter('Strike configuration invalid: %s' % unicode(ex))
# Fetch the full strike process with details
try:
strike = Strike.objects.get_details(strike.id)
except Strike.DoesNotExist:
raise Http404
serializer = StrikeDetailsSerializerV6(strike)
strike_url = reverse('strike_details_view', args=[strike.id], request=request)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=dict(location=strike_url))
class StrikeDetailsView(GenericAPIView):
"""This view is the endpoint for retrieving/updating details of a Strike process."""
queryset = Strike.objects.all()
def get_serializer_class(self):
"""Returns the appropriate serializer based off the requests version of the REST API"""
if self.request.version == 'v6':
return StrikeDetailsSerializerV6
elif self.request.version == 'v7':
return StrikeDetailsSerializerV6
def get(self, request, strike_id):
"""Determine api version and call specific method
        :param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:param strike_id: The ID of the Strike process
:type strike_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self.get_impl(request, strike_id)
elif request.version == 'v7':
return self.get_impl(request, strike_id)
raise Http404()
def get_impl(self, request, strike_id):
"""Retrieves the details for a Strike process and return them in JSON form
:param request: the HTTP GET request
:type request: :class:`rest_framework.request.Request`
:param strike_id: The ID of the Strike process
:type strike_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
try:
is_staff = False
if request.user:
is_staff = request.user.is_staff
strike = Strike.objects.get_details(strike_id, is_staff)
except Strike.DoesNotExist:
raise Http404
serializer = self.get_serializer(strike)
return Response(serializer.data)
def patch(self, request, strike_id):
"""Determine api version and call specific method
        :param request: the HTTP PATCH request
:type request: :class:`rest_framework.request.Request`
:param strike_id: The ID of the Strike process
:type strike_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self.patch_impl_v6(request, strike_id)
elif request.version == 'v7':
return self.patch_impl_v6(request, strike_id)
raise Http404()
def patch_impl_v6(self, request, strike_id):
"""Edits an existing Strike process and returns the updated details
        :param request: the HTTP PATCH request
:type request: :class:`rest_framework.request.Request`
:param strike_id: The ID of the Strike process
:type strike_id: int encoded as a str
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
title = rest_util.parse_string(request, 'title', required=False)
description = rest_util.parse_string(request, 'description', required=False)
configuration = rest_util.parse_dict(request, 'configuration', required=False)
config = None
try:
if configuration:
config = StrikeConfigurationV6(configuration, do_validate=True).get_configuration()
except InvalidStrikeConfiguration as ex:
raise BadParameter('Strike configuration invalid: %s' % unicode(ex))
try:
# must collect old config before editing strike
if config:
new_config = config.get_dict()
old_config = Strike.objects.get_details(strike_id)
Strike.objects.edit_strike(strike_id, title, description, config)
# if workspace has changed strike job must be restarted for changes to take effect
if config and old_config.configuration["workspace"] != new_config["workspace"]:
strike_job = old_config.job
Job.objects.update_jobs_to_canceled([strike_job], timezone.now())
requeue_jobs = []
requeue_jobs.append(QueuedJob(strike_job.id, strike_job.num_exes))
msg = create_requeue_jobs_messages(requeue_jobs)
CommandMessageManager().send_messages(msg)
except Strike.DoesNotExist:
raise Http404
except InvalidStrikeConfiguration as ex:
logger.exception('Unable to edit Strike process: %s', strike_id)
raise BadParameter(unicode(ex))
return Response(status=status.HTTP_204_NO_CONTENT)
class StrikesValidationView(APIView):
"""This view is the endpoint for validating a new Strike process before attempting to actually create it"""
queryset = Strike.objects.all()
def post(self, request):
"""Determine api version and call specific method
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
if request.version == 'v6':
return self.post_impl_v6(request)
elif request.version == 'v7':
return self.post_impl_v6(request)
raise Http404()
def post_impl_v6(self, request):
"""Validates a new Strike process and returns any warnings discovered
:param request: the HTTP POST request
:type request: :class:`rest_framework.request.Request`
:rtype: :class:`rest_framework.response.Response`
:returns: the HTTP response to send back to the user
"""
configuration = rest_util.parse_dict(request, 'configuration')
# Validate the Strike configuration
validation = Strike.objects.validate_strike_v6(configuration=configuration)
resp_dict = {'is_valid': validation.is_valid, 'errors': [e.to_dict() for e in validation.errors],
'warnings': [w.to_dict() for w in validation.warnings]}
return Response(resp_dict)
|
minihack/scripts/download_boxoban_levels.py | samvelyan/minihack-1 | 217 | 11190509 | <reponame>samvelyan/minihack-1
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import zipfile
import pkg_resources
DESTINATION_PATH = pkg_resources.resource_filename("minihack", "dat")
BOXOBAN_REPO_URL = (
"https://github.com/deepmind/boxoban-levels/archive/refs/heads/master.zip"
)
def download_boxoban_levels():
print("Downloading Boxoban levels...")
os.system(
f"wget -c --read-timeout=5 --tries=0 "
f'"{BOXOBAN_REPO_URL}" -P {DESTINATION_PATH}'
)
print("Boxoban levels downloaded, unpacking...")
zip_file = os.path.join(DESTINATION_PATH, "master.zip")
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(DESTINATION_PATH)
os.remove(zip_file)
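# Illustrative helper (not part of the original script): counts the level
# files that were unpacked, assuming the GitHub archive extracts into a
# "boxoban-levels-master" directory under DESTINATION_PATH, which is how
# GitHub names branch archives.
def count_extracted_level_files():
    extracted_root = os.path.join(DESTINATION_PATH, "boxoban-levels-master")
    total = 0
    for _dirpath, _dirnames, filenames in os.walk(extracted_root):
        # The upstream repository stores levels as plain-text .txt files.
        total += sum(1 for name in filenames if name.endswith(".txt"))
    return total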
if __name__ == "__main__":
download_boxoban_levels()
|
the_archive/archived_rapids_blog_notebooks/azureml/rapids/init_dask.py | ssayyah/notebooks-contrib | 155 | 11190526 | import sys
import os
import argparse
import time
import threading
import subprocess
import socket
from mpi4py import MPI
from azureml.core import Run
from notebook.notebookapp import list_running_servers
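# flush() tails a child process's stdout, mirroring every line both to this
# process's stdout and to a dedicated log file until the child exits. It is
# either called inline (to block on a long-running process) or run in a
# background thread so several processes can be streamed at once.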
def flush(proc, proc_log):
while True:
proc_out = proc.stdout.readline()
if proc_out == '' and proc.poll() is not None:
proc_log.close()
break
elif proc_out:
sys.stdout.write(proc_out)
proc_log.write(proc_out)
proc_log.flush()
if __name__ == '__main__':
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
ip = socket.gethostbyname(socket.gethostname())
parser = argparse.ArgumentParser()
parser.add_argument("--datastore")
parser.add_argument("--n_gpus_per_node")
parser.add_argument("--jupyter_token")
args = parser.parse_args()
n_gpus_per_node = eval(args.n_gpus_per_node)
print("number of GPUs per node:", n_gpus_per_node)
print("- my rank is ", rank)
print("- my ip is ", ip)
if rank == 0:
cluster = {
"scheduler" : ip + ":8786",
"dashboard" : ip + ":8787"
}
scheduler = cluster["scheduler"]
dashboard = cluster["dashboard"]
else:
cluster = None
cluster = comm.bcast(cluster, root=0)
scheduler = cluster["scheduler"]
dashboard = cluster["dashboard"]
if rank == 0:
Run.get_context().log("headnode", ip)
Run.get_context().log("cluster",
"scheduler: {scheduler}, dashboard: {dashboard}".format(scheduler=cluster["scheduler"],
dashboard=cluster["dashboard"]))
Run.get_context().log("datastore", args.datastore)
cmd = ("jupyter lab --ip 0.0.0.0 --port 8888" + \
" --NotebookApp.token={token}" + \
" --allow-root --no-browser").format(token=args.jupyter_token)
jupyter_log = open("jupyter_log.txt", "a")
jupyter_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
jupyter_flush = threading.Thread(target=flush, args=(jupyter_proc, jupyter_log))
jupyter_flush.start()
while not list(list_running_servers()):
time.sleep(5)
jupyter_servers = list(list_running_servers())
assert (len(jupyter_servers) == 1), "more than one jupyter server is running"
Run.get_context().log("jupyter",
"ip: {ip_addr}, port: {port}".format(ip_addr=ip, port=jupyter_servers[0]["port"]))
Run.get_context().log("jupyter-token", jupyter_servers[0]["token"])
cmd = "dask-scheduler " + "--port " + scheduler.split(":")[1] + " --dashboard-address " + dashboard
scheduler_log = open("scheduler_log.txt", "w")
scheduler_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
os.environ["CUDA_VISIBLE_DEVICES"] = str(list(range(n_gpus_per_node))).strip("[]")
cmd = "dask-cuda-worker " + scheduler + " --memory-limit 0"
worker_log = open("worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
worker_flush = threading.Thread(target=flush, args=(worker_proc, worker_log))
worker_flush.start()
flush(scheduler_proc, scheduler_log)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(list(range(n_gpus_per_node))).strip("[]")
cmd = "dask-cuda-worker " + scheduler + " --memory-limit 0"
worker_log = open("worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
flush(worker_proc, worker_log)
|
tests/data/photos_data.py | nicx/icloud-drive-docker | 196 | 11190590 | DATA = {
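    # Canned responses for the iCloud Photos "records/query" endpoint, keyed
    # by the query-string suffix. Each list entry pairs the request payload
    # ("data") with the mocked CloudKit response ("response").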
"query?remapEnums=True&getCurrentSyncToken=True": [
{
"data": {
"query": {"recordType": "CheckIndexingState"},
"zoneID": {"zoneName": "PrimarySync"},
},
"response": {
"records": [
{
"recordName": "_5c82ba39-fa99-4f36-ad2a-1a87028f8fa4",
"recordType": "CheckIndexingState",
"fields": {
"progress": {"value": 100, "type": "INT64"},
"state": {"value": "FINISHED", "type": "STRING"},
},
"pluginFields": {},
"recordChangeTag": "0",
"created": {
"timestamp": 1629754179814,
"userRecordName": "_10",
"deviceID": "1",
},
"modified": {
"timestamp": 1629754179814,
"userRecordName": "_10",
"deviceID": "1",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
}
],
"syncToken": "<PASSWORD>",
},
},
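        # The entries below mock the album listing (CPLAlbumByPositionLive)
        # and the per-album asset listing (CPLContainerRelationLiveByAssetDate)
        # queries.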
{
"data": {
"query": {"recordType": "CPLAlbumByPositionLive"},
"zoneID": {"zoneName": "PrimarySync"},
},
"response": {
"records": [
{
"recordName": "----Project-Root-Folder----",
"recordType": "CPLAlbum",
"fields": {
"recordModificationDate": {
"value": 1611165435221,
"type": "TIMESTAMP",
},
"sortAscending": {"value": 1, "type": "INT64"},
"sortType": {"value": 0, "type": "INT64"},
"albumType": {"value": 3, "type": "INT64"},
"position": {"value": 0, "type": "INT64"},
"sortTypeExt": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3mp3",
"created": {
"timestamp": 1600316906967,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1611168194962,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "----Root-Folder----",
"recordType": "CPLAlbum",
"fields": {
"recordModificationDate": {
"value": 1611165435221,
"type": "TIMESTAMP",
},
"sortAscending": {"value": 1, "type": "INT64"},
"sortType": {"value": 0, "type": "INT64"},
"albumType": {"value": 3, "type": "INT64"},
"position": {"value": 0, "type": "INT64"},
"sortTypeExt": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3mp2",
"created": {
"timestamp": 1442497886351,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "361FF63C0FF75A92666117B37B680CB75643625D4D9DDAA5A6E5876E606350AA",
},
"modified": {
"timestamp": 1611168194962,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "E803E065-D8A4-4398-DE23-23F8FD0886EB",
"recordType": "CPLAlbum",
"fields": {
"recordModificationDate": {
"value": 1608493740571,
"type": "TIMESTAMP",
},
"sortAscending": {"value": 1, "type": "INT64"},
"sortType": {"value": 0, "type": "INT64"},
"albumType": {"value": 0, "type": "INT64"},
"albumNameEnc": {
"value": "YWxidW0tMQ==", # album-1
"type": "ENCRYPTED_BYTES",
},
"position": {"value": 1063936, "type": "INT64"},
"sortTypeExt": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3km2",
"created": {
"timestamp": 1496340770777,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1608493742434,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "D41A228F-D89E-494A-8EEF-853D461B68CF",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "CPLAlbum",
"fields": {
"recordModificationDate": {
"value": 1608493740571,
"type": "TIMESTAMP",
},
"sortAscending": {"value": 1, "type": "INT64"},
"sortType": {"value": 0, "type": "INT64"},
"albumType": {"value": 0, "type": "INT64"},
"albumNameEnc": {
"value": "YWxidW0gMg==", # album 2
"type": "ENCRYPTED_BYTES",
},
"position": {"value": 1065984, "type": "INT64"},
"sortTypeExt": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3km7",
"created": {
"timestamp": 1546997094145,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1608493743317,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "D41A228F-D89E-494A-8EEF-853D461B68CF",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
],
"syncToken": "<PASSWORD>",
},
},
{
"data": {
"query": {
"filterBy": [
{
"fieldName": "startRank",
"fieldValue": {"type": "INT64", "value": 0},
"comparator": "EQUALS",
},
{
"fieldName": "direction",
"fieldValue": {"type": "STRING", "value": "ASCENDING"},
"comparator": "EQUALS",
},
{
"fieldName": "parentId",
"comparator": "EQUALS",
"fieldValue": {
"type": "STRING",
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
},
},
],
"recordType": "CPLContainerRelationLiveByAssetDate",
},
"resultsLimit": 200,
"desiredKeys": [
"resJPEGFullWidth",
"resJPEGFullHeight",
"resJPEGFullFileType",
"resJPEGFullFingerprint",
"resJPEGFullRes",
"resJPEGLargeWidth",
"resJPEGLargeHeight",
"resJPEGLargeFileType",
"resJPEGLargeFingerprint",
"resJPEGLargeRes",
"resJPEGMedWidth",
"resJPEGMedHeight",
"resJPEGMedFileType",
"resJPEGMedFingerprint",
"resJPEGMedRes",
"resJPEGThumbWidth",
"resJPEGThumbHeight",
"resJPEGThumbFileType",
"resJPEGThumbFingerprint",
"resJPEGThumbRes",
"resVidFullWidth",
"resVidFullHeight",
"resVidFullFileType",
"resVidFullFingerprint",
"resVidFullRes",
"resVidMedWidth",
"resVidMedHeight",
"resVidMedFileType",
"resVidMedFingerprint",
"resVidMedRes",
"resVidSmallWidth",
"resVidSmallHeight",
"resVidSmallFileType",
"resVidSmallFingerprint",
"resVidSmallRes",
"resSidecarWidth",
"resSidecarHeight",
"resSidecarFileType",
"resSidecarFingerprint",
"resSidecarRes",
"itemType",
"dataClassType",
"filenameEnc",
"originalOrientation",
"resOriginalWidth",
"resOriginalHeight",
"resOriginalFileType",
"resOriginalFingerprint",
"resOriginalRes",
"resOriginalAltWidth",
"resOriginalAltHeight",
"resOriginalAltFileType",
"resOriginalAltFingerprint",
"resOriginalAltRes",
"resOriginalVidComplWidth",
"resOriginalVidComplHeight",
"resOriginalVidComplFileType",
"resOriginalVidComplFingerprint",
"resOriginalVidComplRes",
"isDeleted",
"isExpunged",
"dateExpunged",
"remappedRef",
"recordName",
"recordType",
"recordChangeTag",
"masterRef",
"adjustmentRenderType",
"assetDate",
"addedDate",
"isFavorite",
"isHidden",
"orientation",
"duration",
"assetSubtype",
"assetSubtypeV2",
"assetHDRType",
"burstFlags",
"burstFlagsExt",
"burstId",
"captionEnc",
"locationEnc",
"locationV2Enc",
"locationLatitude",
"locationLongitude",
"adjustmentType",
"timeZoneOffset",
"vidComplDurValue",
"vidComplDurScale",
"vidComplDispValue",
"vidComplDispScale",
"vidComplVisibilityState",
"customRenderedValue",
"containerId",
"itemId",
"position",
"isKeyAsset",
],
"zoneID": {"zoneName": "PrimarySync"},
},
"response": {
"records": [
{
"recordName": "YN1v8eGiHYYZ/aKUkMuGtSf0P1BN",
"recordType": "CPLMaster",
"fields": {
"itemType": {"value": "public.jpeg", "type": "STRING"},
"resJPEGThumbFingerprint": {
"value": "DmK0xzSiAUSFrAsYYAvby7QHrMDe",
"type": "STRING",
},
"filenameEnc": {
"value": "SU1HXzMzMjguSlBH",
"type": "ENCRYPTED_BYTES",
},
"resJPEGMedRes": {
"value": {
"fileChecksum": "EeGlt2PppPTgd0Q7mp8GenIugSh7",
"size": 2194253,
"wrappingKey": "amaCdL9Z+QxfzgD4+aYATg==",
"referenceChecksum": "AQYmx+DRYXnMs0tkDZ3rorp4IB99",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/EeGlt2PppPTgd0Q7mp8GenIugSh7AQYmx-DRYXnMs0tkDZ3rorp4IB99/${f}?o=Ai6vEWSVp5w5zaBTm7XvC55prdq006u5yUW5EfZs4KLT&v=1&x=3&a=CAogvhLnXY3DD7gxkuzbuKlak-NMlKvq37s7a-beQRlkZCsSbRCbkq2nty8Ym--IqbcvIgEAUgQugSh6WgR4IB99aiYQWP8altHEtfDsXqRVOJ19O49YwikLbHn5Ha6IeAIHhXVRK7Fpa3ImBoK0z2Usv0QeZBog1G6uVLc1ZapiFVtXuoc52Ijt3dpb4J3VMIA&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=amaCdL9Z-QxfzgD4-aYATg&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=J9LA0hC_xBV3TqYvwg_zAPWPwH8",
},
"type": "ASSETID",
},
"originalOrientation": {"value": 1, "type": "INT64"},
"resJPEGMedHeight": {"value": 1559, "type": "INT64"},
"resOriginalRes": {
"value": {
"fileChecksum": "YN1v8eGiHYYZ/aKUkMuGtSf0P1BN",
"size": 2194253,
"wrappingKey": "Y40xDPUr6DmxfeoSqxaQ7A==",
"referenceChecksum": "AXKVYPcDa+9Mjvnap0ZS+p2Z24V3",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/YN1v8eGiHYYZ_aKUkMuGtSf0P1BNAXKVYPcDa-9Mjvnap0ZS-p2Z24V3/${f}?o=Ame-Q1e_1nWqIn7YG7VfVZk-XAs8bVdcHo-owaNRmfPn&v=1&x=3&a=CAogwS503Q9EkCdnzvD-kLG0VNwrlEmARONCS-hADMtqg1QSbRCckq2nty8YnO-IqbcvIgEAUgT0P1BNWgSZ24V3aiYLdjzdjGLXPtKfjwtH_PG0ralgbDDBOIftNXxyRxdhzz8OuZztNnImb65YPlo1qUOy4i7tW1pcyAZcjqS8kYfxPQD6SKIAKNk3dUid7mE&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=Y40xDPUr6DmxfeoSqxaQ7A&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=X91oiOo0Avp6TR4d27MGupd_cqY",
},
"type": "ASSETID",
},
"resJPEGMedFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resJPEGThumbHeight": {"value": 365, "type": "INT64"},
"resJPEGThumbWidth": {"value": 472, "type": "INT64"},
"resOriginalWidth": {"value": 3100, "type": "INT64"},
"resJPEGThumbFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"dataClassType": {"value": 1, "type": "INT64"},
"resOriginalFingerprint": {
"value": "YN1v8eGiHYYZ/aKUkMuGtSf0P1BN",
"type": "STRING",
},
"resJPEGMedWidth": {"value": 2016, "type": "INT64"},
"resJPEGThumbRes": {
"value": {
"fileChecksum": "DmK0xzSiAUSFrAsYYAvby7QHrMDe",
"size": 2194253,
"wrappingKey": "r7EeA3tyPsWdcECp6X9dHA==",
"referenceChecksum": "AR5TiM9Qko4rHwmoDH1BgNRVZpF4",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/DmK0xzSiAUSFrAsYYAvby7QHrMDeAR5TiM9Qko4rHwmoDH1BgNRVZpF4/${f}?o=AjM3SMy6F9O-5AWTv2HnEp_GiL7ycAx1ls3yOqypKX-3&v=1&x=3&a=CAogiUnx0vJRhNr8Xt_dbOGrxiu8gKNAz_l_8Z5TVGmok64SbRCckq2nty8YnO-IqbcvIgEAUgQHrMObWgRVZpF4aiYwQUojYj2kyD-EyrtVjkw5sVJ60NK0x8nKjsjNXzTYH__dA6VcCHImQduy0Vis9tCiB3ox2KXKiyf3NOaih9TbQ8KfJ8H_8sFzdtXHw8I&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=r7EeA3tyPsWdcECp6X9dHA&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=m2Z7uOxYG9iNHiAZbLm6OE2O6hE",
},
"type": "ASSETID",
},
"resOriginalFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resOriginalHeight": {"value": 2398, "type": "INT64"},
"resJPEGMedFingerprint": {
"value": "EeGlt2PppPTgd0Q7mp8GenIugSh7",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3pjq",
"created": {
"timestamp": 1596388681827,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1619641691167,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "ENKzBUr+DdmTaP/GEAglTurWtsen",
"recordType": "CPLMaster",
"fields": {
"itemType": {"value": "public.jpeg", "type": "STRING"},
"resJPEGThumbFingerprint": {
"value": "ASy6f/leU1+xkR1aPmQyvYmwHUpE",
"type": "STRING",
},
"filenameEnc": {
"value": "SU1HXzMzMjcuSlBH",
"type": "ENCRYPTED_BYTES",
},
"resJPEGMedRes": {
"value": {
"fileChecksum": "NOHzBUr+DdmTaP/SAVglTurWtsen",
"size": 2194253,
"wrappingKey": "<KEY>
"referenceChecksum": "Ab5Vyk36t2jwuON7WSxvon/DvGtK",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ARKzBUr-DdmTaP_SAVglTurWtsmrAb5Vyk36t2jwuON7WSxvon_DvGtK/${f}?o=AnHR_fOkcKyPuLPOCXcp9C52pM-oZefmc9efp0e0ahAO&v=1&x=3&a=CAogf7lfgZsLeoZdnUfnSzBMLzy9WrbD7vMgHjmY9CI7_uESbRCmkq2nty8Ypu-IqbcvIgEAUgTWtsmrWgTDvGtKaiYANfDoXLBqjbu3_O1AGa62AuKbnBEsqXqysujWIiFYxe-i-AiEb3ImWrZCs4OP45m3SoQL7fh49dD-aHcXkEMAfevtQ6xh5-RH-5bq3sQ&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=eqUQfTajfGXgQHbBJh8Qwg&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=ci4ad9AWukocHK1gYXbJrx-Ok9M",
},
"type": "ASSETID",
},
"originalOrientation": {"value": 1, "type": "INT64"},
"resJPEGMedHeight": {"value": 1788, "type": "INT64"},
"resOriginalRes": {
"value": {
"fileChecksum": "ENKzBUr+DdmTaP/GEAglTurWtsen",
"size": 2194253,
"wrappingKey": "tPtP2Y7mGQ4yOsOCMFG/sg==",
"referenceChecksum": "Ad0I2qxdsqlGuSLlqtTBgoKndHE/",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/AUvKU8j-Z5pfqGI_fe-9tibuqfVRAd0I2qxdsqlGuSLlqtTBgoKndHE_/${f}?o=AuoILQZ6O-MHJ-g-prxkKJNvAz0wU24Va6re5l5JIhrW&v=1&x=3&a=CAogiV6FTwlLeQt348ipvPuax8JBYrtL7o0q7WMX775pR4YSbRCmkq2nty8Ypu-IqbcvIgEAUgTuqfVRWgSndHE_aiZN21K0DzyVdoR0roYdRIUTUdT16tIhKWq2fJfrIDzHjd0YU3MhW3ImUfLx8SZ3FmkyDDQA-J5nJkGVtdKMsxmegM4H68EIUA9-idz8C-g&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=tPtP2Y7mGQ4yOsOCMFG_sg&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=4MygIoxG8NYN-VK3zWB-a-wqy7c",
},
"type": "ASSETID",
},
"resJPEGMedFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resJPEGThumbHeight": {"value": 419, "type": "INT64"},
"resJPEGThumbWidth": {"value": 412, "type": "INT64"},
"resOriginalWidth": {"value": 2268, "type": "INT64"},
"resJPEGThumbFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"dataClassType": {"value": 1, "type": "INT64"},
"resOriginalFingerprint": {
"value": "ENKzBUr+DdmTaP/GEAglTurWtsen",
"type": "STRING",
},
"resJPEGMedWidth": {"value": 1759, "type": "INT64"},
"resJPEGThumbRes": {
"value": {
"fileChecksum": "ASy6f/leU1+xkR1aPmQyvYmwHUpE",
"size": 2194253,
"wrappingKey": "E1zCp4gxgoHQNQHWjS3Wag==",
"referenceChecksum": "ARHOzkI3sbX3SZDmNQgttNJ9DqQa",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ASy6f_leU1-xkR1aPmQyvYmwHUpEARHOzkI3sbX3SZDmNQgttNJ9DqQa/${f}?o=AiE0LlRJclp9DPkfhwdmJUfxo_vgP7JLWn3qtvPUeTuS&v=1&x=3&a=CAogroLnmMZUEfwNczwEl6zmt6YvBGhwJnPwcJogyD0-dYESbRCmkq2nty8Ypu-IqbcvIgEAUgSwHUpEWgR9DqQaaiZ5SoSlUQbaa-uaRlv6ga8Vyh14lIf466mlURl-3DYa6jr_6SsjQnImlqDZof_hQbcHONiYRrB9MXnKpJ9akb7rPc8_GAwPduNtPHAhBBk&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=E1zCp4gxgoHQNQHWjS3Wag&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=5uuV1dFCaVoK0EhvreVHqYZiBNM",
},
"type": "ASSETID",
},
"resOriginalFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resOriginalHeight": {"value": 2306, "type": "INT64"},
"resJPEGMedFingerprint": {
"value": "NOHzBUr+DdmTaP/SAVglTurWtsen",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3pjr",
"created": {
"timestamp": 1596388681829,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1619641691167,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "AUxVFT2yVsQ5739tmU5c1497duFD",
"recordType": "CPLMaster",
"fields": {
"itemType": {"value": "public.jpeg", "type": "STRING"},
"resJPEGThumbFingerprint": {
"value": "ASPVZ/Pft6gIN2VEA/oUbqQzh6Wy",
"type": "STRING",
},
"filenameEnc": {
"value": "SU1HXzMzMjIuSlBH",
"type": "ENCRYPTED_BYTES",
},
"resJPEGMedRes": {
"value": {
"fileChecksum": "ASTSuc7S58IPmVCJIUslbeCRjsno",
"size": 2194253,
"wrappingKey": "<KEY>==",
"referenceChecksum": "ASIoOBi88potAS0gE8tfnojuSlrb",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ASTSuc7S58IPmVCJIUslbeCRjsnoASIoOBi88potAS0gE8tfnojuSlrb/${f}?o=AotG4ZrSZDU3u6kP4yWDFXfZ3_6tKEyLXvh4gcpo4ELn&v=1&x=3&a=CAogJr6QVcZkN2p9iAoOF0Tr4qnsmeCHSHrFoTzxEHq7NQUSbRCwkq2nty8YsO-IqbcvIgEAUgSRjsnoWgTuSlrbaibzwCZQjqQ5JaCOYReCcRWatftcle04VKPDs1BnZT75v_W_X7tuyXImMWPY8r1ICHzS3Us89foRJ0jtqcXwvVd3nT7EM6EPexOdJAI4_qQ&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=ysHoAuqERA8H3MadxO6-PA&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=TiejMZ9ftbHECNhJzeowymOGu3I",
},
"type": "ASSETID",
},
"originalOrientation": {"value": 1, "type": "INT64"},
"resJPEGMedHeight": {"value": 2101, "type": "INT64"},
"resOriginalRes": {
"value": {
"fileChecksum": "AUxVFT2yVsQ5739tmU5c1497duFD",
"size": 2194253,
"wrappingKey": "8ydPkuUeW1rXYBf+8EUhWQ==",
"referenceChecksum": "AdkoZP1534bwlULpwCdn2fd44LAt",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/AUxVFT2yVsQ5739tmU5c1497duFDAdkoZP1534bwlULpwCdn2fd44LAt/${f}?o=AtyLCU3HpSWYfXnAPtzDXkhqPhQVX_2KI1m03qQMcrX8&v=1&x=3&a=CAoguSOx9xIzgn8O-3JZPJEFmuCqSpNEdkQUdkK-kdTjfyQSbRCwkq2nty8YsO-IqbcvIgEAUgR7duFDWgR44LAtaiYyf1-bnKqpPXGMfJ_iZeMO0Ar6T3qqD2Nwc5hia_fAPn-qOLNguXImMEz0ks6Sun_tBea1p7Gs39vk_ERXdi-KrSpKwpkhrUNPNhAl3t4&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=8ydPkuUeW1rXYBf-8EUhWQ&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=hYKaX0XrV6ghQqijbnjdyNejnec",
},
"type": "ASSETID",
},
"resJPEGMedFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resJPEGThumbHeight": {"value": 492, "type": "INT64"},
"resJPEGThumbWidth": {"value": 351, "type": "INT64"},
"resOriginalWidth": {"value": 2899, "type": "INT64"},
"resJPEGThumbFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"dataClassType": {"value": 1, "type": "INT64"},
"resOriginalFingerprint": {
"value": "AUxVFT2yVsQ5739tmU5c1497duFD",
"type": "STRING",
},
"resJPEGMedWidth": {"value": 1498, "type": "INT64"},
"resJPEGThumbRes": {
"value": {
"fileChecksum": "ASPVZ/Pft6gIN2VEA/oUbqQzh6Wy",
"size": 2194253,
"wrappingKey": "3EAjgkS2+Mr38eqQFk7C0A==",
"referenceChecksum": "AXd258pYF6LLhmADLoZAumNqI+8M",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ASPVZ_Pft6gIN2VEA_oUbqQzh6WyAXd258pYF6LLhmADLoZAumNqI-8M/${f}?o=AtA8VtFypX8-ayXsVGPnssT56G8EM8ZdWr3nFmug2dPM&v=1&x=3&a=CAogZYCSkK2TW_pwByMq7sMg791XyNwx5u0lgyxvV0cKs3YSbRCxkq2nty8Yse-IqbcvIgEAUgQzh6WyWgRqI-8MaibbeMKtZNDpNkTWx6jOoPY4npOZt2t0xHw4QfV3u2b-KI47DIvSOXImJp7IX_oKObj3XLk-Gvvg-9z_e9JYfqbOJyUNAxz_e5wdxr6wQxs&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=3EAjgkS2-Mr38eqQFk7C0A&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=PbVsX_tz5q6QJFzf_TxHZ1SH79I",
},
"type": "ASSETID",
},
"resOriginalFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resOriginalHeight": {"value": 4067, "type": "INT64"},
"resJPEGMedFingerprint": {
"value": "ASTSuc7S58IPmVCJIUslbeCRjsno",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3pjs",
"created": {
"timestamp": 1595274371241,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1619641691167,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "Ab/8kUAhnGzSxnl9yWvh8JKBpOvV",
"recordType": "CPLMaster",
"fields": {
"itemType": {"value": "public.jpeg", "type": "STRING"},
"resJPEGThumbFingerprint": {
"value": "AQNND5zpteAXnnBP2BmDd0ropjY9",
"type": "STRING",
},
"filenameEnc": {
"value": "SU1HXzMyMDYuSlBH",
"type": "ENCRYPTED_BYTES",
},
"resJPEGMedRes": {
"value": {
"fileChecksum": "ATTRy6p+Q3U1HqcF6BUKrrOMnjvn",
"size": 2194253,
"wrappingKey": "<KEY>
"referenceChecksum": "ATqG89bMsXhtmMRMw009uhyJc/Kh",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ATTRy6p-Q3U1HqcF6BUKrrOMnjvnATqG89bMsXhtmMRMw009uhyJc_Kh/${f}?o=AovA4TUyNl2kYkqOdInhEXGZ_6Lgkx1fTEsqpkkMh3hm&v=1&x=3&a=CAogVnr-sKWlefxaxarlxJ-k7EPRB-Q851T9df9zyhCvis0SbRC7kq2nty8Yu--IqbcvIgEAUgSMnjvnWgSJc_KhaiZCiMEZuykdl4ex2Ra8y53DbEEtJi6ItoX1e6b8TOoWXYiLA-mkr3Im7aDvMFg_m7tYuslgLZFXL8hxJftHL4oTy1ZpuVaP__2nTQTPLp4&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=pYDpdhqdaL9SAxHilZEj3Q&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=IKz0oqClwHpM9shdTb3e5liYV5E",
},
"type": "ASSETID",
},
"originalOrientation": {"value": 1, "type": "INT64"},
"resJPEGMedHeight": {"value": 1781, "type": "INT64"},
"resOriginalRes": {
"value": {
"fileChecksum": "Ab/8kUAhnGzSxnl9yWvh8JKBpOvV",
"size": 2194253,
"wrappingKey": "YIFaf0awZsX16khQaJ5pHw==",
"referenceChecksum": "AVLSGMHt+PAQ9/krqqfXATNX57d5",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/Ab_8kUAhnGzSxnl9yWvh8JKBpOvVAVLSGMHt-PAQ9_krqqfXATNX57d5/${f}?o=AvHBwurT0LTKni3mzYNLu4FnSeLeXfxYgSThZ4ImxjO8&v=1&x=3&a=CAogTIPbEVbPukTNLTRMbKPr3KEw-OwlmwJ6E2P4TWSVmS0SbRC7kq2nty8Yu--IqbcvIgEAUgSBpOvVWgRX57d5aibAoDs2oxjwpsMmZzKDj2ndE0sAhXdcwzBu-U_oZGpb059mW6D0dnImIjbNA_Bqcyw_VKQmNxeLtnGtGwyFB16OPwFKYcs1KsSFvHFAD7Y&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=YIFaf0awZsX16khQaJ5pHw&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=1bdqdYxBN6JqLAkjMyHSEGNkqDA",
},
"type": "ASSETID",
},
"resJPEGMedFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resJPEGThumbHeight": {"value": 417, "type": "INT64"},
"resJPEGThumbWidth": {"value": 413, "type": "INT64"},
"resOriginalWidth": {"value": 1995, "type": "INT64"},
"resJPEGThumbFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"dataClassType": {"value": 1, "type": "INT64"},
"resOriginalFingerprint": {
"value": "Ab/8kUAhnGzSxnl9yWvh8JKBpOvV",
"type": "STRING",
},
"resJPEGMedWidth": {"value": 1765, "type": "INT64"},
"resJPEGThumbRes": {
"value": {
"fileChecksum": "AQNND5zpteAXnnBP2BmDd0ropjY9",
"size": 2194253,
"wrappingKey": "lxLQBw46n1nvea4s30UY+A==",
"referenceChecksum": "AV2Zh7WygJu74eNWVuuMT4lM8qme",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/AQNND5zpteAXnnBP2BmDd0ropjY9AV2Zh7WygJu74eNWVuuMT4lM8qme/${f}?o=ArcD2SL9b5Gy5zcnrnT2luycDRZzFLjOiX-8u9IWdQM2&v=1&x=3&a=CAogfF6-sW-XhnsFDy-vqHQjR8LXVO4OmxBUqG4CZf1zmOwSbRC8kq2nty8YvO-IqbcvIgEAUgTopjY9WgRM8qmeaibiG79B2YhfcchV4W9EgxQXAN4Bpi57NX82WXqo_YW-xi1qLAH9-HImRd8oYhd7r27sXPkUL3GT-rKGSKG-leLeNevi3ay090liNNZH-2U&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=lxLQBw46n1nvea4s30UY-A&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=34vgVK6vLEdlpYceHAmIfqIp1Fk",
},
"type": "ASSETID",
},
"resOriginalFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resOriginalHeight": {"value": 2013, "type": "INT64"},
"resJPEGMedFingerprint": {
"value": "ATTRy6p+Q3U1HqcF6BUKrrOMnjvn",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "2wtv",
"created": {
"timestamp": 1574869225887,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595187513229,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "AVx3/VKkbWPdNbWw68mrWzSuemXg",
"recordType": "CPLMaster",
"fields": {
"itemType": {"value": "public.jpeg", "type": "STRING"},
"resJPEGThumbFingerprint": {
"value": "ARpHiouI3Ib/ziuZYTCiSikohvMY",
"type": "STRING",
},
"filenameEnc": {
"value": "SU1HXzMxNDguSlBH",
"type": "ENCRYPTED_BYTES",
},
"resJPEGMedRes": {
"value": {
"fileChecksum": "ARZd/GzpY62XRtXt+jP6UsV4fBZH",
"size": 2194253,
"wrappingKey": "dmZqsyvxEA4s3CvifNMApA==",
"referenceChecksum": "ATi6BbOzDuHl6RONNFCub9eqZqSm",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ARZd_GzpY62XRtXt-jP6UsV4fBZHATi6BbOzDuHl6RONNFCub9eqZqSm/${f}?o=AkMi62tflbXgUrgSyQZ94SinXG9TYXZ6tydGOmDQx9HG&v=1&x=3&a=CAogc8yqpMgxKnf363cp0n1CujaxnsWY_KrZ3VEN9QchlhcSbRDGkq2nty8Yxu-IqbcvIgEAUgR4fBZHWgSqZqSmaiaxcg1zIsiESwGaEOecYR84r83ltACA6SY5ypGyvYxKD0M3LmqI8HIm7n2S2UL6EBM2Z3a9YFIGX8MrKABFDMA5TXFPUVUP6AfsnKigVMc&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=dmZqsyvxEA4s3CvifNMApA&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=rYmwpGBg6DPYHSGj6UAOnCfuMPk",
},
"type": "ASSETID",
},
"originalOrientation": {"value": 1, "type": "INT64"},
"resJPEGMedHeight": {"value": 1747, "type": "INT64"},
"resOriginalRes": {
"value": {
"fileChecksum": "AVx3/VKkbWPdNbWw68mrWzSuemXg",
"size": 2194253,
"wrappingKey": "Nz2a7ohpe3KPptCk0J0lWA==",
"referenceChecksum": "AdUIDFzHC2rVOvwTz0jPi/tKihnb",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/AVx3_VKkbWPdNbWw68mrWzSuemXgAdUIDFzHC2rVOvwTz0jPi_tKihnb/${f}?o=AksMTyqi4NosuW50ei90oXcv82fP1r-6QocLorp20RpO&v=1&x=3&a=CAogfvU0-_8L-3qRcy6jZsj3Vuqt4aL2rk5xVXF7lwVV6A8SbRDGkq2nty8Yxu-IqbcvIgEAUgSuemXgWgRKihnbaiZoWboa3qYl3KVDo1VGIHrRDoySixw8lzXtf1Y-AnoVN1Pd4hLkPnImXYuLGS8iK7BRJcQg25R5hk54OD04duy2TscnYu1mACOSERXpXEI&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=Nz2a7ohpe3KPptCk0J0lWA&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=t3NT5mCLmsRjPqAGvROVsMrAjfg",
},
"type": "ASSETID",
},
"resJPEGMedFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resJPEGThumbHeight": {"value": 409, "type": "INT64"},
"resJPEGThumbWidth": {"value": 421, "type": "INT64"},
"resOriginalWidth": {"value": 2132, "type": "INT64"},
"resJPEGThumbFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"dataClassType": {"value": 1, "type": "INT64"},
"resOriginalFingerprint": {
"value": "AVx3/VKkbWPdNbWw68mrWzSuemXg",
"type": "STRING",
},
"resJPEGMedWidth": {"value": 1799, "type": "INT64"},
"resJPEGThumbRes": {
"value": {
"fileChecksum": "ARpHiouI3Ib/ziuZYTCiSikohvMY",
"size": 2194253,
"wrappingKey": "UiIQr3rRvyIcoAz/sxDugQ==",
"referenceChecksum": "ARtMrcvA8cbMefPDnmwSWQwe+mBd",
# pylint: disable=C0321
"downloadURL": "https://cvws.icloud-content.com/B/ARpHiouI3Ib_ziuZYTCiSikohvMYARtMrcvA8cbMefPDnmwSWQwe-mBd/${f}?o=Auh2MA-6wuqdRGUDQ4kZL3fuuuMVWVVnTnTcThej9ad5&v=1&x=3&a=CAogaHp1wKKc8QF3MI-2OrLYdQx8V4PIVZvFQyuN1m6pXFMSbRDHkq2nty8Yx--IqbcvIgEAUgQohvMYWgQe-mBdaibQsOQuSEfHUK0xs9nLWG6nHKAvRCwkkmsvXL1Ku9aCARYpDg4mWHImDCoL_RiyOC-KXU_0Jpntuid9MdC08bvpHUp5hkzlctbjsBvT654&e=1629757781&fl=&r=4d5c62f6-c81b-4e60-a785-4139aad087a7-1&k=UiIQr3rRvyIcoAz_sxDugQ&ckc=com.apple.photos.cloud&ckz=PrimarySync&y=1&p=104&s=Rx2sZoVhs_Phm_Ps3RvVwJ2mgvA",
},
"type": "ASSETID",
},
"resOriginalFileType": {
"value": "public.jpeg",
"type": "STRING",
},
"resOriginalHeight": {"value": 2070, "type": "INT64"},
"resJPEGMedFingerprint": {
"value": "ARZd/GzpY62XRtXt+jP6UsV4fBZH",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "2wwt",
"created": {
"timestamp": 1572285894332,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595187518048,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "248AFAAE-062C-40BB-92C6-B47084527A9E",
"recordType": "CPLAsset",
"fields": {
"assetDate": {"value": 1596386628825, "type": "TIMESTAMP"},
"orientation": {"value": 1, "type": "INT64"},
"addedDate": {"value": 1596386628833, "type": "TIMESTAMP"},
"assetSubtypeV2": {"value": 0, "type": "INT64"},
"assetHDRType": {"value": 0, "type": "INT64"},
"timeZoneOffset": {"value": -25200, "type": "INT64"},
"masterRef": {
"value": {
"recordName": "YN1v8eGiHYYZ/aKUkMuGtSf0P1BN",
"action": "DELETE_SELF",
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
"type": "REFERENCE",
},
"adjustmentRenderType": {"value": 0, "type": "INT64"},
"vidComplDispScale": {"value": 0, "type": "INT64"},
"isHidden": {"value": 0, "type": "INT64"},
"duration": {"value": 0, "type": "INT64"},
"burstFlags": {"value": 0, "type": "INT64"},
"assetSubtype": {"value": 0, "type": "INT64"},
"vidComplDurScale": {"value": 0, "type": "INT64"},
"vidComplDurValue": {"value": 0, "type": "INT64"},
"vidComplVisibilityState": {"value": 0, "type": "INT64"},
"customRenderedValue": {"value": 0, "type": "INT64"},
"isFavorite": {"value": 0, "type": "INT64"},
"vidComplDispValue": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3qdm",
"created": {
"timestamp": 1596388681828,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1622829385777,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "32187DDB-371D-4616-A311-8A3ACA0FA5FE",
"recordType": "CPLAsset",
"fields": {
"assetDate": {"value": 1596386602837, "type": "TIMESTAMP"},
"orientation": {"value": 1, "type": "INT64"},
"addedDate": {"value": 1596386602843, "type": "TIMESTAMP"},
"assetSubtypeV2": {"value": 0, "type": "INT64"},
"assetHDRType": {"value": 0, "type": "INT64"},
"timeZoneOffset": {"value": -25200, "type": "INT64"},
"masterRef": {
"value": {
"recordName": "ENKzBUr+DdmTaP/GEAglTurWtsen",
"action": "DELETE_SELF",
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
"type": "REFERENCE",
},
"adjustmentRenderType": {"value": 0, "type": "INT64"},
"vidComplDispScale": {"value": 0, "type": "INT64"},
"isHidden": {"value": 0, "type": "INT64"},
"duration": {"value": 0, "type": "INT64"},
"burstFlags": {"value": 0, "type": "INT64"},
"assetSubtype": {"value": 0, "type": "INT64"},
"vidComplDurScale": {"value": 0, "type": "INT64"},
"vidComplDurValue": {"value": 0, "type": "INT64"},
"vidComplVisibilityState": {"value": 0, "type": "INT64"},
"customRenderedValue": {"value": 0, "type": "INT64"},
"isFavorite": {"value": 0, "type": "INT64"},
"vidComplDispValue": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3h64",
"created": {
"timestamp": 1596388681830,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1596466239177,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "D41A228F-D89E-494A-8EEF-853D461B68CF",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "BEF79215-3125-45B2-A54C-39E47DBFB23A",
"recordType": "CPLAsset",
"fields": {
"assetDate": {"value": 1595267003960, "type": "TIMESTAMP"},
"orientation": {"value": 1, "type": "INT64"},
"addedDate": {"value": 1595267004003, "type": "TIMESTAMP"},
"assetSubtypeV2": {"value": 0, "type": "INT64"},
"assetHDRType": {"value": 0, "type": "INT64"},
"timeZoneOffset": {"value": -25200, "type": "INT64"},
"masterRef": {
"value": {
"recordName": "AUxVFT2yVsQ5739tmU5c1497duFD",
"action": "DELETE_SELF",
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
"type": "REFERENCE",
},
"adjustmentRenderType": {"value": 0, "type": "INT64"},
"vidComplDispScale": {"value": 0, "type": "INT64"},
"isHidden": {"value": 0, "type": "INT64"},
"duration": {"value": 0, "type": "INT64"},
"burstFlags": {"value": 0, "type": "INT64"},
"assetSubtype": {"value": 0, "type": "INT64"},
"vidComplDurScale": {"value": 0, "type": "INT64"},
"vidComplDurValue": {"value": 0, "type": "INT64"},
"vidComplVisibilityState": {"value": 0, "type": "INT64"},
"customRenderedValue": {"value": 0, "type": "INT64"},
"isFavorite": {"value": 0, "type": "INT64"},
"vidComplDispValue": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "3gwf",
"created": {
"timestamp": 1595274371245,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595274371245,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "97362090-90E4-4F54-A564-14F7ECC02706",
"recordType": "CPLAsset",
"fields": {
"assetDate": {"value": 1574840964935, "type": "TIMESTAMP"},
"orientation": {"value": 1, "type": "INT64"},
"addedDate": {"value": 1574840964970, "type": "TIMESTAMP"},
"assetSubtypeV2": {"value": 0, "type": "INT64"},
"assetHDRType": {"value": 0, "type": "INT64"},
"timeZoneOffset": {"value": 19800, "type": "INT64"},
"masterRef": {
"value": {
"recordName": "Ab/8kUAhnGzSxnl9yWvh8JKBpOvV",
"action": "DELETE_SELF",
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
"type": "REFERENCE",
},
"adjustmentRenderType": {"value": 0, "type": "INT64"},
"vidComplDispScale": {"value": 0, "type": "INT64"},
"isHidden": {"value": 0, "type": "INT64"},
"duration": {"value": 0, "type": "INT64"},
"burstFlags": {"value": 0, "type": "INT64"},
"assetSubtype": {"value": 0, "type": "INT64"},
"vidComplDurScale": {"value": 0, "type": "INT64"},
"vidComplDurValue": {"value": 0, "type": "INT64"},
"vidComplVisibilityState": {"value": 0, "type": "INT64"},
"customRenderedValue": {"value": 0, "type": "INT64"},
"isFavorite": {"value": 0, "type": "INT64"},
"vidComplDispValue": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "2wtw",
"created": {
"timestamp": 1574869225891,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595187513229,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "986F607D-E8E0-45C4-B230-EF37B7E40B77",
"recordType": "CPLAsset",
"fields": {
"assetDate": {"value": 1572285626625, "type": "TIMESTAMP"},
"orientation": {"value": 1, "type": "INT64"},
"addedDate": {"value": 1572285626635, "type": "TIMESTAMP"},
"assetSubtypeV2": {"value": 0, "type": "INT64"},
"assetHDRType": {"value": 0, "type": "INT64"},
"timeZoneOffset": {"value": -25200, "type": "INT64"},
"masterRef": {
"value": {
"recordName": "AVx3/VKkbWPdNbWw68mrWzSuemXg",
"action": "DELETE_SELF",
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
"type": "REFERENCE",
},
"adjustmentRenderType": {"value": 0, "type": "INT64"},
"vidComplDispScale": {"value": 0, "type": "INT64"},
"isHidden": {"value": 0, "type": "INT64"},
"duration": {"value": 0, "type": "INT64"},
"burstFlags": {"value": 0, "type": "INT64"},
"assetSubtype": {"value": 0, "type": "INT64"},
"vidComplDurScale": {"value": 0, "type": "INT64"},
"vidComplDurValue": {"value": 0, "type": "INT64"},
"vidComplVisibilityState": {"value": 0, "type": "INT64"},
"customRenderedValue": {"value": 0, "type": "INT64"},
"isFavorite": {"value": 0, "type": "INT64"},
"vidComplDispValue": {"value": 0, "type": "INT64"},
},
"pluginFields": {},
"recordChangeTag": "2wwu",
"created": {
"timestamp": 1572285894335,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595187518048,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "248AFAAE-062C-40BB-92C6-B47084527A9E-IN-E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "CPLContainerRelation",
"fields": {
"itemId": {
"value": "248AFAAE-062C-40BB-92C6-B47084527A9E",
"type": "STRING",
},
"isKeyAsset": {"value": 0, "type": "INT64"},
"position": {"value": 6144, "type": "INT64"},
"containerId": {
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3h4p",
"created": {
"timestamp": 1596388681830,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1596388681830,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "32187DDB-371D-4616-A311-8A3ACA0FA5FE-IN-E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "CPLContainerRelation",
"fields": {
"itemId": {
"value": "32187DDB-371D-4616-A311-8A3ACA0FA5FE",
"type": "STRING",
},
"isKeyAsset": {"value": 0, "type": "INT64"},
"position": {"value": 5120, "type": "INT64"},
"containerId": {
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3h4q",
"created": {
"timestamp": 1596388681830,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1596388681830,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "BEF79215-3125-45B2-A54C-39E47DBFB23A-IN-E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "CPLContainerRelation",
"fields": {
"itemId": {
"value": "BEF79215-3125-45B2-A54C-39E47DBFB23A",
"type": "STRING",
},
"isKeyAsset": {"value": 0, "type": "INT64"},
"position": {"value": 4096, "type": "INT64"},
"containerId": {
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3gwj",
"created": {
"timestamp": 1595274371883,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595274371883,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "97362090-90E4-4F54-A564-14F7ECC02706-IN-E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "CPLContainerRelation",
"fields": {
"itemId": {
"value": "97362090-90E4-4F54-A564-14F7ECC02706",
"type": "STRING",
},
"isKeyAsset": {"value": 0, "type": "INT64"},
"position": {"value": 3072, "type": "INT64"},
"containerId": {
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3f4w",
"created": {
"timestamp": 1574869421700,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595190034254,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
{
"recordName": "986F607D-E8E0-45C4-B230-EF37B7E40B77-IN-E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "CPLContainerRelation",
"fields": {
"itemId": {
"value": "986F607D-E8E0-45C4-B230-EF37B7E40B77",
"type": "STRING",
},
"isKeyAsset": {"value": 0, "type": "INT64"},
"position": {"value": 2048, "type": "INT64"},
"containerId": {
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"type": "STRING",
},
},
"pluginFields": {},
"recordChangeTag": "3f7a",
"created": {
"timestamp": 1572285894336,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"modified": {
"timestamp": 1595190039047,
"userRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"deviceID": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
},
],
"syncToken": "<PASSWORD>",
},
},
{
"data": {
"query": {
"filterBy": [
{
"fieldName": "startRank",
"fieldValue": {"type": "INT64", "value": 5},
"comparator": "EQUALS",
},
{
"fieldName": "direction",
"fieldValue": {"type": "STRING", "value": "ASCENDING"},
"comparator": "EQUALS",
},
{
"fieldName": "parentId",
"comparator": "EQUALS",
"fieldValue": {
"type": "STRING",
"value": "E4RT4FB7-4A35-4958-1D42-5769E66BE407",
},
},
],
"recordType": "CPLContainerRelationLiveByAssetDate",
},
"resultsLimit": 200,
"desiredKeys": [
"resJPEGFullWidth",
"resJPEGFullHeight",
"resJPEGFullFileType",
"resJPEGFullFingerprint",
"resJPEGFullRes",
"resJPEGLargeWidth",
"resJPEGLargeHeight",
"resJPEGLargeFileType",
"resJPEGLargeFingerprint",
"resJPEGLargeRes",
"resJPEGMedWidth",
"resJPEGMedHeight",
"resJPEGMedFileType",
"resJPEGMedFingerprint",
"resJPEGMedRes",
"resJPEGThumbWidth",
"resJPEGThumbHeight",
"resJPEGThumbFileType",
"resJPEGThumbFingerprint",
"resJPEGThumbRes",
"resVidFullWidth",
"resVidFullHeight",
"resVidFullFileType",
"resVidFullFingerprint",
"resVidFullRes",
"resVidMedWidth",
"resVidMedHeight",
"resVidMedFileType",
"resVidMedFingerprint",
"resVidMedRes",
"resVidSmallWidth",
"resVidSmallHeight",
"resVidSmallFileType",
"resVidSmallFingerprint",
"resVidSmallRes",
"resSidecarWidth",
"resSidecarHeight",
"resSidecarFileType",
"resSidecarFingerprint",
"resSidecarRes",
"itemType",
"dataClassType",
"filenameEnc",
"originalOrientation",
"resOriginalWidth",
"resOriginalHeight",
"resOriginalFileType",
"resOriginalFingerprint",
"resOriginalRes",
"resOriginalAltWidth",
"resOriginalAltHeight",
"resOriginalAltFileType",
"resOriginalAltFingerprint",
"resOriginalAltRes",
"resOriginalVidComplWidth",
"resOriginalVidComplHeight",
"resOriginalVidComplFileType",
"resOriginalVidComplFingerprint",
"resOriginalVidComplRes",
"isDeleted",
"isExpunged",
"dateExpunged",
"remappedRef",
"recordName",
"recordType",
"recordChangeTag",
"masterRef",
"adjustmentRenderType",
"assetDate",
"addedDate",
"isFavorite",
"isHidden",
"orientation",
"duration",
"assetSubtype",
"assetSubtypeV2",
"assetHDRType",
"burstFlags",
"burstFlagsExt",
"burstId",
"captionEnc",
"locationEnc",
"locationV2Enc",
"locationLatitude",
"locationLongitude",
"adjustmentType",
"timeZoneOffset",
"vidComplDurValue",
"vidComplDurScale",
"vidComplDispValue",
"vidComplDispScale",
"vidComplVisibilityState",
"customRenderedValue",
"containerId",
"itemId",
"position",
"isKeyAsset",
],
"zoneID": {"zoneName": "PrimarySync"},
},
"response": {
"records": [],
"syncToken": "<PASSWORD>",
},
},
],
"query/batch?remapEnums=True&getCurrentSyncToken=True": [
{
"data": {
"batch": [
{
"resultsLimit": 1,
"query": {
"filterBy": {
"fieldName": "indexCountID",
"fieldValue": {
"type": "STRING_LIST",
"value": [
"CPLContainerRelationNotDeletedByAssetDate:E4RT4FB7-4A35-4958-1D42-5769E66BE407"
],
},
"comparator": "IN",
},
"recordType": "HyperionIndexCountLookup",
},
"zoneWide": True,
"zoneID": {"zoneName": "PrimarySync"},
}
]
},
"response": {
"batch": [
{
"records": [
{
# pylint: disable=C0321
"recordName": "CPLContainerRelationNotDeletedByAssetDate:E4RT4FB7-4A35-4958-1D42-5769E66BE407",
"recordType": "IndexCountResult",
"fields": {"itemCount": {"value": 5, "type": "INT64"}},
"pluginFields": {},
"recordChangeTag": "0",
"created": {
"timestamp": 1629754181247,
"userRecordName": "_10",
"deviceID": "1",
},
"modified": {
"timestamp": 1629754181247,
"userRecordName": "_10",
"deviceID": "1",
},
"deleted": False,
"zoneID": {
"zoneName": "PrimarySync",
"ownerRecordName": "_1d5r3c201b3a4r5daac8ff7e7fbc0c23",
"zoneType": "REGULAR_CUSTOM_ZONE",
},
}
],
"syncToken": "AQ<PASSWORD>//////////<PASSWORD>",
}
]
},
}
],
"https://cvws.icloud-content.com/B/": [],
}
|
gitplus/cmd_git_semver.py | tkrajina/git-plus | 170 | 11190629 | #!/usr/bin/env python3
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
from . import git
from . import semver
from typing import *
git.assert_in_git_repository()
parser = argparse.ArgumentParser(description='List / update semver tags')
parser.add_argument('--major', action='store_true', default=False, help='Increase major version')
parser.add_argument('--minor', action='store_true', default=False, help='Increase minor version')
parser.add_argument('--patch', action='store_true', default=False, help='Increase patch version')
parser.add_argument('--suffix', type=str, default="", help='Suffix (for example v1.2.3-suffix)')
args = parser.parse_args()
incr_major: bool = args.major
incr_minor: bool = args.minor
incr_patch: bool = args.patch
suffix: str = args.suffix
versions = semver.get_all_versions_ordered(output_non_versions=True)
max_version = versions[-1] if versions else semver.Version("v0.0.0", "v", 0, 0, 0, "")
if not versions:
print("No tags")
sys.exit(0)
for n, version in enumerate(versions):
if n > 0:
previous_ver = versions[n-1]
if previous_ver.major != version.major:
print(f"New major: {version.major}")
if previous_ver.minor != version.minor:
print(f"New minor: {version.major}.{version.minor}")
print(f" * {version.tag}")
print()
print(f"Last version: {max_version.tag}")
print()
if incr_major or incr_minor or incr_patch:
if incr_patch:
max_version.patch += 1
if incr_minor:
max_version.minor += 1
max_version.patch = 0
if incr_major:
max_version.major += 1
max_version.minor = 0
max_version.patch = 0
new_tag = f'{max_version.prefix}{max_version.major}.{max_version.minor}.{max_version.patch}'
if suffix:
new_tag += "-" + suffix
print(f"Creating new version/tag: {new_tag}")
success, output = git.execute_git(f"tag {new_tag}")
if not success:
print(f'Error creating tag: {output}')
sys.exit(1)
print(f'Tag {new_tag} created, you can push it now') |
tests/test_02_app/app_with_installs/app/main.py | coinforensics/uwsgi-nginx-docker | 597 | 11190640 | import sys
from flask import Flask
application = Flask(__name__)
@application.route("/")
def hello():
version = "{}.{}".format(sys.version_info.major, sys.version_info.minor)
message = "Hello World from Nginx uWSGI Python {} app in a Docker container".format(
version
)
return message
|
tests/integrational/native_sync/test_fire.py | panchiwalashivani/python | 146 | 11190648 | from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import pn_vcr
from pubnub.structures import Envelope
from pubnub.pubnub import PubNub
from pubnub.models.consumer.pubsub import PNFireResult
from pubnub.models.consumer.common import PNStatus
@pn_vcr.use_cassette('tests/integrational/fixtures/native_sync/publish/fire_get.yaml',
filter_query_parameters=['uuid', 'seqn', 'pnsdk'])
def test_single_channel():
config = pnconf_copy()
pn = PubNub(config)
chan = 'unique_sync'
envelope = pn.fire().channel(chan).message('bla').sync()
assert(isinstance(envelope, Envelope))
assert not envelope.status.is_error()
assert isinstance(envelope.result, PNFireResult)
assert isinstance(envelope.status, PNStatus)
|
interactive.py | bckim92/sequential-knowledge-transformer | 135 | 11190675 | import os
import math
from pprint import PrettyPrinter
import random
import numpy as np
import torch
import sklearn
import tensorflow as tf
import better_exceptions
from tqdm import tqdm, trange
import colorlog
import colorful
from utils.etc_utils import set_logger, set_tcmalloc, set_gpus, check_none_gradients
from utils import config_utils, custom_argparsers
from models import MODELS
from modules.checkpoint_tracker import CheckpointTracker
from modules.trainer import run_wow_evaluation, Trainer
from modules.from_parlai import download_from_google_drive, unzip
from data.wizard_of_wikipedia import WowDatasetReader
from data.interactive_helper import (
TopicsGenerator,
WikiTfidfRetriever,
InteractiveInputProcessor
)
from data.interactive_world import InteractiveWorld
from data import vocabulary as data_vocab
better_exceptions.hook()
_command_args = config_utils.CommandArgs()
pprint = PrettyPrinter().pprint
def main():
# Argument passing/parsing
args, model_args = config_utils.initialize_argparser(
MODELS, _command_args, custom_argparsers.DialogArgumentParser)
hparams, hparams_dict = config_utils.create_or_load_hparams(
args, model_args, args.cfg)
pprint(hparams_dict)
if hparams.test_mode == 'wow':
os.makedirs('./tmp', exist_ok=True)
if not os.path.exists('tmp/wow_pretrained'):
fname = 'wow_pretrained.zip'
gd_id = '1lkF1QENr45j0vl-Oja3wEiqkxoNTxkXT'
colorlog.info(f"Download pretrained checkpoint {fname}")
download_from_google_drive(gd_id, os.path.join('tmp', fname))
unzip('tmp', fname)
ckpt_fname = os.path.join('tmp/wow_pretrained', 'ckpt-46070')
else:
raise ValueError("Only 'wow' is currently supported")
# Set environment variables & gpus
set_logger()
set_gpus(hparams.gpus)
set_tcmalloc()
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus, 'GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# Set random seed
#tf.random.set_seed(hparams.random_seed)
#np.random.seed(hparams.random_seed)
#random.seed(hparams.random_seed)
# Set gpu
assert hparams.num_gpus == 1
mirrored_strategy = None
# Make dataset reader
os.makedirs(hparams.cache_dir, exist_ok=True)
reader = WowDatasetReader(
hparams.batch_size, hparams.num_epochs,
buffer_size=hparams.buffer_size,
bucket_width=hparams.bucket_width,
max_length=hparams.max_length,
max_episode_length=hparams.max_episode_length,
max_knowledge=hparams.max_knowledge,
knowledge_truncate=hparams.knowledge_truncate,
cache_dir=hparams.cache_dir,
bert_dir=hparams.bert_dir,
)
train_dataset, iters_in_train = reader.read('train', mirrored_strategy)
test_dataset, iters_in_test = reader.read('test', mirrored_strategy)
vocabulary = reader.vocabulary
# Build model & optimizer & trainer
model = MODELS[hparams.model](hparams, vocabulary)
optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
clipnorm=hparams.clipnorm)
trainer = Trainer(model, optimizer, mirrored_strategy,
hparams.enable_function,
WowDatasetReader.remove_pad)
# Setup checkpoint
global_step = tf.compat.v1.train.get_or_create_global_step()
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
model=model,
optimizer_step=global_step)
train_example = next(iter(train_dataset))
_ = trainer.train_step(train_example)
checkpoint.restore(ckpt_fname)
# Load retriever and input processor
dictionary = reader._dictionary
tokenize_fn = lambda x: [data_vocab.BERT_CLS_ID] \
+ dictionary.convert_tokens_to_ids(dictionary.tokenize(x)) \
+ [data_vocab.BERT_SEP_ID]
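    # Editor's note: the lambda above builds BERT-style inputs, i.e. it returns
    # [CLS id] + token ids produced by the BERT dictionary + [SEP id].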
input_processor = InteractiveInputProcessor(tokenize_fn, 5)
# Compile graph
colorlog.info("Compile model")
dummy_input = input_processor.get_dummy_input()
for _ in trange(5, ncols=70):
trainer.test_step(dummy_input)
# Module for interactive mode
wiki_tfidf_retriever = WikiTfidfRetriever(hparams.cache_dir)
topics_generator = TopicsGenerator(hparams.cache_dir)
interactive_world = InteractiveWorld(
responder=trainer,
input_processor=input_processor,
wiki_retriever=wiki_tfidf_retriever,
topics_generator=topics_generator
)
# Loop!
while True:
interactive_world.run()
interactive_world.reset()
if __name__ == '__main__':
main()
|
test/programytest/config/brain/test_tokenizer.py | cdoebler1/AIML2 | 345 | 11190683 | import unittest
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.brain.tokenizer import BrainTokenizerConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
class BrainTokenizerConfigurationTests(unittest.TestCase):
def test_with_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
tokenizer:
classname: programy.utils.language.chinese.ChineseLanguage
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
tokenizer_config = BrainTokenizerConfiguration()
tokenizer_config.load_config_section(yaml, brain_config, ".")
self.assertEqual("programy.utils.language.chinese.ChineseLanguage", tokenizer_config.classname)
def test_with_default_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
tokenizer:
classname: programy.utils.language.default.DefaultLangauge
split_chars: .:'
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
tokenizer_config = BrainTokenizerConfiguration()
tokenizer_config.load_config_section(yaml, brain_config, ".")
self.assertEqual("programy.utils.language.default.DefaultLangauge", tokenizer_config.classname)
self.assertEqual(".:'", tokenizer_config.split_chars)
def test_without_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
tokenizer:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
tokenizer_config = BrainTokenizerConfiguration()
tokenizer_config.load_config_section(yaml, brain_config, ".")
def test_with_no_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
tokenizer_config = BrainTokenizerConfiguration()
tokenizer_config.load_config_section(yaml, brain_config, ".")
def test_defaults(self):
tokenizer_config = BrainTokenizerConfiguration()
data = {}
tokenizer_config.to_yaml(data, True)
BrainTokenizerConfigurationTests.assert_defaults(self, data)
@staticmethod
def assert_defaults(test, data):
test.assertEqual(data['classname'], "programy.dialog.tokenizer.tokenizer.Tokenizer")
test.assertEqual(data['split_chars'], ' ')
|
app/utils/proxy.py | ErictheSam/ASoulCnki | 384 | 11190686 | # timer
from threading import Timer
from requests import get
class ProxyPool():
def __init__(self, proxyConfig, interval=True, intervalRange=60):
'''
        @param proxyConfig: proxy mapping in the same format as the `proxies` argument of requests.get()
        @param interval: whether to start the periodic availability check right away
        @param intervalRange: period of the availability-check timer in seconds, default is 60s
'''
self.proxyConfig = proxyConfig
self.nowProxy = {}
self.intervalRange = intervalRange
if (interval):
Timer(intervalRange, self.flush).start()
def get(self):
'''
get now proxy, if no proxy, return {}
'''
return self.nowProxy
def setDirect(self):
'''
        Fall back to a direct connection (no proxy) when no proxy is usable.
'''
self.nowProxy = {}
def flush(self):
# use requests.get() to test proxy
def isProxyUsable():
testUrl = 'https://baidu.com'
try:
get(testUrl, proxies=self.proxyConfig, timeout=5)
return True
except Exception:
return False
if isProxyUsable():
self.nowProxy = self.proxyConfig
else:
self.nowProxy = {}
Timer(self.intervalRange, self.flush).start()
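
# --- Editor's sketch (not part of the original module): a minimal usage example.
# The proxy address below is a hypothetical placeholder.
if __name__ == "__main__":
    example_config = {
        "http": "http://127.0.0.1:8080",   # assumed local proxy endpoint
        "https": "http://127.0.0.1:8080",
    }
    pool = ProxyPool(example_config, interval=False)  # no background timer for this one-off demo
    # Callers would normally pass the pool's current view straight to requests, e.g.
    #   requests.get(url, proxies=pool.get(), timeout=5)
    print(pool.get())  # prints {} until a flush() has confirmed the proxy is reachable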
|
sympy/parsing/autolev/test-examples/ruletest10.py | iamabhishek0/sympy | 603 | 11190689 |
import sympy.physics.mechanics as me
import sympy as sm
import math as m
import numpy as np
x, y = me.dynamicsymbols('x y')
a, b = sm.symbols('a b', real=True)
e = a*(b*x+y)**2
m = sm.Matrix([e,e]).reshape(2, 1)
e = e.expand()
m = sm.Matrix([i.expand() for i in m]).reshape((m).shape[0], (m).shape[1])
e = sm.factor(e, x)
m = sm.Matrix([sm.factor(i,x) for i in m]).reshape((m).shape[0], (m).shape[1])
eqn = sm.Matrix([[0]])
eqn[0] = a*x+b*y
eqn = eqn.row_insert(eqn.shape[0], sm.Matrix([[0]]))
eqn[eqn.shape[0]-1] = 2*a*x-3*b*y
print(sm.solve(eqn,x,y))
rhs_y = sm.solve(eqn,x,y)[y]
e = (x+y)**2+2*x**2
e.collect(x)
a, b, c = sm.symbols('a b c', real=True)
m = sm.Matrix([a,b,c,0]).reshape(2, 2)
m2 = sm.Matrix([i.subs({a:1,b:2,c:3}) for i in m]).reshape((m).shape[0], (m).shape[1])
eigvalue = sm.Matrix([i.evalf() for i in (m2).eigenvals().keys()])
eigvec = sm.Matrix([i[2][0].evalf() for i in (m2).eigenvects()]).reshape(m2.shape[0], m2.shape[1])
frame_n = me.ReferenceFrame('n')
frame_a = me.ReferenceFrame('a')
frame_a.orient(frame_n, 'Axis', [x, frame_n.x])
frame_a.orient(frame_n, 'Axis', [sm.pi/2, frame_n.x])
c1, c2, c3 = sm.symbols('c1 c2 c3', real=True)
v=c1*frame_a.x+c2*frame_a.y+c3*frame_a.z
point_o = me.Point('o')
point_p = me.Point('p')
point_o.set_pos(point_p, c1*frame_a.x)
v = (v).express(frame_n)
point_o.set_pos(point_p, (point_o.pos_from(point_p)).express(frame_n))
frame_a.set_ang_vel(frame_n, c3*frame_a.z)
print(frame_n.ang_vel_in(frame_a))
point_p.v2pt_theory(point_o,frame_n,frame_a)
particle_p1 = me.Particle('p1', me.Point('p1_pt'), sm.Symbol('m'))
particle_p2 = me.Particle('p2', me.Point('p2_pt'), sm.Symbol('m'))
particle_p2.point.v2pt_theory(particle_p1.point,frame_n,frame_a)
point_p.a2pt_theory(particle_p1.point,frame_n,frame_a)
body_b1_cm = me.Point('b1_cm')
body_b1_cm.set_vel(frame_n, 0)
body_b1_f = me.ReferenceFrame('b1_f')
body_b1 = me.RigidBody('b1', body_b1_cm, body_b1_f, sm.symbols('m'), (me.outer(body_b1_f.x,body_b1_f.x),body_b1_cm))
body_b2_cm = me.Point('b2_cm')
body_b2_cm.set_vel(frame_n, 0)
body_b2_f = me.ReferenceFrame('b2_f')
body_b2 = me.RigidBody('b2', body_b2_cm, body_b2_f, sm.symbols('m'), (me.outer(body_b2_f.x,body_b2_f.x),body_b2_cm))
g = sm.symbols('g', real=True)
force_p1 = particle_p1.mass*(g*frame_n.x)
force_p2 = particle_p2.mass*(g*frame_n.x)
force_b1 = body_b1.mass*(g*frame_n.x)
force_b2 = body_b2.mass*(g*frame_n.x)
z = me.dynamicsymbols('z')
v=x*frame_a.x+y*frame_a.z
point_o.set_pos(point_p, x*frame_a.x+y*frame_a.y)
v = (v).subs({x:2*z, y:z})
point_o.set_pos(point_p, (point_o.pos_from(point_p)).subs({x:2*z, y:z}))
force_o = -1*(x*y*frame_a.x)
force_p1 = particle_p1.mass*(g*frame_n.x)+ x*y*frame_a.x
|
Ch9/CG.py | jason-168/MLCode | 146 | 11190735 |
# Code from Chapter 9 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
# The conjugate gradients algorithm
import numpy as np
def Jacobian(x):
#return np.array([.4*x[0],2*x[1]])
return np.array([x[0], 0.4*x[1], 1.2*x[2]])
def Hessian(x):
#return np.array([[.2,0],[0,1]])
return np.array([[1,0,0],[0,0.4,0],[0,0,1.2]])
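
# Editor's note (inferred from the gradient and Hessian above, not stated in the
# original): these correspond to the quadratic f(x) = 0.5*(x[0]**2 + 0.4*x[1]**2
# + 1.2*x[2]**2), whose unique minimiser is (0, 0, 0), so CG should drive the
# starting point x0 towards the origin.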
def CG(x0):
i=0
k=0
r = -Jacobian(x0)
p=r
betaTop = np.dot(r.transpose(),r)
beta0 = betaTop
iMax = 3
epsilon = 10**(-2)
jMax = 5
# Restart every nDim iterations
nRestart = np.shape(x0)[0]
x = x0
while i < iMax and betaTop > epsilon**2*beta0:
j=0
dp = np.dot(p.transpose(),p)
alpha = (epsilon+1)**2
# Newton-Raphson iteration
while j < jMax and alpha**2 * dp > epsilon**2:
# Line search
alpha = -np.dot(Jacobian(x).transpose(),p) / (np.dot(p.transpose(),np.dot(Hessian(x),p)))
print "N-R",x, alpha, p
x = x + alpha * p
j += 1
print x
# Now construct beta
r = -Jacobian(x)
print "r: ", r
betaBottom = betaTop
betaTop = np.dot(r.transpose(),r)
beta = betaTop/betaBottom
print "Beta: ",beta
# Update the estimate
p = r + beta*p
print "p: ",p
print "----"
k += 1
if k==nRestart or np.dot(r.transpose(),p) <= 0:
p = r
k = 0
print "Restarting"
i +=1
    print(x)
x0 = np.array([-2,2,-2])
CG(x0)
|
vilt/utils/write_nlvr2.py | kris927b/ViLT | 587 | 11190743 | import json
import pandas as pd
import pyarrow as pa
import os
from tqdm import tqdm
from collections import defaultdict
def process(root, iden, row):
texts = [r["sentence"] for r in row]
labels = [r["label"] for r in row]
split = iden.split("-")[0]
if iden.startswith("train"):
directory = row[0]["directory"]
path = f"{root}/images/train/{directory}/{iden}"
else:
path = f"{root}/{split}/{iden}"
with open(f"{path}-img0.png", "rb") as fp:
img0 = fp.read()
with open(f"{path}-img1.png", "rb") as fp:
img1 = fp.read()
return [img0, img1, texts, labels, iden]
def make_arrow(root, dataset_root):
train_data = list(
map(json.loads, open(f"{root}/nlvr2/data/train.json").readlines())
)
test1_data = list(
map(json.loads, open(f"{root}/nlvr2/data/test1.json").readlines())
)
dev_data = list(map(json.loads, open(f"{root}/nlvr2/data/dev.json").readlines()))
balanced_test1_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/balanced/balanced_test1.json").readlines(),
)
)
balanced_dev_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/balanced/balanced_dev.json").readlines(),
)
)
unbalanced_test1_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/unbalanced/unbalanced_test1.json").readlines(),
)
)
unbalanced_dev_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/unbalanced/unbalanced_dev.json").readlines(),
)
)
splits = [
"train",
"dev",
"test1",
"balanced_dev",
"balanced_test1",
"unbalanced_dev",
"unbalanced_test1",
]
datas = [
train_data,
dev_data,
test1_data,
balanced_dev_data,
balanced_test1_data,
unbalanced_dev_data,
unbalanced_test1_data,
]
annotations = dict()
for split, data in zip(splits, datas):
_annot = defaultdict(list)
for row in tqdm(data):
_annot["-".join(row["identifier"].split("-")[:-1])].append(row)
annotations[split] = _annot
for split in splits:
bs = [
process(root, iden, row) for iden, row in tqdm(annotations[split].items())
]
dataframe = pd.DataFrame(
bs, columns=["image_0", "image_1", "questions", "answers", "identifier"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/nlvr2_{split}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
|
sky/legacy/titletests.py | Asteur/sky_python_crawler | 325 | 11190747 |
import numpy
# TITLE TESTS
try:
from .training import Training
from .helper import *
from .findTitle import getTitle
except SystemError:
from training import Training
from helper import *
from findTitle import getTitle
# title = normalize(''.join([x for x in z.xpath('//span[@class="x-nc-sel1"]/text()')]))
# body = normalize(''.join([x for x in z.xpath('//span[@class="x-nc-sel2"]/text()')]))
with open('/Users/pascal/Downloads/L3S-GN1-20100130203947-00001/url-mapping.txt') as f:
urlmapping = {x[10:46] : [x[48:], extractDomain(x[48:])] for x in f.read().split('\n')}
template = '/Users/pascal/Downloads/L3S-GN1-20100130203947-00001/annotated/{}.html'
domains = {}
for h in urlmapping:
if h:
with open(template.format(h)) as f:
html = f.read()
tr = makeTree(html, h)
if urlmapping[h][1] not in domains:
domains[urlmapping[h][1]] = []
domains[urlmapping[h][1]].append((urlmapping[h][0], h, tr))
filtered_domains = {}
for x in domains:
if len(domains[x]) > 1:
filtered_domains[x] = domains[x]
def f1(x,y):
try:
z = sum([w in y for w in x]) / len(x)
z2 = sum([w in x for w in y]) / len(y)
return (2 * z * z2) / (z + z2)
except:
return 0
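
# Editor's note: f1 above is the harmonic mean of the two directed word-overlap
# ratios (a set-level F1). Hypothetical example: x = {'a','b','c'}, y = {'b','c','d'}
# gives z = 2/3, z2 = 2/3, hence f1 = 2*(2/3)*(2/3) / (2/3 + 2/3) = 2/3.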
# 0.86593087007951342
#def fn(num):
num = 5
it = 0
s = 0
fscores = []
fouten = 0
printing = True
wrongs = []
for x in filtered_domains:
for case in filtered_domains[x]:
try:
t = case[2]
realy = normalize(''.join([x for x in t.xpath('//span[@class="x-nc-sel1"]/text()')]))
#ys = [x['text'] for x in getTitle(t,False)]
ys = [getTitle(t)]
#if any([z in y for z in x.split('.')]):
if any(['-' in y or '|' in y for y in ys]):
fscores.append(1)
it += 1
continue
f = f1(set(re.sub('[^a-zA-Z0-9]+', ' ', ys[0].lower()).split()), set(re.sub('[^a-zA-Z0-9]+', ' ', realy.lower()).split()))
fscores.append(f)
y = " ".join(re.sub('[^a-zA-Z0-9]+', ' ', ys[0].lower()).split())
realy = " ".join(re.sub('[^a-zA-Z0-9]+', ' ', realy.lower()).split())
if f < 1:
fouten += 1
wrongs.append(it)
if fouten == num and printing:
a = t
view_tree(t)
printing = False
'a'+1
print(realy, '-----------', y)
print('\n'.join([(x['xpath'] + ' ||| ' + x['text']) for x in getTitle(t, False)]))
print(case)
it += 1
except IndexError:
pass
#return np.mean(fscores)
it = 0
s = 0
fscores = []
for x in domains:
for case in domains[x]:
try:
t = case[2]
realy = normalize(''.join([x for x in t.xpath('//span[@class="x-nc-sel1"]/text()')]))
y = getTitle(t)
#print(realy, '----------', y)
it += 1
#if any([z in y for z in x.split('.')]):
f = f1(set(re.sub('[^a-zA-Z0-9]+', ' ', y.lower()).split()), set(re.sub('[^a-zA-Z0-9]+', ' ', realy.lower()).split()))
fscores.append(f)
y = " ".join(re.sub('[^a-zA-Z0-9]+', ' ', y.lower()).split())
if y.startswith('jeremy'):
'a'+1
realy = " ".join(re.sub('[^a-zA-Z0-9]+', ' ', realy.lower()).split())
if f < 1:
print(realy, '-----------', y)
except IndexError:
pass
# for meta in doc.xpath('//meta[re:test(@name, "description", "i")]', namespaces={"re": "http://exslt.org/regular-expressions"}):
# print(html.tostring(meta, pretty_print=True))
|
benchmarks/django-workload/uwsgi/files/django-workload/django_workload/feed_timeline.py | jonasbn/cloudsuite | 103 | 11190783 |
class FeedTimeline(object):
def __init__(self, request):
self.request = request
def get_timeline(self):
user = self.request.user
feed = user.feed_entries().limit(20)
user_info = user.json_data
result = {
'num_results': len(feed),
'items': [
{
'pk': str(e.id),
'comment_count': e.comment_count,
'published': e.published.timestamp(),
'user': user_info
}
for e in feed]
}
return result
def post_process(self, result):
item_list = result['items']
conf = FeedTimelineConfig()
# duplicate the data
for i in range(conf.mult_factor):
conf.list_extend(item_list)
sorted_list = sorted(conf.get_list(),
key=lambda x: x['published'],
reverse=True)
final_items = []
for item in sorted_list:
conf.user = item['user']['name']
conf.comments_total = conf.comments_total + item['comment_count']
conf.comments_per_user[conf.user] = item['comment_count']
# un-duplicate the data
exists = False
for final_item in final_items:
if final_item['pk'] == item['pk']:
exists = True
break
if not exists:
final_items.append(item)
result['comments_total'] = int(conf.comments_total / conf.mult_factor)
result['items'] = final_items
return result
class FeedTimelineConfig(object):
def __init__(self):
# Number of times the original items list is duplicated in order
# to make the view more Python intensive
self.mult_factor = 5
self.work_list = []
self.user = ""
self.comments_total = 0
self.comments_per_user = {}
def list_extend(self, l):
self.work_list.extend(l)
def get_list(self):
return self.work_list
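
# --- Editor's sketch (not part of the original view code): a quick, standalone
# exercise of post_process() with made-up sample data; `request` is unused there,
# so None is passed.
if __name__ == "__main__":
    timeline = FeedTimeline(request=None)
    sample = {
        "num_results": 1,
        "items": [
            {"pk": "1", "comment_count": 3, "published": 1000.0, "user": {"name": "alice"}},
        ],
    }
    out = timeline.post_process(sample)
    print(out["comments_total"], len(out["items"]))  # expected: 3 1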
|
mercari/main_mx.py | lammda/mercari-solution | 249 | 11190786 |
import sys
from mercari.config import TEST_SIZE
from mercari.datasets_mx import (
prepare_vectorizer_1, prepare_vectorizer_3, prepare_vectorizer_2,
)
from mercari.main_helpers import main
from mercari.mx_sparse import MXRegression, MXRegressionClf
def define_models_1(n_jobs, seed):
h0 = 256 # reduced from 384 due to kaggle slowdown
n_epoch = 3 if TEST_SIZE == 1 else 1
models = [
MXRegression(
n_hidden=(h0, 128, 64), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=1e-3, loss='huber'),
MXRegression(
n_hidden=(h0, 128, 64), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=1e-3, loss='huber'),
MXRegressionClf(
n_hidden=(h0, 128), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=2e-4),
MXRegressionClf(
n_hidden=(h0, 128), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=2e-4),
]
for i, model in enumerate(models, seed * 100):
model.seed = i
return models, prepare_vectorizer_1(n_jobs=n_jobs)
def define_models_2(n_jobs, seed):
h0 = 256
n_epoch = 3 if TEST_SIZE == 1 else 1
models = [
MXRegression(
n_hidden=(h0, 128, 64), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=1e-3, loss='huber', binary_X=True),
MXRegression(
n_hidden=(h0, 128, 64), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=1e-3, loss='huber'),
MXRegressionClf(
n_hidden=(h0, 128), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=2e-4, binary_X=True),
MXRegressionClf(
n_hidden=(h0, 128), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=2e-4),
]
for i, model in enumerate(models, seed * 100):
model.seed = i
return models, prepare_vectorizer_2(n_jobs=n_jobs)
def define_models_3(n_jobs, seed):
h0 = 256
n_epoch = 3 if TEST_SIZE == 1 else 1
models = [
MXRegression(
n_hidden=(h0, 128, 64), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=1e-3, loss='huber', binary_X=True),
MXRegression(
n_hidden=(h0, 128, 64), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=1e-3, loss='huber'),
MXRegressionClf(
n_hidden=(h0, 128), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=2e-4, binary_X=True),
MXRegressionClf(
n_hidden=(h0, 128), n_epoch=n_epoch, batch_size=2 ** 11,
learning_rate=0.4e-2, reg_l2=2e-4),
]
# 4 more same models in the best submission
for i, model in enumerate(models, seed * 100):
model.seed = i
return models, prepare_vectorizer_3(n_jobs)
if __name__ == '__main__':
main(
'mx',
sys.argv[1],
{
1: define_models_1(n_jobs=4, seed=1),
2: define_models_2(n_jobs=4, seed=2),
3: define_models_3(n_jobs=4, seed=3),
},
fit_parallel='mp',
predict_parallel=None,
)
|
Source/ThirdParty/aes/python_binding/setup.py | HanasakiHonoka/ds3os | 651 | 11190792 | #!/usr/bin/env python
import platform
from distutils.core import setup, Extension
source_files = ['aesmodule.c', '../aeskey.c', '../aes_modes.c', '../aestab.c', '../aescrypt.c']
cflags = []
if platform.system() == 'Linux':
cflags.append('-Wno-sequence-point')
if platform.machine() == 'x86_64':
source_files.append('../aes_ni.c')
cflags.append('-D__PROFILE_AES__')
setup(name='aes',
version='1.0',
ext_modules=[Extension('aes', source_files, include_dirs=['..'], extra_compile_args=cflags)],
)
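
# Editor's note (standard distutils usage, not taken from this repository's docs):
# the extension is typically built in place with
#   python setup.py build_ext --inplace
# which compiles the C sources above into an importable `aes` module.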
|
tests/loan-closing/test_LoanClosing.py | DryptoBZX/contractsV2 | 177 | 11190845 | # #!/usr/bin/python3
# import pytest
# from brownie import Wei, reverts
# more complex scenario here in the future
|
scripts/automation/trex_control_plane/interactive/trex/wireless/services/trex_stl_ap.py | timgates42/trex-core | 956 | 11190861 | import base64
import random
import struct
import sys
import threading
import time
from collections import deque
from enum import Enum
import simpy
import wireless.pubsub.pubsub as pubsub
from scapy.all import *
from scapy.contrib.capwap import *
from scapy.contrib.capwap import CAPWAP_PKTS
from trex_openssl import *
# from trex_stl_lib.api import *
from trex.common.services.trex_service import Service, ServiceFilter
from trex.utils.common import PassiveTimer
from ..pubsub.broker import deregister_sub, register_sub, subscribers
from ..pubsub.message import PubSubMessage
from ..trex_wireless_ap_state import *
from ..trex_wireless_client_state import *
from .trex_wireless_service_event import WirelessServiceEvent
'''
FSMs for AP:
* Discover WLC
* Establish DTLS session
* Join WLC
* Add client (station)
* Shutdown DTLS session
* Maintenance (arp, ping, capwap echo request, fetches rx and dispatches to rx_buffer of APs)
'''
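# Editor's note: in this module the join flow is chained service-to-service,
# roughly ServiceAPDiscoverWLC -> ServiceAPEstablishDTLS -> ServiceAPJoinWLC,
# with ap.state moving APState.DISCOVER -> APState.DTLS -> APState.JOIN and each
# stage rolling back to DISCOVER on timeout or DTLS failure.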
sleep_for_packet = 0.3
sleep_for_state = 0.5
'''
Just assign services to AP, it will get packets from AP's rx_buffer
'''
class APJoinDisconnectedEvent(WirelessServiceEvent):
"""Raised when AP is deassociated from the WLC."""
def __init__(self, env, device):
service = ServiceAPRun.__name__
value = "disconnected"
super().__init__(env, device, service, value)
class APJoinConnectedEvent(WirelessServiceEvent):
"""Raised when AP is Joined to the WLC."""
def __init__(self, env, device):
service = ServiceAPRun.__name__
value = "joined"
super().__init__(env, device, service, value)
class APVAPReceivedEvent(WirelessServiceEvent):
"""Raised when AP received VAPs from the WLC."""
def __init__(self, env, device):
service = ServiceAPRun.__name__
value = "vap_received"
super().__init__(env, device, service, value)
class APDTLSEstablishedEvent(WirelessServiceEvent):
"""Raised when AP's DTLS is established with WLC."""
def __init__(self, env, device):
service = ServiceAPRun.__name__
value = "established"
super().__init__(env, device, service, value)
class ServiceFilterPerAp(ServiceFilter):
def __init__(self):
self.services_per_ap = {}
def add(self, service):
if service.ap in self.services_per_ap:
self.services_per_ap[service.ap].append(service)
else:
self.services_per_ap[service.ap] = [service]
class ServiceAP(Service):
requires_dtls = True
client_concurrent = 0
max_client_concurrent = 1
concurrent = 0
max_concurrent = float('inf') # changed when called by join_aps
def __init__(self, worker, ap, env, topics_to_subs, verbose_level=Service.WARN):
Service.__init__(self, verbose_level)
self.worker = worker
self.ap = ap
self.name = self.__class__.__name__
self.env = env
self.topics_to_subs = topics_to_subs
def raise_event(self, event):
"""Raise a WirelessServiceEvent.
Args:
event: the WirelessServiceEvent to raise
"""
self.ap.pubsub.publish(event.value, event.topics)
# create a pubsubmessage
pubsub_message = PubSubMessage(event.value, event.topics)
# if others are waiting on this event, wake them and deregister
if self.topics_to_subs:
subscriptions = subscribers(self.topics_to_subs, pubsub_message)
for sub in subscriptions:
sub.trigger()
deregister_sub(self.topics_to_subs, sub)
def stop_and_launch_service(self, service, pipe):
with self.worker.services_lock:
if self in self.worker.stl_services:
del self.worker.stl_services[self]
self.worker.stl_services[service] = {'pipe': pipe}
return self.ap.register_service(service, self.env.process(service.run(pipe)))
def get_filter_type(self):
return ServiceFilterPerAp
def timeout(self):
self.ap.warn('Timeout in FSM %s' % self.name)
def err(self, msg):
self.ap.logger.warn('Error in FSM %s: %s' % (self.name, msg))
def send(self, v):
self.worker.pkt_pipe.send(v)
def wait(self, pipe, val):
return pipe.async_wait(val)
def run(self, pipe):
try:
run_gen = self.run_with_buffer()
send_data = None
while True:
try:
action = run_gen.send(send_data)
except StopIteration:
action = 'done'
if type(action) is tuple and len(action) == 2:
action, val = action
elif type(action) is tuple and len(action) == 3:
action, val1, val2 = action
# if action == 'get':
# send_data = None
# v = wait_for_pkt()
# resp = yield v
# if resp:
# send_data = resp[0]['pkt']
if action == 'put':
# send packet
if type(val) is list:
for v in val:
# pipe.async_tx_pkt(v)
self.send(v)
else:
self.send(val)
# pipe.async_tx_pkt(val)
elif action == 'sleep':
# async sleep
v = self.wait(pipe, val)
yield v
elif action == 'service':
# launch a service in the current service
# current service will therefore stop running
# when done, give hand back to former service
service = val
with self.worker.services_lock:
if self in self.worker.stl_services:
del self.worker.stl_services[self]
self.worker.stl_services[service] = {'pipe': pipe}
# launch process
stop = yield self.ap.register_service(service, self.env.process(service.run(pipe)))
if stop:
return True
# process returned
with self.worker.services_lock:
if service in self.worker.stl_services:
del self.worker.stl_services[service]
self.worker.stl_services[self] = {'pipe': pipe}
elif action == 'done':
# stop the service
self.ap.logger.debug(
'Finished successfully FSM %s' % self.name)
break
elif action == 'process_with_timeout':
# yields the given simpy process and a timeout
# wakes up when either completes
yield simpy.events.AnyOf(self.env, [val1, self.env.timeout(val2)])
elif action == 'start_ap':
# async wait until free spot to continue
self.ap.ap_concurrent_request = simpy.resources.resource.Request(
ServiceAP.ap_concurrent)
yield self.ap.ap_concurrent_request
elif action == 'done_ap':
# free the current spot for other APs to start join process
yield simpy.resources.resource.Release(ServiceAP.ap_concurrent, self.ap.ap_concurrent_request)
elif action == 'err':
# for service errors
self.err(val)
elif action == 'time':
# for timeouts, stops the service
self.timeout()
break
elif not action:
break
else:
raise Exception('Incorrect action in FSM %s: %s' %
(self.name, action))
except simpy.events.Interrupt as e:
pass
def hex(buf, delimiter=' '):
if not buf:
return 'Empty buffer'
return delimiter.join(['%02x' % (c if type(c) is int else ord(c)) for c in buf])
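# Editor's note: hex() renders a byte buffer for logging, e.g. hex(b'\x01\xff') -> '01 ff'.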
################ FSMs ##################
class ServiceAPDiscoverWLC(ServiceAP):
"""Discovery Service for one AP."""
requires_dtls = False
def run_with_buffer(self):
from ..trex_wireless_config import config
# request a slot
yield ("start_ap")
self.ap.logger.info("Service DiscoverWLC started")
self.ap.start_join_time = time.time()
while True:
if self.ap.state > APState.DISCOVER:
return
yield("service", ServiceAPShutdownDTLS(self.worker, self.ap, self.env, self.topics_to_subs))
self.ap.reset_vars()
self.ap.state = APState.DISCOVER
self.ap.active_service = self
self.ap._create_ssl(config.openssl.buffer_size)
self.ap.rx_responses[2] = -1
self.ap.retries += 1
# First resolve WLC MAC if needed
if self.ap.wlc_ip_bytes and not self.ap.wlc_mac_bytes:
self.ap.logger.info(
"Resolving WLC MAC for IP: %s" % self.ap.wlc_ip)
while not self.ap.wlc_mac_bytes:
RetransmitInterval = config.capwap.retransmit_interval
for _ in range(config.capwap.max_retransmit):
if self.ap.wlc_mac_bytes:
break
RetransmitInterval *= 2
self.ap.logger.debug("sending who-as for WLC IP")
arp = self.ap.get_arp_pkt(
'who-has', src_mac_bytes=self.ap.mac_bytes, src_ip_bytes=self.ap.ip_bytes, dst_ip_bytes=self.ap.wlc_ip_bytes)
yield('put', arp)
# waiting for an arp response
self.waiting_on = simpy.events.Event(self.env)
yield ("process_with_timeout", self.waiting_on, RetransmitInterval)
del self.waiting_on
if self.ap.wlc_mac_bytes:
self.ap.logger.debug("got MAC of WLC")
# done
break
if self.ap.wlc_ip_bytes and not self.ap.wlc_mac_bytes:
self.err(
'err', 'Unable to resolve MAC address of WLC for %s' % self.ap.wlc_ip)
RetransmitInterval = config.capwap.retransmit_interval
for _ in range(config.capwap.max_retransmit):
RetransmitInterval *= 2
discovery_pkt = self.ap.wrap_capwap_pkt(
CAPWAP_PKTS.discovery(self.ap), is_discovery=True)
self.ap.logger.debug("sending discovery request to WLC")
yield('put', discovery_pkt)
self.waiting_on = simpy.events.Event(self.env)
yield ("process_with_timeout", self.waiting_on, RetransmitInterval)
del self.waiting_on
try:
result_code = self.ap.rx_responses[2]
except KeyError:
result_code = -1
yield("err", 'No response')
if result_code in (None, 0, 2):
self.ap.state = APState.DTLS
self.ap.logger.info("Service DiscoverWLC finished")
yield ("service", ServiceAPEstablishDTLS(self.worker, self.ap, self.env, self.topics_to_subs))
elif result_code != -1:
self.ap.wlc_mac_bytes = None
self.ap.wlc_mac = None
yield("err", 'Not successful result %s - %s.' % (result_code,
capwap_result_codes.get(result_code, 'Unknown')))
break
self.ap.logger.info("DiscoverWLC retries expired")
class ServiceAPEstablishDTLS(ServiceAP):
"""Service which tries to setup DTLS with remote controller."""
requires_dtls = False
aps_by_ssl = {}
@staticmethod
def openssl_callback(ssl, where, ret):
pkt_pipe = ServiceAPEstablishDTLS.aps_by_ssl[ssl]['pkt_pipe']
ap = ServiceAPEstablishDTLS.aps_by_ssl[ssl]['ap']
logger = ServiceAPEstablishDTLS.aps_by_ssl[ssl]['logger']
if where & SSL_CONST.SSL_CB_ALERT:
ap.state = APState.DISCOVER
ap.logger.info("Received DTLS Alert")
return 0
if not ap.mac_bytes:
return 0
if libcrypto.BIO_ctrl_pending(ap.out_bio):
ssl_data = ap.ssl_read()
ap.nb_write += 1
if ap.state != APState.DTLS:
return 0
if ssl_data:
# # This is due to a bug in the ordering of UDP packet received... to be investigated
# if ap.nb_write > 7 and len(ssl_data) > 1000 and not ap.is_handshake_done_libssl():
# with ap.ssl_lock:
# timeout = libssl.DTLSv1_get_timeout(ap.ssl)
# timeout = timeout.tv_sec + timeout.tv_usec * 1e-6
# print("Sending bad DTLS packet ????: %d timeout %d %f %s" % (len(ssl_data), ap.timeout_dtls, timeout, ap.name))
# return 0
pkt = ap.wrap_capwap_pkt(b'\1\0\0\0' + ssl_data)
logger.debug(
"sending dtls packet (openssl callback @ DTLS service)")
pkt_pipe.send(pkt)
return 0
ssl_info_callback_type = CFUNCTYPE(c_int, c_void_p, c_int, c_int)
ssl_info_callback_func = ssl_info_callback_type(openssl_callback.__func__)
def run(self, pipe):
from ..trex_wireless_config import config
# assert self.ap.ssl and (self.ap.ssl not in self.aps_by_ssl)
self.ap.logger.info("Service ApEstablishDTLS started")
try:
while True:
if self.ap.state != APState.DTLS:
self.ap.logger.info("Service ApEstablishDTLS rollback")
return
self.ap.active_service = self
self.aps_by_ssl[self.ap.ssl] = {
'ap': self.ap, 'pkt_pipe': self.worker.pkt_pipe, 'logger': self.ap.logger}
self.ap.nb_write = 0
self.ap.timeout_dtls = 0
with self.ap.ssl_lock:
libssl.SSL_clear(self.ap.ssl)
libssl.SSL_set_info_callback(
self.ap.ssl, self.ssl_info_callback_func) # set ssl callback
timeout = libssl.DTLSv1_get_timeout(self.ap.ssl)
timeout = min(
max(timeout.tv_sec + timeout.tv_usec * 1e-6, sleep_for_packet), 3)
libssl.SSL_do_handshake(self.ap.ssl)
try:
timer = PassiveTimer(config.dtls.timeout)
while not timer.has_expired():
if self.ap.state != APState.DTLS:
return
if self.ap.is_handshake_done_libssl():
# session established
self.ap.state = APState.JOIN
event = APDTLSEstablishedEvent(self.env, self.ap.mac)
self.raise_event(event)
self.ap.logger.info(
"Service ApEstablishDTLS finished successfully")
stop = yield self.stop_and_launch_service(ServiceAPJoinWLC(self.worker, self.ap, self.env, self.topics_to_subs), pipe)
if stop:
return True
return
resps = yield pipe.async_wait_for_pkt(time_sec=timeout, limit=1)
self.ap.logger.debug(
"got %s packets at DTLS service" % len(resps))
if not resps:
if self.ap.is_handshake_done_libssl():
# session established
self.ap.state = APState.JOIN
event = APDTLSEstablishedEvent(self.env, self.ap.mac)
self.raise_event(event)
self.ap.logger.info(
"Service ApEstablishDTLS finished successfully")
stop = yield self.stop_and_launch_service(ServiceAPJoinWLC(self.worker, self.ap, self.env, self.topics_to_subs), pipe)
if stop:
return True
return
# Make DTLS timeout to retransmit
with self.ap.ssl_lock:
libssl.DTLSv1_handle_timeout(self.ap.ssl)
timeout = libssl.DTLSv1_get_timeout(self.ap.ssl)
timeout = min(
max(timeout.tv_sec + timeout.tv_usec * 1e-6, sleep_for_packet), 3)
self.ap.timeout_dtls += 1
ret = libssl.SSL_do_handshake(self.ap.ssl)
if ret <= 0:
try:
# looking up the error
self.ap.logger.error("SSL Handshake error: %s: %s" % (
ret, SSL_CONST.ssl_err[libssl.SSL_get_error(self.ap.ssl, ret)]))
except:
pass
continue
if self.ap.state != APState.DTLS:
return
pkt_bytes = resps[0]['pkt']
is_dtls = struct.unpack('?', pkt_bytes[42:43])[0]
if is_dtls:
self.ap.decrypt(pkt_bytes[46:])
if self.ap.state <= APState.DTLS:
self.ap.state = APState.DISCOVER
self.ap.logger.info(
"Service ApEstablishDTLS rollback: timer expired")
stop = yield self.stop_and_launch_service(ServiceAPDiscoverWLC(self.worker, self.ap, self.env, self.topics_to_subs), pipe)
return stop
finally:
with self.ap.ssl_lock:
libssl.SSL_set_info_callback(
self.ap.ssl, None) # remove ssl callback
if self.ap.ssl in self.aps_by_ssl:
del self.aps_by_ssl[self.ap.ssl]
except simpy.events.Interrupt:
self.ap.logger.debug("Service ApEstablishDTLS interrupted")
return True
class ServiceAPEncryptedControl(ServiceAP):
def control_round_trip(self, tx_pkt, expected_response_type, debug_msg=None):
"""Send the packet, wait for the expected answer, and retry."""
from ..trex_wireless_config import config
self.ap.rx_responses[expected_response_type] = -1
RetransmitInterval = config.capwap.retransmit_interval
for i in range(config.capwap.max_retransmit):
if not self.ap.is_dtls_established:
yield 'dtls_broke'
return
if not self.ap.is_dtls_established:
break
RetransmitInterval *= 2
encrypted = self.ap.encrypt(tx_pkt)
if encrypted and encrypted != '':
tx_pkt_wrapped = self.ap.wrap_capwap_pkt(
b'\1\0\0\0' + self.ap.encrypt(tx_pkt))
self.ap.logger.debug("sending packet")
yield ('put', tx_pkt_wrapped)
else:
continue
timer = PassiveTimer(RetransmitInterval)
self.waiting_on = simpy.events.Event(self.env)
yield ("process_with_timeout", self.waiting_on, RetransmitInterval)
del self.waiting_on
if expected_response_type in self.ap.rx_responses:
result_code = self.ap.rx_responses[expected_response_type]
if result_code in (None, 0, 2):
# expected response
yield 'good_resp'
if result_code != -1:
# non expected response
yield ('err', 'Not successful result %s - %s.' % (result_code, capwap_result_codes.get(result_code, 'Unknown')))
return
else:
continue
else:
continue
# timeout
self.ap.state = APState.DISCOVER
        if self.ap.is_dtls_established:
            self.ap.logger.info(
                "Service ApJoinWLC rollback: timeout: too many trials")
        else:
            self.ap.logger.info(
                "Service ApJoinWLC rollback: dtls broke")
yield 'time'
class ServiceAPJoinWLC(ServiceAPEncryptedControl):
    """Join state and Configuration state simulation for an AP."""
def run_with_buffer(self):
from ..trex_wireless_config import config
def rollback(reason=None):
if reason:
self.ap.logger.info("Service ApJoinWLC rollback: %s" % reason)
self.ap.state = APState.DISCOVER
return ('service', ServiceAPDiscoverWLC(self.worker, self.ap, self.env, self.topics_to_subs))
try:
while True:
if self.ap.state != APState.JOIN:
return
self.ap.logger.info("Service ApJoinWLC started")
self.ap.active_service = self
self.ap.logger.debug('Sending Join Request')
join_req = CAPWAP_PKTS.join(self.ap)
ctrl_gen = self.control_round_trip(
join_req, 4, debug_msg='Join Request')
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action in ('good_resp', 'time', 'dtls_broke', 'err'):
ctrl_gen.close()
break
else:
send_data = yield action
if action == 'dtls_broke':
yield(rollback("dtls session broken"))
return
elif action == 'time':
yield rollback("timeout: join request")
return
elif action == 'err':
yield rollback("error")
return
self.ap.logger.debug('Got Join Response')
self.ap.logger.debug('Sending Configuration Status Request')
ctrl_gen = self.control_round_trip(CAPWAP_PKTS.conf_status_req(
self.ap), 6, debug_msg='Config status request')
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action in ('good_resp', 'time', 'dtls_broke', 'err'):
ctrl_gen.close()
break
else:
send_data = yield action
if action == 'dtls_broke':
yield(rollback("dtls session broken"))
return
elif action == 'time':
yield rollback("timeout: join request")
return
elif action == 'err':
yield rollback("error")
return
self.ap.logger.debug('Got Configuration Status Response')
self.ap.logger.debug('Sending Change State Event Request')
ctrl_gen = self.control_round_trip(CAPWAP_PKTS.change_state(
self.ap, radio_id=0), 12, debug_msg='Change state event request')
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action in ('good_resp', 'time', 'dtls_broke', 'err'):
ctrl_gen.close()
break
else:
send_data = yield action
if action == "err":
break
if action == 'dtls_broke':
yield(rollback("dtls session broken"))
return
elif action == 'time':
yield rollback("timeout: join request")
return
elif action == 'err':
yield rollback("error")
return
self.ap.logger.debug('Got Change State Event Response')
self.ap.logger.debug('Sending Change State Event Request')
ctrl_gen = self.control_round_trip(CAPWAP_PKTS.change_state(
self.ap, radio_id=1), 12, debug_msg='Change state event request')
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action in ('good_resp', 'time', 'dtls_broke', 'err'):
ctrl_gen.close()
break
else:
send_data = yield action
if action == "err":
break
if action == 'dtls_broke':
yield(rollback("dtls session broken"))
return
elif action == 'time':
yield rollback("timeout: join request")
return
elif action == 'err':
yield rollback("error")
return
self.ap.logger.debug('Got Change State Event Response')
self.ap.logger.debug(
'Going to ack all config updates and try to get SSID')
# ack all config updates in worker_traffic_handler thread
# while not self.ap.last_recv_ts or self.ap.last_recv_ts + 5 >= time.time():
# self.waiting_on = simpy.events.Event(self.env)
# yield ("process_with_timeout", self.waiting_on, 5)
# del self.waiting_on
# if not self.ap.is_dtls_established:
# yield rollback("dtls not established")
# return
# elif self.ap.SSID:
# break
# if not self.ap.last_recv_ts:
# break
self.waiting_on = simpy.events.Event(self.env)
yield ("process_with_timeout", self.waiting_on, config.capwap.specific.ssid_timeout)
del self.waiting_on
if not self.ap.SSID:
yield rollback("no SSID")
return
                self.ap.logger.info("received SSID, proceeding with sending data keep_alive")
RetransmitInterval = config.capwap.retransmit_interval
for _ in range(config.capwap.max_retransmit):
if self.ap.state == APState.RUN:
break
if not self.ap.is_dtls_established:
yield rollback("dtls not established")
return
RetransmitInterval *= 2
tx_pkt = self.ap.wrap_capwap_pkt(
CAPWAP_PKTS.keep_alive(self.ap), dst_port=5247)
self.ap.logger.debug("Sending keep-alive")
yield ('put', tx_pkt)
self.waiting_on = simpy.events.Event(self.env)
yield ("process_with_timeout", self.waiting_on, RetransmitInterval)
del self.waiting_on
if self.ap.got_keep_alive:
self.ap.got_keep_alive = False
self.ap.logger.debug('Received Keep-alive response.')
self.ap.last_echo_req_ts = time.time()
self.ap.join_time = time.time()
self.ap.join_duration = self.ap.join_time - self.ap.start_join_time
self.ap.state = APState.RUN
self.ap.logger.info("Service ApJoinWLC finished")
# release spot
yield("done_ap")
yield ('service', ServiceAPRun(self.worker, self.ap, self.env, self.topics_to_subs))
return
if not self.ap.is_dtls_established:
break
# timeout
if not self.ap.is_dtls_established:
yield rollback("DTLS session broken")
return
# too many trials or failure
if self.ap.state == APState.JOIN:
yield rollback("too many trials")
return
except simpy.events.Interrupt:
self.ap.logger.debug("Service APJoinWLC interrupted")
return True
class ServiceAPRun(ServiceAP):
"""Run state simulation for an AP.
Send periodic Echo Request to the WLC.
"""
def run_with_buffer(self):
from ..trex_wireless_config import config
try:
ap = self.ap
self.publish = pubsub.Publisher(ap.pubsub, "").publish
# publish Joined event
event = APJoinConnectedEvent(self.env, ap.mac)
self.raise_event(event)
def rollback(reason=None):
if reason:
self.ap.logger.info("Service ApRun rollback: %s" % reason)
self.ap.state = APState.DISCOVER
return ('service', ServiceAPDiscoverWLC(self.worker, self.ap, self.env, self.topics_to_subs))
self.ap.logger.info("Service ApRun started")
wait_time = config.capwap.echo_interval
while True:
if ap.state != APState.RUN:
break
if ap.got_disconnect:
event = APJoinDisconnectedEvent(self.env, ap.name)
self.raise_event(event)
ap.got_disconnect = False
# if disconnected, disconnect the clients
for client in ap.clients:
client.got_disconnect = True
try:
client.got_disconnected_event.succeed()
except (RuntimeError, AttributeError):
# already triggered or not waiting for this packet
pass
rollback("got disconnected")
yield ("sleep", wait_time)
                # echo_resp_timer is set while a response is still expected (waiting); otherwise it is None
if ap.echo_resp_timer and ap.echo_resp_timer.has_expired():
# no echo response received, retry
if ap.echo_resp_retry <= config.capwap.max_retransmit:
ap.echo_resp_timeout *= 2
ap.echo_resp_timer = PassiveTimer(ap.echo_resp_timeout)
ap.echo_resp_retry += 1
tx_pkt = ap.get_echo_capwap()
ap.logger.debug(
"sending echo request after timeout, retries: %s" % ap.echo_resp_retry)
encrypted = ap.encrypt(tx_pkt)
if encrypted:
self.worker.pkt_pipe.send(ap.get_echo_wrap(encrypted))
else:
event = APJoinDisconnectedEvent(self.env, ap.mac)
self.raise_event(event)
self.ap.logger.warn(
"Timeout in echo response, disconnecting AP")
ap.echo_resp_timeout = config.capwap.retransmit_interval
ap.echo_resp_retry = 0
yield rollback("timeout in echo response")
if time.time() > ap.last_echo_req_ts + config.capwap.echo_interval:
# echo_req_timer passed, send new echo
tx_pkt = ap.get_echo_capwap()
ap.last_echo_req_ts = time.time()
ap.echo_resp_timer = PassiveTimer(ap.echo_resp_timeout)
ap.logger.debug("sending echo request")
encrypted = ap.encrypt(tx_pkt)
if encrypted:
self.worker.pkt_pipe.send(ap.get_echo_wrap(encrypted))
except simpy.events.Interrupt:
self.ap.logger.debug("Service ApRun interrupted")
return True
self.ap.logger.info("Service ApRun finished")
class ServiceAPShutdownDTLS(ServiceAP):
def run(self, pipe):
yield pipe.async_wait(0)
self.ap.logger.info("Service ApShutdownDTLS started")
with self.ap.ssl_lock:
libssl.SSL_shutdown(self.ap.ssl)
ssl_data = self.ap.ssl_read()
if ssl_data:
try:
tx_pkt = self.ap.wrap_capwap_pkt(b'\1\0\0\0' + ssl_data)
self.send(tx_pkt)
except:
return
class ServiceAPShutdown(ServiceAP):
"""Service stopping an AP, sending a 'close notify' to the controller if needed.
When done, the state of the AP is CLOSED.
"""
aps_by_ssl = {}
@staticmethod
def openssl_callback(ssl, where, ret):
pkt_pipe = ServiceAPShutdown.aps_by_ssl[ssl]['pkt_pipe']
ap = ServiceAPShutdown.aps_by_ssl[ssl]['ap']
logger = ServiceAPShutdown.aps_by_ssl[ssl]['logger']
if libcrypto.BIO_ctrl_pending(ap.out_bio):
ssl_data = ap.ssl_read()
ap.nb_write += 1
if ap.state != APState.CLOSING:
return 0
if ssl_data:
            # This is due to a bug in the ordering of UDP packets received... to be investigated
if ap.nb_write > 7 and len(ssl_data) > 1000 and not ap.is_handshake_done_libssl():
with ap.ssl_lock:
timeout = libssl.DTLSv1_get_timeout(ap.ssl)
timeout = timeout.tv_sec + timeout.tv_usec * 1e-6
return 0
pkt = ap.wrap_capwap_pkt(b'\1\0\0\0' + ssl_data)
logger.debug(
"sending dtls packet (openssl callback @ ApShutdown)")
pkt_pipe.send(pkt)
return 0
ssl_info_callback_type = CFUNCTYPE(c_int, c_void_p, c_int, c_int)
ssl_info_callback_func = ssl_info_callback_type(openssl_callback.__func__)
def run(self, pipe):
try:
from ..trex_wireless_config import config
yield pipe.async_wait(0)
event = APJoinDisconnectedEvent(self.env, self.ap.name)
self.raise_event(event)
self.ap.logger.info("Service ApShutdown started")
if self.ap.state <= APState.DISCOVER or self.ap.state > APState.CLOSING:
self.ap.state = APState.CLOSED
self.ap.logger.info("Service ApShutdown stopped : AP is already shut")
return
self.ap.active_service = self
ServiceAPShutdown.aps_by_ssl[self.ap.ssl] = {
'ap': self.ap, 'pkt_pipe': self.worker.pkt_pipe, 'logger': self.ap.logger}
self.ap.nb_write = 0
self.ap.timeout_dtls = 0
try:
# DTLS Shutdown
with self.ap.ssl_lock:
libssl.SSL_set_info_callback(
self.ap.ssl, self.ssl_info_callback_func) # set ssl callback
cnt = 0
with self.ap.ssl_lock:
ret = libssl.SSL_shutdown(self.ap.ssl)
# want read / want write
while ret == 0 or libssl.SSL_get_error(self.ap.ssl, ret) in (2, 3):
# retry
timeout = libssl.DTLSv1_get_timeout(self.ap.ssl)
timeout = min(
max(timeout.tv_sec + timeout.tv_usec * 1e-6, sleep_for_packet), 3)
yield pipe.async_wait(timeout)
ret = libssl.SSL_shutdown(self.ap.ssl)
cnt += 1
if cnt > config.dtls.shutdown_max_retransmit:
break
if ret == 1:
self.ap.logger.debug("SSL Shutdown success")
else:
try:
self.ap.logger.warn("SSL Shutdown error: %s: %s" % (
ret, SSL_CONST.ssl_err[libssl.SSL_get_error(self.ap.ssl, ret)]))
except KeyError:
# unknown error
pass
finally:
with self.ap.ssl_lock:
libssl.SSL_set_info_callback(
self.ap.ssl, None) # remove ssl callback
if self.ap.ssl in ServiceAPShutdown.aps_by_ssl:
del ServiceAPShutdown.aps_by_ssl[self.ap.ssl]
self.ap.logger.info("Service ApShutdown finished")
self.ap.state = APState.CLOSED
self.ap.reset_vars()
except simpy.events.Interrupt:
self.ap.logger.info("Service ApShutdown interrupted")
return True
return
class ServiceInfoEvent(Service):
    """Service gathering info on AP and client joins."""
def __init__(self, worker, verbose_level=Service.WARN):
Service.__init__(self, verbose_level) # TODO
self.worker = worker
        self.name = ServiceInfoEvent.__name__
def get_filter_type(self):
return None
def run(self, pipe):
ap_joined = False
client_joined = False
while not ap_joined or not client_joined:
with self.worker.aps_lock:
aps = list(self.worker.aps)
if not ap_joined and len([ap for ap in aps if ap.state != APState.RUN]) == 0:
# all aps are joined for the first time
self.worker.ap_join_done_time = time.time()
ap_joined = True
self.worker.ap_joined.set()
with self.worker.clients_lock:
clients = list(self.worker.clients)
if clients and ap_joined and not client_joined and (len([c for c in clients if c.state != ClientState.RUN]) == 0):
self.worker.client_join_done_time = time.time()
client_joined = True
# self.worker.client_joined.set()
yield pipe.async_wait(0.5)
# print( len([ap for ap in self.manager.aps if ap.state != APState.RUN]) )
class ServiceEventManager(Service):
"""Service waiting on queue for events to be 'triggered' (set)."""
def __init__(self, event_store):
"""Build a ServiceEventManager.
Args:
            event_store: SynchronizedStore where all elements are events to be 'succeeded' (triggered).
"""
super().__init__()
self.event_store = event_store
        self.name = ServiceEventManager.__name__
def get_filter_type(self):
return None
def run(self):
while True:
events = yield self.event_store.get(None, 1)
for event in events:
try:
event.succeed()
except RuntimeError:
# already triggered
pass
|
gramformer/__init__.py | shashankdeshpande/Gramformer | 971 | 11190867 | <reponame>shashankdeshpande/Gramformer
from gramformer.gramformer import Gramformer
|
nucleoatac/diff_occ.py | RavelBio/NucleoATAC | 101 | 11190896 | <reponame>RavelBio/NucleoATAC<filename>nucleoatac/diff_occ.py
"""
Script to make nucleosome occupancy track!
@author: <NAME>
"""
##### IMPORT MODULES #####
# import necessary python modules
import multiprocessing as mp
import numpy as np
import traceback
import itertools
import pysam
from pyatac.utils import shell_command,read_chrom_sizes_from_bam
from pyatac.chunk import ChunkList
from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk
from pyatac.fragmentsizes import FragmentSizes
from pyatac.bias import PWM
def _diffHelper(arg):
"""function to get occupancy for a set of bed regions
"""
(chunk, params) = arg
try:
occ = OccChunk(chunk)
occ.process(params)
out = (occ.getNucDist(),
occ.occ, [occ.peaks[i] for i in sorted(occ.peaks.keys())])
occ.removeData()
except Exception as e:
print('Caught exception when processing:\n'+ chunk.asBed()+"\n")
traceback.print_exc()
print()
raise e
return out
def _writeDiff(pos_queue, out):
out_handle = open(out + '.occdiff.bed','a')
try:
for poslist in iter(pos_queue.get, 'STOP'):
for pos in poslist:
pos.write(out_handle)
pos_queue.task_done()
    except Exception as e:
print('Caught exception when writing occupancy track\n')
traceback.print_exc()
print()
raise e
out_handle.close()
return True
def run_diff(args, bases = 500000):
"""run differential occupancy calling
"""
chrs = read_chrom_sizes_from_bam(args.bam)
pwm = PWM.open(args.pwm)
chunks = ChunkList.read(args.bed, chromDict = chrs, min_offset = args.flank + args.upper/2 + max(pwm.up,pwm.down))
chunks.merge()
maxQueueSize = max(2,int(100 * bases / np.mean([chunk.length() for chunk in chunks])))
#get fragmentsizes
fragment_dist1 = FragmentMixDistribution(0, upper = args.upper)
fragment_dist1.fragmentsizes = FragmentSizes(0, args.upper, vals = FragmentSizes.open(args.sizes1).get(0,args.upper))
fragment_dist1.modelNFR()
fragment_dist2 = FragmentMixDistribution(0, upper = args.upper)
fragment_dist2.fragmentsizes = FragmentSizes(0, args.upper, vals = FragmentSizes.open(args.sizes2).get(0,args.upper))
fragment_dist2.modelNFR()
params = OccupancyParameters(fragment_dist, args.upper, args.fasta, args.pwm, sep = args.nuc_sep, min_occ = args.min_occ,
flank = args.flank, bam = args.bam, ci = args.confidence_interval)
sets = chunks.split(bases = bases)
pool1 = mp.Pool(processes = max(1,args.cores-1))
diff_handle = open(args.out + '.occdiff.bed','w')
diff_handle.close()
diff_queue = mp.JoinableQueue()
diff_process = mp.Process(target = _writeDiff, args=(diff_queue, args.out))
diff_process.start()
nuc_dist = np.zeros(args.upper)
for j in sets:
        tmp = pool1.map(_diffHelper, zip(j,itertools.repeat(params)))
for result in tmp:
diff_queue.put(result[1])
pool1.close()
pool1.join()
diff_queue.put('STOP')
diff_process.join()
pysam.tabix_compress(args.out + '.occdiff.bed', args.out + '.occdiff.bed.gz',force = True)
shell_command('rm ' + args.out + '.occdiff.bed')
pysam.tabix_index(args.out + '.occdiff.bed.gz', preset = "bed", force = True)
|
finrl_meta/env_future_trading/wt4elegantrl/envs.py | eitin-infant/FinRL-Meta | 214 | 11190922 | from gym import Env
from gym.spaces import Box, Space
from features import Feature
from stoppers import Stopper
from assessments import Assessment
from wtpy.apps import WtBtAnalyst
from wtpy.WtBtEngine import WtBtEngine
from strategies import StateTransfer, EngineType
from multiprocessing import Pipe, Process
from os import getpid
# Only one env is allowed per process
class WtEnv(Env):
TRAINER = 1
EVALUATOR = 2
DEBUGGER = 3
def __init__(self,
strategy: StateTransfer,
stopper: Stopper,
feature: Feature,
assessment: Assessment,
time_range: tuple,
slippage: int = 0,
id: int = getpid(),
mode=1,
):
self.reward_range
        if mode == 3:  # debug mode
self._log_: str = './config/03research/log_debugger.json'
self._dump_: bool = True
self._mode_: str = 'WtDebugger'
        elif mode == 2:  # evaluation mode
self._log_: str = './config/03research/log_evaluator.json'
self._dump_: bool = True
self._mode_: str = 'WtEvaluator'
        else:  # training mode
self._log_: str = './config/03research/log_trainer.json'
self._dump_: bool = False
self._mode_: str = 'WtTrainer'
self._id_: int = id
self._iter_: int = 0
self._run_: bool = False
self.__strategy__ = strategy
self._et_ = self.__strategy__.EngineType()
self.__stopper__: Stopper = stopper
self.__slippage__: int = slippage
self.__feature__: Feature = feature
self.observation_space: Box = Box(**self.__feature__.observation)
self.action_space: Space = self.__strategy__.Action(
len(self.__feature__.securities))
self._assessment_: Assessment = assessment
self.__time_range__ = time_range
def _debug_(self):
pass
def __step__(self):
finished = not self._cb_step_()
if self._assessment_.done or finished:
self._assessment_.finish()
self._debug_()
self.close()
# if self._dump_:
# self.analyst(self._iter_)
def close(self):
if self._run_ and hasattr(self, '_engine_'):
self._engine_.stop_backtest()
self._run_ = False
def reset(self):
self.close()
time_start, time_end = self.__time_range__[self._iter_%len(self.__time_range__)]
self._iter_ += 1
if not hasattr(self, '_engine_'):
            # create a backtest engine (runtime environment)
self._engine_: WtBtEngine = WtBtEngine(
eType=self._et_,
logCfg=self._log_,
)
if self._et_ == EngineType.ET_CTA:
self._engine_.init(
'./config/01commom/',
'./config/03research/cta.json')
self._cb_step_ = self._engine_.cta_step
elif self._et_ == EngineType.ET_HFT:
self._engine_.init(
'./config/01commom/',
'./config/03research/hft.json')
self._cb_step_ = self._engine_.hft_step
else:
raise AttributeError
self._engine_.configBacktest(time_start, time_end)
self._engine_.commitBTConfig()
else:
self._engine_.set_time_range(time_start, time_end)
        # reset the reward (assessment)
self._assessment_.reset()
        # create a strategy and add it to the engine
self._strategy_: StateTransfer = self.__strategy__(
name=self._name_(self._iter_),
feature=self.__feature__,
stopper=self.__stopper__,
assessment=self._assessment_,
)
        # the hook must be installed when registering the strategy
if self._et_ == EngineType.ET_CTA:
self._engine_.set_cta_strategy(
self._strategy_, slippage=self.__slippage__, hook=True, persistData=self._dump_)
elif self._et_ == EngineType.ET_HFT:
self._engine_.set_hft_strategy(self._strategy_, hook=True)
else:
raise AttributeError
        # the backtest must be run asynchronously
self._engine_.run_backtest(bAsync=True, bNeedDump=self._dump_)
self._run_ = True
self.__step__()
return self.__feature__.obs
def step(self, action):
assert hasattr(self, '_engine_')
self._strategy_.setAction(action)
self._cb_step_()
self.__step__()
return self.__feature__.obs, self._assessment_.reward, self._assessment_.done, {}
@property
def assets(self):
return self._assessment_.curr_assets
def analyst(self, iter: int):
name = self._name_(iter)
analyst = WtBtAnalyst()
folder = "./outputs_bt/%s/" % name
analyst.add_strategy(
name, folder=folder, init_capital=self._assessment_._init_assets_, rf=0.02, annual_trading_days=240)
try:
analyst.run_new('%s/PnLAnalyzing.xlsx' % folder)
except:
analyst.run('%s/PnLAnalyzing.xlsx' % folder)
def analysts(self):
for iter in range(1, self._iter_+1):
            self.analyst(iter)
def _name_(self, iter):
time_start, time_end = self.__time_range__[(iter-1)%len(self.__time_range__)]
return '%s%s_%s_%s_%s-%s' % (self._mode_, self._id_, self.__strategy__.Name(), iter, str(time_start)[:8], str(time_end)[:8])
def __del__(self):
if hasattr(self, '_engine_'):
self._engine_.release_backtest()
def __sub_process_worker__(pipe: Pipe, _cmd_, _attr_, cli, kwargs):
env = cli(**kwargs)
while True:
cmd, kwargs = pipe.recv()
if cmd in _cmd_:
if cmd == 'stop':
pipe.send(True)
pipe.close()
break
call = getattr(env, cmd)
if kwargs:
# print(cmd, kwargs)
pipe.send(call(**kwargs))
else:
pipe.send(call())
elif cmd in _attr_:
pipe.send(getattr(env, cmd))
else:
pipe.send('unknow %s' % cmd)
class WtSubProcessEnv(Env):
_cmd_ = ('reset', 'step', 'close', 'stop')
_attr_ = ('reward_range', 'metadata',
'observation_space', 'action_space', 'assets')
def __init__(self, cli, **kwargs):
self._pipe_, pipe = Pipe()
self._process_ = Process(
target=__sub_process_worker__,
args=(pipe, self._cmd_, self._attr_, cli, kwargs),
daemon=True
)
self._process_.start()
def __do__(self, cmd, **kwargs):
self._pipe_.send((cmd, kwargs))
return self._pipe_.recv()
@property
def metadata(self):
return self.__do__('metadata')
@property
def reward_range(self):
return self.__do__('reward_range')
@property
def observation_space(self):
return self.__do__('observation_space')
@property
def action_space(self):
return self.__do__('action_space')
@property
def assets(self):
return self.__do__('assets')
def reset(self):
return self.__do__('reset')
def step(self, action):
# print(type(action))
return self.__do__('step', action=action)
def close(self):
return self.__do__('close')
def __del__(self):
self.__do__('stop')
self._process_.join()
self._process_.close()
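# Rough usage sketch (hypothetical names: MyStrategy, my_stopper, my_feature,
# my_assessment and the time-range values below are placeholders, not part of this
# module). Since a single process can only host one WtEnv, parallel training wraps
# each environment in its own subprocess:
#
#   env = WtSubProcessEnv(WtEnv,
#                         strategy=MyStrategy, stopper=my_stopper,
#                         feature=my_feature, assessment=my_assessment,
#                         time_range=((202001010930, 202012311500),))
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())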
|
climlab/surface/turbulent.py | nfeldl/climlab | 160 | 11190930 | '''Processes for surface turbulent heat and moisture fluxes
:class:`~climlab.surface.SensibleHeatFlux` and
:class:`~climlab.surface.LatentHeatFlux` implement standard bulk formulae
for the turbulent heat fluxes, assuming that the heating or moistening
occurs in the lowest atmospheric model level.
:Example:
Here is an example of setting up a single-column
Radiative-Convective model with interactive water vapor
and surface latent and sensible heat fluxes.
This example also demonstrates *asynchronous coupling*:
the radiation uses a longer timestep than the other model components::
import numpy as np
import climlab
from climlab import constants as const
# Temperatures in a single column
full_state = climlab.column_state(num_lev=30, water_depth=2.5)
temperature_state = {'Tatm':full_state.Tatm,'Ts':full_state.Ts}
# Initialize a nearly dry column (small background stratospheric humidity)
q = np.ones_like(full_state.Tatm) * 5.E-6
# Add specific_humidity to the state dictionary
full_state['q'] = q
# ASYNCHRONOUS COUPLING -- the radiation uses a much longer timestep
# The top-level model
model = climlab.TimeDependentProcess(state=full_state,
timestep=const.seconds_per_hour)
# Radiation coupled to water vapor
rad = climlab.radiation.RRTMG(state=temperature_state,
specific_humidity=full_state.q,
albedo=0.3,
timestep=const.seconds_per_day
)
# Convection scheme -- water vapor is a state variable
conv = climlab.convection.EmanuelConvection(state=full_state,
timestep=const.seconds_per_hour)
# Surface heat flux processes
shf = climlab.surface.SensibleHeatFlux(state=temperature_state, Cd=0.5E-3,
timestep=const.seconds_per_hour)
lhf = climlab.surface.LatentHeatFlux(state=full_state, Cd=0.5E-3,
timestep=const.seconds_per_hour)
# Couple all the submodels together
model.add_subprocess('Radiation', rad)
model.add_subprocess('Convection', conv)
model.add_subprocess('SHF', shf)
model.add_subprocess('LHF', lhf)
print(model)
# Run the model
model.integrate_years(1)
# Check for energy balance
print(model.ASR - model.OLR)
'''
from __future__ import division
import numpy as np
from climlab.utils.thermo import qsat
from climlab import constants as const
from climlab.process.energy_budget import EnergyBudget
from climlab.domain.field import Field
class _SurfaceFlux(EnergyBudget):
'''Abstract parent class for SensibleHeatFlux and LatentHeatFlux'''
def __init__(self, Cd=3E-3, resistance=1., **kwargs):
super(_SurfaceFlux, self).__init__(**kwargs)
self.Cd = Cd
self.add_input('resistance', resistance)
self.heating_rate['Tatm'] = np.zeros_like(self.Tatm)
# fixed wind speed (for now)
self.add_input('U', 5. * np.ones_like(self.Ts))
# retrieving surface pressure from model grid
self.ps = self.lev_bounds[-1]
def _compute_heating_rates(self):
'''Compute energy flux convergences to get heating rates in :math:`W/m^2`.'''
self._compute_flux()
self.heating_rate['Ts'] = -self._flux
# Modify only the lowest model level
self.heating_rate['Tatm'][..., -1, np.newaxis] = self._flux
def _air_density(self, Ta):
return self.ps * const.mb_to_Pa / const.Rd / Ta
class SensibleHeatFlux(_SurfaceFlux):
r'''Surface turbulent sensible heat flux implemented through a bulk aerodynamic formula.
The flux is computed from
.. math::
SH = r ~ c_p ~\rho ~ C_D ~ U \left( T_s - T_a \right)
where:
- :math:`c_p` and :math:`\rho` are the specific heat and density of air
- :math:`C_D` is a drag coefficient (stored as ``self.Cd``, default value is 3E-3)
- :math:`U` is the near-surface wind speed, stored as ``self.U``, default value is 5 m/s
- :math:`r` is an optional resistance parameter (stored as ``self.resistance``, default value = 1)
The surface temperature :math:`T_s` is taken directly from ``self.state['Ts']``,
while the near-surface air temperature :math:`T_a` is taken as the lowest model
level in ``self.state['Tatm']``
Diagnostic quantity ``self.SHF`` gives the sensible heat flux in W/m2.
Temperature tendencies associated with this flux are computed for
``Ts`` and for the lowest model level in ``Tatm``. All other tendencies
(including air temperature tendencies at other levels) are set to zero.
'''
def __init__(self, Cd=3E-3, **kwargs):
super(SensibleHeatFlux, self).__init__(Cd=Cd, **kwargs)
self.add_diagnostic('SHF', 0.*self.Ts)
def _compute_flux(self):
# this ensure same dimensions as Ts
# (and use only the lowest model level)
Ta = Field(self.Tatm[..., -1, np.newaxis], domain=self.Ts.domain)
Ts = self.Ts
DeltaT = Ts - Ta
rho = self._air_density(Ta)
# flux from bulk formula
self._flux = self.resistance * const.cp * rho * self.Cd * self.U * DeltaT
self.SHF = self._flux
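# A rough order-of-magnitude check of the bulk formula above (illustrative numbers,
# not climlab defaults apart from Cd and U): with ps = 1000 hPa, Ta = 288 K, Ts = 290 K,
#   rho = 1000 * 100 / (Rd * Ta) = 1.0e5 / (287. * 288.) ~ 1.21 kg/m3
#   SH  = r * cp * rho * Cd * U * (Ts - Ta)
#       ~ 1. * 1004. * 1.21 * 3e-3 * 5. * 2. ~ 36 W/m2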
class LatentHeatFlux(_SurfaceFlux):
r'''Surface turbulent latent heat flux implemented through a bulk aerodynamic formula.
The flux is computed from
.. math::
LH = r ~ L ~\rho ~ C_D ~ U \left( q_s - q_a \right)
where:
- :math:`L` and :math:`\rho` are the latent heat of vaporization and density of air
- :math:`C_D` is a drag coefficient (stored as ``self.Cd``, default value is 3E-3)
- :math:`U` is the near-surface wind speed, stored as ``self.U``, default value is 5 m/s
- :math:`r` is an optional resistance parameter (stored as ``self.resistance``, default value = 1)
The surface specific humidity :math:`q_s` is computed as the saturation specific
humidity at the surface temperature ``self.state['Ts']`` and surface pressure
``self.ps``, while the near-surface specific humidity :math:`q_a` is taken as the lowest model
level in the field ``self.q`` (which must be provided either as a state variable or as input).
Two diagnostics are computed:
- ``self.LHF`` gives the sensible heat flux in W/m2.
- ``self.evaporation`` gives the evaporation rate in kg/m2/s (or mm/s)
How the tendencies are computed depends on whether specific humidity ``q``
is a state variable (i.e. is present in ``self.state``):
- If ``q`` is in ``self.state`` then the evaporation determines the specific humidity tendency ``self.tendencies['q']``. The water vapor is added to the lowest model level only. Evaporation cools the surface through the surface tendency ``self.tendencies['Ts']``. Air temperature tendencies are zero everywhere.
- If ``q`` is not in ``self.state`` then we compute an equivalent air temperature tendency for the lowest model layer instead of a specific humidity tendency (i.e. the latent heat flux is applied in the same way as a sensible heat flux).
This process does not apply a tendency to the surface water amount.
In the absence of other water processes this implies an infinite water source at the surface (slab ocean).
'''
def __init__(self, Cd=3E-3, **kwargs):
super(LatentHeatFlux, self).__init__(Cd=Cd, **kwargs)
self.add_diagnostic('LHF', 0.*self.Ts)
self.add_diagnostic('evaporation', 0.*self.Ts) # in kg/m2/s or mm/s
def _compute_flux(self):
# specific humidity at lowest model level
# assumes pressure is the last axis
q = Field(self.q[..., -1, np.newaxis], domain=self.Ts.domain)
Ta = Field(self.Tatm[..., -1, np.newaxis], domain=self.Ts.domain)
qs = qsat(self.Ts, self.ps)
Deltaq = Field(qs - q, domain=self.Ts.domain)
rho = self._air_density(Ta)
# flux from bulk formula
self._flux = self.resistance * const.Lhvap * rho * self.Cd * self.U * Deltaq
self.LHF[:] = self._flux
        # evaporation rate, convert from W/m2 to kg/m2/s (or mm/s)
self.evaporation[:] = self.LHF/const.Lhvap
def _compute(self):
        '''Overrides the _compute method of EnergyBudget'''
tendencies = self._temperature_tendencies()
if 'q' in self.state:
# in a model with active water vapor, this flux should affect
# water vapor tendency, NOT air temperature tendency!
tendencies['Tatm'] *= 0.
Pa_per_hPa = 100.
air_mass_per_area = self.Tatm.domain.lev.delta[...,-1] * Pa_per_hPa / const.g
specific_humidity_tendency = 0.*self.q
specific_humidity_tendency[...,-1,np.newaxis] = self.LHF/const.Lhvap / air_mass_per_area
tendencies['q'] = specific_humidity_tendency
return tendencies
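# Unit sketch for the moistening pathway in LatentHeatFlux._compute (illustrative):
#   LHF [W/m2] / Lhvap [J/kg]          -> evaporation [kg water / m2 / s]
#   air_mass_per_area = dp [Pa] / g    -> [kg air / m2]
#   evaporation / air_mass_per_area    -> dq/dt [kg water / kg air / s]
# i.e. the lowest model layer is moistened at a rate consistent with the surface
# latent heat flux, while Ts carries the corresponding evaporative cooling.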
|
tests/modules/contrib/test_vpn.py | spxtr/bumblebee-status | 1,089 | 11190947 | import pytest
pytest.importorskip("tkinter")
def test_load_module():
__import__("modules.contrib.vpn")
|
persephone/tests/test_transcription_preprocessing.py | a-tsioh/persephone | 133 | 11190955 | <filename>persephone/tests/test_transcription_preprocessing.py
def test_segment_into_chars():
from persephone.preprocess.labels import segment_into_chars
input_1 = "hello"
output_1 = "h e l l o"
input_2 = "hello world"
output_2 = "h e l l o w o r l d"
input_3 = "hello wo rld"
output_3 = "h e l l o w o r l d"
input_4 = "hello wo rld\r\n"
output_4 = "h e l l o w o r l d"
assert segment_into_chars(input_1) == output_1
assert segment_into_chars(input_2) == output_2
    assert segment_into_chars(input_3) == output_3
    assert segment_into_chars(input_4) == output_4
def test_segment_into_tokens():
from persephone.preprocess.labels import segment_into_tokens
from persephone.datasets.na import PHONEMES
from persephone.datasets.na import TONES
from persephone.datasets.na import SYMBOLS_TO_PREDICT
input_1 = "ə˧ʝi˧-ʂɯ˥ʝi˩ | -dʑo˩ … | ə˩-gi˩!"
output_1 = "ə ˧ ʝ i ˧ ʂ ɯ ˥ ʝ i ˩ | dʑ o ˩ | ə ˩ g i ˩"
input_2 = "ʈʂʰɯ˧ne˧ ʝi˥-kv̩˩-tsɯ˩ | -mv̩˩."
output_2 = "ʈʂʰ ɯ ˧ n e ˧ ʝ i ˥ k v̩ ˩ ts ɯ ˩ | m v̩ ˩"
input_3 = " ʈʂʰɯ˧ne˧ ʝi˥-kv̩˩-tsɯ˩ | -mv̩˩.\r\n"
output_3 = "ʈʂʰ ɯ ˧ n e ˧ ʝ i ˥ k v̩ ˩ ts ɯ ˩ | m v̩ ˩"
token_inv = PHONEMES.union(TONES).union(SYMBOLS_TO_PREDICT)
assert segment_into_tokens(input_1, token_inv) == output_1
assert segment_into_tokens(input_2, token_inv) == output_2
assert segment_into_tokens(input_3, token_inv) == output_3
def test_unicode_segmentation():
"""Test that unicode whitespace characters are correctly handled in segmentation"""
from persephone.preprocess.labels import segment_into_chars
no_break_space = "hello\u00A0world"
assert segment_into_chars(no_break_space) == "h e l l o w o r l d"
unicode_spaces = [
"\u2000", #EN QUAD
"\u2001", #EM QUAD
"\u2002", #EN SPACE
"\u2003", #EM SPACE
"\u2004", #THREE-PER-EM SPACE
"\u2005", #FOUR-PER-EM SPACE
"\u2006", #SIX-PER-EM SPACE
"\u2007", #FIGURE SPACE
"\u2008", #PUNCTUATION SPACE
"\u2009", #THIN SPACE
"\u200A", #HAIR SPACE
]
for space_character in unicode_spaces:
assert segment_into_chars("hello"+space_character+"world") == "h e l l o w o r l d" |
packages/vaex-core/vaex/registry.py | sethvargo/vaex | 337 | 11190973 | """This module contains the `register_function` decorator to add expression methods to vaex dataframe."""
import functools
import vaex.arrow
import vaex.expression
import vaex.multiprocessing
scopes = {
'str': vaex.expression.StringOperations,
'str_pandas': vaex.expression.StringOperationsPandas,
'dt': vaex.expression.DateTime,
'td': vaex.expression.TimeDelta,
'struct': vaex.expression.StructOperations
}
def register_function(scope=None, as_property=False, name=None, on_expression=True, df_accessor=None,
multiprocessing=False):
"""Decorator to register a new function with vaex.
If on_expression is True, the function will be available as a method on an
Expression, where the first argument will be the expression itself.
If `df_accessor` is given, it is added as a method to that dataframe accessor (see e.g. vaex/geo.py)
Example:
>>> import vaex
>>> df = vaex.example()
>>> @vaex.register_function()
>>> def invert(x):
>>> return 1/x
>>> df.x.invert()
>>> import numpy as np
>>> df = vaex.from_arrays(departure=np.arange('2015-01-01', '2015-12-05', dtype='datetime64'))
>>> @vaex.register_function(as_property=True, scope='dt')
>>> def dt_relative_day(x):
>>> return vaex.functions.dt_dayofyear(x)/365.
>>> df.departure.dt.relative_day
"""
import vaex.multiprocessing
prefix = ''
if scope:
prefix = scope + "_"
if scope not in scopes:
raise KeyError("unknown scope")
def wrapper(f, name=name):
name = name or f.__name__
# remove possible prefix
if name.startswith(prefix):
name = name[len(prefix):]
full_name = prefix + name
if df_accessor:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.df.func, full_name)
lazy_func = vaex.arrow.numpy_dispatch.autowrapper(lazy_func)
return vaex.multiprocessing.apply(lazy_func, args, kwargs, multiprocessing)
return functools.wraps(function)(wrapper)
if as_property:
setattr(df_accessor, name, property(closure()))
else:
setattr(df_accessor, name, closure())
else:
if on_expression:
if scope:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.expression.ds.func, full_name)
lazy_func = vaex.arrow.numpy_dispatch.autowrapper(lazy_func)
args = (self.expression,) + args
return vaex.multiprocessing.apply(lazy_func, args, kwargs, multiprocessing)
return functools.wraps(function)(wrapper)
if as_property:
setattr(scopes[scope], name, property(closure()))
else:
setattr(scopes[scope], name, closure())
else:
def closure(name=name, full_name=full_name, function=f):
def wrapper(self, *args, **kwargs):
lazy_func = getattr(self.ds.func, full_name)
lazy_func = vaex.arrow.numpy_dispatch.autowrapper(lazy_func)
args = (self,) + args
return vaex.multiprocessing.apply(lazy_func, args, kwargs, multiprocessing=multiprocessing)
return functools.wraps(function)(wrapper)
setattr(vaex.expression.Expression, name, closure())
vaex.expression.expression_namespace[prefix + name] = vaex.arrow.numpy_dispatch.autowrapper(f)
return f # we leave the original function as is
return wrapper
|
graphql_compiler/cost_estimation/__init__.py | manesioz/graphql-compiler | 521 | 11190981 | <gh_stars>100-1000
# Copyright 2019-present Kensho Technologies, LLC.
"""Query cost estimator.
Purpose
=======
Compiled GraphQL queries are sometimes too expensive to execute, in two ways:
- They return too many results: high *cardinality*.
- They require too many operations: high *execution cost*.
If these impractically-expensive queries are executed, they can overload our systems and cause the
querying system along with all dependent systems to crash.
In order to prevent this, we use schema information and graph statistics to estimate these two costs
at the GraphQL level given a query and parameters.
A separate module could then use these estimates to inform users about potentially expensive
queries, do automatic paging of the query, or suggest additions of indexes that may improve
performance.
Estimating Cardinality
======================
The *cardinality* of a query is a rough measure of the query result size and is defined as the
unfolded number of rows returned by the query.
We estimate cardinality by estimating the number of *result sets* (sets of graph vertices that match
with scopes in the query) found as the results are *expanded* (as we step through the query and
create or discard result sets).
Example:
Given the query
{
Region {
name @output(out_name: "region")
in_TropicalCyclone_LandfallRegion {
name @output(out_name: "cyclone")
}
in_Earthquake_AffectedRegion {
name @output(out_name: "earthquake")
}
}
}
and a graph with 6 Regions, 12 TropicalCyclones each linked to some Region, and 2 Earthquakes
each linked to some Region, we estimate cardinality as follows:
First, find all 6 Regions. For each Region, assuming the 12 relevant TropicalCyclones are evenly
distributed among the 6 Regions, we expect 12/6=2 TropicalCyclones connected to each Region. So,
after *expanding* each Region (going through each one and finding connected TropicalCyclones),
we expect 6*2=12 *result sets* (subgraphs of a Region vertex connected to a TropicalCyclone
vertex). Next, we expect only 2/6=.33 result sets in the *subexpansion* associated with
Earthquakes (expanding each Region looking just for Earthquakes). So of the 12 TropicalCyclone
result sets, we expect 12*.33=4 complete result sets for the full query (i.e. the query has
estimated cardinality of 4).
Approach Details:
Following this expansion model, we can think of queries as trees and find the number of expected
result sets as we recursively traverse the tree (i.e. step through the expansion).
Our calculation depends on two types of values:
(1) The root result set count (e.g. the 6 Regions in the graph)
(2) The expected result set count per parent (e.g. .33 Earthquake result sets per Region)
Both can be calculated with graph counts for every type in the schema which must be externally
provided. (1) can be looked up directly and (2) can be approximated as the number of
parent-child edges divided up over parent vertices present in the graph.
Type casting and directives can affect these calculations in many different ways. We naively
handle type casting, as well as optional, fold, recurse, and some filter directives. Additional
statistics can be recorded to improve the coverage and accuracy of these adjustments.
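For reference, the example above works out numerically as follows (the variable
names are illustrative only, not part of this package's API):
    region_count = 6                 # (1) root result set count
    tropical_cyclone_count = 12
    earthquake_count = 2
    # (2) expected result sets per parent: child edges spread evenly over parents
    cyclones_per_region = tropical_cyclone_count / region_count      # 12/6 = 2
    earthquakes_per_region = earthquake_count / region_count         # 2/6 ~ 0.33
    estimated_cardinality = (region_count
                             * cyclones_per_region
                             * earthquakes_per_region)               # 6 * 2 * 0.33 ~ 4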
TODOs
=====
- Estimate execution cost by augmenting the cardinality calculation.
- Add recurse handling.
- Add additional statistics to improve directive coverage (e.g. histograms
to better model more filter operations).
"""
|
jacinle/random/rng.py | dapatil211/Jacinle | 114 | 11190992 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : rng.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 01/19/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import os
import random as sys_random
import numpy as np
import numpy.random as npr
from jacinle.utils.defaults import defaults_manager
from jacinle.utils.registry import Registry
__all__ = ['JacRandomState', 'get_default_rng', 'gen_seed', 'gen_rng', 'reset_global_seed']
class JacRandomState(npr.RandomState):
    def choice_list(self, list_, size=1, replace=False, p=None):
        """Efficiently draw an element from a list; if the rng is given, use it instead of the system one."""
if size == 1:
if type(list_) in (list, tuple):
return list_[self.choice(len(list_), p=p)]
return self.choice(list_, p=p)
else:
if type(list_) in (list, tuple):
inds = self.choice(len(list_), size=size, replace=replace, p=p)
return [list_[i] for i in inds]
return self.choice(list_, size=size, replace=replace, p=p)
def shuffle_list(self, list_):
if type(list_) is list:
sys_random.shuffle(list_, random=self.random_sample)
else:
self.shuffle(list_)
def shuffle_multi(self, *arrs):
length = len(arrs[0])
for a in arrs:
assert len(a) == length, 'non-compatible length when shuffling multiple arrays'
inds = np.arange(length)
self.shuffle(inds)
return tuple(map(lambda x: x[inds], arrs))
@defaults_manager.wrap_custom_as_default(is_local=True)
def as_default(self):
yield self
_rng = JacRandomState()
get_default_rng = defaults_manager.gen_get_default(JacRandomState, default_getter=lambda: _rng)
def gen_seed():
return get_default_rng().randint(4294967296)
def gen_rng(seed=None):
return JacRandomState(seed)
global_rng_registry = Registry()
global_rng_registry.register('jacinle', lambda: _rng.seed)
global_rng_registry.register('numpy', lambda: npr.seed)
global_rng_registry.register('sys', lambda: sys_random.seed)
def reset_global_seed(seed=None, verbose=False):
if seed is None:
seed = gen_seed()
for k, seed_getter in global_rng_registry.items():
if verbose:
from jacinle.logging import get_logger
logger = get_logger(__file__)
logger.critical('Reset random seed for: {} (pid={}, seed={}).'.format(k, os.getpid(), seed))
seed_getter()(seed)
def _initialize_global_seed():
seed = os.getenv('JAC_RANDOM_SEED', None)
if seed is not None:
reset_global_seed(seed)
_initialize_global_seed()
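# Small usage sketch (illustrative only): the default rng behaves like a numpy
# RandomState with a few list-friendly helpers, and the global seed can be pinned
# explicitly or via the JAC_RANDOM_SEED environment variable.
#
#   rng = gen_rng(42)
#   first = rng.choice_list(['a', 'b', 'c'])
#   xs, ys = rng.shuffle_multi(np.arange(5), np.arange(5) * 10)
#   reset_global_seed(42, verbose=True)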
|